반응형
안녕하세요!
이번에는 VGG에 대해 공부한 것을 작성해보겠습니다. (참고: 아래 코드는 VGG16이 아니라 VGG 구성 A, 즉 VGG11에 해당하는 구조입니다.)
잘못된 게 있다면 조언, 충고 부탁합니다 :)
import torch.nn as nn
import torch
# One random RGB sample in NCHW layout: batch 1, 3 channels, 256x256 pixels.
x = torch.randn(1, 3, 256, 256).float()
class VGG_A(nn.Module):
    """VGG configuration 'A' (VGG11): 8 convolutional layers + 3 FC layers.

    The classifier head is hard-wired to ``512 * 8 * 8`` features, so the
    network expects 3-channel input of spatial size 256x256 (the five 2x2
    max-pools reduce 256 -> 8).

    Args:
        num_classes: size of the final classification layer.
        init_weights: when True, apply Kaiming init to conv layers and
            N(0, 0.01) init to linear layers (zero biases), mirroring the
            torchvision VGG reference implementation.
    """

    def __init__(self, num_classes: int = 1000, init_weights: bool = True):
        super().__init__()
        self.convnet = nn.Sequential(
            # Input channels (RGB: 3)
            nn.Conv2d(in_channels=3, out_channels=64, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 256 -> 128
            nn.Conv2d(in_channels=64, out_channels=128, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 128 -> 64
            nn.Conv2d(in_channels=128, out_channels=256, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=256, out_channels=256, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 64 -> 32
            nn.Conv2d(in_channels=256, out_channels=512, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 32 -> 16
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1, stride=1),
            nn.ReLU(inplace=True),
            nn.MaxPool2d(kernel_size=2, stride=2),  # 16 -> 8
        )
        self.fclayer = nn.Sequential(
            nn.Linear(512 * 8 * 8, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(p=0.5),
            nn.Linear(4096, num_classes),
            # No Softmax here: CrossEntropyLoss applies log-softmax internally.
        )
        # BUG FIX: the original accepted `init_weights` but never used it,
        # so no explicit initialization ever ran regardless of the flag.
        if init_weights:
            self._initialize_weights()

    def _initialize_weights(self) -> None:
        # Same scheme as torchvision's VGG: Kaiming (fan_out, ReLU) for conv,
        # N(0, 0.01) for linear weights, zeros for all biases.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out", nonlinearity="relu")
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return raw class logits of shape (N, num_classes)."""
        x = self.convnet(x)
        x = torch.flatten(x, 1)  # (N, 512, 8, 8) -> (N, 512 * 8 * 8)
        x = self.fclayer(x)
        return x
# Smoke test: run the randomly initialized network on the sample input above.
vgg16 = VGG_A(num_classes=2)  # two-way classification head
y = vgg16(x)
print(y.shape)  # expected: torch.Size([1, 2])
(수정중에 있습니다.)
반응형
'파이썬' 카테고리의 다른 글
[Pytorch] resnet18 pretrained 저장 장소 (0) | 2023.01.12 |
---|
댓글