diff --git a/.~lock.2024 - ECE new poster presentation template - landscape.pptx# b/.~lock.2024 - ECE new poster presentation template - landscape.pptx#
new file mode 100644
index 0000000..e76c378
--- /dev/null
+++ b/.~lock.2024 - ECE new poster presentation template - landscape.pptx#
@@ -0,0 +1 @@
+,sharpe,dhcp-150-250-221-61,25.04.2024 11:36,file:///home/sharpe/.var/app/org.libreoffice.LibreOffice/config/libreoffice/4;
\ No newline at end of file
diff --git a/2024 - ECE new poster presentation template - landscape.pptx b/2024 - ECE new poster presentation template - landscape.pptx
new file mode 100644
index 0000000..4b87a90
Binary files /dev/null and b/2024 - ECE new poster presentation template - landscape.pptx differ
diff --git a/Filter_Analysis/__pycache__/vgg.cpython-311.pyc b/Filter_Analysis/__pycache__/vgg.cpython-311.pyc
new file mode 100644
index 0000000..c92bc5f
Binary files /dev/null and b/Filter_Analysis/__pycache__/vgg.cpython-311.pyc differ
diff --git a/Filter_Analysis/cifar10.py b/Filter_Analysis/cifar10.py
index 6c1dd29..3d0578c 100644
--- a/Filter_Analysis/cifar10.py
+++ b/Filter_Analysis/cifar10.py
@@ -11,46 +11,62 @@ import torch.nn as nn
 import torch.nn.functional as F

 #import dla
+import vgg

-EPOCHS = 200
+EPOCHS = 40
+
+class CnnBlock(nn.Module):
+    def __init__(self, in_channels=3, out_channels=16):
+        super(CnnBlock, self).__init__()
+        self.conv1 = nn.Conv2d(in_channels, 2*in_channels, 3, 1)
+        self.conv2 = nn.Conv2d(2*in_channels, out_channels, 3, 1)
+        self.bn1 = nn.BatchNorm2d(2*in_channels)
+        self.bn2 = nn.BatchNorm2d(out_channels)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = F.relu(x)
+        x = self.conv2(x)
+        x = self.bn2(x)
+        x = F.relu(x)
+        return x

 class CifarCNN(nn.Module):
     def __init__(self):
         super(CifarCNN, self).__init__()
-        self.conv1 = nn.Conv2d(3, 96, 3, 1)
-        self.conv2 = nn.Conv2d(96, 192, 3, 1)
+        self.block1 = CnnBlock(3, 16)
+        self.block2 = CnnBlock(16, 64)
+        self.block3 = CnnBlock(64, 128)
         self.dropout1 = nn.Dropout(0.25)
         self.dropout2 = nn.Dropout(0.5)
-        self.fc1 = nn.Linear(37632, 128)
-        self.fc2 = nn.Linear(128, 10)
+        self.fc1 = nn.Linear(128, 10)

     def forward(self, x):
-        x = self.conv1(x)
-        x = F.relu(x)
-        x = self.conv2(x)
-        x = F.relu(x)
-        x = F.max_pool2d(x,2)
+        x = self.block1(x)
         x = self.dropout1(x)
+        x = F.max_pool2d(x,2)
+        x = self.block2(x)
+        x = F.max_pool2d(x,2)
+        x = self.dropout2(x)
+        x = self.block3(x)
         x = torch.flatten(x,1)
         x = self.fc1(x)
-        x = F.relu(x)
-        x = self.dropout2(x)
-        x = self.fc2(x)
         output = F.log_softmax(x, dim=1)
         return output

-def train(model, trainloader, optimizer, epoch):
+def train(model, trainloader, device, optimizer, criterion, epoch):
     running_loss = 0.0
     for i, [data, target] in enumerate(trainloader, 0):
-
+        data, target = data.to(device), target.to(device)
         # zero the parameter gradients
         optimizer.zero_grad()

         # forward + backward + optimize
         output = model(data)
-        loss = F.nll_loss(output, target)
+        loss = criterion(output, target)
         loss.backward()
         optimizer.step()
@@ -61,13 +77,14 @@ def train(model, trainloader, optimizer, epoch):
             running_loss = 0.0

-def test(model, testloader, classes):
+def test(model, testloader, device, classes):
     correct = 0
     total = 0
     # since we're not training, we don't need to calculate the gradients for our outputs
     with torch.no_grad():
         for data, target in testloader:
+            data, target = data.to(device), target.to(device)
             # calculate outputs by running images through the network
             output = model(data)
             # the class with the highest energy is what we choose as prediction
@@ -85,6 +102,7 @@ def test(model, testloader, classes):
     # again no gradients needed
     with torch.no_grad():
         for data, target in testloader:
+            data, target = data.to(device), target.to(device)
             output = model(data)
             _, predictions = torch.max(output, 1)
             # collect the correct predictions for each class
@@ -121,15 +139,16 @@ def main():
                'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

     device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-    model = CifarCNN().to(device)
+    model = vgg.VGG('VGG16').to(device)

-    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
+    optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.9)
+    criterion = nn.CrossEntropyLoss()

     for epoch in range(EPOCHS):
-        train(model, trainloader, optimizer, epoch)
-        test(model, testloader, classes)
+        train(model, trainloader, device, optimizer, criterion, epoch)
+        test(model, testloader, device, classes)

-    PATH = './cifar_cnn.pth'
+    PATH = './cifar_vgg.pth'
     torch.save(model.state_dict(), PATH)
diff --git a/Filter_Analysis/cifar_cnn.pth b/Filter_Analysis/cifar_cnn.pth
new file mode 100644
index 0000000..a9e54cd
Binary files /dev/null and b/Filter_Analysis/cifar_cnn.pth differ
diff --git a/Filter_Analysis/vgg.py b/Filter_Analysis/vgg.py
new file mode 100644
index 0000000..08347ff
--- /dev/null
+++ b/Filter_Analysis/vgg.py
@@ -0,0 +1,47 @@
+'''VGG11/13/16/19 in Pytorch.'''
+import torch
+import torch.nn as nn
+
+
+cfg = {
+    'VGG11': [64, 'M', 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
+    'VGG13': [64, 64, 'M', 128, 128, 'M', 256, 256, 'M', 512, 512, 'M', 512, 512, 'M'],
+    'VGG16': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 'M', 512, 512, 512, 'M', 512, 512, 512, 'M'],
+    'VGG19': [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512, 'M'],
+}
+
+
+class VGG(nn.Module):
+    def __init__(self, vgg_name):
+        super(VGG, self).__init__()
+        self.features = self._make_layers(cfg[vgg_name])
+        self.classifier = nn.Linear(512, 10)
+
+    def forward(self, x):
+        out = self.features(x)
+        out = out.view(out.size(0), -1)
+        out = self.classifier(out)
+        return out
+
+    def _make_layers(self, cfg):
+        layers = []
+        in_channels = 3
+        for x in cfg:
+            if x == 'M':
+                layers += [nn.MaxPool2d(kernel_size=2, stride=2)]
+            else:
+                layers += [nn.Conv2d(in_channels, x, kernel_size=3, padding=1),
+                           nn.BatchNorm2d(x),
+                           nn.ReLU(inplace=True)]
+                in_channels = x
+        layers += [nn.AvgPool2d(kernel_size=1, stride=1)]
+        return nn.Sequential(*layers)
+
+
+def test():
+    net = VGG('VGG11')
+    x = torch.randn(2,3,32,32)
+    y = net(x)
+    print(y.size())
+
+# test()
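
Note on the CnnBlock refactor above: each block applies two unpadded 3x3 convolutions, and the forward pass interleaves two 2x2 max-pools, so a 32x32 CIFAR-10 input shrinks 32 -> 28 -> 14 -> 10 -> 5 -> 1 spatially, leaving 128 channels at 1x1. That is why a single nn.Linear(128, 10) replaces the old fc1/fc2 pair. A minimal shape check (a sketch, not part of the commit; it assumes Filter_Analysis/cifar10.py keeps its training entry point behind an if __name__ == '__main__' guard so the import has no side effects):

    # shape_check.py -- hypothetical helper, not part of this diff
    import torch
    from cifar10 import CifarCNN  # assumes cifar10.py imports side-effect-free

    model = CifarCNN()
    x = torch.randn(2, 3, 32, 32)   # two fake CIFAR-10 images
    y = model(x)                    # 32->28->14->10->5->1 spatially, 128 channels
    print(y.shape)                  # expected: torch.Size([2, 10])

One caveat: CifarCNN still ends in F.log_softmax, which pairs with the old F.nll_loss; if it were ever trained through the new nn.CrossEntropyLoss criterion, log-softmax would effectively be applied twice.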
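
Since main() now writes the weights to './cifar_vgg.pth', reloading the trained network for later evaluation could look like this (again a sketch; only vgg.VGG('VGG16') and the PATH come from the diff, the rest is standard PyTorch):

    # load_vgg.py -- hypothetical helper, not part of this diff
    import torch
    import vgg

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    model = vgg.VGG('VGG16').to(device)
    model.load_state_dict(torch.load('./cifar_vgg.pth', map_location=device))
    model.eval()  # freeze dropout / batch-norm statistics for inference

    with torch.no_grad():
        logits = model(torch.randn(1, 3, 32, 32, device=device))
        print(logits.argmax(dim=1))  # predicted class index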