Trained CIFAR-10 CNN, 60% accuracy, switching to DLA
parent 1f289157e2, commit b4a1aee0db

BIN  Filter_Analysis/__pycache__/cifar10.cpython-312.pyc (new file; binary file not shown)
BIN  Filter_Analysis/__pycache__/dla.cpython-312.pyc (new file; binary file not shown)
Filter_Analysis/cifar10.py
@@ -1,181 +1,29 @@
-# -*- coding: utf-8 -*-
-"""
-Training a Classifier
-=====================
-
-This is it. You have seen how to define neural networks, compute loss and make
-updates to the weights of the network.
-
-Now you might be thinking,
-
-What about data?
-----------------
-
-Generally, when you have to deal with image, text, audio or video data,
-you can use standard python packages that load data into a numpy array.
-Then you can convert this array into a ``torch.*Tensor``.
-
-- For images, packages such as Pillow, OpenCV are useful
-- For audio, packages such as scipy and librosa
-- For text, either raw Python or Cython based loading, or NLTK and
-  SpaCy are useful
-
-Specifically for vision, we have created a package called
-``torchvision``, that has data loaders for common datasets such as
-ImageNet, CIFAR10, MNIST, etc. and data transformers for images, viz.,
-``torchvision.datasets`` and ``torch.utils.data.DataLoader``.
-
-This provides a huge convenience and avoids writing boilerplate code.
-
-For this tutorial, we will use the CIFAR10 dataset.
-It has the classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’,
-‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’. The images in CIFAR-10 are of
-size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size.
-
-.. figure:: /_static/img/cifar10.png
-   :alt: cifar10
-
-   cifar10
-
-
-Training an image classifier
-----------------------------
-
-We will do the following steps in order:
-
-1. Load and normalize the CIFAR10 training and test datasets using
-   ``torchvision``
-2. Define a Convolutional Neural Network
-3. Define a loss function
-4. Train the network on the training data
-5. Test the network on the test data
-
-1. Load and normalize CIFAR10
-^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-
-Using ``torchvision``, it’s extremely easy to load CIFAR10.
-"""
 import torch
 import torchvision
 import torchvision.transforms as transforms
-
-########################################################################
-# The output of torchvision datasets are PILImage images of range [0, 1].
-# We transform them to Tensors of normalized range [-1, 1].
-
-########################################################################
-# .. note::
-#     If running on Windows and you get a BrokenPipeError, try setting
-#     the num_worker of torch.utils.data.DataLoader() to 0.
-
-transform = transforms.Compose(
-    [transforms.ToTensor(),
-     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
-
-batch_size = 4
-
-trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
-                                        download=True, transform=transform)
-trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
-                                          shuffle=True, num_workers=2)
-
-testset = torchvision.datasets.CIFAR10(root='./data', train=False,
-                                       download=True, transform=transform)
-testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
-                                         shuffle=False, num_workers=2)
-
-classes = ('plane', 'car', 'bird', 'cat',
-           'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
-
-########################################################################
-# Let us show some of the training images, for fun.
-
 import matplotlib.pyplot as plt
 import numpy as np
-
-# functions to show an image
-
-def imshow(img):
-    img = img / 2 + 0.5     # unnormalize
-    npimg = img.numpy()
-    plt.imshow(np.transpose(npimg, (1, 2, 0)))
-    plt.show()
-
-# get some random training images
-dataiter = iter(trainloader)
-images, labels = next(dataiter)
-
-# show images
-imshow(torchvision.utils.make_grid(images))
-# print labels
-print(' '.join(f'{classes[labels[j]]:5s}' for j in range(batch_size)))
-
-########################################################################
-# 2. Define a Convolutional Neural Network
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-# Copy the neural network from the Neural Networks section before and modify it to
-# take 3-channel images (instead of 1-channel images as it was defined).
+import torch.optim as optim
 
 import torch.nn as nn
 import torch.nn.functional as F
+import dla
 
-class Net(nn.Module):
-    def __init__(self):
-        super().__init__()
-        self.conv1 = nn.Conv2d(3, 6, 5)
-        self.pool = nn.MaxPool2d(2, 2)
-        self.conv2 = nn.Conv2d(6, 16, 5)
-        self.fc1 = nn.Linear(16 * 5 * 5, 120)
-        self.fc2 = nn.Linear(120, 84)
-        self.fc3 = nn.Linear(84, 10)
-
-    def forward(self, x):
-        x = self.pool(F.relu(self.conv1(x)))
-        x = self.pool(F.relu(self.conv2(x)))
-        x = torch.flatten(x, 1) # flatten all dimensions except batch
-        x = F.relu(self.fc1(x))
-        x = F.relu(self.fc2(x))
-        x = self.fc3(x)
-        return x
-
-net = Net()
-
-########################################################################
-# 3. Define a Loss function and optimizer
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-# Let's use a Classification Cross-Entropy loss and SGD with momentum.
-
-import torch.optim as optim
-
-criterion = nn.CrossEntropyLoss()
-optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9)
-
-########################################################################
-# 4. Train the network
-# ^^^^^^^^^^^^^^^^^^^^
-#
-# This is when things start to get interesting.
-# We simply have to loop over our data iterator, and feed the inputs to the
-# network and optimize.
-
-for epoch in range(2):  # loop over the dataset multiple times
-
+def train(model, trainloader, optimizer, epoch):
     running_loss = 0.0
-    for i, data in enumerate(trainloader, 0):
-        # get the inputs; data is a list of [inputs, labels]
-        inputs, labels = data
-
+    for i, [data, target] in enumerate(trainloader, 0):
         # zero the parameter gradients
        optimizer.zero_grad()
 
         # forward + backward + optimize
-        outputs = net(inputs)
-        loss = criterion(outputs, labels)
+        outputs = model(data)
+        criterion = nn.CrossEntropyLoss()
+        loss = criterion(outputs, target)
         loss.backward()
         optimizer.step()
@@ -185,183 +33,78 @@ for epoch in range(2):  # loop over the dataset multiple times
             print(f'[{epoch + 1}, {i + 1:5d}] loss: {running_loss / 2000:.3f}')
             running_loss = 0.0
 
-print('Finished Training')
-
-########################################################################
-# Let's quickly save our trained model:
-
-PATH = './cifar_net.pth'
-torch.save(net.state_dict(), PATH)
-
-########################################################################
-# See `here <https://pytorch.org/docs/stable/notes/serialization.html>`_
-# for more details on saving PyTorch models.
-#
-# 5. Test the network on the test data
-# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-#
-# We have trained the network for 2 passes over the training dataset.
-# But we need to check if the network has learnt anything at all.
-#
-# We will check this by predicting the class label that the neural network
-# outputs, and checking it against the ground-truth. If the prediction is
-# correct, we add the sample to the list of correct predictions.
-#
-# Okay, first step. Let us display an image from the test set to get familiar.
-
-dataiter = iter(testloader)
-images, labels = next(dataiter)
-
-# print images
-imshow(torchvision.utils.make_grid(images))
-print('GroundTruth: ', ' '.join(f'{classes[labels[j]]:5s}' for j in range(4)))
-
-########################################################################
-# Next, let's load back in our saved model (note: saving and re-loading the model
-# wasn't necessary here, we only did it to illustrate how to do so):
-
-net = Net()
-net.load_state_dict(torch.load(PATH))
-
-########################################################################
-# Okay, now let us see what the neural network thinks these examples above are:
-
-outputs = net(images)
-
-########################################################################
-# The outputs are energies for the 10 classes.
-# The higher the energy for a class, the more the network
-# thinks that the image is of the particular class.
-# So, let's get the index of the highest energy:
-_, predicted = torch.max(outputs, 1)
-
-print('Predicted: ', ' '.join(f'{classes[predicted[j]]:5s}'
-                              for j in range(4)))
-
-########################################################################
-# The results seem pretty good.
-#
-# Let us look at how the network performs on the whole dataset.
-
-correct = 0
-total = 0
-# since we're not training, we don't need to calculate the gradients for our outputs
-with torch.no_grad():
-    for data in testloader:
-        images, labels = data
-        # calculate outputs by running images through the network
-        outputs = net(images)
-        # the class with the highest energy is what we choose as prediction
-        _, predicted = torch.max(outputs.data, 1)
-        total += labels.size(0)
-        correct += (predicted == labels).sum().item()
-
-print(f'Accuracy of the network on the 10000 test images: {100 * correct // total} %')
-
-########################################################################
-# That looks way better than chance, which is 10% accuracy (randomly picking
-# a class out of 10 classes).
-# Seems like the network learnt something.
-#
-# Hmmm, what are the classes that performed well, and the classes that did
-# not perform well:
-
-# prepare to count predictions for each class
-correct_pred = {classname: 0 for classname in classes}
-total_pred = {classname: 0 for classname in classes}
-
-# again no gradients needed
-with torch.no_grad():
-    for data in testloader:
-        images, labels = data
-        outputs = net(images)
-        _, predictions = torch.max(outputs, 1)
-        # collect the correct predictions for each class
-        for label, prediction in zip(labels, predictions):
-            if label == prediction:
-                correct_pred[classes[label]] += 1
-            total_pred[classes[label]] += 1
-
-# print accuracy for each class
-for classname, correct_count in correct_pred.items():
-    accuracy = 100 * float(correct_count) / total_pred[classname]
-    print(f'Accuracy for class: {classname:5s} is {accuracy:.1f} %')
-
-########################################################################
-# Okay, so what next?
-#
-# How do we run these neural networks on the GPU?
-#
-# Training on GPU
-# ----------------
-# Just like how you transfer a Tensor onto the GPU, you transfer the neural
-# net onto the GPU.
-#
-# Let's first define our device as the first visible cuda device if we have
-# CUDA available:
-
-device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
-
-# Assuming that we are on a CUDA machine, this should print a CUDA device:
-
-print(device)
-
-########################################################################
-# The rest of this section assumes that ``device`` is a CUDA device.
-#
-# Then these methods will recursively go over all modules and convert their
-# parameters and buffers to CUDA tensors:
-#
-# .. code:: python
-#
-#     net.to(device)
-#
-#
-# Remember that you will have to send the inputs and targets at every step
-# to the GPU too:
-#
-# .. code:: python
-#
-#     inputs, labels = data[0].to(device), data[1].to(device)
-#
-# Why don't I notice MASSIVE speedup compared to CPU? Because your network
-# is really small.
-#
-# **Exercise:** Try increasing the width of your network (argument 2 of
-# the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` –
-# they need to be the same number), see what kind of speedup you get.
-#
-# **Goals achieved**:
-#
-# - Understanding PyTorch's Tensor library and neural networks at a high level.
-# - Train a small neural network to classify images
-#
-# Training on multiple GPUs
-# -------------------------
-# If you want to see even more MASSIVE speedup using all of your GPUs,
-# please check out :doc:`data_parallel_tutorial`.
-#
-# Where do I go next?
-# -------------------
-#
-# - :doc:`Train neural nets to play video games </intermediate/reinforcement_q_learning>`
-# - `Train a state-of-the-art ResNet network on imagenet`_
-# - `Train a face generator using Generative Adversarial Networks`_
-# - `Train a word-level language model using Recurrent LSTM networks`_
-# - `More examples`_
-# - `More tutorials`_
-# - `Discuss PyTorch on the Forums`_
-# - `Chat with other users on Slack`_
-#
-# .. _Train a state-of-the-art ResNet network on imagenet: https://github.com/pytorch/examples/tree/master/imagenet
-# .. _Train a face generator using Generative Adversarial Networks: https://github.com/pytorch/examples/tree/master/dcgan
-# .. _Train a word-level language model using Recurrent LSTM networks: https://github.com/pytorch/examples/tree/master/word_language_model
-# .. _More examples: https://github.com/pytorch/examples
-# .. _More tutorials: https://github.com/pytorch/tutorials
-# .. _Discuss PyTorch on the Forums: https://discuss.pytorch.org/
-# .. _Chat with other users on Slack: https://pytorch.slack.com/messages/beginner/
-
-# %%%%%%INVISIBLE_CODE_BLOCK%%%%%%
-del dataiter
-# %%%%%%INVISIBLE_CODE_BLOCK%%%%%%
+
+def test(model, testloader, classes):
+    correct = 0
+    total = 0
+    # since we're not training, we don't need to calculate the gradients for our outputs
+    with torch.no_grad():
+        for data, target in testloader:
+            # calculate outputs by running images through the network
+            outputs = model(data)
+            # the class with the highest energy is what we choose as prediction
+            _, predicted = torch.max(outputs.data, 1)
+            total += target.size(0)
+            correct += (predicted == target).sum().item()
+
+    print(f'Accuracy of the network on the 10000 test images: {100 * correct // total} %')
+
+    # prepare to count predictions for each class
+    correct_pred = {classname: 0 for classname in classes}
+    total_pred = {classname: 0 for classname in classes}
+
+    # again no gradients needed
+    with torch.no_grad():
+        for data, target in testloader:
+            outputs = model(data)
+            _, predictions = torch.max(outputs, 1)
+            # collect the correct predictions for each class
+            for label, prediction in zip(target, predictions):
+                if label == prediction:
+                    correct_pred[classes[label]] += 1
+                total_pred[classes[label]] += 1
+
+    # print accuracy for each class
+    for classname, correct_count in correct_pred.items():
+        accuracy = 100 * float(correct_count) / total_pred[classname]
+        print(f'Accuracy for class: {classname:5s} is {accuracy:.1f} %')
+
+
+def main():
+    transform = transforms.Compose(
+        [transforms.ToTensor(),
+         transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+
+    batch_size = 4
+
+    trainset = torchvision.datasets.CIFAR10(root='./data', train=True,
+                                            download=True, transform=transform)
+    trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size,
+                                              shuffle=True, num_workers=2)
+
+    testset = torchvision.datasets.CIFAR10(root='./data', train=False,
+                                           download=True, transform=transform)
+    testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size,
+                                             shuffle=False, num_workers=2)
+
+    classes = ('plane', 'car', 'bird', 'cat',
+               'deer', 'dog', 'frog', 'horse', 'ship', 'truck')
+
+    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
+    model = dla.DLA().to(device)
+
+    optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9)
+
+    for epoch in range(14):
+        train(model, trainloader, optimizer, epoch)
+        test(model, testloader, classes)
+
+    PATH = './cifar_net.pth'
+    torch.save(model.state_dict(), PATH)
+
+
+if __name__ == "__main__":
+    main()
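The refactored train() feeds batches to the model exactly as they come off the loader, while main() moves the DLA model to device. A minimal sketch of a training step that also moves each batch onto the model's device; the device handling and the train_step helper are my additions for illustration, not part of the commit:

    def train_step(model, criterion, optimizer, data, target, device):
        # Move the batch to the same device as the model (a no-op on a CPU-only machine)
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()              # clear gradients from the previous step
        outputs = model(data)              # forward pass
        loss = criterion(outputs, target)  # cross-entropy against the true labels
        loss.backward()                    # backpropagate
        optimizer.step()                   # SGD-with-momentum update
        return loss.item()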
BIN  Filter_Analysis/cifar_net.pth (new file; binary file not shown)
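The saved weights can be restored for evaluation in the usual PyTorch way; a hedged sketch, assuming the checkpoint was written by the new main() from a dla.DLA model:

    import torch
    import dla

    model = dla.DLA()
    model.load_state_dict(torch.load('./cifar_net.pth', map_location='cpu'))
    model.eval()  # inference mode: use running batch-norm statistics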
Filter_Analysis/defense_filters.py
@@ -20,33 +20,33 @@ def pttensor_to_images(data):
     return images


-def gaussian_kuwahara(data, batch_size=64, radius=5):
+def gaussian_kuwahara(data, dimensions, radius=5):
     images = pttensor_to_images(data)
-    filtered_images = np.ndarray((batch_size,28,28,1))
+    filtered_images = np.ndarray(dimensions)

-    for i in range(batch_size):
+    for i in range(dimensions[0]):
         filtered_images[i] = kuwahara(images[i], method='gaussian', radius=radius, image_2d=images[i])

     filtered_images = filtered_images.transpose(0,3,1,2)
     return torch.tensor(filtered_images).float()


-def mean_kuwahara(data, batch_size=64, radius=5):
+def mean_kuwahara(data, dimensions, radius=5):
     images = pttensor_to_images(data)
-    filtered_images = np.ndarray((batch_size,28,28,1))
+    filtered_images = np.ndarray(dimensions)

-    for i in range(batch_size):
+    for i in range(dimensions[0]):
         filtered_images[i] = kuwahara(images[i], method='mean', radius=radius, image_2d=images[i])

     filtered_images = filtered_images.transpose(0,3,1,2)
     return torch.tensor(filtered_images).float()


-def random_noise(data, batch_size=64, intensity=0.001):
+def random_noise(data, dimensions, intensity=0.001):
     images = pttensor_to_images(data)
-    filtered_images = np.ndarray((batch_size,28,28,1))
+    filtered_images = np.ndarray(dimensions)

-    for i in range(batch_size):
+    for i in range(dimensions[0]):
         mean = 0
         stddev = 180
         noise = np.zeros(images[i].shape, images[i].dtype)
@@ -57,33 +57,33 @@ def random_noise(data, batch_size=64, intensity=0.001):
     return torch.tensor(filtered_images).float()


-def gaussian_blur(data, batch_size=64, ksize=(5,5)):
+def gaussian_blur(data, dimensions, ksize=(5,5)):
     images = pttensor_to_images(data)
-    filtered_images = np.ndarray((batch_size,28,28,1))
+    filtered_images = np.ndarray(dimensions)

-    for i in range(batch_size):
+    for i in range(dimensions[0]):
         filtered_images[i] = cv2.GaussianBlur(images[i], ksize=ksize, sigmaX=0).reshape(filtered_images[i].shape)

     filtered_images = filtered_images.transpose(0,3,1,2)
     return torch.tensor(filtered_images).float()


-def bilateral_filter(data, batch_size=64, d=5, sigma=50):
+def bilateral_filter(data, dimensions, d=5, sigma=50):
     images = pttensor_to_images(data)
-    filtered_images = np.ndarray((batch_size,28,28,1))
+    filtered_images = np.ndarray(dimensions)

-    for i in range(batch_size):
+    for i in range(dimensions[0]):
         filtered_images[i] = cv2.bilateralFilter(images[i], d, sigma, sigma).reshape(filtered_images[i].shape)

     filtered_images = filtered_images.transpose(0,3,1,2)
     return torch.tensor(filtered_images).float()


-def threshold_filter(data, batch_size=64, threshold=0.5):
+def threshold_filter(data, dimensions, threshold=0.5):
     images = pttensor_to_images(data)
-    filtered_images = np.ndarray((batch_size,28,28,1))
+    filtered_images = np.ndarray(dimensions)

-    for i in range(batch_size):
+    for i in range(dimensions[0]):
         # If the channel contains any negative values, define the lowest negative value as black
         min_value = np.min(images[i])
         if min_value < 0:
@@ -104,11 +104,11 @@ def threshold_filter(data, batch_size=64, threshold=0.5):
     return torch.tensor(filtered_images).float()


-def bit_depth(data, batch_size=64, bits=16):
+def bit_depth(data, dimensions, bits=16):
     images = pttensor_to_images(data)
-    filtered_images = np.ndarray((batch_size,28,28,1))
+    filtered_images = np.ndarray(dimensions)

-    for i in range(batch_size):
+    for i in range(dimensions[0]):
         filtered_images[i] = (images[i]*(2**bits)).astype(int).astype(float)/(2**bits)

     filtered_images = filtered_images.transpose(0,3,1,2)
@@ -125,23 +125,23 @@ Filter Options:
 - bilateral_filter
 - bit_depth
 '''
-def filtered(data, batch_size=64, strength=0, filter="gaussian_blur"):
+def filtered(data, dimensions, strength=0, filter="gaussian_blur"):
     if filter == "threshold_filter":
         threshold = (2*strength + 1) / 10
-        return threshold_filter(data, batch_size, threshold)
+        return threshold_filter(data, dimensions, threshold)
     elif filter == "bit_depth":
         bits = 2**strength
-        return bit_depth(data, batch_size, bits)
+        return bit_depth(data, dimensions, bits)
     elif filter == "random_noise":
         intensity = 0.0005*(2*strength + 1)
-        return random_noise(data, batch_size, intensity)
+        return random_noise(data, dimensions, intensity)
     else:
         strength = (2*strength + 1)
         if filter == "gaussian_blur":
-            return gaussian_blur(data, batch_size, ksize=(strength, strength))
+            return gaussian_blur(data, dimensions, ksize=(strength, strength))
         elif filter == "bilateral_filter":
-            return bilateral_filter(data, batch_size, d=strength)
+            return bilateral_filter(data, dimensions, d=strength)
         elif filter == "gaussian_kuwahara":
-            return gaussian_kuwahara(data, batch_size, strength)
+            return gaussian_kuwahara(data, dimensions, strength)
         elif filter == "mean_kuwahara":
-            return mean_kuwahara(data, batch_size, strength)
+            return mean_kuwahara(data, dimensions, strength)
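The change above replaces the hard-coded MNIST output shape (batch_size, 28, 28, 1) with a caller-supplied NHWC dimensions tuple, so the same filters also work on 3-channel 32x32 CIFAR-10 batches. A hedged usage sketch; the loader name test_loader is an assumption, not from the commit:

    import defense_filters

    data, target = next(iter(test_loader))   # one CIFAR-10 batch
    dims = (len(data), 32, 32, 3)            # N x H x W x C for 3-channel 32x32 images
    smoothed = defense_filters.filtered(data, dims, strength=2, filter="gaussian_blur")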
135  Filter_Analysis/dla.py (new file)
@@ -0,0 +1,135 @@
'''DLA in PyTorch.

Reference:
    Deep Layer Aggregation. https://arxiv.org/abs/1707.06484
'''
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, in_planes, planes, stride=1):
        super(BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(
            in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3,
                               stride=1, padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)

        self.shortcut = nn.Sequential()
        if stride != 1 or in_planes != self.expansion*planes:
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_planes, self.expansion*planes,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(self.expansion*planes)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += self.shortcut(x)
        out = F.relu(out)
        return out


class Root(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size=1):
        super(Root, self).__init__()
        self.conv = nn.Conv2d(
            in_channels, out_channels, kernel_size,
            stride=1, padding=(kernel_size - 1) // 2, bias=False)
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, xs):
        x = torch.cat(xs, 1)
        out = F.relu(self.bn(self.conv(x)))
        return out


class Tree(nn.Module):
    def __init__(self, block, in_channels, out_channels, level=1, stride=1):
        super(Tree, self).__init__()
        self.level = level
        if level == 1:
            self.root = Root(2*out_channels, out_channels)
            self.left_node = block(in_channels, out_channels, stride=stride)
            self.right_node = block(out_channels, out_channels, stride=1)
        else:
            self.root = Root((level+2)*out_channels, out_channels)
            for i in reversed(range(1, level)):
                subtree = Tree(block, in_channels, out_channels,
                               level=i, stride=stride)
                self.__setattr__('level_%d' % i, subtree)
            self.prev_root = block(in_channels, out_channels, stride=stride)
            self.left_node = block(out_channels, out_channels, stride=1)
            self.right_node = block(out_channels, out_channels, stride=1)

    def forward(self, x):
        xs = [self.prev_root(x)] if self.level > 1 else []
        for i in reversed(range(1, self.level)):
            level_i = self.__getattr__('level_%d' % i)
            x = level_i(x)
            xs.append(x)
        x = self.left_node(x)
        xs.append(x)
        x = self.right_node(x)
        xs.append(x)
        out = self.root(xs)
        return out


class DLA(nn.Module):
    def __init__(self, block=BasicBlock, num_classes=10):
        super(DLA, self).__init__()
        self.base = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(True)
        )

        self.layer1 = nn.Sequential(
            nn.Conv2d(16, 16, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(16),
            nn.ReLU(True)
        )

        self.layer2 = nn.Sequential(
            nn.Conv2d(16, 32, kernel_size=3, stride=1, padding=1, bias=False),
            nn.BatchNorm2d(32),
            nn.ReLU(True)
        )

        self.layer3 = Tree(block, 32, 64, level=1, stride=1)
        self.layer4 = Tree(block, 64, 128, level=2, stride=2)
        self.layer5 = Tree(block, 128, 256, level=2, stride=2)
        self.layer6 = Tree(block, 256, 512, level=1, stride=2)
        self.linear = nn.Linear(512, num_classes)

    def forward(self, x):
        out = self.base(x)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.layer5(out)
        out = self.layer6(out)
        out = F.avg_pool2d(out, 4)
        out = out.view(out.size(0), -1)
        out = self.linear(out)
        return out


def test():
    net = DLA()
    print(net)
    x = torch.randn(1, 3, 32, 32)
    y = net(x)
    print(y.size())


if __name__ == '__main__':
    test()
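In this DLA implementation, Root concatenates its inputs along the channel axis before a 1x1 convolution, which is why a level-1 Tree builds Root(2*out_channels, ...) while a level-n Tree builds Root((n+2)*out_channels, ...). A small shape check, as a sketch assuming dla.py is on the import path:

    import torch
    from dla import Tree, BasicBlock

    # A level-1 tree aggregates its left and right blocks: 2 * 64 root input channels
    tree = Tree(BasicBlock, in_channels=32, out_channels=64, level=1, stride=1)
    x = torch.randn(1, 32, 32, 32)   # N, C, H, W
    print(tree(x).shape)             # expected: torch.Size([1, 64, 32, 32])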
79  Filter_Analysis/results/cifar10_fgsm.json (new file)
@@ -0,0 +1,79 @@
{
    "attack": "FGSM",
    "dataset": "CIFAR-10",
    "epsilons": [0.0, 0.025, 0.05, 0.07500000000000001, 0.1, 0.125, 0.15000000000000002, 0.17500000000000002, 0.2, 0.225, 0.25, 0.275, 0.30000000000000004],
    "filters": {
        "gaussian_blur": [
            [0.5442, 0.5442, 0.3986, 0.3178, 0.2561, 0.2342],
            [0.0575, 0.0575, 0.1289, 0.1818, 0.1955, 0.1889],
            [0.0308, 0.0308, 0.0516, 0.1029, 0.1501, 0.1572],
            [0.0298, 0.0298, 0.0349, 0.0655, 0.1144, 0.1306],
            [0.0327, 0.0327, 0.0294, 0.0497, 0.0933, 0.1081],
            [0.0364, 0.0364, 0.031, 0.046, 0.0817, 0.0967],
            [0.0449, 0.0449, 0.0319, 0.0439, 0.0733, 0.0885]
        ]
    }
}
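Each filter entry holds one accuracy row per tested epsilon, with the accuracies at the tested filter strengths as columns. A hedged sketch for reading one strength across epsilons; the layout is inferred from the file above:

    import json
    import matplotlib.pyplot as plt

    with open("results/cifar10_fgsm.json") as f:
        results = json.load(f)

    rows = results["filters"]["gaussian_blur"]
    eps = results["epsilons"][:len(rows)]   # only some epsilons are recorded so far
    plt.plot(eps, [row[0] for row in rows], marker="o")
    plt.xlabel("epsilon")
    plt.ylabel("accuracy")
    plt.show()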
Filter_Analysis/results/mnist_fgsm.json
@@ -1,6 +1,6 @@
 {
-    "attack": "FGSM",
     "dataset": "MNIST",
+    "attack": "FGSM",
     "epsilons": [
         0.0,
         0.025,
@@ -19,12 +19,653 @@
     "filters": {
         "gaussian_blur": [
-            [0.992, 0.992, 0.9879, 0.9682, 0.7731, 0.525]
-        ]
+            [0.992, 0.9879, 0.9682, 0.7731, 0.525],
+            [0.9796, 0.9801, 0.9512, 0.7381, 0.4862],
+            [0.96, 0.9674, 0.9271, 0.6922, 0.4446],
+            [0.926, 0.946, 0.8939, 0.6427, 0.3989],
+            [0.8753, 0.9133, 0.8516, 0.5881, 0.3603],
+            [0.8104, 0.869, 0.7989, 0.5278, 0.3263],
+            [0.7229, 0.8135, 0.7415, 0.471, 0.2968],
+            [0.6207, 0.7456, 0.6741, 0.4224, 0.2683],
+            [0.5008, 0.6636, 0.5983, 0.3755, 0.2453],
+            [0.3894, 0.5821, 0.5243, 0.3359, 0.2269],
+            [0.2922, 0.505, 0.4591, 0.3034, 0.2112],
+            [0.2149, 0.429, 0.3998, 0.2743, 0.1983],
+            [0.1599, 0.3648, 0.3481, 0.2493, 0.1884]
+        ],
+        "gaussian_kuwahara": [
+            [0.9897, 0.9766, 0.9066, 0.7355, 0.5131],
+            [0.9808, 0.9667, 0.8909, 0.7035, 0.4824],
+            [0.9651, 0.9547, 0.87, 0.6713, 0.4538],
+            [0.9412, 0.9334, 0.8447, 0.6354, 0.426],
+            [0.9035, 0.9107, 0.8123, 0.597, 0.3915],
+            [0.8539, 0.8785, 0.7751, 0.5616, 0.362],
+            [0.7925, 0.8328, 0.7328, 0.5236, 0.3344],
+            [0.7078, 0.7808, 0.6816, 0.4868, 0.309],
+            [0.6125, 0.7179, 0.6301, 0.4513, 0.2865],
+            [0.4979, 0.646, 0.5773, 0.4242, 0.2702],
+            [0.3927, 0.564, 0.5197, 0.3859, 0.2493],
+            [0.3023, 0.4761, 0.4594, 0.3494, 0.2354],
+            [0.2289, 0.3839, 0.3981, 0.3182, 0.2232]
+        ],
+        "mean_kuwahara": [
+            [0.988, 0.7536, 0.3667, 0.1763, 0.1339],
+            [0.9795, 0.7359, 0.3496, 0.171, 0.1318],
+            [0.965, 0.7129, 0.3295, 0.1637, 0.1286],
+            [0.946, 0.6871, 0.3119, 0.1578, 0.1244],
+            [0.916, 0.6617, 0.2841, 0.1497, 0.1228],
+            [0.8746, 0.6317, 0.2587, 0.1422, 0.1211],
+            [0.8235, 0.6019, 0.2395, 0.136, 0.1193],
+            [0.7499, 0.5699, 0.2253, 0.134, 0.1164],
+            [0.665, 0.542, 0.2168, 0.1335, 0.1138],
+            [0.5642, 0.5087, 0.2064, 0.1328, 0.1129],
+            [0.4739, 0.4773, 0.1993, 0.1306, 0.1145],
+            [0.3638, 0.437, 0.1921, 0.1309, 0.1159],
+            [0.2659, 0.3912, 0.1854, 0.1307, 0.1166]
+        ],
+        "random_noise": [
+            [0.9913, 0.9899, 0.9872, 0.9719, 0.9226],
+            [0.9793, 0.9782, 0.9711, 0.9453, 0.8802],
+            [0.9603, 0.9568, 0.9436, 0.9049, 0.8184],
+            [0.9253, 0.9183, 0.895, 0.8392, 0.7432],
+            [0.8743, 0.8653, 0.8309, 0.7656, 0.6606],
+            [0.809, 0.7948, 0.7486, 0.6709, 0.5588],
+            [0.721, 0.6999, 0.6485, 0.5625, 0.4577],
+            [0.6157, 0.5881, 0.5377, 0.4548, 0.3647],
+            [0.5005, 0.4802, 0.4267, 0.3571, 0.2885],
+            [0.385, 0.3668, 0.3295, 0.2696, 0.2223],
+            [0.2918, 0.2758, 0.244, 0.2039, 0.1724],
+            [0.215, 0.2016, 0.1832, 0.1555, 0.1326],
+            [0.1591, 0.154, 0.1371, 0.1163, 0.1021]
+        ],
+        "bilateral_filter": [
+            [0.9887, 0.9887, 0.9391, 0.5584, 0.2568],
+            [0.9809, 0.9809, 0.9184, 0.5198, 0.241],
+            [0.9695, 0.9695, 0.8902, 0.4831, 0.2245],
+            [0.9482, 0.9482, 0.8533, 0.4436, 0.2079],
+            [0.9142, 0.9142, 0.8133, 0.4019, 0.1915],
+            [0.8714, 0.8714, 0.7656, 0.3641, 0.1792],
+            [0.8169, 0.8169, 0.7098, 0.3299, 0.1681],
+            [0.7477, 0.7477, 0.641, 0.2978, 0.161],
+            [0.6619, 0.6619, 0.5683, 0.2723, 0.1563],
+            [0.5767, 0.5767, 0.5003, 0.2476, 0.1517],
+            [0.4922, 0.4922, 0.4381, 0.2288, 0.1484],
+            [0.4133, 0.4133, 0.3836, 0.2126, 0.146],
+            [0.3468, 0.3468, 0.3364, 0.1999, 0.1444]
+        ],
+        "bit_depth": [
+            [0.9894, 0.9913, 0.9916, 0.992, 0.992],
+            [0.9862, 0.9823, 0.9807, 0.9796, 0.9796],
+            [0.9808, 0.9781, 0.965, 0.9604, 0.96],
+            [0.9744, 0.9228, 0.9219, 0.926, 0.926],
+            [0.9424, 0.8818, 0.8747, 0.8751, 0.8753],
+            [0.9307, 0.8621, 0.821, 0.8094, 0.8104],
+            [0.9157, 0.8408, 0.7427, 0.7235, 0.7229],
+            [0.8972, 0.7794, 0.6554, 0.6229, 0.6207],
+            [0.8799, 0.7496, 0.559, 0.5046, 0.5008],
+            [0.8581, 0.5289, 0.4547, 0.3914, 0.3894],
+            [0.7603, 0.4301, 0.3113, 0.2927, 0.2922],
+            [0.7227, 0.3992, 0.2414, 0.2168, 0.2149],
+            [0.2624, 0.2091, 0.1874, 0.161, 0.1599]
+        ],
+        "threshold_filter": [
+            [0.982, 0.9817, 0.9799, 0.9713, 0.9502],
+            [0.978, 0.9755, 0.9751, 0.9655, 0.9334],
+            [0.9728, 0.9713, 0.9696, 0.9578, 0.9077],
+            [0.9678, 0.9668, 0.9645, 0.9508, 0.1837],
+            [0.9644, 0.9604, 0.9583, 0.9407, 0.1818],
+            [0.9586, 0.9537, 0.9477, 0.9238, 0.1817],
+            [0.9522, 0.9458, 0.9343, 0.9032, 0.1845],
+            [0.9418, 0.9387, 0.9236, 0.8766, 0.1849],
+            [0.9331, 0.9297, 0.9108, 0.8358, 0.1869],
+            [0.9215, 0.9188, 0.8927, 0.2164, 0.1904],
+            [0.9079, 0.9053, 0.8758, 0.223, 0.1927],
+            [0.8943, 0.8882, 0.8508, 0.2275, 0.1979],
+            [0.8761, 0.8687, 0.8142, 0.2348, 0.2025]
+        ]
     }
 }
@@ -1,672 +0,0 @@
(Deleted: a 672-line MNIST FGSM results file whose contents — the "dataset": "MNIST" / "attack": "FGSM" header, the same 13-value epsilon list, and the same per-filter accuracy arrays for gaussian_blur, gaussian_kuwahara, mean_kuwahara, random_noise, bilateral_filter, bit_depth, and threshold_filter — duplicate the updated results/mnist_fgsm.json shown above.)
@@ -6,7 +6,8 @@ from torchvision import datasets, transforms
 import numpy as np
 from scipy import stats
 import matplotlib.pyplot as plt
-from mnist import Net
+import mnist
+import cifar10
 import json
 import sys
@@ -16,30 +17,39 @@ import defense_filters
 
 ATTACK = "FGSM"
-DATASET = "MNIST"
+DATASET = "CIFAR-10"
+
+RES_X = 32
+RES_Y = 32
+CHANNELS = 3
 
 MAX_EPSILON = 0.3
 EPSILON_STEP = 0.025
 
 TESTED_STRENGTH_COUNT = 5
 epsilons = np.arange(0.0, MAX_EPSILON+EPSILON_STEP, EPSILON_STEP)
-pretrained_model = "mnist_cnn_unfiltered.pt"
+pretrained_model = "cifar_net.pth"
 use_cuda=False
 
 torch.manual_seed(69)
 
-test_loader = torch.utils.data.DataLoader(
-    datasets.MNIST('data/', train=False, download=True, transform=transforms.Compose([
-        transforms.ToTensor(),
-        transforms.Normalize((0.1307,), (0.3081,)),
-    ])),
-    batch_size=1, shuffle=True)
+#test_loader = torch.utils.data.DataLoader(
+#    datasets.MNIST('data/', train=False, download=True, transform=transforms.Compose([
+#        transforms.ToTensor(),
+#        transforms.Normalize((0.1307,), (0.3081,)),
+#    ])),
+#    batch_size=1, shuffle=True)
+
+transform = transforms.Compose([transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
+batch_size = 1
+testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
+test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=True, num_workers=2)
 
 print("CUDA Available: ", torch.cuda.is_available())
 device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")
 
-model = Net().to(device)
+model = cifar10.Net().to(device)
 
 model.load_state_dict(torch.load(pretrained_model, map_location=device))
 
@@ -126,7 +136,7 @@ def test(model, device, test_loader, epsilon, filter):
     # Evaluate performance for
     for i in range(TESTED_STRENGTH_COUNT):
         # Apply the filter with the specified strength
-        filtered_input = defense_filters.filtered(perturbed_data_normalized, batch_size=len(perturbed_data_normalized), strength=i, filter=filter)
+        filtered_input = defense_filters.filtered(perturbed_data_normalized, dimensions=(len(perturbed_data_normalized), RES_X, RES_Y, CHANNELS), strength=i, filter=filter)
         # Evaluate the model on the filtered images
         filtered_output = model(filtered_input)
         # Get the predicted classification
@@ -186,7 +196,7 @@ for filter in filters:
         results["filters"][filter].append(accuracies)
 
 results_json = json.dumps(results, indent=4)
-with open("results/mnist_fgsm.json", "w") as outfile:
+with open("results/cifar10_fgsm.json", "w") as outfile:
     outfile.write(results_json)
 
 # Plot the results
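The evaluation loop perturbs each test image with FGSM before applying a defense filter; the perturbation step itself sits outside the hunks shown here. For reference, a sketch of the standard FGSM update the script presumably implements, x' = clamp(x + epsilon * sign(dJ/dx), 0, 1):

    import torch

    def fgsm_attack(image, epsilon, data_grad):
        # Step each pixel by epsilon in the direction that increases the loss
        perturbed = image + epsilon * data_grad.sign()
        # Keep the result inside the valid input range
        return torch.clamp(perturbed, 0, 1)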