Moved to separate project on Gitea
commit b36fd83da4
9084  Adaptive_Kuwahara_filter.pdf (new file; file diff suppressed because one or more lines are too long)
BIN   Carlini_and_Wagner.pdf (new file; binary file not shown)
BIN   DeepFool.pdf (new file; binary file not shown)
BIN   Explaining_and_Harnessing_Adversarial_Examples.pdf (new file; binary file not shown)
BIN   Filter_Analysis/DatasetImage.png (new file; binary file not shown; size 202 B)
BIN   Filter_Analysis/Whale-Attacked.png (new file; binary file not shown; size 44 KiB)
BIN   Filter_Analysis/Whale-Original.png (new file; binary file not shown; size 44 KiB)
BIN   Filter_Analysis/__pycache__/mnist.cpython-311.pyc (new file; binary file not shown)
BIN   Filter_Analysis/data/MNIST/raw/t10k-images-idx3-ubyte (new file; binary file not shown)
BIN   Filter_Analysis/data/MNIST/raw/t10k-images-idx3-ubyte.gz (new file; binary file not shown)
BIN   Filter_Analysis/data/MNIST/raw/t10k-labels-idx1-ubyte (new file; binary file not shown)
BIN   Filter_Analysis/data/MNIST/raw/t10k-labels-idx1-ubyte.gz (new file; binary file not shown)
BIN   Filter_Analysis/data/MNIST/raw/train-images-idx3-ubyte (new file; binary file not shown)
BIN   Filter_Analysis/data/MNIST/raw/train-images-idx3-ubyte.gz (new file; binary file not shown)
BIN   Filter_Analysis/data/MNIST/raw/train-labels-idx1-ubyte (new file; binary file not shown)
BIN   Filter_Analysis/data/MNIST/raw/train-labels-idx1-ubyte.gz (new file; binary file not shown)
BIN   Filter_Analysis/data/lenet_mnist_model.pth (new file; binary file not shown)
BIN   Filter_Analysis/difference.png (new file; binary file not shown; size 14 KiB)
141   Filter_Analysis/fgsm.py (new file)
@@ -0,0 +1,141 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import numpy as np
import matplotlib.pyplot as plt
import cv2
from mnist import Net
from pykuwahara import kuwahara

# Attack strengths: 0.05 through 0.30 in steps of 0.05 (float steps, so
# values such as 0.15000000000000002 can appear in the printed logs)
epsilons = np.arange(0.05, 0.35, 0.05)
pretrained_model = "data/lenet_mnist_model.pth"
use_cuda = False

torch.manual_seed(69)

test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('data/', train=False, download=True, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,)),
    ])),
    batch_size=1, shuffle=True)

print("CUDA Available: ", torch.cuda.is_available())
device = torch.device("cuda" if use_cuda and torch.cuda.is_available() else "cpu")

model = Net().to(device)
model.load_state_dict(torch.load(pretrained_model, map_location=device))
model.eval()


def fgsm_attack(image, epsilon, data_grad):
    # Step epsilon in the direction of the sign of the loss gradient,
    # then clamp back to the valid [0, 1] pixel range
    sign_data_grad = data_grad.sign()
    perturbed_image = image + epsilon*sign_data_grad
    perturbed_image = torch.clamp(perturbed_image, 0, 1)
    return perturbed_image


def denorm(batch, mean=[0.1307], std=[0.3081]):
    """
    Convert a batch of tensors to their original scale.

    Args:
        batch (torch.Tensor): Batch of normalized tensors.
        mean (torch.Tensor or list): Mean used for normalization.
        std (torch.Tensor or list): Standard deviation used for normalization.

    Returns:
        torch.Tensor: Batch of tensors without normalization applied to them.
    """
    if isinstance(mean, list):
        mean = torch.tensor(mean).to(device)
    if isinstance(std, list):
        std = torch.tensor(std).to(device)

    return batch * std.view(1, -1, 1, 1) + mean.view(1, -1, 1, 1)


def test(model, device, test_loader, epsilon):
    orig_correct = 0
    attacked_correct = 0
    filtered_correct = 0
    adv_examples = []
    incorrect = 0

    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        data.requires_grad = True

        output_orig = model(data)
        orig_pred = output_orig.max(1, keepdim=True)[1]

        loss = F.nll_loss(output_orig, target)
        model.zero_grad()
        loss.backward()
        data_grad = data.grad.data

        # Attack in the original pixel scale, then re-normalize for the model
        data_denorm = denorm(data)
        perturbed_data = fgsm_attack(data_denorm, epsilon, data_grad)
        perturbed_data_normalized = transforms.Normalize((0.1307,), (0.3081,))(perturbed_data)

        # Convert the perturbed data tensor to a cv2-readable image (NCHW -> NHWC)
        image = perturbed_data_normalized.detach().numpy().transpose(0, 2, 3, 1)

        # Apply the Kuwahara filter
        filtered_image = np.ndarray((1, 28, 28, 1))
        filtered_image[0] = kuwahara(image[0], method='gaussian', radius=3, image_2d=image[0])

        # Convert the cv2 image back to a torch tensor (NHWC -> NCHW)
        filtered_image = filtered_image.transpose(0, 3, 1, 2)
        perturbed_data_filtered = torch.tensor(filtered_image).float()

        # Evaluate the model on the attacked and filtered images
        output_attacked = model(perturbed_data_normalized)
        output_filtered = model(perturbed_data_filtered)

        attacked_pred = output_attacked.max(1, keepdim=True)[1]
        filtered_pred = output_filtered.max(1, keepdim=True)[1]

        if orig_pred.item() == target.item():
            orig_correct += 1
        else:
            incorrect += 1

        if attacked_pred.item() == target.item():
            attacked_correct += 1
            if epsilon == 0 and len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((orig_pred.item(), attacked_pred.item(), adv_ex))

        if filtered_pred.item() == target.item():
            filtered_correct += 1
            if epsilon == 0 and len(adv_examples) < 5:
                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
                adv_examples.append((orig_pred.item(), filtered_pred.item(), adv_ex))

    orig_acc = orig_correct/float(len(test_loader))
    attacked_acc = attacked_correct/float(len(test_loader))
    filtered_acc = filtered_correct/float(len(test_loader))
    print(f"Epsilon: {epsilon}")
    print(f"Original Accuracy = {orig_correct} / {len(test_loader)} = {orig_acc}")
    print(f"Attacked Accuracy = {attacked_correct} / {len(test_loader)} = {attacked_acc}")
    print(f"Filtered Accuracy = {filtered_correct} / {len(test_loader)} = {filtered_acc}")

    return attacked_acc, adv_examples


accuracies = []
examples = []

for eps in epsilons:
    acc, ex = test(model, device, test_loader, eps)
    accuracies.append(acc)
    examples.append(ex)
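fgsm_attack above implements the fast gradient sign method of Goodfellow et al. (see Explaining_and_Harnessing_Adversarial_Examples.pdf in this commit): $x_{adv} = \text{clamp}(x + \epsilon \cdot \text{sign}(\nabla_x J(\theta, x, y)),\ 0,\ 1)$. The script imports matplotlib.pyplot but never uses it; below is a minimal sketch of how the collected accuracies might be plotted, assuming the epsilons and accuracies lists defined above (the output filename is arbitrary):

    # Hypothetical plotting step, not part of the committed script
    plt.figure(figsize=(5, 4))
    plt.plot(epsilons, accuracies, marker='o')
    plt.xlabel("Epsilon")
    plt.ylabel("Attacked accuracy")
    plt.title("FGSM: accuracy vs. perturbation strength")
    plt.savefig("accuracy_vs_epsilon.png")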
13    Filter_Analysis/imgComp.py (new file)
@@ -0,0 +1,13 @@
import cv2
from pykuwahara import kuwahara
import sys


def main(in_path1: str, in_path2: str, out_path: str) -> None:
    # Write the per-pixel absolute difference between the two input images
    image1 = cv2.imread(in_path1)
    image2 = cv2.imread(in_path2)
    diff_image = cv2.absdiff(image1, image2)
    cv2.imwrite(out_path, diff_image)


if __name__ == '__main__':
    main(in_path1=sys.argv[1], in_path2=sys.argv[2], out_path=sys.argv[3])
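Usage sketch: run from within Filter_Analysis/ as `python imgComp.py Whale-Original.png Whale-Attacked.png difference.png` to write the pixel-wise absolute difference of the two whale images; all three paths are files added in this commit.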
50    Filter_Analysis/mnist.py (new file)
@@ -0,0 +1,50 @@
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR


class Net(nn.Module):
    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu(x)
        x = self.conv2(x)
        x = F.relu(x)
        x = F.max_pool2d(x, 2)
        x = self.dropout1(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.dropout2(x)
        x = self.fc2(x)
        output = F.log_softmax(x, dim=1)
        return output


def train(args, model, device, train_loader, optimizer, epoch):
    model.train()
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        optimizer.zero_grad()
        output = model(data)
        loss = F.nll_loss(output, target)
        loss.backward()
        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
            if args.dry_run:
                break
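The fc1 input size follows from the forward pass: two 3x3 valid convolutions take a 28x28 digit to 24x24, max_pool2d(x, 2) halves that to 12x12, and 64 channels * 12 * 12 = 9216. A quick shape check, assuming this mnist.py is importable:

    import torch
    from mnist import Net

    net = Net().eval()
    with torch.no_grad():
        logits = net(torch.zeros(1, 1, 28, 28))  # one dummy MNIST-sized image
    print(logits.shape)  # torch.Size([1, 10]): log-probabilities, one per digit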
BIN   Filter_Analysis/whale_full.png (new file; binary file not shown; size 266 KiB)
4     Filter_Analysis/wiki/Approach.wiki (new file)
@@ -0,0 +1,4 @@
= The Approach =

The goal is to use a filtering algorithm such as the [[https://en.wikipedia.org/wiki/Kuwahara_filter#|Kuwahara Filter]] to smooth adversarial perturbations out of an attacked image before it reaches the classifier.
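A minimal sketch of this buffering step, assuming the pykuwahara package with the same arguments fgsm.py uses for single-channel images (defend is an illustrative helper, not part of the commit):

    import numpy as np
    from pykuwahara import kuwahara

    def defend(image: np.ndarray) -> np.ndarray:
        # Smooth high-frequency (adversarial) noise; for single-channel
        # inputs, pass the image itself as the 2D guide, as fgsm.py does
        return kuwahara(image, method='gaussian', radius=3, image_2d=image)

    # attacked -> filtered -> classifier, instead of attacked -> classifier:
    # prediction = classify(defend(attacked_image))   # classify() is hypothetical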
7     Filter_Analysis/wiki/FilterAnalysis.wiki (new file)
@@ -0,0 +1,7 @@
= Halting Gradient Attacks with Non-Gradient Defenses =

== Contents ==
- [[Tests]]
- [[Approach]]
- [[Rationale]]
- [[Notes]]
18    Filter_Analysis/wiki/Notes.wiki (new file)
@@ -0,0 +1,18 @@
= Notes on Filter-Based Defenses =

== Engineering Design Principles ==
1. Clearly defined problem
   a) Defending against gradient-based attacks by using denoising filters as a buffer between an attacked image and a classifier
2. Requirements
3. Constraints
4. Engineering standards
5. Cite applicable references
6. Considered alternatives
   a) Iterate on the design
      i) Advantages
      ii) Disadvantages
      iii) Risks
7. Evaluation process
   a) Validation
8. Deliverables and timeline
9.
42    Filter_Analysis/wiki/Tests.wiki (new file)
@@ -0,0 +1,42 @@
= Test Process for Non-Gradient Filter Pipeline =

For each attack, the following tests are to be evaluated. The performance of each attack should be evaluated using cross-validation with $k=5$; a sketch of that loop follows the table.

| Training | Test                    |
|----------|-------------------------|
| Clean    | Clean                   |
| Clean    | Attacked                |
| Clean    | Filtered (Not Attacked) |
| Clean    | Filtered (Attacked)     |
| Filtered | Filtered (Not Attacked) |
| Filtered | Filtered (Attacked)     |
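A minimal sketch of the $k=5$ split, assuming scikit-learn's KFold; train_model and evaluate are hypothetical stand-ins for the training and scoring code:

    import numpy as np
    from sklearn.model_selection import KFold

    def cross_validate(images: np.ndarray, labels: np.ndarray, k: int = 5):
        # Each of the k folds serves once as the held-out test set
        fold_accuracies = []
        for train_idx, test_idx in KFold(n_splits=k, shuffle=True).split(images):
            model = train_model(images[train_idx], labels[train_idx])  # hypothetical
            acc = evaluate(model, images[test_idx], labels[test_idx])  # hypothetical
            fold_accuracies.append(acc)
        return np.mean(fold_accuracies), np.std(fold_accuracies)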
FGSM results on the 10000-image MNIST test set (original, unattacked accuracy is 9912 / 10000 = 0.9912 at every epsilon):

| Epsilon | Attacked Accuracy     | Filtered Accuracy     |
|---------|-----------------------|-----------------------|
| 0.05    | 9605 / 10000 = 0.9605 | 9522 / 10000 = 0.9522 |
| 0.10    | 8743 / 10000 = 0.8743 | 9031 / 10000 = 0.9031 |
| 0.15    | 7107 / 10000 = 0.7107 | 8138 / 10000 = 0.8138 |
| 0.20    | 4876 / 10000 = 0.4876 | 6921 / 10000 = 0.6921 |
| 0.25    | 2714 / 10000 = 0.2714 | 5350 / 10000 = 0.5350 |
| 0.30    | 1418 / 10000 = 0.1418 | 3605 / 10000 = 0.3605 |

At every epsilon above 0.05, the Kuwahara-filtered images are classified more accurately than the unfiltered attacked images, and the gap widens as the attack strengthens.
BIN   McGuire_MS_Thesis_Broad_Spectrum_Defense.pdf (new file; binary file not shown)