Compare commits

47b14362debcdab05ffff16ae1066d2e08ffbc8c..c0372d8e8f6581b4dd83e6c0e866f15fbe8c64c1

No commits in common. "47b14362debcdab05ffff16ae1066d2e08ffbc8c" and "c0372d8e8f6581b4dd83e6c0e866f15fbe8c64c1" have entirely different histories.

12 changed files with 52 additions and 32133 deletions

Binary file not shown.


@@ -70,10 +70,8 @@ def test(model, device, test_loader, epsilon):
     orig_correct = 0
     # Attacked dataset correct classifications
     attacked_correct = 0
-    kuwahara_correct = 0
-    bilateral_correct = 0
-    gaussian_blur_correct = 0
-    noisy_correct = 0
+    # Filtered attacked dataset correct classifications
+    filtered_correct = 0

     adv_examples = []
@@ -106,23 +104,14 @@ def test(model, device, test_loader, epsilon):
         perturbed_data_normalized = transforms.Normalize((0.1307,), (0.3081,))(perturbed_data)

         # Filter the attacked image
-        kuwahara_data = filtered(perturbed_data_normalized, len(perturbed_data_normalized), filter="kuwahara")
-        bilateral_data = filtered(perturbed_data_normalized, len(perturbed_data_normalized), filter="bilateral")
-        gaussian_blur_data = filtered(perturbed_data_normalized, len(perturbed_data_normalized), filter="gaussian_blur")
-        noisy_data = filtered(perturbed_data_normalized, len(perturbed_data_normalized), filter="noise")
+        perturbed_data_filtered = filtered(perturbed_data_normalized, len(perturbed_data_normalized))

         # evaluate the model on the attacked and filtered images
         output_attacked = model(perturbed_data_normalized)
-        output_kuwahara = model(kuwahara_data)
-        output_bilateral = model(bilateral_data)
-        output_gaussian_blur = model(gaussian_blur_data)
-        output_noisy = model(noisy_data)
+        output_filtered = model(perturbed_data_filtered)

         attacked_pred = output_attacked.max(1, keepdim=True)[1]
-        kuwahara_pred = output_kuwahara.max(1, keepdim=True)[1]
-        bilateral_pred = output_bilateral.max(1, keepdim=True)[1]
-        gaussian_blur_pred = output_gaussian_blur.max(1, keepdim=True)[1]
-        noisy_pred = output_noisy.max(1, keepdim=True)[1]
+        filtered_pred = output_filtered.max(1, keepdim=True)[1]

         if orig_pred.item() == target.item():
             orig_correct += 1
@@ -130,37 +119,30 @@ def test(model, device, test_loader, epsilon):
         if attacked_pred.item() == target.item():
             attacked_correct += 1
+            if epsilon == 0 and len(adv_examples) < 5:
+                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
+                adv_examples.append( (orig_pred.item(), attacked_pred.item(), adv_ex) )

-        if kuwahara_pred.item() == target.item():
-            kuwahara_correct += 1
-
-        if bilateral_pred.item() == target.item():
-            bilateral_correct += 1
-
-        if gaussian_blur_pred.item() == target.item():
-            gaussian_blur_correct += 1
-
-        if noisy_pred.item() == target.item():
-            noisy_correct += 1
+        if filtered_pred.item() == target.item():
+            filtered_correct += 1
+            if epsilon == 0 and len(adv_examples) < 5:
+                adv_ex = perturbed_data.squeeze().detach().cpu().numpy()
+                adv_examples.append( (orig_pred.item(), filtered_pred.item(), adv_ex) )

     orig_acc = orig_correct/float(len(test_loader))
     attacked_acc = attacked_correct/float(len(test_loader))
-    kuwahara_acc = kuwahara_correct/float(len(test_loader))
-    bilateral_acc = bilateral_correct/float(len(test_loader))
-    gaussian_blur_acc = gaussian_blur_correct/float(len(test_loader))
-    noisy_acc = noisy_correct/float(len(test_loader))
+    filtered_acc = filtered_correct/float(len(test_loader))

     print(f"Epsilon: {epsilon}")
     print(f"Original Accuracy = {orig_correct} / {len(test_loader)} = {orig_acc}")
     print(f"Attacked Accuracy = {attacked_correct} / {len(test_loader)} = {attacked_acc}")
-    print(f"Kuwahara Accuracy = {kuwahara_correct} / {len(test_loader)} = {kuwahara_acc}")
-    print(f"Bilateral Accuracy = {bilateral_correct} / {len(test_loader)} = {bilateral_acc}")
-    print(f"Gaussian Blur Accuracy = {gaussian_blur_correct} / {len(test_loader)} = {gaussian_blur_acc}")
-    print(f"Noisy Accuracy = {noisy_correct} / {len(test_loader)} = {noisy_acc}")
+    print(f"Filtered Accuracy = {filtered_correct} / {len(test_loader)} = {filtered_acc}")
+    print(f"Filtered:Attacked = {filtered_acc} / {attacked_acc} = {filtered_acc/attacked_acc}")

-    return attacked_acc, kuwahara_acc, bilateral_acc, gaussian_blur_acc, noisy_acc
+    return attacked_acc, filtered_acc, adv_examples

-def filtered(data, batch_size=64, filter="kuwahara"):
+def filtered(data, batch_size=64):
     # Turn the tensor into an image
     images = None
     try:
@@ -171,57 +153,35 @@ def filtered(data, batch_size=64, filter="kuwahara"):
     # Apply the Kuwahara filter
     filtered_images = np.ndarray((batch_size,28,28,1))

-    if filter == "kuwahara":
-        for i in range(batch_size):
-            filtered_images[i] = kuwahara(images[i], method='gaussian', radius=5, image_2d=images[i])
-    elif filter == "aniso_diff":
-        for i in range(batch_size):
-            img_3ch = np.zeros((np.array(images[i]), np.array(images[i]).shape[1], 3))
-            img_3ch[:,:,0] = images[i]
-            img_3ch[:,:,1] = images[i]
-            img_3ch[:,:,2] = images[i]
-            img_3ch_filtered = cv2.ximgproc.anisotropicDiffusion(img2, alpha=0.2, K=0.5, niters=5)
-            filtered_images[i] = cv2.cvtColor(img_3ch_filtered, cv2.COLOR_RGB2GRAY)
-            plt.imshow(filtered_images[i])
-            plt.show()
-    elif filter == "noise":
-        for i in range(batch_size):
-            mean = 0
-            stddev = 180
-            noise = np.zeros(images[i].shape, images[i].dtype)
-            cv2.randn(noise, mean, stddev)
-            filtered_images[i] = cv2.addWeighted(images[i], 1.0, noise, 0.001, 0.0).reshape(filtered_images[i].shape)
-    elif filter == "gaussian_blur":
-        for i in range(batch_size):
-            filtered_images[i] = cv2.GaussianBlur(images[i], ksize=(5,5), sigmaX=0).reshape(filtered_images[i].shape)
-    elif filter == "bilateral":
-        for i in range(batch_size):
-            filtered_images[i] = cv2.bilateralFilter(images[i], 5, 50, 50).reshape(filtered_images[i].shape)
+    for i in range(batch_size):
+        filtered_images[i] = kuwahara(images[i], method='gaussian', radius=5, image_2d=images[i])

     # Modify the data with the filtered image
     filtered_images = filtered_images.transpose(0,3,1,2)
     return torch.tensor(filtered_images).float()

 attacked_accuracies = []
-kuwahara_accuracies = []
-bilateral_accuracies = []
-gaussian_blur_accuracies = []
-noisy_accuracies = []
+filtered_accuracies = []
+ratios = []
+examples = []

 print(f"Model: {pretrained_model}")

 for eps in epsilons:
-    aacc, kacc, bacc, gacc, nacc = test(model, device, test_loader, eps)
+    aacc, facc, ex = test(model, device, test_loader, eps)
     attacked_accuracies.append(aacc)
-    kuwahara_accuracies.append(kacc)
-    bilateral_accuracies.append(bacc)
-    gaussian_blur_accuracies.append(gacc)
-    noisy_accuracies.append(nacc)
+    filtered_accuracies.append(facc)
+    ratios.append(facc/aacc)
+    examples.append(ex)

 # Plot the results
+plt.subplot(121)
 plt.plot(epsilons, attacked_accuracies, label="Attacked Accuracy")
-plt.plot(epsilons, kuwahara_accuracies, label="Kuwahara Accuracy")
+plt.plot(epsilons, filtered_accuracies, label="Filtered Accuracy")
 plt.legend()
+plt.subplot(122)
+plt.plot(epsilons, ratios, label="Filtered:Attacked")
+plt.legend()
 plt.show()
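The test() function above consumes perturbed_data produced by an FGSM step that falls outside the hunks shown. As orientation only, here is a minimal sketch of that step in the style of the PyTorch FGSM tutorial cited in the notes below; the helper name and signature are assumptions, not code from this repo:

```python
# Hypothetical FGSM helper, mirroring the PyTorch FGSM tutorial;
# not taken from the diff above.
import torch

def fgsm_attack(image, epsilon, data_grad):
    # Step each pixel by epsilon in the direction that increases the loss
    perturbed = image + epsilon * data_grad.sign()
    # Clamp so the perturbed tensor remains a valid image in [0, 1]
    return torch.clamp(perturbed, 0, 1)
```

Note also that the accuracies above are computed as correct/len(test_loader); this is a per-sample accuracy only because the loader yields one image per batch, which the denominators of 10000 (the full MNIST test set) in the raw output further down confirm.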


@@ -47,8 +47,8 @@ def train(args, model, device, train_loader, optimizer, epoch):
         data, target = data.to(device), target.to(device)

         # Apply Kuwahara filter to training data on a batch-by-batch basis
-        if args.filter != 'none':
-            data = filtered(data, len(data), args.filter)
+        if args.filter:
+            data = filtered(data, len(data))

         optimizer.zero_grad()
         output = model(data)
@@ -70,8 +70,8 @@ def test(args, model, device, test_loader):
             data, target = data.to(device), target.to(device)

             # Apply Kuwahara filter to test data on a batch-by-batch basis
-            if args.filter != 'none':
-                data = filtered(data, len(data), args.filter)
+            if args.filter:
+                data = filtered(data, len(data))

             output = model(data)
             test_loss += F.nll_loss(output, target, reduction='sum').item()  # sum up batch loss
@@ -108,8 +108,8 @@ def main():
                         help='how many batches to wait before logging training status')
     parser.add_argument('--save-model', action='store_true', default=False,
                         help='For Saving the current Model')
-    parser.add_argument('--filter', type=str, metavar='S', default='none',
-                        help='Apply a filter at runtime')
+    parser.add_argument('--filter', action='store_true', default=False,
+                        help='Apply Kuwahara filter at runtime')
     args = parser.parse_args()

     train_kwargs = {'batch_size': args.batch_size}
@@ -127,7 +127,7 @@ def main():
     train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
     test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)

-    print(f'Filter Type: {args.filter}')
+    print(f'Kuwahara filter: {args.filter}')

     model = Net().to(device)
     optimizer = optim.Adadelta(model.parameters(), lr=args.lr)
@@ -140,40 +140,21 @@ def main():
         scheduler.step()

     if args.save_model:
-        if args.filter is None:
-            torch.save(model.state_dict(), "mnist_cnn_unfiltered.pt")
+        if args.filter:
+            torch.save(model.state_dict(), "mnist_cnn_filtered.pt")
         else:
-            torch.save(model.state_dict(), f"mnist_cnn_{args.filter}.pt")
+            torch.save(model.state_dict(), "mnist_cnn_unfiltered.pt")

-def filtered(data, batch_size=64, filter="kuwahara"):
+def filtered(data, batch_size=64):
     # Turn the tensor into an image
     images = data.numpy().transpose(0,2,3,1)

     # Apply the Kuwahara filter
     filtered_images = np.ndarray((batch_size,28,28,1))

-    if filter == "kuwahara":
-        for i in range(batch_size):
-            filtered_images[i] = kuwahara(images[i], method='gaussian', radius=5, image_2d=images[i])
-    elif filter == "aniso_diff":
-        for i in range(batch_size):
-            img_3ch = np.zeros((np.array(images[i]), np.array(images[i]).shape[1], 3))
-            img_3ch[:,:,0] = images[i]
-            img_3ch[:,:,1] = images[i]
-            img_3ch[:,:,2] = images[i]
-            img_3ch_filtered = cv2.ximgproc.anisotropicDiffusion(img2, alpha=0.2, K=0.5, niters=5)
-            filtered_images[i] = cv2.cvtColor(img_3ch_filtered, cv2.COLOR_RGB2GRAY)
-            plt.imshow(filtered_images[i])
-            plt.show()
-    elif filter == "noise":
-        pass
-    elif filter == "gaussian_blur":
-        for i in range(batch_size):
-            filtered_images[i] = cv2.GaussianBlur(images[i], ksize=(5,5), sigmaX=0).reshape(filtered_images[i].shape)
-    elif filter == "bilateral":
-        for i in range(batch_size):
-            filtered_images[i] = cv2.bilateralFilter(images[i], 5, 50, 50).reshape(filtered_images[i].shape)
+    for i in range(batch_size):
+        filtered_images[i] = kuwahara(images[i], method='gaussian', radius=5, image_2d=images[i])

     # Modify the data with the filtered image
     filtered_images = filtered_images.transpose(0,3,1,2)
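Both scripts now reduce to the same Kuwahara call. A standalone sketch of that dependency, assuming the pykuwahara package (whose image_2d keyword accepts the single-channel input used above; parameter values mirror the diff):

```python
# Minimal usage sketch of the Kuwahara filtering retained above,
# assuming the `pykuwahara` package; parameters copied from the diff.
import numpy as np
from pykuwahara import kuwahara

# One 28x28 single-channel image in [0, 1], standing in for an MNIST digit
img = np.random.rand(28, 28, 1).astype(np.float32)

# method='gaussian' weights each sector with a Gaussian window; radius=5
# sets the sector size; image_2d passes the single-channel image directly
smoothed = kuwahara(img, method='gaussian', radius=5, image_2d=img)
```

With the simplified flag, a filtered training run would look something like `python main.py --filter --save-model` (script name assumed), which now saves mnist_cnn_filtered.pt. Incidentally, the removed aniso_diff branch could never have run as written: it references an undefined img2 and builds img_3ch with a malformed np.zeros shape, so its deletion loses no working functionality.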


@@ -1,9 +1,4 @@
 # The Approach
-Attacking classifier models essentially boils down to adding precisely calculated noise to the input image, thereby tricking the classifier into selecting an incorrect class. The goal is to understand the efficacy of an array of denoising algorithms as adversarial machine learning defenses.
-
-## Individual Denoising Algorithms
-
-## An Ensemble Approach
-
-## Training the Model on Filtered Data
+Attacking classifier models essentially boils down to adding precisely calculated noise to the input image, thereby tricking the classifier into selecting an incorrect class. The goal is to use a filtering algorithm such as the [[https://en.wikipedia.org/wiki/Kuwahara_filter#|Kuwahara Filter]] to
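For reference, the "precisely calculated noise" of FGSM is the standard signed-gradient perturbation (textbook form, not quoted from the repo):

$$x_{\text{adv}} = x + \epsilon \cdot \operatorname{sign}\!\left(\nabla_x J(\theta, x, y)\right)$$

where $J$ is the training loss, $\theta$ the model parameters, $(x, y)$ the input and label, and $\epsilon$ the attack strength swept in the results below.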


@@ -4,5 +4,5 @@
 - [[Tests]]
 - [[Approach]]
 - [[Rationale]]
-- [[DesignImpact]]
+- [[Notes]]
 - [[Timeline]]


@@ -1,16 +1,13 @@
-# Engineering Design Principles
+# Notes on Filter-Based Defenses
+## Engineering Design Principles
 1. Clearly defined problem
-   - Assess the efficacy of various denoising filters in preserving the accuracy of image classifier models under a noise-based attack.
+   - Defending against gradient-based attacks by using denoising filters as a buffer between an attacked image and a classifier
 2. Requirements
-   - Only algorithmic approach for defense
-   - Must be faster than an auto-encoder
 3. Constraints
    - Computing power
-   - Memory usage
-   - Impossible to know by whom and how a model will be attacked
 4. Engineering standards
    - [[https://peps.python.org/pep-0008/|PEP 8]]
    -
 5. Cite applicable references
    - [[https://pytorch.org/tutorials/beginner/fgsm_tutorial.html|FGSM Attack]]
    - [[https://github.com/pytorch/examples/blob/main/mnist/main.py|MNIST Model]]
@@ -19,8 +16,6 @@
    a) Iterate on the design
       i) Advantages
          - Potentially more computationally efficient than an ML approach
-         - Will likely use less memory than a model used to clean inputs
-         - No training (very computationally intense) stage
       ii) Disadvantages
          - Potentially less effective than an ML approach
       iii) Risks
@@ -33,4 +28,5 @@
    - Different models
    - Different datasets
    - Different filters
+   -
 8. Deliverables and timeline


@@ -1,100 +0,0 @@
# Experimental Results
## Models Trained on Various Filters
**NOTE**: The results in this section contain an oversight in the defense strategy. While the models were trained using different filters, they were all defended against FGSM using a Kuwahara filter.
### Model Trained on Unfiltered MNIST Dataset
| $\epsilon$ | Attacked Accuracy |
|---------|----------|
| 0.05 | 0.9600 |
| 0.10 | 0.8753 |
| 0.15 | 0.7228 |
| 0.20 | 0.5008 |
| 0.25 | 0.2922 |
| 0.30 | 0.1599 |
### Model Trained on Kuwahara (R=5) Filtered MNIST Dataset
| $\epsilon$ | Attacked Accuracy | Filtered Accuracy | Ratio |
|---------|-------------------|-------------------|--------|
| 0.05 | 0.9605 | 0.9522 | 0.9914 |
| 0.1 | 0.8743 | 0.9031 | 1.0329 |
| 0.15 | 0.7107 | 0.8138 | 1.1451 |
| 0.2 | 0.4876 | 0.6921 | 1.4194 |
| 0.25 | 0.2714 | 0.5350 | 1.9713 |
| 0.3 | 0.1418 | 0.3605 | 2.5423 |
### Model Trained on Gaussian Blurred (K-Size=5x5) MNIST Dataset
| $\epsilon$ | Attacked Accuracy | Filtered Accuracy | Ratio |
|---------|-------------------|-------------------|-------|
| 0.05 | 0.9192 | 0.9325 | 1.014 |
| 0.10 | 0.7629 | 0.8802 | 1.154 |
| 0.15 | 0.4871 | 0.7865 | 1.615 |
| 0.20 | 0.2435 | 0.6556 | 2.692 |
| 0.25 | 0.1093 | 0.5024 | 4.596 |
| 0.30 | 0.0544 | 0.3522 | 6.474 |
### Model Trained on Bilateral Filtered (d=5) MNIST Dataset
| $\epsilon$ | Attacked Accuracy | Filtered Accuracy | Ratio |
|---------|-------------------|-------------------|-------|
| 0.05 | 0.9078 | 0.9287 | 1.023 |
| 0.10 | 0.7303 | 0.8611 | 1.179 |
| 0.15 | 0.4221 | 0.7501 | 1.777 |
| 0.20 | 0.1927 | 0.6007 | 3.117 |
| 0.25 | 0.0873 | 0.4433 | 5.078 |
| 0.30 | 0.0525 | 0.3023 | 5.758 |
## Models Defended with Various Filters
### Tabulated Results
| $\epsilon$ | FGSM | Kuwahara | Bilateral | Gaussian Blur | Random Noise |
|------------|--------|----------|-----------|---------------|--------------|
| 0.05 | 0.9600 | 0.8700 | 0.8902 | 0.9271 | 0.9603 |
| 0.10 | 0.8753 | 0.8123 | 0.8133 | 0.8516 | 0.8677 |
| 0.15 | 0.7229 | 0.7328 | 0.7098 | 0.7415 | 0.7153 |
| 0.20 | 0.5008 | 0.6301 | 0.5683 | 0.5983 | 0.4941 |
| 0.25 | 0.2922 | 0.5197 | 0.4381 | 0.4591 | 0.2843 |
| 0.30 | 0.1599 | 0.3981 | 0.3364 | 0.3481 | 0.1584 |
### Raw Program Output
Epsilon: 0.05
Original Accuracy = 9920 / 10000 = 0.992
Attacked Accuracy = 9600 / 10000 = 0.96
Kuwahara Accuracy = 8700 / 10000 = 0.87
Bilateral Accuracy = 8902 / 10000 = 0.8902
Gaussian Blur Accuracy = 9271 / 10000 = 0.9271
Noisy Accuracy = 9603 / 10000 = 0.9603
Epsilon: 0.1
Original Accuracy = 9920 / 10000 = 0.992
Attacked Accuracy = 8753 / 10000 = 0.8753
Kuwahara Accuracy = 8123 / 10000 = 0.8123
Bilateral Accuracy = 8133 / 10000 = 0.8133
Gaussian Blur Accuracy = 8516 / 10000 = 0.8516
Noisy Accuracy = 8677 / 10000 = 0.8677
Epsilon: 0.15000000000000002
Original Accuracy = 9920 / 10000 = 0.992
Attacked Accuracy = 7229 / 10000 = 0.7229
Kuwahara Accuracy = 7328 / 10000 = 0.7328
Bilateral Accuracy = 7098 / 10000 = 0.7098
Gaussian Blur Accuracy = 7415 / 10000 = 0.7415
Noisy Accuracy = 7153 / 10000 = 0.7153
Epsilon: 0.2
Original Accuracy = 9920 / 10000 = 0.992
Attacked Accuracy = 5008 / 10000 = 0.5008
Kuwahara Accuracy = 6301 / 10000 = 0.6301
Bilateral Accuracy = 5683 / 10000 = 0.5683
Gaussian Blur Accuracy = 5983 / 10000 = 0.5983
Noisy Accuracy = 4941 / 10000 = 0.4941
Epsilon: 0.25
Original Accuracy = 9920 / 10000 = 0.992
Attacked Accuracy = 2922 / 10000 = 0.2922
Kuwahara Accuracy = 5197 / 10000 = 0.5197
Bilateral Accuracy = 4381 / 10000 = 0.4381
Gaussian Blur Accuracy = 4591 / 10000 = 0.4591
Noisy Accuracy = 2843 / 10000 = 0.2843
Epsilon: 0.3
Original Accuracy = 9920 / 10000 = 0.992
Attacked Accuracy = 1599 / 10000 = 0.1599
Kuwahara Accuracy = 3981 / 10000 = 0.3981
Bilateral Accuracy = 3364 / 10000 = 0.3364
Gaussian Blur Accuracy = 3481 / 10000 = 0.3481
Noisy Accuracy = 1584 / 10000 = 0.1584
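Reading these tables: Ratio is Filtered Accuracy divided by Attacked Accuracy, so values above 1 mean the filter recovers accuracy the attack destroyed. For example, in the Kuwahara-trained table at $\epsilon$ = 0.3, 0.3605 / 0.1418 ≈ 2.54, matching the tabulated 2.5423; the recovery grows with attack strength.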

File diff suppressed because one or more lines are too long