From ab4460aeeb033380245bebcd5e65b2059a0a7a4f Mon Sep 17 00:00:00 2001
From: Adog64
Date: Wed, 10 Apr 2024 20:15:11 -0400
Subject: [PATCH] Moved filtering functions into separate module

---
 Filter_Analysis/defense_filters.py | 120 +++++++++++++++++++++++++++++
 Filter_Analysis/fgsm.py            |  63 -----------------
 Filter_Analysis/imgComp.py         |  13 ----
 3 files changed, 120 insertions(+), 76 deletions(-)
 create mode 100644 Filter_Analysis/defense_filters.py
 delete mode 100644 Filter_Analysis/imgComp.py

diff --git a/Filter_Analysis/defense_filters.py b/Filter_Analysis/defense_filters.py
new file mode 100644
index 0000000..c185fb9
--- /dev/null
+++ b/Filter_Analysis/defense_filters.py
@@ -0,0 +1,120 @@
+import cv2
+import numpy as np
+import torch
+from pykuwahara import kuwahara
+
+
+# Convert a batch of PyTorch tensors (NCHW) into numpy images (NHWC)
+def pttensor_to_images(data):
+    try:
+        images = data.numpy().transpose(0,2,3,1)
+    except RuntimeError:
+        # Tensors that carry gradients must be detached before conversion
+        images = data.detach().numpy().transpose(0,2,3,1)
+    return images
+
+
+# Apply a Gaussian-weighted Kuwahara filter to each image in the batch
+def gaussian_kuwahara(data, batch_size=64, radius=5):
+    images = pttensor_to_images(data)
+    filtered_images = np.empty((batch_size,28,28,1))
+
+    for i in range(batch_size):
+        filtered_images[i] = kuwahara(images[i], method='gaussian', radius=radius, image_2d=images[i])
+
+    filtered_images = filtered_images.transpose(0,3,1,2)
+    return torch.tensor(filtered_images).float()
+
+
+# Apply a mean (box-weighted) Kuwahara filter to each image in the batch
+def mean_kuwahara(data, batch_size=64, radius=5):
+    images = pttensor_to_images(data)
+    filtered_images = np.empty((batch_size,28,28,1))
+
+    for i in range(batch_size):
+        filtered_images[i] = kuwahara(images[i], method='mean', radius=radius, image_2d=images[i])
+
+    filtered_images = filtered_images.transpose(0,3,1,2)
+    return torch.tensor(filtered_images).float()
+
+
+# Blend Gaussian noise into each image; `intensity` scales the noise weight
+def random_noise(data, batch_size=64, intensity=0.001):
+    images = pttensor_to_images(data)
+    filtered_images = np.empty((batch_size,28,28,1))
+
+    for i in range(batch_size):
+        mean = 0
+        stddev = 180
+        noise = np.zeros(images[i].shape, images[i].dtype)
+        cv2.randn(noise, mean, stddev)
+        filtered_images[i] = cv2.addWeighted(images[i], 1.0, noise, intensity, 0.0).reshape(filtered_images[i].shape)
+
+    filtered_images = filtered_images.transpose(0,3,1,2)
+    return torch.tensor(filtered_images).float()
+
+
+# Smooth each image with a Gaussian blur kernel
+def gaussian_blur(data, batch_size=64, ksize=(5,5)):
+    images = pttensor_to_images(data)
+    filtered_images = np.empty((batch_size,28,28,1))
+
+    for i in range(batch_size):
+        filtered_images[i] = cv2.GaussianBlur(images[i], ksize=ksize, sigmaX=0).reshape(filtered_images[i].shape)
+
+    filtered_images = filtered_images.transpose(0,3,1,2)
+    return torch.tensor(filtered_images).float()
+
+
+# Edge-preserving smoothing in both space and color
+def bilateral_filter(data, batch_size=64, d=5, sigma=50):
+    images = pttensor_to_images(data)
+    filtered_images = np.empty((batch_size,28,28,1))
+
+    for i in range(batch_size):
+        filtered_images[i] = cv2.bilateralFilter(images[i], d, sigma, sigma).reshape(filtered_images[i].shape)
+
+    filtered_images = filtered_images.transpose(0,3,1,2)
+    return torch.tensor(filtered_images).float()
+
+
+# Binarize each image around `threshold`, preserving its original value range
+def threshold_filter(data, batch_size=64, threshold=0.5):
+    images = pttensor_to_images(data)
+    filtered_images = np.empty((batch_size,28,28,1))
+
+    for i in range(batch_size):
+        filtered_images[i] = images[i]
+
+        # If the image contains negative values, shift it so the lowest value is black (0)
+        min_value = np.min(images[i])
+        if min_value < 0:
+            filtered_images[i] -= min_value
+
+        # If the color space extends beyond [0,1], re-scale to [0,1] before
+        # thresholding, then restore the original scale afterwards
+        max_value = np.max(filtered_images[i])
+        if max_value > 1:
+            filtered_images[i] /= max_value
+            filtered_images[i] = np.where(filtered_images[i] >= threshold, 1, 0) * max_value
+        else:
+            filtered_images[i] = np.where(filtered_images[i] >= threshold, 1, 0)
+
+        # Undo the shift applied to negative-valued images
+        if min_value < 0:
+            filtered_images[i] += min_value
+
+    filtered_images = filtered_images.transpose(0,3,1,2)
+    return torch.tensor(filtered_images).float()
+
+
+# Quantize each image to `quantizations` evenly spaced color levels
+def snap_colors(data, batch_size=64, quantizations=4):
+    images = pttensor_to_images(data)
+    filtered_images = np.empty((batch_size,28,28,1))
+
+    for i in range(batch_size):
+        filtered_images[i] = (images[i]*quantizations).astype(int).astype(float)/quantizations
+
+    filtered_images = filtered_images.transpose(0,3,1,2)
+    return torch.tensor(filtered_images).float()
diff --git a/Filter_Analysis/fgsm.py b/Filter_Analysis/fgsm.py
index d9cae71..1c85f22 100644
--- a/Filter_Analysis/fgsm.py
+++ b/Filter_Analysis/fgsm.py
@@ -192,69 +192,6 @@ def test(model, device, test_loader, epsilon):
     return unfiltered_acc, kuwahara_acc, bilateral_acc, gaussian_blur_acc, random_noise_acc, snap_color_acc, one_bit_acc, plurality_acc
 
-def filtered(data, batch_size=64, filter="kuwahara"):
-    # Turn the tensor into an image
-    images = None
-    try:
-        images = data.numpy().transpose(0,2,3,1)
-    except RuntimeError:
-        images = data.detach().numpy().transpose(0,2,3,1)
-
-
-    # Apply the Kuwahara filter
-    filtered_images = np.ndarray((batch_size,28,28,1))
-
-    if filter == "kuwahara":
-        for i in range(batch_size):
-            filtered_images[i] = kuwahara(images[i], method='gaussian', radius=5, image_2d=images[i])
-    elif filter == "aniso_diff":
-        for i in range(batch_size):
-            img_3ch = np.zeros((np.array(images[i]), np.array(images[i]).shape[1], 3))
-            img_3ch[:,:,0] = images[i]
-            img_3ch[:,:,1] = images[i]
-            img_3ch[:,:,2] = images[i]
-            img_3ch_filtered = cv2.ximgproc.anisotropicDiffusion(img2, alpha=0.2, K=0.5, niters=5)
-            filtered_images[i] = cv2.cvtColor(img_3ch_filtered, cv2.COLOR_RGB2GRAY)
-            plt.imshow(filtered_images[i])
-            plt.show()
-    elif filter == "noise":
-        for i in range(batch_size):
-            mean = 0
-            stddev = 180
-            noise = np.zeros(images[i].shape, images[i].dtype)
-            cv2.randn(noise, mean, stddev)
-            filtered_images[i] = cv2.addWeighted(images[i], 1.0, noise, 0.001, 0.0).reshape(filtered_images[i].shape)
-    elif filter == "gaussian_blur":
-        for i in range(batch_size):
-            filtered_images[i] = cv2.GaussianBlur(images[i], ksize=(5,5), sigmaX=0).reshape(filtered_images[i].shape)
-    elif filter == "bilateral":
-        for i in range(batch_size):
-            filtered_images[i] = cv2.bilateralFilter(images[i], 5, 50, 50).reshape(filtered_images[i].shape)
-    elif filter == "1-bit":
-        num_colors = 2
-        for i in range(batch_size):
-            # If the channel contains any negative values, define the lowest negative value as black
-            min_value = np.min(images[i])
-            if (min_value < 0):
-                filtered_images[i] = images[i] + min_value
-
-            # If the color space extends beyond [0,1], re-scale all of the colors to that range
-            max_value = np.max(filtered_images[i])
-            if (max_value > 1):
-                filtered_images[i] *= (num_colors/max_value)
-                filtered_images[i] = filtered_images[i].astype(int).astype(float)*(max_value/num_colors)
-            else:
-                filtered_images[i] *= num_colors
-                filtered_images[i] = filtered_images[i].astype(int).astype(float)/num_colors
-            if (min_value < 0):
-                filtered_images[i] -= min_value
-    elif filter == "snap_color":
-        for i in range(batch_size):
-            filtered_images[i] = (images[i]*4).astype(int).astype(float)/4
-
-    # Modify the data with the filtered image
-    filtered_images = filtered_images.transpose(0,3,1,2)
-    return torch.tensor(filtered_images).float()
 
 unfiltered_accuracies = []
 kuwahara_accuracies = []
diff --git a/Filter_Analysis/imgComp.py b/Filter_Analysis/imgComp.py
deleted file mode 100644
index 876f6bf..0000000
--- a/Filter_Analysis/imgComp.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import cv2
-from pykuwahara import kuwahara
-import sys
-
-def main(in_path1:str, in_path2:str, out_path:str) -> None:
-    image1 = cv2.imread(in_path1);
-    image2 = cv2.imread(in_path2);
-    diff_image = cv2.absdiff(image1, image2)
-    cv2.imwrite(out_path, diff_image)
-
-
-if __name__ == '__main__':
-    main(in_path1=sys.argv[1], in_path2=sys.argv[2], out_path=sys.argv[3])
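
Usage sketch (illustrative, not part of the applied diff): after this
refactor, the test loop in fgsm.py can call the new module directly in place
of the old filtered() dispatcher. A minimal example, assuming one MNIST
batch tensor shaped (64, 1, 28, 28) as produced by the test loader; the
names `data` and `model` stand in for whatever fgsm.py actually uses:

    import torch
    import defense_filters

    data = torch.rand(64, 1, 28, 28)  # stand-in for one MNIST batch

    # Each helper maps an (N, 1, 28, 28) float tensor to a filtered tensor
    # of the same shape, so the result feeds straight back into a model.
    denoised = defense_filters.gaussian_kuwahara(data, batch_size=64, radius=5)
    binarized = defense_filters.threshold_filter(data, batch_size=64, threshold=0.5)
    # output = model(denoised)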