Moved filtering functions into separate module
This commit is contained in:
parent
df7ac8b236
commit
ab4460aeeb
108
Filter_Analysis/defense_filters.py
Normal file
108
Filter_Analysis/defense_filters.py
Normal file
@ -0,0 +1,108 @@
|
|||||||
|
import cv2
import numpy as np
import torch
from pykuwahara import kuwahara
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
# Turn a pytorch tensor into an image
|
||||||
|
def pttensor_to_images(data):
    """Convert a batch tensor from (N, C, H, W) to a numpy (N, H, W, C) array.

    Args:
        data: pytorch tensor of images in channels-first layout.

    Returns:
        numpy array of the same images in channels-last layout.
    """
    try:
        images = data.numpy().transpose(0, 2, 3, 1)
    except RuntimeError:
        # Tensors attached to the autograd graph must be detached
        # before .numpy() will succeed.
        images = data.detach().numpy().transpose(0, 2, 3, 1)
    # BUG FIX: the converted array was previously never returned, so every
    # caller received None.
    return images
|
||||||
|
|
||||||
|
|
||||||
|
def gaussian_kuwahara(data, batch_size=64, radius=5):
    """Apply a Gaussian-windowed Kuwahara filter to each image in a batch.

    Args:
        data: pytorch tensor of images in (N, C, H, W) layout.
        batch_size: number of images from the batch to filter.
        radius: radius of the Kuwahara filter window.

    Returns:
        torch.FloatTensor of the filtered images in (N, C, H, W) layout.
    """
    images = pttensor_to_images(data)
    # Derive the spatial size from the input instead of hard-coding 28x28
    # so the filter also works on other image sizes.
    height, width = images.shape[1], images.shape[2]
    filtered_images = np.empty((batch_size, height, width, 1))

    for i in range(batch_size):
        filtered_images[i] = kuwahara(images[i], method='gaussian', radius=radius, image_2d=images[i])

    # Convert back to pytorch's channels-first layout.
    filtered_images = filtered_images.transpose(0, 3, 1, 2)
    return torch.tensor(filtered_images).float()
|
||||||
|
|
||||||
|
|
||||||
|
def mean_kuwahara(data, batch_size=64, radius=5):
    """Apply a mean-windowed Kuwahara filter to each image in a batch.

    Args:
        data: pytorch tensor of images in (N, C, H, W) layout.
        batch_size: number of images from the batch to filter.
        radius: radius of the Kuwahara filter window.

    Returns:
        torch.FloatTensor of the filtered images in (N, C, H, W) layout.
    """
    images = pttensor_to_images(data)
    # Derive the spatial size from the input instead of hard-coding 28x28
    # so the filter also works on other image sizes.
    height, width = images.shape[1], images.shape[2]
    filtered_images = np.empty((batch_size, height, width, 1))

    for i in range(batch_size):
        filtered_images[i] = kuwahara(images[i], method='mean', radius=radius, image_2d=images[i])

    # Convert back to pytorch's channels-first layout.
    filtered_images = filtered_images.transpose(0, 3, 1, 2)
    return torch.tensor(filtered_images).float()
|
||||||
|
|
||||||
|
|
||||||
|
def random_noise(data, batch_size=64, intensity=0.001, stddev=180):
    """Additively blend Gaussian noise into each image in a batch.

    Args:
        data: pytorch tensor of images in (N, C, H, W) layout.
        batch_size: number of images from the batch to process.
        intensity: blend weight given to the noise image.
        stddev: standard deviation of the generated noise (previously a
            hard-coded constant; the default preserves the old behavior).

    Returns:
        torch.FloatTensor of the noisy images in (N, C, H, W) layout.
    """
    images = pttensor_to_images(data)
    # Derive the spatial size from the input instead of hard-coding 28x28.
    height, width = images.shape[1], images.shape[2]
    filtered_images = np.empty((batch_size, height, width, 1))

    mean = 0  # noise is zero-centered
    for i in range(batch_size):
        # Fresh noise per image; cv2.randn fills the array in place.
        noise = np.zeros(images[i].shape, images[i].dtype)
        cv2.randn(noise, mean, stddev)
        filtered_images[i] = cv2.addWeighted(images[i], 1.0, noise, intensity, 0.0).reshape(filtered_images[i].shape)

    # Convert back to pytorch's channels-first layout.
    filtered_images = filtered_images.transpose(0, 3, 1, 2)
    return torch.tensor(filtered_images).float()
|
||||||
|
|
||||||
|
|
||||||
|
def gaussian_blur(data, batch_size=64, ksize=(5,5)):
    """Apply an OpenCV Gaussian blur to each image in a batch.

    Args:
        data: pytorch tensor of images in (N, C, H, W) layout.
        batch_size: number of images from the batch to process.
        ksize: Gaussian kernel size (width, height); both must be odd.

    Returns:
        torch.FloatTensor of the blurred images in (N, C, H, W) layout.
    """
    images = pttensor_to_images(data)
    # Derive the spatial size from the input instead of hard-coding 28x28.
    height, width = images.shape[1], images.shape[2]
    filtered_images = np.empty((batch_size, height, width, 1))

    for i in range(batch_size):
        # sigmaX=0 lets OpenCV derive the sigma from the kernel size.
        filtered_images[i] = cv2.GaussianBlur(images[i], ksize=ksize, sigmaX=0).reshape(filtered_images[i].shape)

    # Convert back to pytorch's channels-first layout.
    filtered_images = filtered_images.transpose(0, 3, 1, 2)
    return torch.tensor(filtered_images).float()
|
||||||
|
|
||||||
|
|
||||||
|
def bilateral_filter(data, batch_size=64, d=5, sigma=50):
    """Apply an OpenCV bilateral (edge-preserving) filter to a batch.

    Args:
        data: pytorch tensor of images in (N, C, H, W) layout.
        batch_size: number of images from the batch to process.
        d: diameter of the pixel neighborhood used during filtering.
        sigma: used for both sigmaColor and sigmaSpace.

    Returns:
        torch.FloatTensor of the filtered images in (N, C, H, W) layout.
    """
    images = pttensor_to_images(data)
    # Derive the spatial size from the input instead of hard-coding 28x28.
    height, width = images.shape[1], images.shape[2]
    filtered_images = np.empty((batch_size, height, width, 1))

    for i in range(batch_size):
        filtered_images[i] = cv2.bilateralFilter(images[i], d, sigma, sigma).reshape(filtered_images[i].shape)

    # Convert back to pytorch's channels-first layout.
    filtered_images = filtered_images.transpose(0, 3, 1, 2)
    return torch.tensor(filtered_images).float()
|
||||||
|
|
||||||
|
|
||||||
|
def threshold_filter(data, batch_size=64, threshold=0.5):
    """Binarize each image in a batch around ``threshold``.

    Pixels at or above the threshold become white (1), the rest black (0);
    images whose values extend beyond [0, 1] are rescaled before
    thresholding and restored afterwards.

    Args:
        data: pytorch tensor of images in (N, C, H, W) layout.
        batch_size: number of images from the batch to process.
        threshold: cut-off in the normalized [0, 1] range.

    Returns:
        torch.FloatTensor of the thresholded images in (N, C, H, W) layout.
    """
    images = pttensor_to_images(data)
    # Derive the spatial size from the input instead of hard-coding 28x28.
    height, width = images.shape[1], images.shape[2]
    filtered_images = np.empty((batch_size, height, width, 1))

    for i in range(batch_size):
        # If the channel contains any negative values, shift so the lowest
        # value becomes black (0) before thresholding.
        min_value = np.min(images[i])
        if min_value < 0:
            # BUG FIX: was ``images[i] + min_value``, which pushed the data
            # further negative instead of anchoring the minimum at zero.
            filtered_images[i] = images[i] - min_value
        else:
            # BUG FIX: this slot was previously left uninitialized
            # (np.ndarray garbage) when the image had no negative values.
            filtered_images[i] = images[i]

        # If the color space extends beyond [0,1], re-scale to that range,
        # threshold, then restore the original scale.
        max_value = np.max(filtered_images[i])
        if max_value > 1:
            filtered_images[i] /= max_value
            filtered_images[i] = np.where(filtered_images[i] >= threshold, 1, 0) * max_value
        else:
            filtered_images[i] = np.where(filtered_images[i] >= threshold, 1, 0)

        # Undo the black-level shift applied above.
        if min_value < 0:
            filtered_images[i] += min_value

    # Convert back to pytorch's channels-first layout.
    filtered_images = filtered_images.transpose(0, 3, 1, 2)
    return torch.tensor(filtered_images).float()
|
||||||
|
|
||||||
|
|
||||||
|
def snap_colors(data, batch_size=64, quantizations=4):
    """Snap each pixel to one of ``quantizations`` evenly spaced color levels.

    Note: the ``def`` line previously lacked its trailing colon, which made
    the whole module fail to import with a SyntaxError.

    Args:
        data: pytorch tensor of images in (N, C, H, W) layout.
        batch_size: number of images from the batch to process.
        quantizations: number of color levels to keep.

    Returns:
        torch.FloatTensor of the quantized images in (N, C, H, W) layout.
    """
    images = pttensor_to_images(data)
    # Derive the spatial size from the input instead of hard-coding 28x28.
    height, width = images.shape[1], images.shape[2]
    filtered_images = np.empty((batch_size, height, width, 1))

    for i in range(batch_size):
        # Scale up, truncate onto an integer grid, then scale back down.
        filtered_images[i] = (images[i] * quantizations).astype(int).astype(float) / quantizations

    # Convert back to pytorch's channels-first layout.
    filtered_images = filtered_images.transpose(0, 3, 1, 2)
    return torch.tensor(filtered_images).float()
|
@ -192,69 +192,6 @@ def test(model, device, test_loader, epsilon):
|
|||||||
return unfiltered_acc, kuwahara_acc, bilateral_acc, gaussian_blur_acc, random_noise_acc, snap_color_acc, one_bit_acc, plurality_acc
|
return unfiltered_acc, kuwahara_acc, bilateral_acc, gaussian_blur_acc, random_noise_acc, snap_color_acc, one_bit_acc, plurality_acc
|
||||||
|
|
||||||
|
|
||||||
def filtered(data, batch_size=64, filter="kuwahara"):
    """Apply the defensive filter named by ``filter`` to a batch of images.

    Legacy all-in-one dispatcher (superseded by per-filter functions in
    defense_filters.py).

    Args:
        data: pytorch tensor of images — assumed (N, C, H, W), since the
            transpose below converts to channels-last. TODO confirm.
        batch_size: number of images from the batch to process.
        filter: one of "kuwahara", "aniso_diff", "noise", "gaussian_blur",
            "bilateral", "1-bit", "snap_color". Any other value returns the
            uninitialized buffer unchanged.

    Returns:
        torch.FloatTensor of the filtered images in (N, C, H, W) layout.
    """
    # Turn the tensor into an image (channels-last numpy array)
    images = None
    try:
        images = data.numpy().transpose(0,2,3,1)
    except RuntimeError:
        # Tensors on the autograd graph must be detached before .numpy().
        images = data.detach().numpy().transpose(0,2,3,1)

    # Output buffer; note the hard-coded 28x28x1 (MNIST-sized) shape.
    filtered_images = np.ndarray((batch_size,28,28,1))

    if filter == "kuwahara":
        for i in range(batch_size):
            filtered_images[i] = kuwahara(images[i], method='gaussian', radius=5, image_2d=images[i])
    elif filter == "aniso_diff":
        for i in range(batch_size):
            # NOTE(review): np.zeros is given the image array itself as the
            # first dimension — presumably images[i].shape[0] was intended;
            # this branch raises at runtime as written.
            img_3ch = np.zeros((np.array(images[i]), np.array(images[i]).shape[1], 3))
            img_3ch[:,:,0] = images[i]
            img_3ch[:,:,1] = images[i]
            img_3ch[:,:,2] = images[i]
            # NOTE(review): ``img2`` is undefined here (NameError) —
            # presumably img_3ch was intended.
            img_3ch_filtered = cv2.ximgproc.anisotropicDiffusion(img2, alpha=0.2, K=0.5, niters=5)
            filtered_images[i] = cv2.cvtColor(img_3ch_filtered, cv2.COLOR_RGB2GRAY)
            # NOTE(review): ``plt`` is not imported in the visible imports;
            # debug display left in place — verify before reviving this path.
            plt.imshow(filtered_images[i])
            plt.show()
    elif filter == "noise":
        for i in range(batch_size):
            mean = 0
            stddev = 180
            # cv2.randn fills the array in place with Gaussian noise.
            noise = np.zeros(images[i].shape, images[i].dtype)
            cv2.randn(noise, mean, stddev)
            filtered_images[i] = cv2.addWeighted(images[i], 1.0, noise, 0.001, 0.0).reshape(filtered_images[i].shape)
    elif filter == "gaussian_blur":
        for i in range(batch_size):
            filtered_images[i] = cv2.GaussianBlur(images[i], ksize=(5,5), sigmaX=0).reshape(filtered_images[i].shape)
    elif filter == "bilateral":
        for i in range(batch_size):
            filtered_images[i] = cv2.bilateralFilter(images[i], 5, 50, 50).reshape(filtered_images[i].shape)
    elif filter == "1-bit":
        num_colors = 2
        for i in range(batch_size):
            # If the channel contains any negative values, define the lowest negative value as black
            min_value = np.min(images[i])
            if (min_value < 0):
                # NOTE(review): adding a negative min_value shifts the data
                # further negative; subtracting it looks intended.
                filtered_images[i] = images[i] + min_value
            # NOTE(review): when min_value >= 0, filtered_images[i] is read
            # below without ever being initialized from images[i].

            # If the color space extends beyond [0,1], re-scale all of the colors to that range
            max_value = np.max(filtered_images[i])
            if (max_value > 1):
                filtered_images[i] *= (num_colors/max_value)
                filtered_images[i] = filtered_images[i].astype(int).astype(float)*(max_value/num_colors)
            else:
                filtered_images[i] *= num_colors
                filtered_images[i] = filtered_images[i].astype(int).astype(float)/num_colors
            if (min_value < 0):
                filtered_images[i] -= min_value
    elif filter == "snap_color":
        for i in range(batch_size):
            # Quantize to 4 evenly spaced color levels.
            filtered_images[i] = (images[i]*4).astype(int).astype(float)/4

    # Modify the data with the filtered image
    filtered_images = filtered_images.transpose(0,3,1,2)
    return torch.tensor(filtered_images).float()
|
|
||||||
|
|
||||||
unfiltered_accuracies = []
|
unfiltered_accuracies = []
|
||||||
kuwahara_accuracies = []
|
kuwahara_accuracies = []
|
||||||
|
@ -1,13 +0,0 @@
|
|||||||
import cv2
|
|
||||||
from pykuwahara import kuwahara
|
|
||||||
import sys
|
|
||||||
|
|
||||||
def main(in_path1: str, in_path2: str, out_path: str) -> None:
    """Write the absolute per-pixel difference of two images to *out_path*.

    Args:
        in_path1: path of the first input image.
        in_path2: path of the second input image.
        out_path: path the difference image is written to.

    Raises:
        FileNotFoundError: if either input image cannot be read.
    """
    image1 = cv2.imread(in_path1)
    image2 = cv2.imread(in_path2)
    # cv2.imread silently returns None on an unreadable path, which would
    # surface later as a confusing absdiff error; fail loudly instead.
    if image1 is None:
        raise FileNotFoundError(f"could not read image: {in_path1}")
    if image2 is None:
        raise FileNotFoundError(f"could not read image: {in_path2}")
    diff_image = cv2.absdiff(image1, image2)
    cv2.imwrite(out_path, diff_image)
|
|
||||||
|
|
||||||
|
|
||||||
# Script entry point: <script> <image1> <image2> <output-path>
if __name__ == '__main__':
    main(in_path1=sys.argv[1], in_path2=sys.argv[2], out_path=sys.argv[3])
|
|
Loading…
Reference in New Issue
Block a user