import torch
# Import the public transforms namespace, not the private
# `torchvision.transforms.transforms` submodule.
from torchvision import transforms

# NOTE(review): `size` (crop resolution) and `s` (color-jitter strength)
# must be defined before this point — not visible in this chunk; confirm
# they are set by the surrounding code.

# SimCLR-style augmentation pipeline, expressed as nn.Sequential so it can
# be TorchScript-compiled below. Bound to `augmentation`, NOT `transforms`:
# the original code rebound the name `transforms`, shadowing the imported
# module and breaking any subsequent `transforms.X` lookup.
augmentation = torch.nn.Sequential(
    transforms.RandomResizedCrop(size=size),
    transforms.RandomHorizontalFlip(),
    # ColorJitter is applied on every call here; wrap it in
    # transforms.RandomApply([...], p=0.8) to match the usual SimCLR recipe.
    transforms.ColorJitter(0.8 * s, 0.8 * s, 0.8 * s, 0.2 * s),
    transforms.RandomGrayscale(p=0.2),
    # GaussianBlur(kernel_size=int(0.1 * size)) intentionally disabled;
    # re-enable if the training recipe calls for it.
)

# Compile the pipeline with TorchScript so it can run inside a scripted
# model / without the Python interpreter.
scripted_transforms = torch.jit.script(augmentation)
These augmentation operations involve random, non-differentiable parameter sampling, so why is it still possible to compute gradients with respect to candidate adversarial examples during a reverse attack? (Note: given a fixed set of sampled parameters, operations such as cropping, flipping, and color jitter are themselves differentiable with respect to the input pixels.)