def forward(self, x):
    """Forward pass with optional synthetic input corruption.

    Depending on the module-level flags ``glob_gau`` / ``glob_blur``,
    additive Gaussian noise and/or a separable Gaussian blur is applied
    to the input before it is pushed through the conv1..conv13 / fc1..fc3
    stack.  When ``args.print == 1`` the clean and corrupted inputs are
    dumped as PNGs and the process exits (debug-only path).

    Args:
        x: input batch on CUDA; assumed NCHW with 3 channels
           (the blur kernel is built for 3 channels) -- TODO confirm.

    Returns:
        Output of ``self.fc3``.
    """
    global glob_gau
    global glob_blur

    def _make_blur_kernel():
        # Build the 3-in/3-out blur weight from a separable 1-D Gaussian:
        # outer product gives the 2-D kernel; cross-channel taps are zero.
        k1d = torch.FloatTensor(utils.genblurkernel(args.blur))
        k2d = torch.matmul(k1d.unsqueeze(1),
                           torch.transpose(k1d.unsqueeze(1), 0, 1))
        ksize = k2d.size()[0]
        z = torch.zeros(ksize, ksize)
        kernel = torch.cat((k2d, z, z,
                            z, k2d, z,
                            z, z, k2d), 0)
        kernel = kernel.view(3, 3, ksize, ksize)
        # "same" padding for an odd kernel size
        return kernel, int((ksize - 1) / 2)

    if args.print == 1:
        # Dump the clean input for visual inspection.
        npimg = np.array(x, dtype=float)
        npimg = npimg.squeeze(0)
        scipy.misc.toimage(npimg).save("img0.png")

    # Noise generation part
    if glob_gau == 0 and glob_blur == 0:
        # no noise
        pass
    elif glob_blur == 0 and glob_gau == 1:
        # additive Gaussian noise scaled by args.gau
        gau_kernel = torch.randn(x.size()) * args.gau
        x = Variable(gau_kernel.cuda()) + x
    elif glob_gau == 0 and glob_blur == 1:
        # blur noise only
        blur_kernel, blur_padding = _make_blur_kernel()
        x = torch.nn.functional.conv2d(
            x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
    elif glob_gau == 1 and glob_blur == 1:
        # blur first, then additive Gaussian noise
        blur_kernel, blur_padding = _make_blur_kernel()
        x = torch.nn.functional.conv2d(
            x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
        gau_kernel = torch.randn(x.size()) * args.gau
        x = Variable(gau_kernel.cuda()) + x
    else:
        print("Something is wrong in noise adding part")
        exit()

    if args.print == 1:
        # Dump the corrupted input, then stop -- debug-only path.
        npimg = np.array(x, dtype=float)
        npimg = npimg.squeeze(0)
        scipy.misc.toimage(npimg).save("img1.png")
        exit()

    # Fixed-point emulation toggle; currently hard-coded off, so every
    # quant/roundmax call below is dead until this is flipped to 1.
    fixed = 0
    if fixed:
        x = quant(x)
        x = roundmax(x)

    out = x
    # conv1..conv12 each followed by (optional) quantization.
    for conv in (self.conv1, self.conv2, self.conv3, self.conv4,
                 self.conv5, self.conv6, self.conv7, self.conv8,
                 self.conv9, self.conv10, self.conv11, self.conv12):
        out = conv(out)
        if fixed:
            out = quant(out)
            out = roundmax(out)

    # conv13 is quantized only after the flatten, matching the original order.
    out = self.conv13(out)
    out = out.view(out.size(0), -1)
    if fixed:
        out = quant(out)
        out = roundmax(out)

    for fc in (self.fc1, self.fc2, self.fc3):
        out = fc(out)
        if fixed:
            out = quant(out)
            out = roundmax(out)
    return out
def forward(self, x):
    """Forward pass with optional noise injection driven by ``args``.

    Applies additive Gaussian noise (``args.gau``) and/or a separable
    Gaussian blur (``args.blur``) to the input, then runs the
    conv1..conv13 / fc1..fc3 stack.

    Args:
        x: input batch on CUDA; assumed NCHW with 3 channels
           (the blur kernel is built for 3 channels) -- TODO confirm.

    Returns:
        Output of ``self.fc3``.
    """
    def _make_blur_kernel():
        # Build the 3-in/3-out blur weight from a separable 1-D Gaussian:
        # outer product gives the 2-D kernel; cross-channel taps are zero.
        k1d = torch.FloatTensor(utils.genblurkernel(args.blur))
        k2d = torch.matmul(k1d.unsqueeze(1),
                           torch.transpose(k1d.unsqueeze(1), 0, 1))
        ksize = k2d.size()[0]
        z = torch.zeros(ksize, ksize)
        kernel = torch.cat((k2d, z, z,
                            z, k2d, z,
                            z, z, k2d), 0)
        kernel = kernel.view(3, 3, ksize, ksize)
        # "same" padding for an odd kernel size
        return kernel, int((ksize - 1) / 2)

    if args.gau == 0 and args.blur == 0:
        # no noise
        pass
    elif args.blur == 0 and args.gau != 0:
        # additive Gaussian noise scaled by args.gau
        gau_kernel = torch.randn(x.size()) * args.gau
        x = Variable(gau_kernel.cuda()) + x
    elif args.gau == 0 and args.blur != 0:
        # blur noise only
        blur_kernel, blur_padding = _make_blur_kernel()
        x = torch.nn.functional.conv2d(
            x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
    elif args.gau != 0 and args.blur != 0:
        # blur first, then additive Gaussian noise
        blur_kernel, blur_padding = _make_blur_kernel()
        x = torch.nn.functional.conv2d(
            x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
        gau_kernel = torch.randn(x.size()) * args.gau
        x = Variable(gau_kernel.cuda()) + x
    else:
        print("Something is wrong in noise adding part")
        exit()

    # Fixed-point emulation toggle; currently hard-coded off, so every
    # quant/roundmax call below is dead until this is flipped to 1.
    fixed = 0
    if fixed:
        x = quant(x)
        x = roundmax(x)

    out = x
    # conv1..conv12 each followed by (optional) quantization.
    for conv in (self.conv1, self.conv2, self.conv3, self.conv4,
                 self.conv5, self.conv6, self.conv7, self.conv8,
                 self.conv9, self.conv10, self.conv11, self.conv12):
        out = conv(out)
        if fixed:
            out = quant(out)
            out = roundmax(out)

    # conv13 is quantized only after the flatten, matching the original order.
    out = self.conv13(out)
    out = out.view(out.size(0), -1)
    if fixed:
        out = quant(out)
        out = roundmax(out)

    for fc in (self.fc1, self.fc2, self.fc3):
        out = fc(out)
        if fixed:
            out = quant(out)
            out = roundmax(out)
    return out
def forward(self, x):
    """Forward pass of the VGG-style net with noise injection and pooling.

    Optionally corrupts the input with Gaussian noise / blur, optionally
    dumps the clean and corrupted inputs as PNGs (``args.print``), then
    runs conv1..conv13 with maxpool1..maxpool5 interleaved, flattens, and
    applies fc1..fc3.  ``args.fixed`` enables fixed-point emulation via
    ``quant``/``roundmax`` after each conv/fc layer.

    Args:
        x: input batch on CUDA; assumed NCHW with 3 channels
           (the blur kernel is built for 3 channels) -- TODO confirm.

    Returns:
        Output of ``self.fc3`` (per the inline comments, shape 1250x10).
    """
    def _make_blur_kernel():
        # Build the 3-in/3-out blur weight from a separable 1-D Gaussian:
        # outer product gives the 2-D kernel; cross-channel taps are zero.
        k1d = torch.FloatTensor(utils.genblurkernel(args.blur))
        k2d = torch.matmul(k1d.unsqueeze(1),
                           torch.transpose(k1d.unsqueeze(1), 0, 1))
        ksize = k2d.size()[0]
        z = torch.zeros(ksize, ksize)
        kernel = torch.cat((k2d, z, z,
                            z, k2d, z,
                            z, z, k2d), 0)
        kernel = kernel.view(3, 3, ksize, ksize)
        # "same" padding for an odd kernel size
        return kernel, int((ksize - 1) / 2)

    def _q(t):
        # Fixed-point emulation: quantize, then clamp to representable range.
        if args.fixed:
            t = quant(t)
            t = roundmax(t)
        return t

    if args.print == 1:
        # BUGFIX: was np.array(tensor, ...) -- `tensor` is undefined here
        # and raised NameError; the intent (see the sibling forward) is to
        # dump the current input `x`.
        npimg = np.array(x, dtype=float)
        npimg = npimg.squeeze(0)
        scipy.misc.toimage(npimg).save("img0.png")

    # Noise generation part
    if args.gau == 0 and args.blur == 0:
        # no noise
        pass
    elif args.blur == 0:
        # additive Gaussian noise
        # NOTE(review): unlike the other forward variants, the noise here is
        # NOT scaled by args.gau -- confirm whether that is intentional.
        gau_kernel = torch.randn(x.size())
        x = Variable(gau_kernel.cuda()) + x
    elif args.gau == 0:
        # blur noise only
        blur_kernel, blur_padding = _make_blur_kernel()
        x = torch.nn.functional.conv2d(
            x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
    elif args.gau != 0 and args.blur != 0:
        # blur first, then additive (unscaled -- see NOTE above) noise
        blur_kernel, blur_padding = _make_blur_kernel()
        x = torch.nn.functional.conv2d(
            x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
        gau_kernel = torch.randn(x.size())
        x = Variable(gau_kernel.cuda()) + x
    else:
        print("Something is wrong in noise adding part")
        exit()

    if args.print == 1:
        # Dump the corrupted input, then stop -- debug-only path.
        # (Same `tensor` -> `x` fix as above.)
        npimg = np.array(x, dtype=float)
        npimg = npimg.squeeze(0)
        scipy.misc.toimage(npimg).save("img1.png")
        exit()

    if args.fixed:
        # NOTE(review): the input is rounded *before* quantizing, which is
        # the reverse of the per-layer order in _q -- confirm intentional.
        x = roundmax(x)
        x = quant(x)

    # Block 1: 64 channels, 32x32 (per original inline comments)
    out = _q(self.conv1(x))
    out = _q(self.conv2(out))
    out = self.maxpool1(out)
    # Block 2: 128 channels, 16x16
    out = _q(self.conv3(out))
    out = _q(self.conv4(out))
    out = self.maxpool2(out)
    # Block 3: 256 channels, 8x8
    out = _q(self.conv5(out))
    out = _q(self.conv6(out))
    out = _q(self.conv7(out))
    out = self.maxpool3(out)
    # Block 4: 512 channels, 4x4
    out = _q(self.conv8(out))
    out = _q(self.conv9(out))
    out = _q(self.conv10(out))
    out = self.maxpool4(out)
    # Block 5: 512 channels, 2x2
    out = _q(self.conv11(out))
    out = _q(self.conv12(out))
    out = _q(self.conv13(out))
    out = self.maxpool5(out)

    # Classifier head
    out = out.view(out.size(0), -1)
    out = _q(self.fc1(out))
    out = _q(self.fc2(out))
    out = _q(self.fc3(out))
    return out
def forward(self, x):
    """Measure pass-band frequency energy of the (optionally noised) input.

    After optional Gaussian-noise / blur injection, computes the 2-D FFT
    magnitude of the input (assumed 1x3x224x224 -- TODO confirm), sums the
    three channel magnitudes per pixel, accumulates the energy inside a
    diamond-shaped band-pass region of the 224x224 spectrum, and appends
    that scalar to ``args.outputfile``.

    Returns:
        A dummy ``torch.zeros(1000)`` tensor; the conv/fc classification
        pipeline is intentionally disabled in this variant.
    """
    def _make_blur_kernel():
        # Build the 3-in/3-out blur weight from a separable 1-D Gaussian:
        # outer product gives the 2-D kernel; cross-channel taps are zero.
        k1d = torch.FloatTensor(utils.genblurkernel(args.blur))
        k2d = torch.matmul(k1d.unsqueeze(1),
                           torch.transpose(k1d.unsqueeze(1), 0, 1))
        ksize = k2d.size()[0]
        z = torch.zeros(ksize, ksize)
        kernel = torch.cat((k2d, z, z,
                            z, k2d, z,
                            z, z, k2d), 0)
        kernel = kernel.view(3, 3, ksize, ksize)
        # "same" padding for an odd kernel size
        return kernel, int((ksize - 1) / 2)

    if args.gau == 0 and args.blur == 0:
        # no noise
        pass
    elif args.blur == 0 and args.gau != 0:
        # additive Gaussian noise scaled by args.gau
        gau_kernel = torch.randn(x.size()) * args.gau
        x = Variable(gau_kernel.cuda()) + x
    elif args.gau == 0 and args.blur != 0:
        # blur noise only
        blur_kernel, blur_padding = _make_blur_kernel()
        x = torch.nn.functional.conv2d(
            x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
    elif args.gau != 0 and args.blur != 0:
        # blur first, then additive Gaussian noise
        blur_kernel, blur_padding = _make_blur_kernel()
        x = torch.nn.functional.conv2d(
            x, weight=Variable(blur_kernel.cuda()), padding=blur_padding)
        gau_kernel = torch.randn(x.size()) * args.gau
        x = Variable(gau_kernel.cuda()) + x
    else:
        print("Something is wrong in noise adding part")
        exit()

    # FFT of the input; imaginary part of the input is zero.
    # (Renamed from `f`, which the original later shadowed with a file handle.)
    imag_in = Variable(torch.zeros(1, 3, 224, 224).cuda())
    fft2d = fft.Fft2d()
    fft_rout, fft_iout = fft2d(x, imag_in)
    mag = torch.sqrt(torch.mul(fft_rout, fft_rout) +
                     torch.mul(fft_iout, fft_iout))

    # Per-pixel magnitude summed over the 3 channels.  (A dead
    # torch.zeros(1,1,224,224) allocation that was immediately overwritten
    # has been removed.)
    tmp = torch.abs(torch.add(torch.add(mag[:, 0, :, :], mag[:, 1, :, :]),
                              mag[:, 2, :, :]))

    # Accumulate energy inside the diamond-shaped pass band; the four
    # excluded corner regions were originally dead `print_value = 0` branches.
    PFSUM = 0
    for i in range(0, 224):
        for j in range(0, 224):
            inside = (i + j >= 167 and i - j <= 56 and
                      j - i <= 56 and i + j <= 279)
            if inside:
                PFSUM = PFSUM + tmp[0, i, j]

    # Append the scalar result; `with` guarantees the handle is closed.
    with open(args.outputfile, 'a+') as outf:
        print(PFSUM.item(), file=outf)

    # NOTE(review): the quantized conv/fc pipeline that followed here was
    # dead code (commented out / inside a triple-quoted string) and has
    # been removed; this variant only measures frequency energy.
    out = torch.zeros(1000)
    return out