# Time and run one encode/forward pass over the current batch of images.
# NOTE(review): time.clock() was removed in Python 3.8 (deprecated since 3.3);
# this only runs on Python <= 3.7 — time.perf_counter() is the replacement.
start = time.clock()
with torch.no_grad():
    #try:
    # eps <= 0: process every image; otherwise only the subset selected
    # by idxlist (presumably indices that failed an error check — TODO confirm
    # against the code that builds idxlist, which is outside this view).
    if eps <= 0:
        input_data = picts
    else:
        input_data = picts[idxlist]
    if error_bound <= 0 and args.split == 0:
        # Full model forward pass in one batch. The model returns a tuple:
        # index 0 is used as the reconstruction ('predict'), index 2 as the
        # latent codes ('zs').
        outputs = test(torch.from_numpy(input_data).to(device))
        totaltime += time.clock() - start
        zs = outputs[2].cpu().detach().numpy()
        predict = outputs[0].cpu().detach().numpy()
    else:
        if args.split == 0:
            # Encoder only, whole batch at once.
            outputs = test.encode(torch.from_numpy(input_data).to(device))
            totaltime += time.clock() - start
            zs = outputs.cpu().detach().numpy()
        else:
            # Encoder only, processed in chunks of args.split rows
            # (presumably to bound GPU memory — TODO confirm).
            split = args.split
            len_input = input_data.shape[0]
            # NOTE(review): 'start' is reused here as the chunk cursor,
            # clobbering the timing origin captured above — any later
            # 'time.clock() - start' on this path would be wrong.
            start = 0
            zs = None
            while (start < len_input):
                end = min(len_input, start + split)
                input_split = input_data[start:end]
                zs_split = test.encode(
                    torch.from_numpy(input_split).to(
                        device)).cpu().detach().numpy()
                if start == 0:
                    zs = zs_split
                # (chunk is truncated at this view's edge: the remainder
                # presumably concatenates later zs_split results and
                # advances 'start' — TODO confirm against the full file)
# Second variant of the encode pass: materialize the image list as a NumPy
# array, time the forward/encode pass, then flatten the latent codes.
picts = np.array(picts)
# NOTE(review): time.clock() was removed in Python 3.8 (deprecated since
# 3.3) — time.perf_counter() is the replacement.
start = time.clock()
with torch.no_grad():
    #try:
    # eps <= 0: process every image; otherwise only the idxlist subset.
    if eps <= 0:
        input_data = picts
    else:
        input_data = picts[idxlist]
    if error_bound <= 0 and args.split == 0:
        # Full forward pass: tuple index 0 is the reconstruction
        # ('predict'), index 2 the latent codes ('zs').
        outputs = test(torch.from_numpy(input_data).to(device))
        totaltime += time.clock() - start
        zs = outputs[2].cpu().detach().numpy()
        predict = outputs[0].cpu().detach().numpy()
    else:
        # Encoder only (this variant has no args.split chunking branch).
        outputs = test.encode(torch.from_numpy(input_data).to(device))
        totaltime += time.clock() - start
        zs = outputs.cpu().detach().numpy()
# Latent post-processing: record the per-sample latent width, then flatten
# the (batch, latent_size) array into a 1-D stream.
latent_size = zs.shape[1]
zs = zs.flatten()
print(zs.shape[0])
# Preallocate the reconstruction buffer; array_size is defined outside
# this view — presumably the total element count of the original data.
recon = np.zeros(array_size, dtype=np.float32)
latents = np.array(zs)
if args.transpose:
    # Reorder the flat stream so that the same latent component across all
    # samples becomes contiguous (transpose of the (batch, latent) matrix).
    latents = latents.reshape((-1, latent_size)).transpose().flatten()
if args.gpu:  # (truncated: the body of this branch lies beyond this view)