import os
import pickle

import numpy as np
import torch
import torch.nn as nn

# `gen_single_loss`, `get_loss_disc*`, `pck`, `fast_device`, the models and the
# optimizers are defined elsewhere in this project.
cpu_device = torch.device('cpu')


def evaluate_model(args, epoch, val_loader, fast_device, generator_model, discriminator_model):
    """Run the generator on (at most 100 batches of) val_loader, accumulate the
    inputs, ground truth and predictions, pickle them to disk and return the
    summed generator loss."""
    os.makedirs(os.path.join(args.modelName, str(epoch)), exist_ok=True)
    gen_loss = 0.0
    disc_loss = 0.0
    disc_loss_pose = 0.0
    all_images = None
    all_outputs = []
    all_ground_truth = {}

    for i, data in enumerate(val_loader):
        print("index:", i)
        if i >= 100:          # evaluate on at most 100 validation batches
            break

        images = data['image']
        if all_images is None:
            all_images = images.numpy()
        else:
            all_images = np.concatenate((all_images, images.numpy()), axis=0)

        ground_truth = {'heatmaps': data['heatmaps'], 'occlusions': data['occlusions']}
        if len(all_ground_truth) == 0:
            for k in ground_truth:
                all_ground_truth[k] = ground_truth[k].numpy()
        else:
            for k in ground_truth:
                all_ground_truth[k] = np.concatenate((all_ground_truth[k], ground_truth[k].numpy()), axis=0)

        if args.use_gpu:
            images = images.to(fast_device)
            ground_truth['heatmaps'] = ground_truth['heatmaps'].to(fast_device)
            ground_truth['occlusions'] = ground_truth['occlusions'].to(fast_device)

        with torch.no_grad():
            outputs = generator_model(images)
            cur_gen_loss_dic = gen_single_loss(ground_truth, outputs, discriminator_model, mode=args.loss)
            # cur_disc_loss_dic = disc_single_loss(ground_truth, outputs, discriminator_model)
            cur_gen_loss = cur_gen_loss_dic['loss']
            # cur_disc_loss = cur_disc_loss_dic['loss']
            gen_loss += cur_gen_loss
            # disc_loss += cur_disc_loss

        if len(all_outputs) == 0:
            for output in outputs:
                all_outputs.append(output.to(cpu_device).numpy())
        else:
            for j in range(len(outputs)):   # j, not i: don't shadow the batch index
                all_outputs[j] = np.concatenate((all_outputs[j], outputs[j].to(cpu_device).numpy()), axis=0)

    with open(os.path.join(args.modelName, str(epoch), 'validation_outputs.dat'), 'wb') as f:
        pickle.dump((all_images, all_ground_truth, all_outputs), f)

    return gen_loss  # , disc_loss
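# ---------------------------------------------------------------------------
# Hedged, self-contained sketch (not part of the original training script): it
# isolates the accumulation pattern evaluate_model relies on, where per-batch
# numpy arrays are concatenated along the batch axis and the result is pickled
# once at the end.  The batch shape and file name below are illustrative
# assumptions only.
def _demo_accumulate_and_dump(path='demo_validation_outputs.dat'):
    all_images = None
    for _ in range(3):                                    # three dummy batches
        images = torch.rand(4, 3, 256, 256)               # assumed input shape
        batch = images.numpy()
        all_images = batch if all_images is None else np.concatenate((all_images, batch), axis=0)
    with open(path, 'wb') as f:
        pickle.dump(all_images, f)                         # same "dump once at the end" idea
    return all_images.shape                                # -> (12, 3, 256, 256)
# ---------------------------------------------------------------------------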
# Generator forward pass: `outputs` holds the heatmap predictions from every
# hourglass stack; only the final stack's output, detached and upsampled from
# 64x64 to 256x256, is fed to the discriminators.
outputs = generator_model(images)
pck_out = outputs
outputs_disc = [nn.Upsample(scale_factor=256 / 64, mode='nearest')(outputs[-1].detach())]

# PCK computation on the predicted heatmaps; its third return value, p_fake,
# is passed on to the discriminator losses below.
_, _, p_fake = pck.StackedHourGlass(pck_out, ground_truth['heatmaps'])

cur_gen_loss_dic = gen_single_loss(ground_truth, outputs, None, mode=args.loss)

# Discriminator losses on the generated (fake) heatmaps: a confidence
# discriminator on the heatmaps alone, and a pose discriminator on the
# heatmaps concatenated with the input image.
cur_disc_loss_dic_conf = 0.0
cur_disc_loss_dic_pose = 0.0
for output in outputs_disc:
    cur_disc_loss_dic_conf += get_loss_disc_bce_with_logits(
        output, discriminator_model_conf, real=False,
        ground_truth=ground_truth['heatmaps'], p_fake=p_fake)
    cur_disc_loss_dic_pose += get_loss_disc_bce_with_logits(
        torch.cat([output, images], 1), discriminator_model_pose, real=False,
        ground_truth=ground_truth['heatmaps'], p_fake=p_fake)
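# ---------------------------------------------------------------------------
# Hedged sketch (an assumption, not the project's actual helper):
# get_loss_disc_bce_with_logits is defined elsewhere in the repository; a
# minimal stand-in with the same call shape could look like the function below,
# scoring the discriminator's raw logits against an all-real or all-fake target
# with BCEWithLogitsLoss.  `ground_truth` and `p_fake` are accepted only to
# mirror the call sites above and are ignored in this toy version.
def _demo_disc_bce_loss(disc_input, discriminator, real, ground_truth=None, p_fake=None):
    logits = discriminator(disc_input)                     # raw, pre-sigmoid scores
    target = torch.ones_like(logits) if real else torch.zeros_like(logits)
    return nn.BCEWithLogitsLoss()(logits, target)
# ---------------------------------------------------------------------------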
images = images.to(fast_device)
ground_truth['heatmaps'] = ground_truth['heatmaps'].to(fast_device)
ground_truth['occlusions'] = ground_truth['occlusions'].to(fast_device)

############# Forward pass and loss computation #############
if (i % (config['training']['gen_iters'] + config['training']['disc_iters'])
        < config['training']['gen_iters']):
    # Generator training step
    optim_gen.zero_grad()
    outputs = generator_model(images)
    cur_gen_loss_dic = gen_single_loss(ground_truth, outputs, discriminator_model, mode=args.loss)

    # Adversarial term: accumulate (+=, not =) the discriminator score on every
    # hourglass output so all stacks contribute.
    cur_disc_loss_dic = 0.0
    for output in outputs:
        cur_disc_loss_dic += get_loss_disc(output, discriminator_model, real=True)

    cur_gen_loss = cur_gen_loss_dic['loss']
    cur_disc_loss = cur_disc_loss_dic
    loss = cur_gen_loss  # + config['training']['alpha'] * cur_disc_loss
    loss.backward()
    optim_gen.step()
    optim_disc.zero_grad()   # drop discriminator gradients produced by this generator pass
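# ---------------------------------------------------------------------------
# Hedged, illustrative sketch: the modulo test above interleaves generator and
# discriminator updates.  With gen_iters = 2 and disc_iters = 1 the schedule
# over batch indices is G, G, D, G, G, D, ...  The helper below replays that
# branching logic on toy numbers so it can be sanity-checked in isolation.
def _demo_update_schedule(num_batches=9, gen_iters=2, disc_iters=1):
    schedule = []
    for i in range(num_batches):
        is_gen_step = (i % (gen_iters + disc_iters)) < gen_iters
        schedule.append('G' if is_gen_step else 'D')
    return schedule          # ['G', 'G', 'D', 'G', 'G', 'D', 'G', 'G', 'D']
# ---------------------------------------------------------------------------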