def train():
    """Open-ended training loop for the base hourglass model.

    Streams the 4000-sample dataset in six 600-sample chunks per epoch,
    fits the model batch by batch, periodically pushes visuals/losses to
    the Visualizer, and checkpoints the model state every 3 epochs.
    Relies on module-level names: source_hourglass, get_data, Visualizer,
    torch.
    """
    print_freq = 80      # steps between console loss printouts
    plot_freq = 20       # steps between loss-curve plot updates
    display_freq = 80    # steps between visual previews
    data_size = 4000     # nominal dataset size; used for the epoch fraction
    save_name = 'save_model/1018/model'
    batchSize = 8

    model = source_hourglass.my_hg()
    # NOTE(review): the original called torch.nn.DataParallel(model, [0, 1])
    # and discarded the result — a no-op that never enabled multi-GPU.
    # Wrapping would also hide model.fit()/get_current_visuals(), so the
    # dead call was removed. Re-enable deliberately if wanted:
    # model = torch.nn.DataParallel(model, [0, 1])
    # model.load_state_dict(torch.load('save_model/1016/model_phaseandhg55'))

    all_epoch = 100000000000000  # effectively "train until interrupted"
    visualizer = Visualizer()
    total_steps = 0
    for epoch in range(all_epoch):
        epoch_iter = 0
        # Stream the dataset in 600-sample chunks to bound memory use.
        for start in range(0, 3600, 600):
            loader = get_data(start, start + 600)
            print(start)
            for data in loader:
                epoch_iter += batchSize
                total_steps += batchSize
                model.fit(data[0], data[1], data[2])
                if total_steps % display_freq == 0:
                    vispic = model.get_current_visuals()
                    visualizer.display_current_results(vispic, epoch, False)
                if total_steps % plot_freq == 0:
                    losses = model.get_current_loss()
                    visualizer.new_plot_current_errors(
                        epoch, float(epoch_iter) / data_size, losses)
                if total_steps % print_freq == 0:
                    # print_freq (80) is a multiple of plot_freq (20), so
                    # `losses` is always bound before this print fires.
                    print(losses)
            del loader  # release the chunk before loading the next one
        if epoch % 3 == 0:
            torch.save(model.state_dict(), save_name + str(epoch))
def train():
    """Training loop for the two-direction mixamo hourglass model (no aug).

    NOTE(review): this redefines train() and shadows the earlier variant in
    the same module — only this version is callable after import.

    The data loader is rebuilt every 20 epochs; the model is checkpointed
    every 10 epochs. Relies on module-level names: source_hourglass,
    get_data, Visualizer, torch.
    """
    print_freq = 80    # steps between console loss printouts
    plot_freq = 20     # steps between loss-curve plot updates
    display_freq = 20  # steps between visual previews
    data_size = 400    # nominal dataset size; used for the epoch fraction
    save_name = 'save_model/mixamo/twodirection__90_no_aug_model'
    batchSize = 8

    model = source_hourglass.my_hg(2, 1)
    # model = torch.nn.DataParallel(model, [0, 1])
    # model.load_state_dict(torch.load('save_model/mixamo/90_aug_model60'))

    all_epoch = 10000000000000  # effectively "train until interrupted"
    visualizer = Visualizer()
    total_steps = 0
    loader = None  # assigned on epoch 0 and refreshed every 20 epochs
    for epoch in range(all_epoch):
        epoch_iter = 0
        if epoch % 20 == 0:
            # Rebuild periodically; get_data may resample per epoch.
            loader = get_data(0, 5, epoch)
        for data in loader:
            epoch_iter += batchSize
            total_steps += batchSize
            model.fit(data[0], data[1], data[2])
            if total_steps % display_freq == 0:
                vispic = model.get_current_visuals()
                visualizer.display_current_results(vispic, epoch, False)
            if total_steps % plot_freq == 0:
                losses = model.get_current_loss()
                visualizer.new_plot_current_errors(
                    epoch, float(epoch_iter) / data_size, losses)
            if total_steps % print_freq == 0:
                # print_freq (80) is a multiple of plot_freq (20), so
                # `losses` is always bound before this print fires.
                print(losses)
        if epoch % 10 == 0:
            torch.save(model.state_dict(), save_name + str(epoch))
def start():
    """Train the hourglass model on the a1p1 dataset, resuming from epoch 39.

    Loads grayscale frames, heatmaps and local-motion maps from .npy files,
    normalizes them, wraps them in a TensorDataset, and runs an open-ended
    training loop that checkpoints every 3 epochs. Relies on module-level
    names: hourglass, numpy2tensor, norm_heatmap, Visualizer, Data (a
    torch.utils.data alias), np, torch.
    """
    print_freq = 80    # steps between console loss printouts
    plot_freq = 20     # steps between loss-curve plot updates
    display_freq = 20  # steps between visual previews
    data_size = 1200   # nominal dataset size; used for the epoch fraction
    part = 1200        # number of samples converted to tensors
    save_name = 'save_model/model'
    batchSize = 1      # kept in sync with the DataLoader batch_size below

    # --- data preparation -------------------------------------------------
    pic_set = np.load('data/a1p1all_gray.npy')
    heat_set = np.load('data/a1p1all_deal_heat.npy')
    local_set = np.load('data/a1p1_local.npy')
    # Insert an explicit channel axis: (N, H, W) -> (N, 1, H, W).
    pic_set = pic_set[:, np.newaxis, :, :]
    local_set = local_set[:, np.newaxis, :, :]
    heat_set = heat_set[:, np.newaxis, :, :]
    heat_set = norm_heatmap(heat_set)

    x = numpy2tensor(local_set, part)
    pic_set = (pic_set / 255) * 2 - 1  # scale grayscale [0, 255] -> [-1, 1]
    z = numpy2tensor(pic_set, part)
    y = numpy2tensor(heat_set, part)
    x = x.cuda()
    y = y.cuda()
    # NOTE(review): z stays on the CPU while x and y move to the GPU —
    # confirm model.fit expects that split.

    data = Data.TensorDataset(x, y, z)
    loader = Data.DataLoader(data, batch_size=batchSize, shuffle=True)

    model = hourglass.my_hg()
    model.load_state_dict(torch.load('save_model/model9'))

    all_epoch = 100000000000000  # effectively "train until interrupted"
    visualizer = Visualizer()
    total_steps = 0
    # Resume counting from epoch 39 so checkpoint names stay monotonic.
    for epoch in range(39, all_epoch):
        epoch_iter = 0
        for data in loader:
            epoch_iter += batchSize
            total_steps += batchSize
            model.fit(data[0], data[1], data[2])
            if total_steps % display_freq == 0:
                vispic = model.get_current_visuals()
                visualizer.display_current_results(vispic, epoch, False)
            if total_steps % plot_freq == 0:
                losses = model.get_current_loss()
                visualizer.new_plot_current_errors(
                    epoch, float(epoch_iter) / data_size, losses)
            if total_steps % print_freq == 0:
                # print_freq (80) is a multiple of plot_freq (20), so
                # `losses` is always bound before this print fires.
                print(losses)
        if epoch % 3 == 0:
            torch.save(model.state_dict(), save_name + str(epoch))
# NOTE(review): fragment of a pix2pix training loop — the enclosing epoch /
# iteration `for` loops are outside this view, so the nesting below is
# inferred. In particular the save_epoch_freq check likely belongs at the
# end of the *epoch* loop, not the inner batch loop; confirm against the
# full file before relying on this layout.
pixnet.set_input(data)        # feed the current batch to the model
pixnet.optimize_parameters()  # forward + backward + optimizer step
if total_steps % opt.display_freq == 0:
    # Refresh the visual preview; persist to HTML only every
    # update_html_freq steps.
    save_result = total_steps % opt.update_html_freq == 0
    visualizer.display_current_results(pixnet.get_current_visuals(),
                                       epoch, save_result)
if total_steps % opt.print_freq == 0:
    losses = pixnet.new_get_current_losses()
    # Per-sample compute time for this reporting window.
    t = (time.time() - iter_start_time) / opt.batchSize
    visualizer.print_current_losses(epoch, epoch_iter, losses[0], t, t_data)
    if opt.display_id > 0:
        visualizer.new_plot_current_errors(
            epoch, float(epoch_iter) / dataset_size, opt, losses)
if total_steps % opt.save_latest_freq == 0:
    print('saving the latest model (epoch %d, total_steps %d)' %
          (epoch, total_steps))
    # pixnet.save_networks('latest')
iter_data_time = time.time()  # restart the data-loading timer
if epoch % opt.save_epoch_freq == 0:
    print('saving the model at the end of epoch %d, iters %d' %
          (epoch, total_steps))
    # pixnet.save_networks('latest')
    # pixnet.save_networks(epoch)
    torch.save(pixnet.state_dict(),
               'save_model/pix2pix/point_map_epoch_%d.pkl' % epoch)