def validate(values):
    """Validate the values in the input widget.

    Parameters
    ----------
    values : list
        Inputs from layout widgets, indexed by input position.
    """
    v = ["" for k in inputmap.keys()]
    # v[0] = checkpath(values[0])
    v[1] = checkpath(values[1])
    v[2] = checkpath(values[2])
    if values[3] not in ['.tif', '.nd2', '.lsm', '.czi']:
        v[3] = "unknown file extension"
    v[4] = checktype(values[4], int, "Can't convert to int")
    if not v[4]:
        if int(values[4]) <= 0:
            v[4] = "Channels start at 1"
    v[5] = checktype(values[5], int, "Can't convert to int")
    v[6] = checktype(values[6], int, "Can't convert to int")
    v[7] = checktype(values[7], int, "Can't convert to int")
    v[8] = checktype(values[8], int, "Can't convert to int")
    v[9] = checktype(values[9], int, "Can't convert to int")
    v[10] = checktype(values[10], int, "Can't convert to int")
    v[11] = checktype(values[11], float, "Can't convert to float")
    v[12] = checktype(values[12], float, "Can't convert to float")
    v[13] = checktype(values[13], float, "Can't convert to float")
    return v
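
# NOTE: `checktype` is not defined in this snippet. Based on how `validate`
# uses it (an empty string on success, an error message otherwise), a minimal
# sketch might look like this; the name and semantics are assumptions:
def checktype(value, typ, errmsg):
    try:
        typ(value)
        return ""
    except (TypeError, ValueError):
        return errmsg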
def __init__(self, FLAGS):
    self.FLAGS = FLAGS
    self.model_out_dir = "../Model"
    self.log_dir = "../Log"
    utils.checkpath(self.model_out_dir)
    utils.checkpath(self.log_dir)
    self.callback = TensorBoard(self.log_dir)
    shutil.copy('./bin/plotboard.py', self.log_dir)
    shutil.copy('./bin/run.bat', self.log_dir)
def main(args):
    # Create output directory
    checkpath(args.out_path)

    # List all files ending with .jpg. args.in_path should contain a folder
    # named imgs for jpg files and a folder named pointlines for pkl files.
    input_paths_train = sorted(glob(os.path.join(args.in_path, '{}/*.jpg'.format("imgs/train"))))
    input_paths_test = sorted(glob(os.path.join(args.in_path, '{}/*.jpg'.format("imgs/test"))))
    if len(input_paths_train) == 0 or len(input_paths_test) == 0:
        raise Exception("No images are found in {}".format(args.in_path))

    # Filter out outdoor images labeled by us; this is optional.
    outdoor_list_train = os.path.join(args.in_path, 'outdoor_list_train.txt')
    outdoor_list_test = os.path.join(args.in_path, 'outdoor_list_test.txt')
    with open(outdoor_list_train, 'r') as fp:
        outdoor_train = fp.readlines()
    # The last line doesn't contain a '\n', so it is processed separately.
    outdoor_names_train = [p[:-2] for p in outdoor_train if len(p) > 9]
    outdoor_names_train += [outdoor_train[-1][:-1]]
    print(len(outdoor_names_train))
    with open(outdoor_list_test, 'r') as fp:
        outdoor_test = fp.readlines()
    outdoor_names_test = [p[:-2] for p in outdoor_test if len(p) > 9]
    outdoor_names_test += [outdoor_test[-1][:-1]]

    data = []
    data_train = []
    data_test = []
    # Save training data
    for fname in input_paths_train:
        # Retrieve the index name of the image
        basename = fname.split('/')[-1].split('.')[0]
        if basename not in outdoor_names_train:
            filename = os.path.join(args.in_path, 'pointlines/{}.pkl'.format(basename))
            _ = process(args.uni_wf, args.img_size, args.out_path, filename, mode='Train')
            item = (filename, 'Train')
            data_train.append(item)
            data.append(item)
    # Save test data
    for fname in input_paths_test:
        # Retrieve the index name of the image
        basename = fname.split('/')[-1].split('.')[0]
        if basename not in outdoor_names_test:
            filename = os.path.join(args.in_path, 'pointlines/{}.pkl'.format(basename))
            _ = process(args.uni_wf, args.img_size, args.out_path, filename, mode='Test')
            item = (filename, 'Test')
            data_test.append(item)
            data.append(item)

    print("The length of the dataset is: {}".format(len(data)))
    print("The length of the training set is: {}".format(len(data_train)))
    print("The length of the test set is: {}".format(len(data_test)))
    print('Finished preprocessing data')
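
# NOTE: `checkpath` is not shown in these snippets. Everywhere it is called
# with an output directory, so it is assumed to create the directory when it
# doesn't already exist; a minimal sketch under that assumption:
import os

def checkpath(path):
    if not os.path.exists(path):
        os.makedirs(path)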
def process(uni_wf, out_size, out_path, filename, mode):
    # Get output paths for preprocessed imgs and wireframes
    basename = filename.split('/')[-1].split('.')[0]
    wf_dir_train = os.path.join(out_path, 'wireframes/train')
    checkpath(wf_dir_train)
    img_dir_train = os.path.join(out_path, 'images/train')
    checkpath(img_dir_train)
    wf_dir_test = os.path.join(out_path, 'wireframes/test')
    checkpath(wf_dir_test)
    img_dir_test = os.path.join(out_path, 'images/test')
    checkpath(img_dir_test)

    with open(filename, 'rb') as f:
        target = pickle.load(f, encoding='latin1')
    img = target['img']
    h, w, _ = img.shape
    img_size = np.array((w, h))
    img = cv2.resize(img, (out_size, out_size), interpolation=cv2.INTER_AREA)
    if mode == 'Train':
        img_dir = os.path.join(img_dir_train, '{}.png'.format(basename))
    elif mode == 'Test':
        img_dir = os.path.join(img_dir_test, '{}.png'.format(basename))
    cv2.imwrite(img_dir, img)

    points = target['points']
    lines = target['lines']
    wf = np.zeros((out_size, out_size))
    for i, j in lines:
        start = np.array(points[i]) * out_size / img_size
        end = np.array(points[j]) * out_size / img_size
        if uni_wf:
            # Use unified intensity
            dist = 1
        else:
            # Different intensities represent different line lengths;
            # optional and not thoroughly tested.
            dist = np.linalg.norm(end - start) / (out_size * np.sqrt(2))
            if dist < 0.1:
                dist = 0.2
            elif dist > 0.5:
                dist = 1
            else:
                dist = dist * 2
        # We haven't experimented with antialiased lines; users can optionally
        # try lineType=cv2.LINE_AA.
        wf = cv2.line(wf, intx(start, out_size), intx(end, out_size),
                      255 * dist, 1, lineType=cv2.LINE_8)
    if mode == 'Train':
        save_dir = os.path.join(wf_dir_train, '{}.png'.format(basename))
    elif mode == 'Test':
        save_dir = os.path.join(wf_dir_test, '{}.png'.format(basename))
    cv2.imwrite(save_dir, wf)
    return wf
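
# NOTE: `intx` is not defined here. Since cv2.line expects integer pixel
# coordinates, a plausible minimal sketch is below; the clamping to the
# canvas bounds is an assumption:
def intx(pt, out_size):
    return tuple(int(min(max(c, 0), out_size - 1)) for c in pt)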
def __init__(self, FLAGS, LOGGER):
    self.input_size = (60, 60)  # image (input to the GAN) size; must be a multiple of 2^4
    self.class_num = 36
    self.FLAGS = FLAGS
    self.val_ratio = FLAGS.val_ratio
    self.batch_size = FLAGS.batch_size
    #%% input dirs
    self.data_dir = os.path.join('..', 'Data')
    self.train_dir = os.path.join('..', 'Data', 'DataTrain')
    # self.test_dir = os.path.join('..', 'Data', 'DataTest')
    #%% output dirs
    self.split_record_dir = os.path.join(LOGGER.log_dir, 'SplitRecord')
    utils.checkpath(self.split_record_dir)
    #%% record data list and split
    self._read_data_list()
    #%% set up batch fetcher
    self.train_batch_fetcher = TrainBatchFetcher(self.train_names, self.batch_size)
def __init__(self, FLAGS):
    self.FLAGS = FLAGS
    self.img_out_dir = "../TrainResult"
    self.model_out_dir = "../Model"
    self.log_dir = "../Log"
    utils.checkpath(self.img_out_dir)
    utils.checkpath(self.model_out_dir)
    utils.checkpath(self.log_dir)
    self.callback = TensorBoard(self.log_dir)
import os
import time

import numpy as np
from PIL import Image, ImageDraw, ImageFont
from skimage import filters
from skimage.morphology import disk, square
from skimage.measure import regionprops
import skimage.morphology as sm

import utils

time_start = time.time()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

#%%
inputdir = os.path.join('.', 'TestData')
outputdir = os.path.join('.', 'TestResults')
utils.checkpath(outputdir)
filenames = utils.all_files_under(inputdir)

#%% load model
modelfile = os.path.join('.', 'Pretrained', 'Model.json')
weightsfile = os.path.join('.', 'Pretrained', 'Weights.h5')
model = utils.loadmodel(modelfile, weightsfile)

all_num = len(filenames)
pre_num = 0
acc_num = 0
setFont = ImageFont.truetype('C:/windows/fonts/Arial.ttf', 60)
fillColor = "#000000"
REFSTR = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'
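
# NOTE: `utils.loadmodel` is not shown. Since the model is stored as a JSON
# architecture plus an HDF5 weights file, a minimal Keras-based sketch is
# given below; this is an assumption about the actual utils implementation:
from keras.models import model_from_json

def loadmodel(modelfile, weightsfile):
    with open(modelfile) as f:
        model = model_from_json(f.read())
    model.load_weights(weightsfile)
    return model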
def fit_mlh(model, problems, data, name, fixed={}, fitting={},
            niter=5, outdir='.', method='Nelder-Mead', save=True, quiet=False):
    """Use maximum likelihood to fit the CHASE model."""
    sim_id = sim_id_str(name, fixed, fitting)
    checkpath(outdir)
    cols = ['iteration', 'success', 'nllh', 'k', 'N', 'bic']

    # get the range of thetas for grid search
    thetas = [k for k in fitting.keys() if k.count('theta') > 0]
    if len(thetas) > 0:
        theta_min, theta_max = fitting['theta']
        theta_prod = [list(t) for t in product(range(theta_min, theta_max + 1), repeat=len(thetas))]
        cols += thetas
    else:
        theta_prod = [[fixed['theta']]]
        cols += ['theta']

    rest = sorted(p for p in fitting.keys() if p.count('theta') == 0)
    cols += rest

    # determine number of parameters and observations
    k = len(fitting)
    N = data.shape[0]

    # create fit table
    arr = []
    for i in range(niter):
        for th in theta_prod:
            arr.append([i, np.nan, np.nan, k, N, np.nan] + th +
                       [np.nan for _ in range(k - len(thetas))])
    fitdf = pd.DataFrame(arr, columns=cols)

    # iterate through parameter combinations
    for i, row in fitdf.iterrows():

        # update pars with current values of theta
        pars = deepcopy(fixed)
        for th in thetas:
            pars[th] = row[th]
        pars['fitting'] = OrderedDict([(p, fitting[p]) for p in rest])

        # if theta=1, can't fit tau
        if len(thetas) == 1 and row[th] == 1 and 'tau' in pars['fitting'] and 'stepsize' not in fixed:
            del pars['fitting']['tau']

        init = []
        for p in pars['fitting']:
            # if fitting a normal stopping distribution, initialize at the mean
            if p == 'mu':
                init.append(data.samplesize.mean())
            else:
                init.append(uniform(fitting[p][0], fitting[p][1]))

        # fit!
        f = minimize(model.nloglik_opt, init, (problems, data, pars,),
                     method=method, options={'ftol': .001})

        fitdf.loc[i, 'success'] = f['success']
        fitdf.loc[i, 'nllh'] = f['fun']
        fitdf.loc[i, 'bic'] = bic(f['fun'], k, N)
        for v, p in enumerate(pars['fitting'].keys()):
            fitdf.loc[i, p] = f['x'][v]

        if not quiet:
            print(sim_id)
            print('%s/%s' % (i, fitdf.shape[0]))
            print('%s: %s' % (thetas, row[thetas].values))
            print(fitdf.loc[i])

    # save the table
    if save:
        fitdf.to_csv('%s/%s.csv' % (outdir, sim_id))

    return fitdf
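
# NOTE: `bic` is assumed to compute the standard Bayesian Information
# Criterion, BIC = 2 * nllh + k * ln(N); a minimal sketch:
import numpy as np

def bic(nllh, k, N):
    return 2 * nllh + k * np.log(N)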
def main(args):
    # Set which gpu(s) to use; CUDA_DEVICE_ORDER must be set to PCI_BUS_ID first.
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    num_gpus = (len(args.gpu) + 1) // 2

    # create model directories
    checkpath(args.modelG_path)
    checkpath(args.modelD_path)

    # tensorboard writer
    checkpath(args.log_path)
    writer = SummaryWriter(args.log_path)

    # load data
    data_loader, num_train = get_loader(args,
                                        batch_size=args.batch_size,
                                        shuffle=True,
                                        num_workers=args.num_workers,
                                        training=True)
    data_loader_val, num_test = get_loader(args,
                                           batch_size=args.val_bs,
                                           shuffle=False,
                                           num_workers=args.num_workers,
                                           training=False)
    print('Finished data loading')
    print("The length of the train set is: {}".format(num_train))
    print("The length of the test set is: {}".format(num_test))

    colorguide = True
    if args.nocolor:
        colorguide = False

    # loss multipliers
    lambdas = [args.lambda_imgl1, args.lambda_wfl1, args.lambda_ssim, args.lambda_color]
    lambda_perceptual = args.lambda_perceptual

    # Generator
    netG = Generator(lambdas=lambdas, colorguide=colorguide, input_nc=1, output_nc=1)
    if num_gpus > 1:
        # Multi-gpu training with synchronized batch normalization. Make sure
        # enough gpus are available; CUDA_VISIBLE_DEVICES is already set to
        # avoid invalid device id issues.
        assert (torch.cuda.device_count() >= num_gpus)
        netG = DataParallelWithCallback(netG, device_ids=[i for i in range(num_gpus)])
        netG_single = netG.module
    else:
        # single gpu training
        netG_single = netG

    # Discriminator
    netD = NLayerDiscriminator(input_nc=4, n_layers=4)
    if num_gpus > 1:
        netD = DataParallelWithCallback(netD, device_ids=[i for i in range(num_gpus)])
        netD_single = netD.module
    else:
        netD_single = netD

    # print(netG_single)
    # print(netD_single)

    if args.pretrained and args.netG_path != '' and args.netD_path != '':
        netG_single.load_state_dict(torch.load(args.netG_path))
        netD_single.load_state_dict(torch.load(args.netD_path))

    # Right now we only support gpu training.
    if torch.cuda.is_available():
        netG = netG.cuda()
        netD = netD.cuda()

    # Define the perceptual loss; placed outside the forward func in G for
    # better multi-gpu training.
    Ploss = PNet()
    if num_gpus > 1:
        Ploss = DataParallelWithCallback(Ploss, device_ids=[i for i in range(num_gpus)])
    if torch.cuda.is_available():
        Ploss = Ploss.cuda()

    # setup optimizers and schedulers
    lr = args.learning_rate
    optimizerD = optim.Adam(netD_single.parameters(), lr=lr, betas=(args.beta1, 0.999))
    schedulerD = ReduceLROnPlateau(optimizerD, factor=0.7, patience=10, mode='min', min_lr=1e-06)
    optimizerG = optim.Adam(netG_single.parameters(), lr=lr, betas=(args.beta1, 0.999))
    schedulerG = ReduceLROnPlateau(optimizerG, factor=0.7, patience=10, mode='min', min_lr=1e-06)

    for epoch in range(args.num_epochs):
        # switch to train mode
        netG.train()
        netD.train()
        for i, (img_real, wf_real, color_real) in enumerate(data_loader, 0):
            img_real = img_real.cuda()
            wf_real = wf_real.cuda()
            color_real = color_real.cuda()

            # Update D network; we freeze parameters in G to save memory.
            for p in netG_single.parameters():
                p.requires_grad = False
            for p in netD_single.parameters():
                p.requires_grad = True
            # if using TTUR, D can be trained multiple steps per G step
            for _ in range(args.D_steps):
                optimizerD.zero_grad()
                # train with real
                real_AB = torch.cat((img_real, wf_real), 1)
                errD_real = 0.5 * netD(trainG=False, trainReal=True, real_AB=real_AB, fake_AB=None).sum()
                errD_real.backward()
                # train with fake
                img_fake, wf_fake, _, _, _, _, _ = netG(trainG=False,
                                                        img_real=None,
                                                        wf_real=wf_real,
                                                        color_real=color_real)
                fake_AB = torch.cat((img_fake, wf_fake), 1)
                errD_fake = 0.5 * netD(trainG=False, trainReal=False, real_AB=None, fake_AB=fake_AB).sum()
                errD_fake.backward()
                errD = errD_real + errD_fake
                optimizerD.step()
                del img_fake, wf_fake, fake_AB, real_AB, errD_real, errD_fake

            iterations_before_epoch = epoch * len(data_loader)
            writer.add_scalar('D Loss', errD.item(), iterations_before_epoch + i)
            del errD

            # Update G network; we freeze parameters in D to save memory.
            for p in netG.parameters():
                p.requires_grad = True
            for p in netD.parameters():
                p.requires_grad = False
            optimizerG.zero_grad()
            img_fake, wf_fake, lossG, wf_ssim, img_l1, color_l1, wf_l1 = netG(
                trainG=True, img_real=img_real, wf_real=wf_real, color_real=color_real)
            ploss = Ploss(img_fake, img_real.detach()).sum()
            fake_AB = torch.cat((img_fake, wf_fake), 1)
            lossD = netD(trainG=True, trainReal=False, real_AB=None, fake_AB=fake_AB).sum()
            errG = (lossG.sum() + lambda_perceptual * ploss + lossD)
            errG.backward()
            optimizerG.step()
            del color_real, fake_AB, lossG, errG

            if args.nocolor:
                print('Epoch: [{}/{}] Iter: [{}/{}] PercLoss : {:.4f} ImageL1 : {:.6f} WfL1 : {:.6f} WfSSIM : {:.6f}'
                      .format(epoch, args.num_epochs, i, len(data_loader),
                              ploss.item(), img_l1.sum().item(), wf_l1.sum().item(),
                              num_gpus + wf_ssim.sum().item()))
            else:
                print('Epoch: [{}/{}] Iter: [{}/{}] PercLoss : {:.4f} ImageL1 : {:.6f} WfL1 : {:.6f} WfSSIM : {:.6f} ColorL1 : {:.6f}'
                      .format(epoch, args.num_epochs, i, len(data_loader),
                              ploss.item(), img_l1.sum().item(), wf_l1.sum().item(),
                              num_gpus + wf_ssim.sum().item(), color_l1.sum().item()))
                writer.add_scalar('Color Loss', color_l1.sum().item(), iterations_before_epoch + i)

            # tensorboard log
            writer.add_scalar('G Loss', lossD.item(), iterations_before_epoch + i)
            writer.add_scalar('Image L1 Loss', img_l1.sum().item(), iterations_before_epoch + i)
            writer.add_scalar('Wireframe MSSSIM Loss', num_gpus + wf_ssim.sum().item(), iterations_before_epoch + i)
            writer.add_scalar('Wireframe L1', wf_l1.sum().item(), iterations_before_epoch + i)
            writer.add_scalar('Image Perceptual Loss', ploss.item(), iterations_before_epoch + i)
            del wf_ssim, ploss, img_l1, color_l1, wf_l1, lossD

            with torch.no_grad():
                # show generated training images in tensorboard
                if i % args.val_freq == 0:
                    real_img = vutils.make_grid(img_real.detach()[:args.val_size], normalize=True, scale_each=True)
                    writer.add_image('Real Image', real_img, (iterations_before_epoch + i) // args.val_freq)
                    real_wf = vutils.make_grid(wf_real.detach()[:args.val_size], normalize=True, scale_each=True)
                    writer.add_image('Real Wireframe', real_wf, (iterations_before_epoch + i) // args.val_freq)
                    fake_img = vutils.make_grid(img_fake.detach()[:args.val_size], normalize=True, scale_each=True)
                    writer.add_image('Fake Image', fake_img, (iterations_before_epoch + i) // args.val_freq)
                    fake_wf = vutils.make_grid(wf_fake.detach()[:args.val_size], normalize=True, scale_each=True)
                    writer.add_image('Fake Wireframe', fake_wf, (iterations_before_epoch + i) // args.val_freq)
                    del real_img, real_wf, fake_img, fake_wf
            del img_real, wf_real, img_fake, wf_fake

        # do checkpointing
        if epoch % args.save_freq == 0 and epoch > 0:
            torch.save(netG_single.state_dict(), '{}/netG_epoch_{}.pth'.format(args.modelG_path, epoch))
            torch.save(netD_single.state_dict(), '{}/netD_epoch_{}.pth'.format(args.modelD_path, epoch))

        # validation
        with torch.no_grad():
            netG_single.eval()
            # Since we use a relatively large validation batch size, we don't
            # go through the whole test set.
            (img_real, wf_real, color_real) = next(iter(data_loader_val))
            img_real = img_real.cuda()
            wf_real = wf_real.cuda()
            color_real = color_real.cuda()
            img_fake, wf_fake, _, _, _, _, _ = netG_single(trainG=False,
                                                           img_real=None,
                                                           wf_real=wf_real,
                                                           color_real=color_real)
            # update lr based on the validation perceptual loss
            val_score = Ploss(img_fake.detach(), img_real.detach()).sum()
            schedulerG.step(val_score)
            schedulerD.step(val_score)
            print('Current lr: {:.6f}'.format(optimizerG.param_groups[0]['lr']))

            real_img = vutils.make_grid(img_real.detach()[:args.val_size], normalize=True, scale_each=True)
            writer.add_image('Test: Real Image', real_img, epoch)
            real_wf = vutils.make_grid(wf_real.detach()[:args.val_size], normalize=True, scale_each=True)
            writer.add_image('Test: Real Wireframe', real_wf, epoch)
            fake_img = vutils.make_grid(img_fake.detach()[:args.val_size], normalize=True, scale_each=True)
            writer.add_image('Test: Fake Image', fake_img, epoch)
            fake_wf = vutils.make_grid(wf_fake.detach()[:args.val_size], normalize=True, scale_each=True)
            writer.add_image('Test: Fake Wireframe', fake_wf, epoch)
            netG_single.train()
            del img_real, real_img, wf_real, real_wf, img_fake, fake_img, wf_fake, fake_wf

    # close tb writer
    writer.close()
def single_ac_train(env, actor, critic, store_path='./', batch_size=32,
                    epsilon=0.01, save_interval=1000, update_interval=1000,
                    learning_starts=200, memory_size=50000, max_epoch=100000,
                    max_iter=10000):
    event_path = os.path.join(store_path, 'actor_events')
    actor_model_path = os.path.join(store_path, 'actor_models')
    critic_model_path = os.path.join(store_path, 'critic_models')
    checkpath(event_path)
    checkpath(actor_model_path)
    checkpath(critic_model_path)

    actor.load_model(actor_model_path)
    critic.load_model(critic_model_path)
    summary_writer = SummaryWriter(event_path)
    memory_buffer = Memory(memory_size)
    results_buffer = ResultsBuffer()

    states = env.reset()
    for i in range(max_epoch):
        states = env.reset()
        episode_buffer = Episode_Record()
        episode_buffer.append('state', states)
        while True:
            actions = actor.get_action(states, epsilon)
            next_states, rewards, dones, info = env.step(actions)
            episode_buffer.append('reward', rewards)
            episode_buffer.append('action', actions)
            if dones:
                state_batch, reward_batch, action_batch = episode_buffer.dump()
                score_batch = critic.get_target(state_batch)
                # Discounted returns, computed backwards from the final reward.
                target_batch = np.zeros_like(reward_batch)
                target_batch[-1] = reward_batch[-1]
                for idx in range(len(reward_batch) - 2, -1, -1):
                    target_batch[idx] = reward_batch[idx] + 0.95 * target_batch[idx + 1]
                global_step, critic_summary, advantage_batch = critic.update(state_batch, target_batch)
                # advantage_batch = np.zeros_like(reward_batch)
                # R = 0.0
                # for idx in range(len(reward_batch) - 1, -1, -1):
                #     R = R * 0.95 + reward_batch[idx]
                #     advantage_batch[idx] = R
                # advantage_batch -= np.mean(advantage_batch)
                # advantage_batch /= np.std(advantage_batch)
                actor_summary = actor.update(state_batch, action_batch, advantage_batch)
                # results_buffer.add_summary(summary_writer, global_step)
                actor.update_target()
                critic.update_target()
                # actor.save_model(actor_model_path, global_step)
                # critic.save_model(critic_model_path, global_step)
                print("Epoch {} earns a reward of {}.".format(i, np.sum(reward_batch)))
                break
            else:
                episode_buffer.append('state', next_states)
                states = next_states
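
# NOTE: `Episode_Record` is not defined in this snippet. From its usage above
# (append per-step 'state'/'action'/'reward', then dump aligned batches), a
# minimal sketch under those assumptions:
import numpy as np

class Episode_Record(object):
    def __init__(self):
        self.records = {'state': [], 'reward': [], 'action': []}

    def append(self, key, value):
        self.records[key].append(value)

    def dump(self):
        # The initial state is appended before the first step, so states,
        # rewards, and actions line up one-to-one over the episode.
        return (np.array(self.records['state']),
                np.array(self.records['reward']),
                np.array(self.records['action']))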
def main(args):
    # By default we only consider single gpu inference.
    assert (len(args.gpu) == 1)
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    # load data
    data_loader_val, num_test = get_loader(args,
                                           batch_size=args.batch_size,
                                           shuffle=False,
                                           num_workers=args.num_workers,
                                           training=False)
    print('Finished data loading')

    # Generator
    colorguide = True
    if args.nocolor:
        colorguide = False
    netG = Generator(lambdas=None, colorguide=colorguide, input_nc=1, output_nc=1)
    netG.load_state_dict(torch.load(args.model_path))
    if torch.cuda.is_available():
        netG = netG.cuda()

    out_path = args.out_path
    checkpath(out_path)

    predictions_fid_real = []
    predictions_fid_fake = []
    fid_model = InceptionV3().cuda()
    fid_model.eval()
    Perceptual = PNet().cuda()
    avg_ssim = 0
    lpips = 0

    # Validate on the test set. TODO: test with a single color guide image.
    with torch.no_grad():
        netG.eval()
        for i, (img_real, wf_real, color_real) in enumerate(data_loader_val, 0):
            img_real = img_real.cuda()
            wf_real = wf_real.cuda()
            if colorguide:
                color_real = color_real.cuda()
            # in case we are in the last iteration (possibly a smaller batch)
            batch_size = img_real.size(0)
            img_fake, wf_fake, _, _, _, _, _ = netG(trainG=False,
                                                    img_real=None,
                                                    wf_real=wf_real,
                                                    color_real=color_real)
            ssim_score = ssim(img_real, img_fake).item() * batch_size
            avg_ssim += ssim_score
            lpips += Perceptual(img_real, img_fake) * batch_size
            # TODO: save generated wireframes
            save_singleimages(img_fake, out_path, i * args.batch_size, args.img_size)
            pred_fid_real = fid_model(img_real)[0]
            pred_fid_fake = fid_model(img_fake)[0]
            predictions_fid_real.append(pred_fid_real.data.cpu().numpy().reshape(batch_size, -1))
            predictions_fid_fake.append(pred_fid_fake.data.cpu().numpy().reshape(batch_size, -1))

        print('SSIM: {:6f}'.format(avg_ssim / num_test))
        print('LPIPS: {:6f}'.format(lpips / num_test))
        predictions_fid_real = np.concatenate(predictions_fid_real, 0)
        predictions_fid_fake = np.concatenate(predictions_fid_fake, 0)
        fid = compute_fid_score(predictions_fid_fake, predictions_fid_real)
        print('FID: {:6f}'.format(fid))
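
# NOTE: `compute_fid_score` is not shown. A minimal sketch of the standard
# Frechet Inception Distance between two sets of Inception activations is
# given below; the project's actual implementation may differ in details:
import numpy as np
from scipy import linalg

def compute_fid_score(act_fake, act_real):
    mu1, mu2 = act_fake.mean(axis=0), act_real.mean(axis=0)
    sigma1 = np.cov(act_fake, rowvar=False)
    sigma2 = np.cov(act_real, rowvar=False)
    # Matrix square root of the covariance product; drop any tiny imaginary
    # component introduced by numerical error.
    covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
    if np.iscomplexobj(covmean):
        covmean = covmean.real
    diff = mu1 - mu2
    return diff.dot(diff) + np.trace(sigma1 + sigma2 - 2.0 * covmean)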