def main(cfg):
    """Evaluate the best saved checkpoint on the test split.

    Args:
        cfg: config node providing SOLVER, MODEL and DATASET settings.
    """
    pl.figure()
    torch.manual_seed(cfg.SOLVER.SEED)
    save_path = cfg.MODEL.SAVE_PATH  # FIX: was assigned twice with the same value
    batchsize = cfg.SOLVER.BATCHSIZE
    draw_loss(save_path)
    # NOTE(review): N_GPU doubles as a device index here — presumably an int
    # with negative meaning CPU. TODO confirm against the config schema.
    device = torch.device(cfg.SOLVER.N_GPU if cfg.SOLVER.N_GPU >= 0 else "cpu")
    model = cfg.MODEL.TYPE(**cfg.MODEL.KEYWORDS)
    print('device', device)
    model.to(device)
    model.set_device(device)
    # Load the best checkpoint produced during training.
    model.load_state_dict(
        torch.load(save_path + "model_best.pt", map_location=device))
    util.make_directory(save_path)
    print('train val')
    evaluate_test(cfg, save_path, batchsize, model, device)
def login():
    """Interactively create a GitHub OAuth token with the "gist" scope.

    Prompts for credentials (and a 2-factor code when the API demands one),
    then stores the username and token as JSON in TOKEN_FILE.
    """
    import getpass  # local import: only needed for this interactive flow

    # NOTE(review): the original source was corrupted here ("...": "******");
    # reconstructed as a plain username prompt plus an echo-free password
    # prompt — confirm against upstream history.
    username = raw_input("GitHub username: ")
    password = getpass.getpass("GitHub password: ")
    data = {
        "scopes": ["gist"],
        "note": "Crumbs",
    }
    response = requests.post(AUTHORIZATIONS_URL,
                             data=json.dumps(data),
                             auth=(username, password))
    # A 401 carrying the X-GitHub-OTP header means 2-factor auth is enabled:
    # retry the same request with the one-time code attached.
    # FIX: was `reponse.headers` — a NameError typo on the 2FA path.
    if response.status_code == 401 and "X-GitHub-OTP" in response.headers:
        tfa_code = raw_input("2-factor auth code: ")
        headers = {"X-GitHub-OTP": tfa_code}
        response = requests.post(AUTHORIZATIONS_URL,
                                 data=json.dumps(data),
                                 headers=headers,
                                 auth=(username, password))
    if response.status_code == 201:
        util.make_directory(CONFIG_DIR)
        with open(TOKEN_FILE, "w") as f:
            data = {"username": username, "token": response.json()["token"]}
            json.dump(data, f)
        print("Login successful")
    else:
        print("Error: {}".format(response.json()["message"]))
def make_tex(self, overwrite_handler):
    """Make TeX files.

    Args:
        overwrite_handler: a function which accepts a single str argument,
            that is used to handle overwritten issue.

    Returns:
        tex_file_list, if failed returning an empty list.
    """
    make_directory(self.output_dir)
    tex_file_list = []
    for i, m in enumerate(self.matchings):
        if self.templates is None:
            tex_doc = get_tex_doc(m[0], position=self.position)
        else:
            tex_doc = get_tex_doc(m[0],
                                  template_dir=self.templates[i][0],
                                  template=self.templates[i][1])
        # Retry under a timestamped name when gen_tex_file refuses to write
        # (e.g. the overwrite handler declined an existing file).
        counter = 0
        while not (gen_tex_file(tex_doc,
                                filename=os.path.join(self.output_dir,
                                                      m[1] + '.tex'),
                                overwrite_handler=overwrite_handler)):
            if counter > 10:
                print("Failed to create file for matching %s." % (str(m[0])))
                # FIX: the docstring promises an empty list on failure, but
                # the code did a bare `return` (None); return [] as documented.
                return []
            m[1] += time.strftime("_%H-%M-%S")
            counter += 1
        tex_file_list.append(m[1])
        print("Successfully create tex file {}".format(m[1]))
    self.tex_file_list = tex_file_list
    return tex_file_list
def fetch(gist_name, filenames=None):
    """Download files from a gist and write them to disk.

    When no explicit filenames are given, everything is saved into a
    directory named after the gist; otherwise files land in the CWD.
    """
    auth = get_auth()
    files = fetch_from_gist(auth, gist_name, filenames)
    if filenames:
        directory = ""
    else:
        directory = gist_name
        util.make_directory(directory)
    for name in files:
        target = os.path.join(directory, name)
        with open(target, "w") as handle:
            handle.write(files[name])
def main(args):
    """Train a RealNVP flow model, optionally resuming from a checkpoint.

    Args:
        args: parsed CLI namespace (name, ow, gpu_ids, num_scales,
            num_blocks, benchmark, resume, weight_decay, lr, num_epochs,
            num_samples, max_grad_norm, ...).
    """
    base_path = util.make_directory(args.name, args.ow)
    device = 'cuda' if torch.cuda.is_available() and len(args.gpu_ids) > 0 else 'cpu'
    start_epoch = 0

    # Note: No normalization applied, since RealNVP expects inputs in (0, 1).
    trainloader, testloader, in_channels = get_datasets(args)

    # Model
    print('Building model..')
    net = RealNVP(num_scales=args.num_scales,
                  in_channels=in_channels,
                  mid_channels=64,
                  num_blocks=args.num_blocks)
    net = net.to(device)
    if device == 'cuda':
        net = torch.nn.DataParallel(net, args.gpu_ids)
        cudnn.benchmark = args.benchmark
        # FIX: removed a stray no-op `pass` that was left behind here.

    if args.resume:
        # Load checkpoint.
        ckpt_path = base_path / 'ckpts'
        best_path_ckpt = ckpt_path / 'best.pth.tar'
        print(f'Resuming from checkpoint at {best_path_ckpt}')
        checkpoint = torch.load(best_path_ckpt)
        net.load_state_dict(checkpoint['net'])
        global best_loss
        best_loss = checkpoint['test_loss']
        start_epoch = checkpoint['epoch']

    loss_fn = RealNVPLoss()
    param_groups = util.get_param_groups(net, args.weight_decay,
                                         norm_suffix='weight_g')
    optimizer = optim.Adam(param_groups, lr=args.lr)

    for epoch in range(start_epoch, start_epoch + args.num_epochs):
        train(epoch, net, trainloader, device, optimizer, loss_fn,
              args.max_grad_norm)
        test(epoch, net, testloader, device, loss_fn, args.num_samples,
             in_channels, base_path)
def en_begin(band_array: np.array, city_array: np.array):
    """Drive the elastic-net loop, animating the band around the cities.

    With `record` set, runs `iter_lim` iterations and snapshots the figure
    at each index in `record_moment`; otherwise it loops until the plot
    window is closed. `k` decays multiplicatively each step.
    """
    k = k_init
    if record:
        dir_name = util.make_directory(Config)
        for step in range(iter_lim):
            if step in record_moment:
                plt.savefig(dir_name + f"iteration-{step}.png")
            k = np.amax([k_bottom, k * k_decay])
            weights = calc_weight_matrix(band_array, city_array, k)
            band_array = update_band(band_array, city_array, weights, k)
            # Append the first node again so the drawn band closes the loop.
            closed = np.vstack((band_array, band_array[0, :]))
            plt.title(f"iteration={step + 1}")
            elastic_band.set_data(closed[:, 0], closed[:, 1])
            plt.pause(0.001)
    else:
        step = 1
        while plt.get_fignums():
            # NOTE(review): floor is hard-coded 0.01 here while the record
            # branch uses k_bottom — confirm whether these should match.
            k = np.amax([0.01, k * k_decay])
            weights = calc_weight_matrix(band_array, city_array, k)
            band_array = update_band(band_array, city_array, weights, k)
            closed = np.vstack((band_array, band_array[0, :]))
            plt.title(f"iteration={step}")
            elastic_band.set_data(closed[:, 0], closed[:, 1])
            step += 1
            plt.pause(0.001)
def som_begin(band_array: np.array, city_array: np.array):
    """Drive the SOM loop, animating the ring of nodes around the cities.

    With `record` set, runs `iter_lim` iterations and snapshots the figure
    at each index in `record_moment`; otherwise it loops until the plot
    window is closed. `beta` decays by a factor of (1 - alpha) each step.
    """
    beta = b_init
    np.random.shuffle(city_array)
    if record:
        dir_name = util.make_directory(Config)
        for step in range(iter_lim):
            if step in record_moment:
                plt.savefig(dir_name + f"iteration-{step}.png")
            chosen = city_array[step % city_num, :]
            winner = calc_champ_node(band_array, chosen)
            band_array = update_band(band_array, chosen, winner, beta)
            # Append the first node again so the drawn band closes the loop.
            closed = np.vstack((band_array, band_array[0, :]))
            plt.title(f"iteration={step + 1}")
            elastic_band.set_data(closed[:, 0], closed[:, 1])
            # beta = np.amin([b_ceil, beta * b_growth])
            beta = (1 - alpha) * beta
            plt.pause(0.001)
    else:
        step = 1
        while plt.get_fignums():
            chosen = city_array[step % city_num, :]
            winner = calc_champ_node(band_array, chosen)
            band_array = update_band(band_array, chosen, winner, beta)
            closed = np.vstack((band_array, band_array[0, :]))
            plt.title(f"iteration={step}")
            elastic_band.set_data(closed[:, 0], closed[:, 1])
            step += 1
            # beta = np.amin([b_ceil, beta * b_growth])
            beta = (1 - alpha) * beta
            plt.pause(0.001)
def make_tex(self, overwrite_handler):
    """Make TeX files for every cycle.

    Args:
        overwrite_handler: a function which accepts a single str argument,
            that is used to handle overwritten issue.

    Returns:
        tex_file_list, or an empty list on failure.
    """
    make_directory(self.output_dir)
    tex_file_list = []
    for i, c in enumerate(self.cycles):
        if self.templates is None:
            tex_doc = make_tex_doc(**c[0])
        else:
            tex_doc = make_tex_doc(template_dir=self.templates[i][0],
                                   template=self.templates[i][1],
                                   **c[0])
        # Retry under a timestamped name when gen_tex_file refuses to write
        # (e.g. the overwrite handler declined an existing file).
        counter = 0
        while not (gen_tex_file(tex_doc,
                                filename=os.path.join(self.output_dir,
                                                      c[1] + '.tex'),
                                overwrite_handler=overwrite_handler)):
            if counter > 10:
                print("Failed to create file for cycle %s." % (str(c[0])))
                # FIX: return an explicit empty list instead of a bare
                # `return` (None), matching the sibling matchings variant's
                # documented contract and keeping the return type uniform.
                return []
            c[1] += time.strftime("_%H-%M-%S")
            counter += 1
        tex_file_list.append(c[1])
        print("Successfully create tex file {}".format(c[1]))
    self.tex_file_list = tex_file_list
    return tex_file_list
def hp_begin(
    inner_vals_array: np.matrix,
    nodes_array: np.matrix,
    weights_matrix: np.matrix,
    biases_array: np.matrix,
) -> None:
    """Drive the Hopfield-network loop, animating node activations.

    With `record` set, runs `iter_lim` iterations and snapshots the figure
    at each index in `record_moment`; otherwise it loops until the plot
    window is closed.

    Args:
        inner_vals_array: internal (pre-activation) node values.
        nodes_array: node activations, sigmoid of the internal values.
        weights_matrix: connection weights between nodes.
        biases_array: per-node bias terms.
    """
    if record:
        dir_name: str = util.make_directory(Config)
        for i in range(iter_lim):
            if i in record_moment:
                filename: str = "iteration-" + str(i) + ".png"
                file_path: str = dir_name + filename
                plt.savefig(file_path)
            inner_vals_array = update_inner_vals(nodes_array, inner_vals_array,
                                                weights_matrix, biases_array)
            nodes_array = sigmoid(inner_vals_array)
            plt.title("iteration=" + str(i + 1))
            # Activations are shown as a city_num x city_num assignment grid.
            mat_visual.set_data(np.reshape(nodes_array, (city_num, city_num)))
            plt.pause(0.0001)
    else:
        # FIX: removed a fully commented-out duplicate of the loop below.
        i = 1
        while plt.get_fignums():
            inner_vals_array = update_inner_vals(nodes_array, inner_vals_array,
                                                weights_matrix, biases_array)
            nodes_array = sigmoid(inner_vals_array)
            plt.title("iteration=" + str(i))
            mat_visual.set_data(np.reshape(nodes_array, (city_num, city_num)))
            i += 1
            plt.pause(0.0001)
from pathlib import Path

# CLI: <data_dir> <summary_csv> <label> <subfolder> <outdir>
data_dir = sys.argv[1]      # e.g. .../datasets/HepG2/fold
summary_file = sys.argv[2]  # e.g. .../datasets/HepG2/summary.csv
label = sys.argv[3]         # e.g. 'HepG2'
subfolder = sys.argv[4]     # one of: 'raw', 'sign', 'fold'
outdir = sys.argv[5]

assert subfolder in ['raw', 'sign', 'fold'], 'Unknown folder name'
# 'raw' data lives as BAM files; the other folders match their filetype.
filetype = 'bam' if subfolder == 'raw' else subfolder

util.make_directory(outdir)
bed_dir = os.path.join(Path(data_dir).parent, 'bed')
summary_df = pd.read_csv(summary_file, index_col=0)
# Keep only the basename of each bed path and re-root it under bed_dir.
bedfiles = [p.split('/')[-1] for p in summary_df['bed']]
bedpaths = [os.path.join(bed_dir, f) for f in bedfiles]
bed_labels = summary_df['label']

with open(os.path.join(outdir, 'basset_sample_beds_{}.txt'.format(label)),
          'w') as filehandle:
    # FIX: iterate values in lockstep instead of `bed_labels[i]`, which
    # indexes the Series by *label* (index_col=0) rather than position and
    # breaks whenever the summary index is not 0..n-1.
    for bed_label, bed_path in zip(bed_labels, bedpaths):
        filehandle.write('{}\t{}\n'.format(bed_label, bed_path))
def main(cfg):
    """Train a model with periodic validation and checkpointing.

    Writes the best model (by validation loss), a rolling "last" snapshot,
    loss histories, and periodic numbered checkpoints under
    cfg.MODEL.SAVE_PATH. Can resume from the "last" snapshot when
    cfg.SOLVER.FROM_CHECKPOINT is set.

    Args:
        cfg: config node providing SOLVER, MODEL and DATASET settings.
    """
    torch.manual_seed(cfg.SOLVER.SEED)
    save_path = cfg.MODEL.SAVE_PATH
    batchsize = cfg.SOLVER.BATCHSIZE
    data_train, data_val = cfg.DATASET.TRAIN_VAL(cfg.DATASET)
    train_loader = torch.utils.data.DataLoader(data_train,
                                               batch_size=batchsize,
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(data_val, batch_size=batchsize)
    # NOTE(review): N_GPU doubles as a device index (negative => CPU).
    device = torch.device(cfg.SOLVER.N_GPU if cfg.SOLVER.N_GPU >= 0 else "cpu")
    model = cfg.MODEL.TYPE(**cfg.MODEL.KEYWORDS)
    print('device', device)
    model.to(device)
    model.set_device(device)

    train_loss = []
    test_loss = []
    best_loss = 5000.
    loss = 0
    util.make_directory(save_path)
    # Comparison kept as `!= True` deliberately: the flag may be a
    # non-boolean config value, and truthiness would change behavior.
    if cfg.SOLVER.FROM_CHECKPOINT != True:
        n_epoch_s = 1
        optimizer, scheduler = solver.get_optimizer(cfg, model, -1)
    else:
        print('start from checkpoint')
        model.load_state_dict(
            torch.load(save_path + "model_last.pt", map_location=device))
        # FIX: np.load's second positional argument is mmap_mode; the lists
        # previously passed there were silently ignored (only harmless
        # because an empty list is falsy). Load, then convert to list.
        train_loss = np.load(save_path + 'loss_train_last.npy').tolist()
        test_loss = np.load(save_path + 'loss_test_last.npy').tolist()
        n_epoch_s = len(train_loss) + 1
        best_loss = np.min(test_loss)
        optimizer, scheduler = solver.get_optimizer(cfg, model, n_epoch_s)

    n_epoch_e = n_epoch_s + cfg.SOLVER.MAX_ITER
    print('start', n_epoch_s, n_epoch_e)
    for epoch in range(n_epoch_s, n_epoch_e):
        print('epoch', epoch, model.name)
        print("learning rate", scheduler.get_lr())
        print('phase : train')
        loss = train(cfg, model, device, train_loader, optimizer, epoch,
                     n_epoch_e)
        train_loss.append(loss)
        scheduler.step()
        print('phase : validation')
        loss = val(cfg, model, device, val_loader, n_epoch_e)
        test_loss.append(loss)
        print('loss', loss)
        if loss < best_loss:
            best_loss = loss
            mname = save_path + "model_best.pt"
            torch.save(model.state_dict(), mname)
            print('saved', mname)
        # Always refresh the rolling "last" snapshot so training can resume.
        mname = save_path + "model_last.pt"
        torch.save(model.state_dict(), mname)
        np.save(save_path + 'loss_train_last.npy', train_loss)
        np.save(save_path + 'loss_test_last.npy', test_loss)
        torch.save(optimizer.state_dict(), save_path + "optimizer_last.pth")
        print('saved', mname)
        if (epoch % cfg.SOLVER.CHECKPOINT_PERIOD == 0):
            mname = save_path + "model_" + str(epoch) + ".pt"
            torch.save(model.state_dict(), mname)
            torch.save(optimizer.state_dict(),
                       save_path + "optimizer_" + str(epoch) + ".pth")
            print('saved', mname)