types = np.array(types)
np.random.shuffle(types)

atom_setting = ''
for i, j in zip(atoms, types):
    atom_setting += i + j + '\n'

m = values['slope'].values[0]
b = values['intercept'].values[0]
length = m * start_temp + b

run = join(*[save, group, start_temp_str])

# Write run
functions.create_dir(run)

# Write input files for cubic
contents = incar_contents.replace('$side$', str(length))
contents = contents.replace('$lattice$', str(length / l))
contents = contents.replace('$time_step$', str(time_step))
contents = contents.replace('$seed$', str(seed))
contents = contents.replace('$temp$', str(start_temp))
contents = contents.replace('$steps$', str(hold_steps))
contents = contents.replace('$l$', str(l))
contents = contents.replace('$atom_setting$', atom_setting)

remove = ''
l_cubed = l**3
if atom_count < l_cubed:
density = 100000             # The number of points to use in fit
data_save_dir = '../data'    # The data save folder
data_save_name = 'data.csv'  # The data save name
save_plots = '../figures'    # The figures save folder
order = 3                    # The order of the spline fit

# Load data
df = pd.read_csv(df)
df = df.dropna()  # Drop bad jobs
df = df.sort_values(by=['volume', 'start_temperature', 'end_temperature'])

# Group data
groups = df.groupby(['composition', 'start_temperature', 'end_temperature'])

# Create figures directory
functions.create_dir(save_plots)

compositions = []
temperatures = []
lengths = []
length_errors = []
pressure_errors = []

for group, values in groups:

    name = str(group)
    print('Grouped by: ' + name)

    y = values['pressure'].values
    x = values['volume'].values
    x **= 1/3
hold_steps = 3000  # The number of hold steps

dT = float(sys.argv[1])        # Change in temperature
min_temp = float(sys.argv[2])  # The minimum allowable temperature hold

fits = pd.read_csv(fits)  # Load TV curves

cwd = os.getcwd()
temp = float(os.path.basename(cwd)) - dT

if temp >= min_temp:

    # Random integer
    seed = np.random.randint(100000, 999999)

    dir_name = os.path.join('../', str(temp))
    functions.create_dir(dir_name)

    # Read composition from name
    composition = incar_name.split('/')[-1].split('_')[0]

    fit = fits.loc[fits['composition'] == composition].values[0]
    m = fit[1]
    b = fit[2]

    # Change volume
    length = m * temp + b

    # Open and read template
    incar = open(incar_name)
    incar_contents = incar.read()
    incar.close()
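# --- Hedged sketch (not part of the original scripts) -------------------------
# The TV-curve file read above is assumed to hold, per composition, the slope
# and intercept of a linear length-versus-temperature fit (the same quantities
# the other writer script reads as values['slope'] and values['intercept']).
# A minimal way such a file could be built from measured (temperature, length)
# pairs is sketched here; the file name and the data points are hypothetical
# and only illustrate the expected column layout.
import numpy as np
import pandas as pd

temps = np.array([300.0, 600.0, 900.0, 1200.0])  # hypothetical hold temperatures
sides = np.array([10.1, 10.3, 10.6, 10.9])       # hypothetical box side lengths

m, b = np.polyfit(temps, sides, 1)               # slope and intercept of length(T)
pd.DataFrame(
    [{'composition': 'example', 'slope': m, 'intercept': b}]
).to_csv('tv_fits.csv', index=False)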
import functions
import os
import platform
import victory
import use_functions

while True:
    functions.print_menu()  # print the menu
    choice = input('Select a menu item number: ')
    if choice == '1':
        functions.create_dir()  # create a folder
    elif choice == '2':
        functions.delete_dir()  # delete a folder
    elif choice == '3':
        functions.copy_dir()  # copy a folder
    elif choice == '4':
        print('Contents of the working directory:')
        print(os.listdir('.'))  # folders and files in the working directory
    elif choice == '5':
        functions.save_dir()  # save the files and folders of the working directory
    elif choice == '6':
        functions.list_file()  # files in the working directory
    elif choice == '7':
        functions.list_folders()  # folders in the working directory
          'max subsidy': SUB_MAX,
          'reward_function': reward_fun,
          'length_ep': LENGTH_EPISODE,
          'n_ep': NUM_EPISODES,
          'lr': LEARNING_RATE,
          'batch_size': BATCH_SIZE,
          'gamma': GAMMA,
          'save_interval': SAVE_INTERVAL}

# ######################################################################################################################
# ######################################################################################################################
# ######################################################################################################################

# create saving directory and save config
SAVE_DIR = create_dir(DEBUG, LEARNING_RATE, DELTA_RESEARCH, BETA, DELTA_RESOURCE)
writer = SummaryWriter(SAVE_DIR)
torch.save(CONFIG, (SAVE_DIR / 'config'), _use_new_zipfile_serialization=False)
with open((SAVE_DIR / 'config.txt'), 'w') as file:
    for key in CONFIG.keys():
        file.write(str(key) + ': ' + str(CONFIG[key]) + '\n')

# create environment
env = FSCNetworkEnvAlternative(init_sup=INIT_SUPPORT, init_res=INIT_RESOURCE, ep_len=LENGTH_EPISODE,
                               lambda_=LAMBDA, sub_max=SUB_MAX, delta_res=DELTA_RESOURCE, beta=BETA,
                               delta_search=DELTA_RESEARCH, n_state_space=N_STATE_SPACE,
                               base_impacts=BASE_IMPACTS, agg_weeks=4, save_calc=False)

print('-------------------------------- ' + str(device) + ' --------------------------------')
if device == 'cuda':
    print(torch.cuda.get_device_name(0))
# dir:
opt_str = 'adam_lr%s_sgd_lr%s_epoch%d_de%d_batch%d' % (
    args.lr_w, args.lr_gamma, args.max_epoch, args.decay_epoch, args.batch_size)
loss_str = 'train_D_orig_beta%s_rho%s_lc%s_%s' % (args.beta, args.rho, args.lc, args.lc_layer)
args.output_dir = os.path.join('output', '%s_%s' % (loss_str, opt_str))
args.pth_dir = os.path.join(args.output_dir, 'pth')
args.img_dir = os.path.join(args.output_dir, 'img')
args.gamma_dir = os.path.join(args.output_dir, 'gamma')
args.fid_buffer_dir = os.path.join(args.output_dir, 'fid_buffer')
args.do_IS = False
args.do_FID = False
create_dir(args.pth_dir), create_dir(args.img_dir), create_dir(
    args.gamma_dir), create_dir(args.fid_buffer_dir)

# gpu:
torch.manual_seed(args.random_seed)
torch.cuda.manual_seed(args.random_seed)
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
torch.backends.cudnn.deterministic = False
torch.backends.cudnn.benchmark = True

# set tf env
if args.do_IS:
    _init_inception()
if args.do_FID:
    inception_path = check_or_download_inception(None)
    create_inception_graph(inception_path)
for group, values in groups:

    print(group)

    x = 1/values['end_temperature']
    y = values['self_diffusion']

    f = interp1d(x, y, kind=order)  # Spline interpolation of the chosen order

    xfit = np.linspace(min(x), max(x), density)
    yfit = f(xfit)

    if save_plots:

        # Create saving directory for figures
        functions.create_dir(save_plots)

        save_name = join(save_plots, str(group))

        # Plot glass transition curve
        fig, ax = pl.subplots()

        ax.plot(
            x,
            y,
            linestyle='none',
            marker='.',
            label='Data',
            color='b'
        )

        ax.plot(
parser.add_argument('--eval_batch_size', type=int, default=100)
parser.add_argument('--num_eval_imgs', type=int, default=50000)
parser.add_argument('--random_seed', type=int, default=12345)
parser.add_argument('--load-epoch', type=int, default=100)
parser.add_argument('--dir', type=str)
args = parser.parse_args()

os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

###
# for pruning models:
exp_str = args.dir
test_result_dir = os.path.join('output', exp_str, 'Samples')
create_dir(test_result_dir)
args.fid_buffer_dir = os.path.join('output', exp_str, 'fid_buffer')
args.load_path = os.path.join('output', exp_str, 'pth', 'epoch%d.pth' % args.load_epoch)

# # for original model:
# exp_str = 'sngan_cifar10_2019_10_24_12_19_30'
# test_result_dir = os.path.join('logs', exp_str, 'Samples')
# create_dir(test_result_dir)
# args.fid_buffer_dir = os.path.join('logs', exp_str, 'fid_buffer')
# args.load_path = os.path.join('logs', exp_str, 'Model', 'checkpoint_best.pth')
###

args.do_IS = True
args.do_FID = True
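# --- Note (not part of the original scripts) -----------------------------------
# The flag above is declared as '--load-epoch' but read back as args.load_epoch:
# argparse converts dashes in long option names to underscores on the Namespace,
# so this is not a mismatch. A minimal standalone illustration:
import argparse

p = argparse.ArgumentParser()
p.add_argument('--load-epoch', type=int, default=100)
ns = p.parse_args([])    # parse with no command-line arguments
print(ns.load_epoch)     # -> 100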