def read_task_options(dataset_str):
    """Build the per-relation option dicts for a dataset.

    Args:
        dataset_str: dataset name; 'nell-995' and 'fb15k' are recognised.

    Returns:
        (options, relations): a dict mapping each relation name to the
        result of ``read_options(dataset_str, rel, idx)``, and the list of
        relation names.  Both are empty for 'fb15k' or an unrecognised name
        (the original fell off the end and implicitly returned None in
        those cases, which crashed callers that unpack the pair).
    """
    if dataset_str == 'nell-995':
        # NOTE(review): the dataset name itself is listed as the first
        # "relation" — confirm this is intentional upstream.
        relations = ['nell-995',
                     'agentbelongstoorganization',
                     'athletehomestadium',
                     'athleteplaysforteam',
                     'athleteplaysinleague',
                     'athleteplayssport',
                     'organizationheadquarteredincity',
                     'organizationhiredperson',
                     'personborninlocation',
                     'personleadsorganization',
                     'teamplaysinleague',
                     'teamplayssport',
                     'worksfor']
    elif dataset_str == 'fb15k':
        # fb15k is accepted but has no task relations configured yet.
        relations = []
    else:
        relations = []
        # Fixed: was a Python 2 print statement (syntax error under py3).
        print('the input dataset_name is illegal')
    # Always return a consistent pair; the loop is a no-op when
    # relations is empty, so read_options is never called in that case.
    options = dict()
    for idx, rel in enumerate(relations):
        options[rel] = read_options(dataset_str, rel, idx)
    return options, relations
import color
import options
import sys
import fastopc
# NOTE(review): np is used below but was never imported in this chunk —
# confirm it is not provided by another module. Fixed by importing numpy.
import numpy as np

###

if __name__ == '__main__':
    np.set_printoptions(linewidth=200)

    # ---------------------------- Processing ----------------------------
    # Parameters required for the program to run.
    (options, args) = options.read_options()

    # Room azimuth and GPS position of the house => determined on site.
    # Fixed: the Python 2 print statements below were converted to the
    # print() function; output is unchanged.
    print('Options : ')
    azimut_maison = int(options.azimut_maison)
    print("\tazimut_maison : ", azimut_maison)
    lat_maison = float(options.lat_maison)
    print("\tlat_maison : ", lat_maison)
    long_maison = float(options.long_maison)
    print("\tlong_maison : ", long_maison)
    diffusion_soleil = int(options.diffusion_soleil)
    # Tail of a training/evaluation routine whose definition starts above
    # this chunk: score the held-out term pairs and track the best epoch.
    prediction_score = pair_prediction(model.pair_net, test_pairs, cuda, options, batch_size=10000)
    test_triplets = []
    for term_pair, score in zip(test_pairs, prediction_score):
        # Score is negated — presumably so that lower values rank better
        # in evaluation_main; TODO confirm against its implementation.
        test_triplets.append((term_pair[0], term_pair[1], -1.0 * score))
    metrics = evaluation_main(test_triplets)
    # ">=" means a tie with the current best still updates the checkpoint,
    # so the most recent tying epoch wins.
    if metrics["all"] >= best_overall_metric:
        best_overall_metric =metrics["all"]
        best_epoch = epoch
        best_metrics = metrics
        save_model(model, options["save_dir"], 'best', epoch)  # save the initial first model
    return best_overall_metric, best_epoch, best_metrics


if __name__ == '__main__':
    # read command line options
    args = read_options()
    # Add TensorBoard Writer
    writer = SummaryWriter(log_dir=None, comment=args.comment)
    # Initialize random seed across all RNG sources for reproducibility.
    random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    np.random.seed(args.random_seed)
    if args.device_id != -1:
        # GPU run: also seed CUDA and force deterministic cuDNN kernels,
        # and default new tensors to the GPU.
        torch.cuda.manual_seed_all(args.random_seed)
        torch.backends.cudnn.deterministic = True
        torch.set_default_tensor_type(torch.cuda.FloatTensor)
    else:
        torch.set_default_tensor_type(torch.FloatTensor)
    torch.set_printoptions(precision=9)
    # Tail of a calculation-runner function whose definition (and the
    # matching `try:`) starts above this chunk: collect the result and
    # restore the working directory on the way out.
    except subprocess.CalledProcessError:
        # The external calculation exited non-zero; mark the run as failed
        # rather than aborting the sweep.
        ranOK = False
        #continue
    if ranOK:
        result = return_value(calc.output_path, eng_string)
    else:
        # Failed run — record NaN so downstream aggregation can skip it.
        result = np.nan
    os.chdir(root_dir)
    return result


if __name__ == '__main__':
    args = parse_commandline_arguments()
    debug = args.debug
    # Load the YAML run configuration.
    run_options = read_options('options.yml')

    # calculation parameters
    jobname = run_options['calculation']['jobname']
    coords = run_options['calculation']['coords']
    periodicity = run_options['calculation']['periodicity']
    bigbox = run_options['calculation']['bigbox']
    charge = run_options['calculation']['charge']
    lshift = run_options['calculation']['lshift']
    rshift = run_options['calculation']['rshift']
    yshift = run_options['calculation']['yshift']
    # Optional rigid rotation of the geometry.
    if 'r_rotate' in run_options['calculation'].keys():
        degrees = run_options['calculation']['r_rotate']['degrees']
        axis = run_options['calculation']['r_rotate']['axis']
    else:
        # NOTE(review): only `degrees` gets a default here — `axis` stays
        # undefined when 'r_rotate' is absent; verify later code only uses
        # `axis` when degrees != 0.0.
        degrees = 0.0
    zlen = run_options['calculation']['zlen']
        # Tail of an evaluation method whose definition starts above this
        # chunk: log the final ranking metrics.
        logger.info("Hits@3: {0:7.4f}".format(all_final_reward_3))
        logger.info("Hits@5: {0:7.4f}".format(all_final_reward_5))
        logger.info("Hits@10: {0:7.4f}".format(all_final_reward_10))
        logger.info("Hits@20: {0:7.4f}".format(all_final_reward_20))
        logger.info("auc: {0:7.4f}".format(auc))

    def top_k(self, scores, k):
        # For each batch row, pick the indices of the k highest-scoring
        # candidates out of k * max_num_actions, flattened to 1-D.
        scores = scores.reshape(-1, k * self.max_num_actions)  # [B, (k*max_num_actions)]
        idx = np.argsort(scores, axis=1)
        idx = idx[:, -k:]  # take the last k highest indices # [B , k]
        return idx.reshape((-1))


if __name__ == '__main__':
    # read command line options
    options = read_options()

    # Set logging: mirror INFO-level output to the console and to the
    # configured log file with a shared timestamped format.
    logger.setLevel(logging.INFO)
    fmt = logging.Formatter('%(asctime)s: [ %(message)s ]', '%m/%d/%Y %I:%M:%S %p')
    console = logging.StreamHandler()
    console.setFormatter(fmt)
    logger.addHandler(console)
    logfile = logging.FileHandler(options['log_file_name'], 'w')
    logfile.setFormatter(fmt)
    logger.addHandler(logfile)

    # read the vocab files, it will be used by many classes hence global scope
    logger.info('reading vocab files...')
    options['relation_vocab'] = json.load(open(options['vocab_dir'] + '/relation_vocab.json'))
    options['entity_vocab'] = json.load(open(options['vocab_dir'] + '/entity_vocab.json'))
    logger.info('Reading mid to name map')
def load_container_options(imagename):
    """Return the container options for *imagename*.

    Falls back to an empty list when options.read_options reports no
    options (returns None) for the image.
    """
    coptions = options.read_options(imagename)
    # Fixed: compared to None with `==`; identity check `is None` is the
    # correct and idiomatic test here.
    return [] if coptions is None else coptions
################

if __name__ == '__main__':
    # Load input file using first argument as filename.
    try:
        input_file = sys.argv[1]
    except IndexError:
        print("Input file must be provided")
        sys.exit(1)

    # Check input file exists.
    if not os.path.isfile(input_file):
        print("Input file does not exist")
        sys.exit(1)

    opt = options.read_options(input_file)

    # Second argument 'redirect' disables logging to file; the optional
    # third argument sets the log level.
    # Fixed: the original indexed sys.argv[3] inside a blanket IndexError
    # handler, so 'redirect' without a level silently skipped the level
    # assignment via an exception; explicit length checks make the
    # best-effort behaviour deliberate and keep unrelated IndexErrors
    # from being swallowed.
    if len(sys.argv) > 2 and sys.argv[2] == 'redirect':
        opt['log']['file'] = 'none'
        if len(sys.argv) > 3:
            opt['log']['level'] = sys.argv[3]

    # Check output directory exists.
    init_outdir(opt)

    # Copy input file to output directory for reference.
    shutil.copy(input_file, opt['run']['outdir'])