parser.add_argument("--n_splits", type=int, default=5, help="default: 5") parser.add_argument("--rand_seed", type=int, default=0, help="default: 0") parser.add_argument("--fit", action="store_true", help="Default: False") return parser def simulate_helper(hawkes, max_jumps): simu_hawkes.reset() simu_hawkes.max_jumps = max_jumps simu_hawkes.simulate() return simu_hawkes.timestamps args = get_parser().parse_args() set_rand_seed(args.rand_seed) print(args) # simulate drug event_seqs baseline = args.baseline * np.random.random(args.n_types) adjacency = np.diag(np.random.random(args.n_types)) if args.n_correlations > 0: comb = list(permutations(range(args.n_types), 2)) idx = np.random.choice(range(len(comb)), size=args.n_correlations, replace=False) comb = [comb[i] for i in idx] for i, j in comb: adjacency[i, j] = np.random.random()
if __name__ == "__main__": args = get_parser().parse_args() assert args.model is not None, "`model` needs to be specified." output_path = osp.join( args.output_dir, args.dataset, f"split_id={args.split_id}", args.model, get_hparam_str(args), ) makedirs([output_path]) # initialization set_rand_seed(args.rand_seed, args.cuda) init_logging(output_path) logger = get_logger(__file__) logger.info(args) export_json(vars(args), osp.join(output_path, "config.json")) # load data input_path = osp.join(args.input_dir, args.dataset) data = np.load(osp.join(input_path, "data.npz"), allow_pickle=True) n_types = int(data["n_types"]) event_seqs = data["event_seqs"] train_event_seqs = event_seqs[data["train_test_splits"][args.split_id][0]] test_event_seqs = event_seqs[data["train_test_splits"][args.split_id][1]] # sorted test_event_seqs by their length test_event_seqs = sorted(test_event_seqs, key=lambda seq: len(seq))