# NOTE: these snippets are fragments of larger scripts; they assume module-level
# imports for np (NumPy), jb (presumably joblib), os, the project's utils_funcs
# module, and the NTF_HP model class.
def test_slc_crime():
    ind = np.load('SLC_60k_inds.npy')  # per-event entity indices, shape [N, nmod]
    y = np.load('SLC_60k_y_in_day.npy').reshape(-1, 1).astype(np.float32)  # event timestamps in days, shape [N, 1]

    TOTAL_LEN = len(ind)
    NUM_TRAIN = 40000

    train_ind = ind[:NUM_TRAIN]
    train_y = y[:NUM_TRAIN]

    test_ind = ind[NUM_TRAIN:]
    test_y = y[NUM_TRAIN:]
    print("Total len = %d, num train = %d, num test = %d" %
          (TOTAL_LEN, NUM_TRAIN, TOTAL_LEN - NUM_TRAIN))
    print(test_y[-1][0] - test_y[0][0])  # time span covered by the test events (in days)

    nmod = ind.shape[1]  # number of tensor modes
    nvec = np.max(ind, axis=0) + 1  # number of entities in each mode (indices are 0-based)

    R = 8  # latent rank of the factorization

    U = [np.random.rand(nvec[k], R) for k in range(nmod)]  # random initial factor matrices, one per mode

    init_config = {}
    init_config['U'] = U
    init_config['batch_size_event'] = 128
    init_config['batch_size_entry'] = 128

    init_config['learning_rate'] = 1e-3

    len_B = 128  # number of inducing (pseudo) inputs for the sparse GP

    print('launching Kmeans')
    B = utils_funcs.init_base_gp_pseudo_inputs(U, train_ind, len_B)
    print('Kmeans end')

    # VI Sparse GP
    init_config['inducing_B'] = B  # init with k-means, [len_B, rank]

    model = NTF_HP(train_ind, train_y, init_config)
    #model.create_all_relevant_test_graph(test_ind, test_y)
    model.create_standAlone_test_graph(test_ind, test_y)
    #model.create_standAlone_test_graph(train_ind[-19300:], train_y[-19300:])

    steps_per_epoch = int(len(train_ind) / init_config['batch_size_event'])  # roughly one pass over the training events
    num_epoch = 50

    test_llk = []
    for epoch in range(1, num_epoch + 1):
        print('epoch %d' % epoch)
        model.train(steps_per_epoch, int(steps_per_epoch))
        test_log_p, int_term, eventSum_term = model.test(verbose=False)
        print("test_log_llk = %g, int_term = %g,  eventsum_term = %g\n" %
              (test_log_p, int_term, eventSum_term))
        test_llk.append(test_log_p)
    jb.dump(test_llk, "slc_bg_only.pkl")
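
# utils_funcs.init_base_gp_pseudo_inputs is defined elsewhere in the project.
# Below is a minimal sketch of what it presumably does, judging only from how it
# is called above; the element-wise-product event embedding and the use of
# scikit-learn's KMeans are assumptions, not the project's actual implementation.

from sklearn.cluster import KMeans

def init_base_gp_pseudo_inputs_sketch(U, train_ind, len_B):
    # embed each training event as the element-wise product of its selected
    # factor rows, giving an [N, R] matrix consistent with the "[len_B, rank]"
    # shape noted in the comment above
    X = U[0][train_ind[:, 0]]
    for k in range(1, len(U)):
        X = X * U[k][train_ind[:, k]]
    # cluster the event embeddings and use the centers as the inducing inputs
    km = KMeans(n_clusters=len_B, n_init=10).fit(X)
    return km.cluster_centers_.astype(np.float32)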
Code example #2
def test_data_set():
    (ind, y), (train_ind, train_y), (test_ind, test_y) = utils_funcs.load_dataSet(
        arg_data_name, '../Data')

    nmod = ind.shape[1]
    nvec = np.max(ind, axis=0) + 1

    R = arg_rank

    U = [np.random.rand(nvec[k], R) for k in range(nmod)]

    init_config = {}
    init_config['U'] = U
    init_config['batch_size_event'] = 64
    init_config['batch_size_entry'] = 64

    init_config['learning_rate'] = arg_lr

    len_B = 128  # number of inducing (pseudo) inputs for the sparse GP

    print('launching Kmeans')
    B = utils_funcs.init_base_gp_pseudo_inputs(U, train_ind, len_B)
    print('Kmeans end')

    # VI Sparse GP
    init_config['inducing_B'] = B  # init with k-means, [len_B, rank]

    model = NTF_HP(train_ind, train_y, init_config)
    # model.create_all_relevant_test_graph(test_ind, test_y)
    model.create_standAlone_test_graph(test_ind, test_y)
    # model.create_standAlone_test_graph(train_ind[-19300:], train_y[-19300:])

    steps_per_epoch = int(len(train_ind) / init_config['batch_size_event'])
    num_epoch = 50

    test_llk = []
    for epoch in range(1, num_epoch + 1):
        print('epoch %d' % epoch)
        model.train(steps_per_epoch, int(steps_per_epoch))
        test_log_p, int_term, eventSum_term = model.test(verbose=False)
        print("test_log_llk = %g, int_term = %g,  eventsum_term = %g\n" %
              (test_log_p, int_term, eventSum_term))
        test_llk.append(test_log_p)

    #jb.dump(test_llk, "../Result_Logs/%s_GP_Poisson_rank_%d.jb" % ( arg_data_name, arg_rank))
    #final_U = model.check_vars( model.tf_U)
    #jb.dump( final_U, "../Result_Logs/%s_GP_Poisson_final_U_rank_%d.jb" %( arg_data_name, arg_rank))
    utils_funcs.log_results('../Result_Logs/GP_Rayleigh.txt', arg_data_name,
                            arg_rank, arg_lr, test_llk)

    model.sess.close()
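
# arg_data_name, arg_rank, and arg_lr are globals defined elsewhere in the
# original script. A minimal sketch of how such globals could be populated (the
# flag names and defaults below are illustrative assumptions, not the project's
# actual command-line interface):

import argparse

_parser = argparse.ArgumentParser()
_parser.add_argument('--data_name', type=str, default='slc')
_parser.add_argument('--rank', type=int, default=8)
_parser.add_argument('--lr', type=float, default=1e-3)
_cli_args = _parser.parse_args()
arg_data_name, arg_rank, arg_lr = _cli_args.data_name, _cli_args.rank, _cli_args.lr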
Code example #3
def test_data_set():
    (ind, y), (train_ind, train_y), (test_ind, test_y) = utils_funcs.load_dataSet(arg_data_name, '../Data')

    nmod = ind.shape[1]
    nvec = np.max(ind, axis=0) + 1

    T = y[-1][0] - y[0][0]  # total time window spanned by the events
    print('T = %g' % T)

    R = arg_rank
    U = [np.random.rand(nvec[k], R) for k in range(nmod)]

    init_config = {}
    init_config['U'] = U
    init_config['batch_size_event'] = 128
    init_config['batch_size_entry'] = 128

    init_config['learning_rate'] = arg_lr

    len_B = 128  # number of inducing (pseudo) inputs for the sparse GP

    print('launching Kmeans')
    B = utils_funcs.init_base_gp_pseudo_inputs(U, train_ind, len_B)
    print('Kmeans end')

    # VI Sparse GP
    init_config['inducing_B'] = B  # init with k-means, [len_B, rank]

    model = NTF_HP(train_ind, train_y, init_config)
    model.create_standAlone_test_graph(test_ind, test_y)

    steps_per_epoch = int(len(train_ind) / init_config['batch_size_event'])
    num_epoch = 50

    test_llk = []
    for epoch in range(1, num_epoch + 1):
        print('epoch %d' % epoch)
        model.train(steps_per_epoch, int(steps_per_epoch))
        test_log_p, int_term, eventSum_term = model.test(verbose=False)
        print("test_log_llk = %g, int_term = %g,  eventsum_term = %g\n" % (test_log_p, int_term, eventSum_term))
        test_llk.append(test_log_p)

    utils_funcs.log_results('GP_PTF.txt', arg_data_name, arg_rank, arg_lr, test_llk)
    model.sess.close()
    nvec = np.max(ind, axis=0) + 1
    R_list = [1, 2, 5, 8, 10]
    res = []
    for R in R_list:
        U = [np.random.rand(nvec[k], R) for k in range(nmod)]
        init_config = {}
        init_config['U'] = U
        init_config['batch_size_event'] = 128
        init_config['batch_size_entry'] = 128

        init_config['learning_rate'] = 1e-3
        len_B = 128  # number of inducing (pseudo) inputs for the sparse GP

        print('launching Kmeans')
        B = utils_funcs.init_base_gp_pseudo_inputs(U, train_ind, len_B)

        print('Kmeans end')

        # VI Sparse GP
        init_config['inducing_B'] = B  # init with k-means, [len_B, rank]

        model = NTF_HP(train_ind, train_y, init_config)
        #model.create_all_relevant_test_graph(test_ind, test_y)
        model.create_standAlone_test_graph(test_ind, test_y)
        #model.create_standAlone_test_graph(train_ind[-19300:], train_y[-19300:])

        steps_per_epoch = int(len(train_ind) / init_config['batch_size_event'])
        num_epoch = 50

        test_llk = []
Code example #5
File: BNF.py  Project: USTCEarthDefense/BNF_code
def test_data_set():
    (ind, y), (train_ind, train_y), (test_ind, test_y) = utils_funcs.load_dataSet(
        arg_data_name, '../Data')

    nmod = ind.shape[1]
    nvec = np.max(ind, axis=0) + 1

    R = arg_rank
    U = [np.random.rand(nvec[k], R) * 1.0 for k in range(nmod)]

    init_config = {}
    init_config['U'] = U
    init_config['batch_size_event'] = 64
    init_config['batch_size_entry'] = 64

    init_config['batch_size_inner_event'] = 4096
    init_config['batch_size_entryEvent'] = 4096

    init_config['learning_rate'] = arg_lr

    init_config['log_amp_alpha'] = 0.0
    init_config['log_amp_delta'] = 0.0
    init_config['log_amp_trig'] = -3
    len_B = 128  # number of pseudo (inducing) inputs for the base-rate GP

    model_config = {
        'log_amp_alpha': init_config['log_amp_alpha'],
        'log_amp_delta': init_config['log_amp_delta'],
        'log_amp_trig': init_config['log_amp_trig'],
        'rank': arg_rank,
        'MATRIX_JITTER': MATRIX_JITTER,
        'DELTA_JITTER': DELTA_JITTER,
        'lr': arg_lr,
        'batch_size_event': init_config['batch_size_event'],
        'batch_size_entry': init_config['batch_size_entry'],
        'batch_size_inner_event': init_config['batch_size_inner_event'],
        'batch_size_entryEvent': init_config['batch_size_entryEvent'],
        'num_psd_points': len_B
    }
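    # MATRIX_JITTER and DELTA_JITTER are assumed to be module-level constants in
    # the original file (small jitter terms, presumably added for numerical
    # stability of the kernel / covariance computations).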

    print('launching Kmeans')
    B = utils_funcs.init_base_gp_pseudo_inputs(U, train_ind, len_B)
    print(B.shape)
    print('Kmeans end')

    # VI Sparse GP
    init_config['inducing_B'] = B  # init with k-means, [len_B, rank]

    model = NTF_HP(train_ind, train_y, init_config)
    model.create_standAlone_test_graph(test_ind, test_y)

    steps_per_epoch = int(len(train_ind) / init_config['batch_size_event'])
    num_epoch = 50

    log_file = utils_funcs.init_log_file('./BNF.txt', arg_data_name,
                                         model_config)

    for epoch in range(1, num_epoch + 1):
        print('epoch %d\n' % epoch)
        model.train(steps_per_epoch, int(steps_per_epoch / 5))
        test_log_p, int_term, int_term1, int_term3, eventSum_term = model.test(
            128, 16, verbose=False)

        log_file.write('%g\n' % test_log_p)
        log_file.flush()
        os.fsync(log_file.fileno())

    log_file.close()
    model.sess.close()
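
# Interpretation note (an inference from the printed quantities, not stated in
# the code): for a temporal point process the log-likelihood decomposes into the
# sum of log-intensities at the observed events minus the integral of the
# intensity over the observation window. The reported eventSum_term and int_term
# presumably correspond to these two pieces, so in the earlier examples
# test_log_p should be roughly eventSum_term - int_term.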