Code example #1
def run_test():
    # create report folder 
    rep_folder = create_report_folder()

    # extract data
    X_train, Y_train, X_test, Y_test = get_data()

    # select model 
    model_obj = MLP()

    # transform input data
    X_train = model_obj.transform_data(X_train)
    X_test = model_obj.transform_data(X_test)

    # fit model
    model = model_obj.get_model()
    plot_model(model, to_file=rep_folder + '/model.png', show_shapes=True)
    start_time = time.time()
    history = model.fit(X_train, Y_train, epochs=100, batch_size=8)
    train_time = time.time() - start_time
    print("--- %s seconds for training ---" % (train_time))

    # make predictions
    y_predicted = model.predict(X_test)

    # report performance
    perf_rep = report_performance(Y_test, y_predicted)
    plot_predictions(Y_test, y_predicted, rep_folder)

    # store reports in file 
    write_summary(rep_folder, perf_rep, train_time)
    # store model 
    store_model(rep_folder, model)

    return rep_folder + " " + perf_rep
Code example #2
def make_template():
    """Assemble the template data.

    Consolidates the four configuration sections of the template file into a
    single dictionary:
    {
        'configs': configs,
        'datum': datum,
        'formulas': formulas,
        'maps': maps
    }

    Returns:
        Dictionary: the assembled template data
    """
    wb = get_template()
    try:
        configs = get_configs(wb)
        datum = get_data(wb)
        formulas = get_formulas(wb)
        maps = get_maps(wb)
    finally:
        wb.close()
    return {
        GL.GL_TEMPLATE_KEY_CONFIGS_NAME: configs,
        GL.GL_TEMPLATE_KEY_DATUM_NAME: datum,
        GL.GL_TEMPLATE_KEY_FORMULAS_NAME: formulas,
        GL.GL_TEMPLATE_KEY_MAPS_NAME: maps
    }
Code example #3
def train(n_samples, data_type, generator, discriminator, gen_optimizer,
          dis_optimizer, loss_fun):
    generator.train()
    discriminator.train()
    dis_optimizer.zero_grad()
    gen_optimizer.zero_grad()

    # Train discriminator on real
    real_ex = get_data(n_points=n_samples, data_type=data_type)
    real_ex = torch.tensor(real_ex, dtype=torch.float)

    dis_real = discriminator(real_ex)
    dis_loss_real = loss_fun(dis_real, torch.ones(n_samples, dtype=torch.long))
    dis_loss_real.backward()

    # Train discriminator on fake
    noise = torch.rand(n_samples, NOISE_DIM)
    gen_out = generator(noise)

    dis_gen = discriminator(gen_out.detach())
    dis_loss_gen = loss_fun(dis_gen, torch.zeros(n_samples, dtype=torch.long))
    dis_loss_gen.backward()
    #print("Discriminator loss : {}".format(dis_loss_gen.item() + dis_loss_real.item()))
    dis_optimizer.step()

    # Train generator
    dis_gen = discriminator(gen_out)
    loss_gen = loss_fun(dis_gen, torch.ones(n_samples, dtype=torch.long))
    loss_gen.backward()
    #print("Generator loss : {}".format(loss_gen.item()))
    gen_optimizer.step()

    return dis_loss_gen.item() + dis_loss_real.item(), loss_gen.item()
Code example #4
def create_data_splits(sids, n_train=1, n_val=1, n_test=1):
    n_total = n_train + n_val + n_test
    assert n_total <= len(sids)

    n_sids = len(sids)
    SID_SUBSET = np.random.choice(sids, size=n_total, replace=False)

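    # Each get_data call below is expected to return a dict (presumably keyed
    # by track id) whose entries record the paths and shapes of the stored
    # X / y arrays.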
    train = generate_data.get_data(
        SID_SUBSET[:n_train],
        datadir=DATADIR,
        salamidir=SALAMIDIR,
        outputdir=OUTPUTDIR,
        prefix='train')
    val   = generate_data.get_data(
        SID_SUBSET[n_train:n_train+n_val],
        datadir=DATADIR,
        salamidir=SALAMIDIR,
        outputdir=OUTPUTDIR,
        prefix='val'
        )
    test  = generate_data.get_data(
        SID_SUBSET[n_train+n_val:],
        datadir=DATADIR,
        salamidir=SALAMIDIR,
        outputdir=OUTPUTDIR,
        prefix='test'
        )

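    # Flatten each subset into one tuple per row of the stored arrays:
    # (X_path, X_shape, y_path, y_shape, row_index).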
    splits_dict = {'train': [], 'val': [], 'test': []}
    for item_train in train.keys():
        for i in range(train[item_train]['X_shape'][0]):
            splits_dict['train'].append((train[item_train]['X_path'],
                                         train[item_train]['X_shape'],
                                         train[item_train]['y_path'],
                                         train[item_train]['y_shape'], i))

    for item_val in val.keys():
        for i in range(val[item_val]['X_shape'][0]):
            splits_dict['val'].append((val[item_val]['X_path'],
                                       val[item_val]['X_shape'],
                                       val[item_val]['y_path'],
                                       val[item_val]['y_shape'], i))

    for item_test in test.keys():
        for i in range(test[item_test]['X_shape'][0]):
            splits_dict['test'].append((test[item_test]['X_path'],
                                        test[item_test]['X_shape'],
                                        test[item_test]['y_path'],
                                        test[item_test]['y_shape'], i))

    return splits_dict
Code example #5
def repeat_experiment(source_file: str,
                      dest_file: str,
                      reps=5,
                      max_rep=1000,
                      est_alpha="no",
                      alpha=0.5):
    theta_a_lst = []
    theta_b_lst = []
    alphas_est = []
    counters = []

    true_theta_a, true_theta_b = get_thetas(source_file)
    for i in range(reps):
        X = get_data(source_file)["X"].astype(int)
        # Use a separate name for the returned estimate so the est_alpha flag
        # passed to init_em is not overwritten on later repetitions.
        theta_a, theta_b, alpha_i, counter = init_em(X,
                                                     max_rep,
                                                     est_alpha=est_alpha,
                                                     alpha=alpha)
        theta_a_lst.append(theta_a)
        theta_b_lst.append(theta_b)
        alphas_est.append(alpha_i)
        counters.append(counter)

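    # Element-wise average of the per-repetition estimates.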
    theta_a_est = reduce(lambda x, y: x + y, theta_a_lst) / len(theta_a_lst)
    theta_b_est = reduce(lambda x, y: x + y, theta_b_lst) / len(theta_b_lst)
    alpha_est = reduce(lambda x, y: x + y, alphas_est) / len(alphas_est)

    res_dict = {
        "theta_a_est": theta_a_est.tolist(),
        "theta_b_est": theta_b_est.tolist(),
        "theta_a": true_theta_a.tolist(),
        "theta_b": true_theta_b.tolist(),
        "alpha": alpha_est,
        "counters": counters
    }

    with open(dest_file, 'w') as outfile:
        json.dump(res_dict, outfile)

    print('Source: {0}'.format(source_file))
    print('Destination: {0}'.format(dest_file))
Code example #6
File: ex3.py  Project: bronichern/Adv_ML_GAN
def train(data_type):
    global last_loss, patience, disc_steps, gen_steps
    real_labels = torch.ones((batch_size), dtype=torch.float)
    fake_labels = torch.zeros((batch_size), dtype=torch.float)
    for epoch in range(epochs):
        # ----- Train D ----- #
        for step in range(disc_steps):
            d_optim.zero_grad()
            noise_samples = FloatTensor(sample_z(batch_size))
            # get_data is defined so that examples are drawn i.i.d. from the target distribution
            real_samples = FloatTensor(d_gen.get_data(batch_size, data_type))
            real_samp_decision = discriminator(real_samples)
            d_loss_real_samp = criterion(real_samp_decision.t(), real_labels)
            d_loss_real_samp.backward()

            gen_out = generator(noise_samples).detach()
            fake_samp_decision = discriminator(gen_out)
            d_loss_fake_samp = criterion(fake_samp_decision.t(), fake_labels)
            d_loss_fake_samp.backward()
            d_optim.step()
        # ----- Train G ----- #
        for step in range(gen_steps):
            g_optim.zero_grad()
            noise_samples = FloatTensor(sample_z(batch_size))
            gen_out = generator(noise_samples)
            fake_samp_decision = discriminator(gen_out)
            g_loss = criterion(fake_samp_decision.t(), real_labels)
            g_loss.backward()
            g_optim.step()
        if curr_dt == 2 and epoch == 1000:
            disc_steps -= 5
            gen_steps += 5
        d_err_avg = (d_loss_fake_samp.item() + d_loss_real_samp.item()) / 2
        print("Epoch %s, D loss real:%f, fake:%f, avg_err:%f , G %f err" %
              (epoch, d_loss_real_samp.item(), d_loss_fake_samp.item(),
               d_err_avg, g_loss.item()))

        if epoch > 1000 and epoch % 1000 == 0:
            test_spiral(epoch, d_err_avg, g_loss)
    return d_err_avg, g_loss
Code example #7
    def init_data(self):
        train_dict, valid_dict, test_dict = generate_data.get_data()
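        # Dump each split to flat text files (item ids, per-step features, and
        # the final location of every sequence), then read the files back into
        # lists stored on the instance.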
        sequence_list = []
        f1 = open("train_list_item.txt", "w")
        f2 = open("train_list_features.txt", "w")
        f7 = open("train_list_location.txt", "w")
        for key in train_dict.keys():
            num_total_sequences = len(train_dict[key][0])
            for i in range(num_total_sequences):
                sequence_length = len(train_dict[key][0][i])
                sequence_list.append(sequence_length)
                f1_row = str(key) + ' '
                f2_row = ''
                f7_row = ''
                for j in range(sequence_length):
                    f1_row += str(train_dict[key][1][i][j]) + ' '
                    f2_row += (str(train_dict[key][8][i][j]) + ',' +
                               str(train_dict[key][3][i][j]) + ',' +
                               str(train_dict[key][4][i][j]) + ',' +
                               str(train_dict[key][5][i][j]) + ' ')
                f7_row = str(train_dict[key][6][i][sequence_length - 1])

                f1_row += '\n'
                f2_row += '\n'
                f7_row += '\n'
                f1.write(f1_row)
                f2.write(f2_row)
                f7.write(f7_row)
        f1.close()
        f2.close()
        f7.close()

        sequence_list = []
        f3 = open("valid_list_item.txt", "w")
        f4 = open("valid_list_features.txt", "w")
        f8 = open("valid_list_location.txt", "w")

        for key in valid_dict.keys():
            num_total_sequences = len(valid_dict[key][0])
            for i in range(num_total_sequences):
                sequence_length = len(valid_dict[key][0][i])
                sequence_list.append(sequence_length)
                f3_row = str(key) + ' '
                f4_row = ''
                f8_row = ''
                for j in range(sequence_length):
                    f3_row += str(valid_dict[key][1][i][j]) + ' '
                    f4_row += (str(valid_dict[key][8][i][j]) + ',' +
                               str(valid_dict[key][3][i][j]) + ',' +
                               str(valid_dict[key][4][i][j]) + ',' +
                               str(valid_dict[key][5][i][j]) + ' ')
                f8_row = str(valid_dict[key][6][i][sequence_length - 1])

                f3_row += '\n'
                f4_row += '\n'
                f8_row += '\n'
                f3.write(f3_row)
                f4.write(f4_row)
                f8.write(f8_row)
        f3.close()
        f4.close()
        f8.close()

        sequence_list = []
        f5 = open("test_list_item.txt", "w")
        f6 = open("test_list_features.txt", "w")
        f9 = open("test_list_location.txt", "w")
        for key in test_dict.keys():
            num_total_sequences = len(test_dict[key][0])
            for i in range(num_total_sequences):
                sequence_length = len(test_dict[key][0][i])
                sequence_list.append(sequence_length)
                f5_row = str(key) + ' '
                f6_row = ''
                f9_row = ''
                for j in range(sequence_length):
                    f5_row += str(test_dict[key][1][i][j]) + ' '
                    f6_row += (str(test_dict[key][8][i][j]) + ',' +
                               str(test_dict[key][3][i][j]) + ',' +
                               str(test_dict[key][4][i][j]) + ',' +
                               str(test_dict[key][5][i][j]) + ' ')
                f9_row = str(test_dict[key][6][i][sequence_length - 1])

                f5_row += '\n'
                f6_row += '\n'
                f9_row += '\n'
                f5.write(f5_row)
                f6.write(f6_row)
                f9.write(f9_row)
        f5.close()
        f6.close()
        f9.close()

        train_list_item = []
        f = open("train_list_item.txt", "r")
        for x in f:
            train_list_item.append(x)
        f.close()

        train_list_features = []
        f = open("train_list_features.txt", "r")
        for x in f:
            train_list_features.append(x)
        f.close()

        train_list_location = []
        f = open("train_list_location.txt", "r")
        for x in f:
            train_list_location.append(x)
        f.close()

        valid_list_item = []
        f = open("valid_list_item.txt", "r")
        for x in f:
            valid_list_item.append(x)
        f.close()

        valid_list_features = []
        f = open("valid_list_features.txt", "r")
        for x in f:
            valid_list_features.append(x)
        f.close()

        valid_list_location = []
        f = open("valid_list_location.txt", "r")
        for x in f:
            valid_list_location.append(x)
        f.close()

        test_list_item = []
        f = open("test_list_item.txt", "r")
        for x in f:
            test_list_item.append(x)
        f.close()

        test_list_features = []
        f = open("test_list_features.txt", "r")
        for x in f:
            test_list_features.append(x)
        f.close()

        test_list_location = []
        f = open("test_list_location.txt", "r")
        for x in f:
            test_list_location.append(x)
        f.close()

        self.train_list_item = train_list_item
        self.train_list_features = train_list_features
        self.train_list_location = train_list_location

        self.valid_list_item = valid_list_item
        self.valid_list_features = valid_list_features
        self.valid_list_location = valid_list_location

        self.test_list_item = test_list_item
        self.test_list_features = test_list_features
        self.test_list_location = test_list_location
Code example #8
    def init_basic(self):

        train_dict, valid_dict, test_dict = generate_data.get_data()

        df = pd.read_csv("new_transE_3.csv")
        self.user_length = df['User_id'].max() + 1
        #        print (self.user_length)
        self.item_length = df['Item_id'].max() + 1
        self.entity_count_list = []
        self.entity_count_list.append(self.user_length)
        self.entity_count_list.append(self.item_length)

        time_length = df['new_time'].max() + 1
        category_length = df['L2_Category_name'].max() + 1
        cluster_length = df['clusters'].max() + 1
        poi_type_length = df['POI_Type'].max() + 1
        self.type_count = poi_type_length
        self.relation_count_list = []
        self.relation_count_list.append(time_length)
        self.relation_count_list.append(category_length)
        self.relation_count_list.append(cluster_length)
        self.relation_count_list.append(poi_type_length)

        self.vector_length = 50
        self.margin = 1.0
        self.device = torch.device('cuda')
        self.norm = 1
        self.learning_rate = 0.01

        user_list = df[['User_id']].values.tolist()
        self.user_list = get_unique_column_values(user_list)

        busi_list = df[['Item_id']].values.tolist()
        self.busi_list = get_unique_column_values(busi_list)

        with open('L2.json', 'r') as f:
            self.taxo_dict = json.load(f)
        with open('poi.json', 'r') as f:
            self.poi_dict = json.load(f)

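        # Build a per-item dictionary from dict.csv: running average star
        # rating, cluster / category / POI-type ids, and a feature_index list
        # that offsets each attribute into a single contiguous index space.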
        df3 = pd.read_csv("dict.csv")
        item_dict = dict()
        star_list = []
        for index, row in df3.iterrows():
            if str(int(row['Item_id'])) not in item_dict:
                star_list.clear()
                row_dict = dict()
                star_list.append(row['stars'])
                row_dict['stars'] = row['stars']
                row_dict['clusters'] = int(row['clusters'])
                row_dict['L2_Category_name'] = [int(row['L2_Category_name'])]
                row_dict['POI_Type'] = int(row['POI_Type'])
                row_dict['feature_index'] = [int(row['L2_Category_name'])]
                row_dict['feature_index'].append(
                    int(row['clusters']) + category_length)
                row_dict['feature_index'].append(
                    int(row['POI_Type']) + category_length + cluster_length)
                row_dict['feature_index'].append(2 * int(row['stars']) - 2 +
                                                 category_length +
                                                 cluster_length +
                                                 poi_type_length)
                item_dict[str(int(row['Item_id']))] = row_dict

            else:
                star_list.append(row['stars'])
                item_dict[str(int(row['Item_id']))]['stars'] = (
                    sum(star_list)) / len(star_list)
                item_dict[str(int(row['Item_id']))]['L2_Category_name'].append(
                    int(row['L2_Category_name']))
                item_dict[str(int(row['Item_id']))]['feature_index'].append(
                    int(row['L2_Category_name']))
        self.item_dict = item_dict
Code example #9
def main(num_epochs=1,
         n_songs_train=1,
         n_songs_val=1,
         n_songs_test=1,
         batch_size=256,
         learning_rate=1e-4):
    """
    Main function
    """

    # Theano config
    theano.config.floatX = 'float32'

    train, val, test = None, None, None
    try:
        train, val, test = use_preparsed_data(outputdir='/zap/tsob/audio/', )
    except:
        train, val, test = get_data(n_songs_train=n_songs_train,
                                    n_songs_val=n_songs_val,
                                    n_songs_test=n_songs_test,
                                    outputdir='/zap/tsob/audio/',
                                    seed=None)

    # Save the returned metadata
    np.savez('/zap/tsob/audio/metadata', train, val, test)

    # Print the dimensions
    print "Data dimensions:"
    for datapt in [
            train['Xshape'], train['yshape'], val['Xshape'], val['yshape'],
            test['Xshape'], test['yshape']
    ]:
        print datapt

    # Parse dimensions
    n_train = train['yshape'][0]
    n_val = val['yshape'][0]
    n_test = test['yshape'][0]
    n_chan = train['Xshape'][1]
    n_feats = train['Xshape'][2]
    n_frames = train['Xshape'][3]

    print "n_train  = {0}".format(n_train)
    print "n_val    = {0}".format(n_val)
    print "n_test   = {0}".format(n_test)
    print "n_chan   = {0}".format(n_chan)
    print "n_feats  = {0}".format(n_feats)
    print "n_frames = {0}".format(n_frames)

    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4(name='inputs')
    target_var = T.fcol(name='targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions..."),
    network = build_cnn(input_var)
    print("Done.")

    # Create a loss expression for training, i.e., a scalar objective we want to minimize
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.binary_hinge_loss(prediction, target_var)
    loss = loss.mean()

    # Create update expressions for training
    # Here, we'll use adam
    params = lasagne.layers.get_all_params(network, trainable=True)
    updates = lasagne.updates.adam(loss,
                                   params,
                                   learning_rate=learning_rate,
                                   beta1=0.95,
                                   beta2=0.999,
                                   epsilon=1e-08)

    # Create a loss expression for validation/testing.
    # The crucial difference here is that we do a deterministic forward pass
    # through the network, disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)

    test_loss = lasagne.objectives.binary_hinge_loss(test_prediction,
                                                     target_var)
    test_loss = test_loss.mean()

    test_pred_fn = theano.function([input_var],
                                   test_prediction,
                                   allow_input_downcast=True)

    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function(
        [input_var, target_var],
        loss,
        updates=updates,
        mode=NanGuardMode(  #TODO remove
            nan_is_error=True,
            inf_is_error=True,
            big_is_error=True  #TODO remove
        ),  #TODO remove
        allow_input_downcast=True)

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function([input_var, target_var], [test_loss, test_acc],
                             allow_input_downcast=True)

    # Finally, launch the training loop.
    print("Starting training...")

    train_error_hist = []

    # We iterate over epochs:
    for epoch in range(num_epochs):

        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()

        for batch in iterate_minibatches(train, batch_size, shuffle=True):
            inputs, targets = batch
            train_err_increment = train_fn(inputs, targets)
            train_err += train_err_increment
            train_error_hist.append(train_err_increment)
            train_batches += 1

        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(val, batch_size, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1

        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(epoch + 1, num_epochs,
                                                   time.time() - start_time))
        print("  training loss:\t\t{:.8f}".format(train_err / train_batches))
        print("  validation loss:\t\t{:.8f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(val_acc /
                                                          val_batches * 100))
    print("Done training.")

    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    test_predictions = []
    for batch in iterate_minibatches(test, batch_size, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_predictions.append(test_pred_fn(inputs))
        test_err += err
        test_acc += acc
        test_batches += 1
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(test_acc / test_batches * 100))

    # Optionally, you could now dump the network weights to a file like this:
    timestr = str(time.time())
    np.savez('/zap/tsob/audio/model' + timestr + '.npz',
             *lasagne.layers.get_all_param_values(network))
    np.save('/zap/tsob/audio/train_error_hist' + timestr + '.npy',
            train_error_hist)
    np.save('/zap/tsob/audio/test_predictions' + timestr + '.npy',
            test_predictions)
    print "Wrote model to {0}, test error histogram to {1}, and test predictions to {2}".format(
        'model' + timestr + '.npz', 'train_error_hist' + timestr + '.npy',
        'test_predictions' + timestr + '.npy')
Code example #10
import pickle

from keras.callbacks import ModelCheckpoint

import tensorflow as tf
import keras.backend.tensorflow_backend as tfback

from sklearn.model_selection import train_test_split

import generate_data

save_name = 'simplecnn_009'

people = [
    'Tim', 'Dan', 'Malachi', 'Grant', 'Jess', 'Lindsey', 'Sydney', 'Kate'
]

X, y = generate_data.get_data(people=people)

with open('fitted_models/' + 'ids_' + save_name + '.sav', 'wb') as f:
    pickle.dump(people, f)

X = X / 255

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.15)

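# Checkpoint that keeps only the weights with the lowest validation loss seen
# so far.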
checkpoint_loss = ModelCheckpoint(filepath='fitted_models/' + save_name +
                                  '_minloss',
                                  save_weights_only=True,
                                  monitor='val_loss',
                                  mode='min',
                                  verbose=1,
                                  save_best_only=True)
Code example #11
def test_get_data(mocker, make_wb_and_ws):
    mock_function = mocker.patch('generate_data.make_dict_repeatable')
    wb, _ = make_wb_and_ws(GL.GL_EXECEL_SHEET_DATA_NAME)
    get_data(wb)
    mock_function.assert_called_once_with(wb, GL.GL_EXECEL_SHEET_DATA_NAME)
Code example #12
File: main.py  Project: CITIZENDOT/CS299
from prompt_toolkit import prompt
from prompt_toolkit.completion import WordCompleter
from generate_data import get_data

if __name__ == "__main__":
    periodList = WordCompleter([
        "1d", "5d", "1mo", "3mo", "6mo", "1y", "2y", "5y", "10y", "ytd", "max"
    ])
    intervalList = WordCompleter([
        "1m",
        "2m",
        "5m",
        "15m",
        "30m",
        "60m",
        "90m",
        "1h",
        "1d",
        "5d",
        "1wk",
        "1mo",
        "3mo",
    ])

    symbol = prompt("Ticker name: ", default="AAPL")
    period = prompt("Period? ", completer=periodList, default="1mo")
    interval = prompt("Interval? ", completer=intervalList, default="5m")
    file_name = prompt("FileName? ", default="data.csv")

    get_data(symbol, period, interval, file_name)
Code example #13
def train():
    trainset = get_data('train')
    testset = get_data('dev')

    global_step = tf.train.get_or_create_global_step()

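    # Linear learning-rate warmup over the first warmup_periods epochs,
    # followed by cosine decay of the rate towards alpha * cfg.BASE.lr.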
    steps_per_period = len(trainset)
    warmup_steps = tf.constant(cfg.BASE.warmup_periods * steps_per_period,
                               dtype=tf.float32,
                               name='warmup_steps')
    train_steps = tf.constant(cfg.BASE.epochs * steps_per_period,
                              dtype=tf.float32,
                              name='train_steps')

    warmup_lr = tf.to_float(global_step) / tf.to_float(warmup_steps) \
                * cfg.BASE.lr

    decay_lr = tf.train.cosine_decay(cfg.BASE.lr,
                                     global_step=tf.to_float(global_step) -
                                     warmup_steps,
                                     decay_steps=train_steps - warmup_steps,
                                     alpha=0.01)
    learn_rate = tf.where(
        tf.to_float(global_step) < warmup_steps, warmup_lr, decay_lr)
    optimizer = tf.train.AdamOptimizer(learn_rate)

    iterator = tf.data.Iterator.from_structure(
        trainset.dataset.output_types,
        trainset.dataset.output_shapes,
        output_classes=trainset.dataset.output_classes)
    train_init_op = iterator.make_initializer(trainset.dataset)
    test_init_op = iterator.make_initializer(testset.dataset)

    trainable = tf.placeholder(dtype=tf.bool, name='training')
    dropout_rate_position = tf.placeholder(tf.float32,
                                           shape=(),
                                           name='drop_out')
    gradients, loss, att, ctc, acc = get_tower_results(iterator, optimizer,
                                                       dropout_rate_position,
                                                       trainable)

    avg_tower_gradients = average_gradients(gradients)

    grads, all_vars = zip(*avg_tower_gradients)
    clipped, gnorm = tf.clip_by_global_norm(grads, 0.25)
    grads_and_vars = list(zip(clipped, all_vars))

    apply_gradient_op = optimizer.apply_gradients(grads_and_vars,
                                                  global_step=global_step)

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:

        loader = tf.train.Saver(tf.global_variables())
        saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
        sess.run(tf.global_variables_initializer())
        try:
            print('=> Restoring weights from: %s ... ' %
                  cfg.BASE.initial_weight)
            loader.restore(sess, cfg.BASE.initial_weight)
        except:
            print('=> %s does not exist !!!' % cfg.BASE.initial_weight)
            print('=> Now it starts to train LM from scratch ...')

        for epoch in range(1, 1 + cfg.BASE.epochs):
            # Initialize the training dataset iterator
            sess.run(train_init_op)
            train_epoch_loss, test_epoch_loss = [], []
            train_epoch_acc, test_epoch_acc = [], []
            train_epoch_ctc, test_epoch_ctc = [], []
            train_epoch_att, test_epoch_att = [], []
            pbar = tqdm(range(len(trainset) + 1))
            for i in pbar:
                try:
                    (_, train_step_loss, train_step_acc, train_step_ctc,
                     train_step_att, global_step_val) = sess.run(
                         [apply_gradient_op, loss, acc, ctc, att, global_step],
                         feed_dict={
                             trainable: True,
                             dropout_rate_position: 0.1
                         })

                    train_epoch_loss.append(train_step_loss)
                    train_epoch_acc.append(train_step_acc)
                    train_epoch_ctc.append(train_step_ctc)
                    train_epoch_att.append(train_step_att)
                    pbar.set_description("loss:%.2f" % train_step_loss)
                    pbar.set_postfix(acc=train_step_acc)
                except tf.errors.OutOfRangeError:
                    break

            sess.run(test_init_op)
            while True:
                try:

                    test_step_loss, test_step_acc, test_step_ctc, test_step_att = sess.run(
                        [loss, acc, ctc, att],
                        feed_dict={
                            trainable: False,
                            dropout_rate_position: 0.0
                        })

                    test_epoch_loss.append(test_step_loss)
                    test_epoch_acc.append(test_step_acc)
                    test_epoch_ctc.append(test_step_ctc)
                    test_epoch_att.append(test_step_att)

                except tf.errors.OutOfRangeError:
                    break
            train_epoch_loss, test_epoch_loss = np.mean(
                train_epoch_loss), np.mean(test_epoch_loss)
            train_epoch_ctc, test_epoch_ctc = np.mean(
                train_epoch_ctc), np.mean(test_epoch_ctc)
            train_epoch_att, test_epoch_att = np.mean(
                train_epoch_att), np.mean(test_epoch_att)

            train_epoch_acc, test_epoch_acc = np.mean(
                train_epoch_acc), np.mean(test_epoch_acc)
            ckpt_file = "./logs_lm_tf/lm_train_loss=%.4f.ckpt" % train_epoch_loss
            log_time = time.strftime('%Y-%m-%d %H:%M:%S',
                                     time.localtime(time.time()))
            print(
                "=> Epoch: %2d Time: %s Tr_loss: %.2f  Tr_acc: %.2f Te_acc: %.2f "
                " tr_ctc: %.2f tr_att: %.2f " %
                (epoch, log_time, train_epoch_loss, train_epoch_acc,
                 test_epoch_acc, train_epoch_ctc, train_epoch_att))
            saver.save(sess, ckpt_file, global_step=epoch)
Code example #14
    res_dict = {
        "theta_a_est": theta_a_est.tolist(),
        "theta_b_est": theta_b_est.tolist(),
        "theta_a": true_theta_a.tolist(),
        "theta_b": true_theta_b.tolist(),
        "alpha": alpha_est,
        "counters": counters
    }

    with open(dest_file, 'w') as outfile:
        json.dump(res_dict, outfile)

    print('Source: {0}'.format(source_file))
    print('Destination: {0}'.format(dest_file))


def read_results(source_file: str):
    with open(source_file, 'r') as input_file:
        params = json.load(input_file)
    return {k: (np.array(v) if "theta" in k else v) for k, v in params.items()}


if __name__ == "__main__":
    X = get_data(params_file_path)["X"].astype(int)
    single_run(X, verbose=True)
    # theta_1, theta_2, alpha = single_run(X, verbose=True)
    #dest_file = os.path.join(os.getcwd(), 'saved_simulations', 'test.json')
    #repeat_experiment(params_file_path, dest_file)
    #read_results(dest_file)
Code example #15
from typing import Dict, Tuple
import transE_model as model_definition
import torch
import torch.nn as nn
import torch.optim as optim
import dataset as ds
import numpy as np
import pickle
import pandas as pd
import random
import json
from torch.utils import data as torch_data
import generate_data
import inside_category

train_dict, valid_dict, test_dict = generate_data.get_data()

df = pd.read_csv("../data/new_transE_3.csv")
user_length = df['User_id'].max() + 1
item_length = df['Item_id'].max() + 1

time_length = df['new_time'].max() + 1
category_length = df['L2_Category_name'].max() + 1
cluster_length = df['clusters'].max() + 1
poi_type_length = df['POI_Type'].max() + 1

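# Number of distinct ids per relation type (time bucket, L2 category, cluster,
# POI type), assuming the ids are encoded as 0..max.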
relation_count_list = []
relation_count_list.append(time_length)
relation_count_list.append(category_length)
relation_count_list.append(cluster_length)
relation_count_list.append(poi_type_length)
Code example #16
File: gan.py  Project: shauli-ravfogel/GANs
def train(gen, dis, original_model, num_epochs, dis_updates, gen_updates):
    batch_size = 100

    loss_fn = nn.BCELoss()
    gen_optimizer = optim.Adam(gen.parameters(), lr=0.0002)
    dis_optimizer = optim.Adam(dis.parameters(), lr=0.0002)

    l = int(num_epochs / batch_size)
    epochs = trange(l, desc="Training...")
    for t in epochs:

        if t % 1000 == 0 and t > 0:
            generate_fake_samples(gen, original_model)

        for i in range(dis_updates):
            # Discriminator: x~Data, z~noise  Maximize log-likelihood:
            # maximize D | log(D(x)) + log(1 - D(G(z)))
            dis.zero_grad()
            samples = get_latent_samples(batch_size)

            fake_data = gen(samples).detach()
            real_data = torch.from_numpy(get_data(batch_size)).float().type(dtype)

            true_label = torch.ones(batch_size)
            # train discriminator on real data
            real_output = dis(real_data)
            dis_real_loss = loss_fn(real_output.view(-1), true_label)
            dis_real_loss.backward()  # back-prop

            # train discriminator on fake data
            fake_label = torch.zeros(batch_size)
            fake_output = dis(fake_data)
            dis_fake_loss = loss_fn(fake_output.view(-1), fake_label)
            dis_fake_loss.backward()

            # Update weights
            dis_optimizer.step()

        # Generator: z~noise, make the discriminator accept generator output.
        # Maximize the log-likelihood of the generated data according to the
        # discriminator: maximize G | log(D(G(z)))
        for i in range(gen_updates):
            gen.zero_grad()
            # another forward pass on D
            samples = get_latent_samples(batch_size)
            fake_data = gen(samples)
            fake_output_for_gen = dis(fake_data)
            # we maximize log(D(G(z)) by using the true label
            true_label = torch.ones(batch_size)
            gen_loss = loss_fn(fake_output_for_gen.view(-1), true_label)
            gen_loss.backward()

            # Update weights
            gen_optimizer.step()

        if t % 10 == 0:
            monitor_string = monitor(
                real_output,
                fake_output,
                fake_output_for_gen,
                dis_fake_loss,
                dis_real_loss,
                gen_loss)
            epochs.set_postfix_str(monitor_string)
    return
Code example #17
def main(
        num_epochs=1,
        n_songs_train=1,
        n_songs_val=1,
        n_songs_test=1,
        batch_size=256,
        learning_rate=1e-4
    ):
    """
    Main function
    """

    # Theano config
    theano.config.floatX = 'float32'

    train, val, test = None, None, None
    try:
        train, val, test = use_preparsed_data(
            outputdir='/zap/tsob/audio/',
            )
    except:
        train, val, test = get_data(
            n_songs_train=n_songs_train,
            n_songs_val=n_songs_val,
            n_songs_test=n_songs_test,
            outputdir='/zap/tsob/audio/',
            seed=None
            )

    # Save the returned metadata
    np.savez('/zap/tsob/audio/metadata', train, val, test)

    # Print the dimensions
    print "Data dimensions:"
    for datapt in [train['Xshape'], train['yshape'],
                   val['Xshape'], val['yshape'],
                   test['Xshape'], test['yshape']]:
        print datapt

    # Parse dimensions
    n_train = train['yshape'][0]
    n_val = val['yshape'][0]
    n_test = test['yshape'][0]
    n_chan = train['Xshape'][1]
    n_feats = train['Xshape'][2]
    n_frames = train['Xshape'][3]

    print "n_train  = {0}".format(n_train)
    print "n_val    = {0}".format(n_val)
    print "n_test   = {0}".format(n_test)
    print "n_chan   = {0}".format(n_chan)
    print "n_feats  = {0}".format(n_feats)
    print "n_frames = {0}".format(n_frames)

    # Prepare Theano variables for inputs and targets
    input_var = T.tensor4(name='inputs')
    target_var = T.fcol(name='targets')

    # Create neural network model (depending on first command line parameter)
    print("Building model and compiling functions..."),
    network = build_cnn(input_var)
    print("Done.")

    # Create a loss expression for training, i.e., a scalar objective we want to minimize
    prediction = lasagne.layers.get_output(network)
    loss = lasagne.objectives.binary_hinge_loss(prediction, target_var)
    loss = loss.mean()

    # Create update expressions for training
    # Here, we'll use adam
    params  = lasagne.layers.get_all_params(
        network,
        trainable=True
    )
    updates = lasagne.updates.adam(
        loss,
        params,
        learning_rate=learning_rate,
        beta1=0.95,
        beta2=0.999,
        epsilon=1e-08
    )

    # Create a loss expression for validation/testing.
    # The crucial difference here is that we do a deterministic forward pass
    # through the network, disabling dropout layers.
    test_prediction = lasagne.layers.get_output(network, deterministic=True)

    test_loss = lasagne.objectives.binary_hinge_loss(
        test_prediction,
        target_var
        )
    test_loss = test_loss.mean()

    test_pred_fn = theano.function(
        [input_var],
        test_prediction,
        allow_input_downcast=True
        )

    # As a bonus, also create an expression for the classification accuracy:
    test_acc = T.mean(T.eq(T.argmax(test_prediction, axis=1), target_var),
                      dtype=theano.config.floatX)

    # Compile a function performing a training step on a mini-batch (by giving
    # the updates dictionary) and returning the corresponding training loss:
    train_fn = theano.function(
        [input_var, target_var],
        loss,
        updates=updates,
        mode=NanGuardMode(                                          #TODO remove
            nan_is_error=True, inf_is_error=True, big_is_error=True #TODO remove
            ),                                                      #TODO remove
        allow_input_downcast=True
    )

    # Compile a second function computing the validation loss and accuracy:
    val_fn = theano.function(
        [input_var, target_var],
        [test_loss, test_acc],
        allow_input_downcast=True
    )

    # Finally, launch the training loop.
    print("Starting training...")

    train_error_hist = []

    # We iterate over epochs:
    for epoch in range(num_epochs):

        # In each epoch, we do a full pass over the training data:
        train_err = 0
        train_batches = 0
        start_time = time.time()

        for batch in iterate_minibatches(
            train, batch_size, shuffle=True
            ):
            inputs, targets = batch
            train_err_increment = train_fn(inputs, targets)
            train_err += train_err_increment
            train_error_hist.append(train_err_increment)
            train_batches += 1

        # And a full pass over the validation data:
        val_err = 0
        val_acc = 0
        val_batches = 0
        for batch in iterate_minibatches(val, batch_size, shuffle=False):
            inputs, targets = batch
            err, acc = val_fn(inputs, targets)
            val_err += err
            val_acc += acc
            val_batches += 1

        # Then we print the results for this epoch:
        print("Epoch {} of {} took {:.3f}s".format(
            epoch + 1, num_epochs, time.time() - start_time))
        print("  training loss:\t\t{:.8f}".format(train_err / train_batches))
        print("  validation loss:\t\t{:.8f}".format(val_err / val_batches))
        print("  validation accuracy:\t\t{:.2f} %".format(
            val_acc / val_batches * 100))
    print("Done training.")

    # After training, we compute and print the test error:
    test_err = 0
    test_acc = 0
    test_batches = 0
    test_predictions = []
    for batch in iterate_minibatches(test, batch_size, shuffle=False):
        inputs, targets = batch
        err, acc = val_fn(inputs, targets)
        test_predictions.append( test_pred_fn(inputs) )
        test_err += err
        test_acc += acc
        test_batches += 1
    print("Final results:")
    print("  test loss:\t\t\t{:.6f}".format(test_err / test_batches))
    print("  test accuracy:\t\t{:.2f} %".format(
        test_acc / test_batches * 100))

    # Optionally, you could now dump the network weights to a file like this:
    timestr = str(time.time())
    np.savez('/zap/tsob/audio/model'+timestr+'.npz', *lasagne.layers.get_all_param_values(network))
    np.save('/zap/tsob/audio/train_error_hist'+timestr+'.npy', train_error_hist)
    np.save('/zap/tsob/audio/test_predictions'+timestr+'.npy', test_predictions)
    print "Wrote model to {0}, test error histogram to {1}, and test predictions to {2}".format(
        'model'+timestr+'.npz',
        'train_error_hist'+timestr+'.npy',
        'test_predictions'+timestr+'.npy'
        )
Code example #18
File: ex3.py  Project: Youngjoo-Kim/first_GAN
# Setup Adam optimizers for both G and D
optimizerD = optim.Adam(netD.parameters(), lr=lr_d, betas=(beta_d, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=lr_g, betas=(beta_g, 0.999))

# Keep track of progress
iters = 0

print("Starting Training Loop...")
# For each epoch
for epoch in range(num_epochs):

    # get random permutation of the noise and the true data
    dis_loss = gen_loss = D_x = D_G_z1 = D_G_z2 = g_grads_norm = d_grads_norm = 0

    # generate data
    real_data = torch.from_numpy(gd.get_data(
        batch_size, target_dist)).float().to(device)  # read data

    # (1) Update D network: maximizing log(D(x)) + log(1 - D(G(z)))
    # An equivalent method is to minimize -(y_t*log(D(x) + (1 - y_t)*log(1 - D(G(z))))) with gradient descent
    # accumulate errors from genuine and fake examples in the batch and make the update step

    netD.train()
    netD.zero_grad()

    # run true examples through the network
    label = torch.full((batch_size, ), real_label, device=device)  # set labels
    output = netD(real_data).view(-1)  # get network output
    errD_real = criterion(output, label)  # calc. average BCE error
    errD_real.backward()  # calc gradients for discriminator
    D_x += output.sum().item()  # average value given to true distribution