Example #1
def build_gan_trainer():

    # Placeholders
    input_x = tflearn.input_data(shape=(None, X_SIZE), name="input_x")
    input_z = tflearn.input_data(shape=(None, Z_SIZE), name="input_z")

    # Generator
    G_sample = build_generator(input_z, scope=G_SCOPE)
    target = G_sample

    # Discriminator
    D_origin = build_discriminator(input_x, scope=D_SCOPE)
    D_fake = build_discriminator(G_sample, scope=D_SCOPE, reuse=True)

    # Loss
    D_loss = -tf.reduce_mean(tf.log(D_origin) + tf.log(1. - D_fake))
    G_loss = -tf.reduce_mean(tf.log(D_fake))

    # Optimizer
    G_opt = tflearn.Adam(learning_rate=0.001).get_tensor()
    D_opt = tflearn.Adam(learning_rate=0.001).get_tensor()

    # Vars
    G_vars = get_trainable_variables(G_SCOPE)
    D_vars = get_trainable_variables(D_SCOPE)

    # TrainOp
    G_train_op = tflearn.TrainOp(loss=G_loss,
                                 optimizer=G_opt,
                                 batch_size=BATCH_SIZE,
                                 trainable_vars=G_vars,
                                 name="Generator")

    D_train_op = tflearn.TrainOp(loss=D_loss,
                                 optimizer=D_opt,
                                 batch_size=BATCH_SIZE,
                                 trainable_vars=D_vars,
                                 name="Discriminator")

    # Trainer
    gan_trainer = tflearn.Trainer(
        [D_train_op, G_train_op],
        tensorboard_dir=TENSORBOARD_DIR,
        # checkpoint_path=CHECKPOINT_DIR,  # error
        max_checkpoints=1)

    return gan_trainer, target
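For context, here is a minimal sketch of driving the returned Trainer. It assumes the function is amended to also return its two placeholders, and that X_data and a sample_z noise helper exist; with a list of TrainOps, Trainer.fit expects one feed dict per op, in the same order:

# Hypothetical usage; assumes build_gan_trainer() is changed to also return
# (input_x, input_z), and that X_data / sample_z are defined elsewhere.
trainer, G_sample, input_x, input_z = build_gan_trainer()
Z_data = sample_z(len(X_data), Z_SIZE)  # noise batch, assumed helper

trainer.fit(feed_dicts=[{input_x: X_data, input_z: Z_data},  # feeds D_train_op
                        {input_z: Z_data}],                  # feeds G_train_op
            n_epoch=100, run_id="gan_trainer")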
Example #2
def build_network(p, input_num):
    network = tflearn.input_data(shape=[None, input_num])
    # Each entry of p.layers is read as layer[1]=n_units, layer[2]=activation,
    # layer[3]=dropout keep-prob (assumed tuple layout; layer[0] is unused here).
    for layer in p.layers:
        network = tflearn.fully_connected(network,
                                          layer[1],
                                          activation=layer[2],
                                          weights_init=p.weights_init)
        if p.batch_norm:
            network = tflearn.batch_normalization(network)
        if layer[3] < 1.0:
            network = tflearn.dropout(network, layer[3])

    network = tflearn.fully_connected(network,
                                      p.class_num,
                                      activation="softmax")

    if p.optimizer == "sgd":
        optimizer = tflearn.SGD(learning_rate=p.learning_rate,
                                lr_decay=p.lr_decay,
                                decay_step=p.decay_step)
    elif p.optimizer == "momentum":
        optimizer = tflearn.Momentum(learning_rate=p.learning_rate,
                                     lr_decay=p.lr_decay,
                                     decay_step=p.decay_step)
    elif p.optimizer == "adagrad":
        optimizer = tflearn.AdaGrad(learning_rate=p.learning_rate)
    else:
        optimizer = tflearn.Adam(learning_rate=p.learning_rate)
    network = tflearn.regression(network,
                                 optimizer=optimizer,
                                 loss="categorical_crossentropy")
    return network
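A hedged usage sketch: p can be any object carrying the hyperparameter fields build_network reads. The names below (SimpleNamespace, the layer tuples, the MNIST-sized input) are illustrative, not from the source:

from types import SimpleNamespace
import tflearn

# Illustrative hyperparameter bundle matching the fields build_network reads.
p = SimpleNamespace(
    layers=[("fc1", 128, "relu", 0.8),   # (name, n_units, activation, keep_prob)
            ("fc2", 64, "relu", 1.0)],
    weights_init="truncated_normal",
    batch_norm=True,
    class_num=10,
    optimizer="adam",
    learning_rate=0.001,
    lr_decay=0.96,
    decay_step=1000)

network = build_network(p, input_num=784)
model = tflearn.DNN(network)
# model.fit(X, Y, n_epoch=10, show_metric=True)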
Example #3
    def __call__(self, **kwargs):
        # Optional overrides: opt_learning_rate (default 0.001) and
        # opt_epsilon (default 1e-8); any other kwargs are ignored.
        return tflearn.Adam(
            learning_rate=kwargs.get('opt_learning_rate', 0.001),
            epsilon=kwargs.get('opt_epsilon', 1e-8))
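Assuming the method belongs to a small optimizer-factory class (the class itself is not shown in the source), usage would look roughly like this:

import tflearn

# Hypothetical host class for the __call__ above; the real class name and
# any other members are not shown in the source.
class AdamFactory(object):
    def __call__(self, **kwargs):
        return tflearn.Adam(
            learning_rate=kwargs.get('opt_learning_rate', 0.001),
            epsilon=kwargs.get('opt_epsilon', 1e-8))

optimizer = AdamFactory()(opt_learning_rate=0.01)  # override one default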
Example #4
def run_model():
    softmax = build_neural_network()
    optimizer = tflearn.Adam(0.001)
    net = tflearn.regression(softmax, optimizer=optimizer,
                             metric=tflearn.metrics.Accuracy(),
                             loss='categorical_crossentropy')
    model = tflearn.DNN(net)
    model.fit(x_train, y_train, n_epoch=10, batch_size=100, show_metric=True)
    result = model.evaluate(x_test, y_test)
    print("accuracy:", result)  # evaluate() returns a list of metric scores
Example #5
# Note: this snippet starts mid-script; 'kinase' and 'kinase_deletion' are
# defined earlier in the source. tf.mul was renamed tf.multiply in TF 1.0.
kinase_dropout = tf.multiply(kinase, kinase_deletion)

transcription_factor = tflearn.fully_connected(kinase_dropout, num_transcription_factors, activation='relu', name='Transcription_Factor')

with tf.name_scope('TF_Targets'):
    W = tf.Variable(tf.random_uniform([num_transcription_factors, num_genes]), name='Weights')
    b = tf.Variable(tf.zeros([num_genes]), name='biases')
    # gene = tf.matmul(tf.multiply(tf_connectivity, W), transcription_factor) + b
    transcription_factor_targets = tf.multiply(tf_connectivity, W)

with tf.name_scope('Gene'):
    gene = tf.matmul(transcription_factor, transcription_factor_targets) + b

#gene = tflearn.fully_connected(transcription_factor, num_genes, activation='linear')

adam = tflearn.Adam(learning_rate=0.00001, beta1=0.99)

regression = tflearn.regression(gene, optimizer=adam, loss='mean_square', metric='R2')

# Define model
model = tflearn.DNN(regression, tensorboard_verbose=1)



# Start training
model.fit([data1, data2], target, n_epoch=200, show_metric=True, shuffle=True,
          batch_size=20, validation_set=0.10)
model.save('mymodel_relu_cocktail_with_val_and_dropout_and_reg_test.tflearn')
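The tf.multiply(tf_connectivity, W) line above is the core trick: a 0/1 connectivity matrix element-wise masks the weight matrix so that only allowed TF-to-gene edges carry weight. A standalone sketch with toy shapes (names and values illustrative):

import numpy as np
import tensorflow as tf

# Toy mask: TF 0 may regulate genes 0 and 2, TF 1 only gene 1 (illustrative).
tf_connectivity = tf.constant(np.array([[1., 0., 1.],
                                        [0., 1., 0.]], dtype=np.float32))
W = tf.Variable(tf.random_uniform([2, 3]))
masked_W = tf.multiply(tf_connectivity, W)  # disallowed edges stay exactly 0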

Example #6
File: static.py Project: FlashXT/python-1
input_layer = tflearn.input_data(shape=[None, vector_size])
dense1 = tflearn.fully_connected(input_layer, 64, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dropout1 = tflearn.dropout(dense1, drop_pro)
dense2 = tflearn.fully_connected(dropout1, 128, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dropout2 = tflearn.dropout(dense2, drop_pro)
dense3 = tflearn.fully_connected(dropout2, 64, activation='tanh',
                                 regularizer='L2', weight_decay=0.001)
dropout3 = tflearn.dropout(dense3, drop_pro)
softmax = tflearn.fully_connected(dropout3, lab_size, activation='softmax')

# Optimizers: SGD with learning-rate decay (defined but unused below) and Adam
sgd = tflearn.SGD(learning_rate=0.01, lr_decay=0.96, decay_step=1000)
adam = tflearn.Adam(learning_rate=0.001, beta1=0.9, beta2=0.999, epsilon=1e-08, use_locking=False, name="Adam")
# loss = tflearn.losses.L2()
# top_k = tflearn.metrics.Top_k(6)
accu = tflearn.metrics.Accuracy()
net = tflearn.regression(softmax, optimizer=adam, metric=accu)
# optimizer = tflearn.optimizers.Optimizer(learning_rate=0.01, False, "")
# loss = tf.reduce_mean(tf.nn.log_poisson_loss(logits=softmax, labels=Y))
# optimizer = tf.train.AdamOptimizer(learning_rate=0.01).minimize(loss)
# init = tf.global_variables_initializer()


dnn = tflearn.DNN(net, clip_gradients=5.0, tensorboard_verbose=0,
          tensorboard_dir='/tmp/tflearn_logs/', checkpoint_path=None,
          best_checkpoint_path=None, max_checkpoints=None,
          session=None, best_val_accuracy=0.0)
Example #7
# import csv
# df_test[['citizen_id', 'Voted_to_ref']] \
#     .to_csv('snark _IITKharagpur_1.csv', index = False, quoting=csv.QUOTE_NONNUMERIC, header = False)

############ Method 2
import numpy as np
import pandas as pd
import tflearn
net = tflearn.input_data(shape=[None, 34])
net = tflearn.fully_connected(net, 68)
net = tflearn.fully_connected(net, 68, activation='sigmoid')
net = tflearn.fully_connected(net, 68)
net = tflearn.fully_connected(net, 68, activation='sigmoid')
net = tflearn.fully_connected(net, 68)
net = tflearn.fully_connected(net, 68, activation='sigmoid')
net = tflearn.fully_connected(net, 34)
net = tflearn.fully_connected(net, 5, activation='softmax')
adam = tflearn.Adam(learning_rate=0.0001)
net = tflearn.regression(net,
                         optimizer=adam,
                         loss='categorical_crossentropy')
model = tflearn.DNN(net)

labels = np.zeros((len(y), 5))
for i in range(len(y)):
    labels[i][y[i]] = 1

model.fit(X, labels, n_epoch=1000, batch_size=16, show_metric=True)

df_test = pd.read_csv('Leaderboard_Dataset.csv')
df_test = clean_data(df_test, False)
test_data = df_test.values
test_x = test_data[:, 1:]
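The snippet stops right after preparing test_x. A plausible continuation (hypothetical, not in the source) would score the leaderboard rows; 'Voted_to_ref' mirrors the column name in the commented-out export above:

# Hypothetical continuation: predict classes for the leaderboard rows.
pred = np.argmax(model.predict(test_x), axis=1)
df_test['Voted_to_ref'] = pred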
Example #8
import numpy as np
import tensorflow as tf
import tflearn
import tflearn.data_preprocessing as data_prep
from tflearn.data_utils import to_categorical

# train_pos_set / train_neg_set / test_* and the feature count d come from
# earlier in the source.
train_set = np.concatenate((train_pos_set, train_neg_set), axis=0)
test_set = np.concatenate((test_pos_set, test_neg_set), axis=0)

X_train = train_set[:, :-1]
Y_train = to_categorical(train_set[:, -1:].flatten(), 2)

X_test = test_set[:, :-1]
Y_test = to_categorical(test_set[:, -1:].flatten(), 2)

# train a small feed-forward classifier (one relu hidden layer + softmax)
with tf.Graph().as_default():
    prep = data_prep.DataPreprocessing()
    prep.add_featurewise_zero_center()
    prep.add_featurewise_stdnorm()

    # Attach the preprocessing defined above; without data_preprocessing=,
    # the prep object would be built but never applied.
    net = tflearn.input_data([None, d], data_preprocessing=prep)
    net = tflearn.fully_connected(net, 150, activation='relu')
    net = tflearn.fully_connected(net, 2, activation='softmax')
    adam = tflearn.Adam(learning_rate=0.01)
    net = tflearn.regression(net,
                             optimizer=adam,
                             loss='categorical_crossentropy')

    model = tflearn.DNN(net, tensorboard_verbose=0)
    model.fit(X_train, Y_train, n_epoch=500, show_metric=True)

print(np.argmax(model.predict(X_test), axis=1))
print("Accuracy: {}%".format(100 * np.mean(
    test_set[:, -1:].flatten() == np.argmax(model.predict(X_test), axis=1))))
Example #9
File: BC.py Project: jxwuyi/hw1
# Module-level imports assumed by this excerpt: pickle, numpy, tensorflow,
# and the CS294 hw1 helpers load_policy and tf_util.
import pickle
import numpy as np
import tensorflow as tf

import load_policy
import tf_util


def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('envname', type=str)
    parser.add_argument('expert_policy_file', type=str)
    parser.add_argument("data", type=str)
    parser.add_argument("--model", type=str, default='None')
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--output", type=str, default='None')
    parser.add_argument("--percent", type=int, default=100)

    args = parser.parse_args()

    import gym
    env = gym.make(args.envname)

    import tflearn

    print('loading data')
    with open(args.data, 'rb') as f:
        obs, act = pickle.load(f)
    print('  -> done!')

    if args.percent < 100:
        n = int(args.percent / 100.0 * obs.shape[0])
        obs = obs[:n, :]
        act = act[:n, :]

    batch_size = 256

    #### Build Model
    X = obs
    Y = act.squeeze()
    input_size = X.shape[1]
    hid1 = 32
    hid2 = 16
    output_size = Y.shape[1]

    input_layer = tflearn.input_data(shape=[None, input_size])
    layer1 = tflearn.fully_connected(input_layer,
                                     hid1,
                                     activation='relu',
                                     regularizer='L2',
                                     weight_decay=0.001)
    layer2 = tflearn.fully_connected(layer1,
                                     hid2,
                                     activation='relu',
                                     regularizer='L2',
                                     weight_decay=0.001)
    output_layer = tflearn.fully_connected(layer2,
                                           output_size,
                                           activation='linear')

    adam = tflearn.Adam(learning_rate=0.001)
    net = tflearn.regression(output_layer, optimizer=adam, loss='mean_square')

    model = tflearn.DNN(net)

    if args.model == 'None':
        model.fit(X,
                  Y,
                  batch_size=batch_size,
                  n_epoch=args.epochs,
                  shuffle=True,
                  show_metric=True,
                  run_id='behavior clone: ' + args.envname)
        model.save('model/' + args.envname + '.tfl')
    else:
        model.load(args.model)
    #return
    # evaluate
    print('loading and building expert policy')
    policy_fn = load_policy.load_policy(args.expert_policy_file)
    print('loaded and built')

    eval_num = 50
    max_steps = 500

    targets = []

    with tf.Session():
        tf_util.initialize()

        returns = []
        for i in range(eval_num):
            print('iter', i)
            obs = env.reset()
            done = False
            totalr = 0.
            steps = 0
            while not done:
                action = model.predict(obs[None, :])
                obs, r, done, _ = env.step(action)
                totalr += r
                steps += 1
                if steps % 100 == 0: print("%i/%i" % (steps, max_steps))
                if steps >= max_steps:
                    break
            returns.append(totalr)
            # simulate target performance
            obs = env.reset()
            done = False
            totalr = 0.
            steps = 0
            while not done:
                action = policy_fn(obs[None, :])
                obs, r, done, _ = env.step(action)
                totalr += r
                steps += 1
                if steps % 100 == 0: print("%i/%i" % (steps, max_steps))
                if steps >= max_steps:
                    break
            targets.append(totalr)

    filename = args.envname + '_result.pkl' if args.output == 'None' else args.output
    print('returns', returns)
    print('mean return', np.mean(returns))
    print('std of return', np.std(returns))

    with open(filename, 'wb') as f:
        pickle.dump([returns, targets], f)
Example #10
# (This snippet begins mid-call: the opening of the tflearn.fully_connected
# call below -- its input tensor, unit count, and activation -- is missing
# from the source.)
                                bias=False,
                                weights_init="truncated_normal",
                                bias_init="zeros",
                                trainable=True,
                                restore=True,
                                reuse=False,
                                scope=None,
                                name="FullyConnectedLayer_relu")

v_net = tflearn.fully_connected(v_net,
                                3,
                                activation="softmax",
                                name="OutputLayer")

v_net = tflearn.regression(v_net,
                           optimizer=tflearn.Adam(learning_rate=0.01,
                                                  epsilon=0.01),
                           metric=tflearn.metrics.Accuracy(),
                           loss="categorical_crossentropy")

#define model
model = tflearn.DNN(v_net)

#load saved weights
model.load('C:/temp/Tensorflow/v01/relu_adam_001.tf_model')

#prediction
prediction = np.array(model.predict(df_data[df_data.columns[1:5]]))  # ensure ndarray for column slicing below

#add predictions to data frame
df_data['PROB_HOME_WIN'] = prediction[:, 0]
df_data['PROB_DRAW'] = prediction[:, 1]
Example #11
# (This snippet begins mid-call: the first fully_connected call's input
# tensor and unit count are missing from the source.)
                              weights_init='xavier',
                              activation="softsign",
                              name='First_Fully_Connected')
net = tflearn.highway(net, 32, activation="softsign", name="highwayLayer")
net = tflearn.fully_connected(net,
                              32,
                              weights_init='xavier',
                              activation="softsign",
                              name='Third_Fully_Connected')
net = tflearn.fully_connected(net,
                              5,
                              activation="softmax",
                              name='Final_Fully_Connected')

# todo: confusion matrix
adam = tflearn.Adam()
net = tflearn.regression(net, learning_rate=0.001, optimizer=adam)

# Define model
model = tflearn.DNN(net,
                    clip_gradients=1.0,
                    tensorboard_verbose=3,
                    tensorboard_dir='./tmp/weather.log')

# Start training (apply gradient descent algorithm)
model.fit(TrainingSetFeatures,
          TrainingSetLabels,
          n_epoch=15,
          batch_size=24,
          show_metric=True)
Example #12
File: tf_cnn.py Project: yejiming/cs231n
def my_model(X, y):
    y = tf.one_hot(y, 10)

    network = tflearn.conv_2d(X,
                              nb_filter=32,
                              filter_size=3,
                              strides=1,
                              activation="linear",
                              padding="valid",
                              name="conv1",
                              weight_decay=0.01)
    network = tflearn.batch_normalization(network, name="bn1")
    network = tflearn.relu(network)
    network = tflearn.max_pool_2d(network,
                                  kernel_size=2,
                                  strides=2,
                                  padding="same",
                                  name="pool1")

    network = tflearn.conv_2d(network,
                              nb_filter=64,
                              filter_size=3,
                              strides=1,
                              activation="linear",
                              padding="valid",
                              name="conv2",
                              weight_decay=0.01)
    network = tflearn.batch_normalization(network, name="bn2")
    network = tflearn.relu(network)
    network = tflearn.max_pool_2d(network,
                                  kernel_size=2,
                                  strides=2,
                                  padding="same",
                                  name="pool2")

    network = tflearn.flatten(network, name="flat1")

    network = tflearn.fully_connected(network,
                                      1024,
                                      activation="linear",
                                      name="fc1",
                                      weight_decay=0.01)
    network = tflearn.batch_normalization(network, name="bn2")
    network = tflearn.relu(network)

    network = tflearn.fully_connected(network,
                                      1024,
                                      activation="linear",
                                      name="fc2",
                                      weight_decay=0.01)
    network = tflearn.batch_normalization(network, name="bn3")
    network = tflearn.relu(network)

    logits = tflearn.fully_connected(network,
                                     10,
                                     activation="softmax",
                                     name="output",
                                     weight_decay=0.01)
    loss = tflearn.categorical_crossentropy(logits, y)
    train_op = tflearn.Adam(0.0001, 0.9)().minimize(loss)

    return logits, loss, train_op
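my_model returns raw graph pieces rather than a tflearn.DNN, so it is meant for a hand-written training loop. A minimal sketch, where the placeholder shapes (CIFAR-10-sized) and the next_batch iterator are assumptions, not from the source:

import tensorflow as tf
import tflearn

# Hypothetical driver for my_model().
X_ph = tf.placeholder(tf.float32, [None, 32, 32, 3])
y_ph = tf.placeholder(tf.int64, [None])
logits, loss, train_op = my_model(X_ph, y_ph)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    tflearn.is_training(True, session=sess)  # put batch-norm in training mode
    for xb, yb in next_batch():              # assumed batch iterator
        sess.run(train_op, feed_dict={X_ph: xb, y_ph: yb})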
Example #13
# Module-level imports assumed by this excerpt: pickle, numpy, tensorflow,
# and the homework helpers load_policy and tf_util. The evaluate() helper
# called below is likewise defined elsewhere in the source.
import pickle
import numpy as np
import tensorflow as tf

import load_policy
import tf_util


def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('envname', type=str)
    parser.add_argument('expert_policy_file', type=str)
    parser.add_argument("data", type=str)
    parser.add_argument("--model", type=str, default='None')
    parser.add_argument("--epochs", type=int, default=5)
    parser.add_argument("--dagger_step", type=int, default=5)
    parser.add_argument("--dagger_rollout", type=int, default=100)

    args = parser.parse_args()

    import gym
    env = gym.make(args.envname)

    import tflearn

    print('loading data')
    with open(args.data, 'rb') as f:
        obs, act = pickle.load(f)
    print('  -> done!')

    batch_size = 256
    max_steps = 500  # per-episode cap; undefined in this excerpt, 500 matches the companion BC.py

    #### Build Model
    X = obs
    Y = act.squeeze()
    input_size = X.shape[1]
    hid1 = 32
    hid2 = 16
    output_size = Y.shape[1]

    input_layer = tflearn.input_data(shape=[None, input_size])
    layer1 = tflearn.fully_connected(input_layer,
                                     hid1,
                                     activation='relu',
                                     regularizer='L2',
                                     weight_decay=0.001)
    layer2 = tflearn.fully_connected(layer1,
                                     hid2,
                                     activation='relu',
                                     regularizer='L2',
                                     weight_decay=0.001)
    output_layer = tflearn.fully_connected(layer2,
                                           output_size,
                                           activation='linear')

    adam = tflearn.Adam(learning_rate=0.001)
    net = tflearn.regression(output_layer, optimizer=adam, loss='mean_square')

    model = tflearn.DNN(net)

    if args.model == 'None':
        model.fit(X,
                  Y,
                  batch_size=batch_size,
                  n_epoch=args.epochs,
                  shuffle=True,
                  show_metric=True,
                  run_id='behavior clone: ' + args.envname)
        model.save('model/' + args.envname + '.tfl')
    else:
        model.load(args.model)
    #return
    # evaluate
    print('loading and building expert policy')
    policy_fn = load_policy.load_policy(args.expert_policy_file)
    print('loaded and built')

    cur = evaluate(env, model)
    ret_avg = [np.mean(cur)]
    ret_std = [np.std(cur)]
    ret_all = [cur]
    for step in range(args.dagger_step):
        print('dagger step #', step)
        observations = []
        actions = []
        with tf.Session():
            tf_util.initialize()

            for i in range(args.dagger_rollout):
                if i % 20 == 0:
                    print('rollout iter = ', i)
                obs = env.reset()
                done = False
                totalr = 0.
                steps = 0
                while not done:
                    action = model.predict(obs[None, :])
                    obs, r, done, _ = env.step(action)
                    observations.append(obs)
                    actions.append(policy_fn(obs[None, :]))
                    totalr += r
                    steps += 1
                    if steps >= max_steps:
                        break

        X = np.row_stack((X, np.array(observations)))
        Y = np.row_stack((Y, np.array(actions).squeeze()))
        model.fit(X,
                  Y,
                  batch_size=256,
                  n_epoch=args.epochs,
                  shuffle=True,
                  show_metric=True,
                  run_id='behavior clone: ' + args.envname)
        model.save('model/' + args.envname + '-iter{a}.tfl'.format(a=step))
        returns = evaluate(env, model)
        ret_avg.append(np.mean(returns))
        ret_std.append(np.std(returns))
        ret_all.append(returns)
        print('current avg return = {a}, std = {b}'.format(a=ret_avg[-1],
                                                           b=ret_std[-1]))

    print('mean return', ret_avg)
    print('std of return', ret_std)

    with open(args.envname + '_dagger_result.pkl', 'wb') as f:
        pickle.dump([ret_avg, ret_std, ret_all], f)
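The script calls evaluate(env, model) without defining it; a plausible implementation, hypothetical and mirroring the rollout loop in Example #9, would be:

# Hypothetical evaluate() helper (not in the source): roll the learned
# policy for a number of episodes and collect total rewards.
def evaluate(env, model, eval_num=50, max_steps=500):
    returns = []
    for _ in range(eval_num):
        obs, done, totalr, steps = env.reset(), False, 0., 0
        while not done:
            action = model.predict(obs[None, :])
            obs, r, done, _ = env.step(action)
            totalr += r
            steps += 1
            if steps >= max_steps:
                break
        returns.append(totalr)
    return returns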