Example #1
File: gen.py Project: michael13162/novogen
from data import preprocessing
from model import nn


def gen(target_molecules):
    # Truncate each molecule string to at most 50 characters
    for i, mol in enumerate(target_molecules):
        target_molecules[i] = mol[:50]

    # Create preprocessing instance
    pp = preprocessing()

    # Load data from small data set
    X_train, y_train, X_test, y_test = pp.load_data()

    # Remove characters that are not in the preprocessing charset
    for i in range(len(target_molecules)):
        mol = target_molecules[i]
        for char in mol:
            if char not in pp.charset:
                mol = mol.replace(char, '')
                print("Oops. Removing bad char:", char)
        target_molecules[i] = mol

    # Create & load model
    model = nn(X_train, y_train, X_test, y_test)
    model.load(pp)

    # Generate a molecule
    molecules = model.generate(target=target_molecules,
                               preprocessing_instance=pp,
                               hit_rate=20)
    return molecules
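A hypothetical call, assuming target_molecules is a list of SMILES strings (the project's data files use the .smi extension); the variable names here are illustrative only:

    seeds = ["CCO", "c1ccccc1"]
    generated = gen(seeds)
    print(generated)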
Example #2
    def move_ai(self, ball):
        # Let the network map ball and paddle state to a movement decision
        X = prepare_features(ball.x_speed, ball.y_speed, ball.y, self.y)
        decision = nn(self.A, self.B, self.C, self.D, X)
        if decision > 0.1:
            self._move_up()
        elif decision < -0.1:
            self._move_down()
        self._draw()
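The ±0.1 thresholds act as a dead zone: when the network's output is near zero, the paddle holds its position instead of oscillating between up and down moves.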
Example #3
    def on_update(self):
        if self.dead:
            return
        self.ball.move()
        # End the run when the ball leaves the screen horizontally
        if self.ball.x <= 0 or self.ball.x + self.ball.size / 2 >= WIDTH:
            self.dead = True
            return
        # Reward the agent each time the paddle returns the ball
        if self.check_collision(self.paddle):
            self.fitness += 1
        self.check_collision(self.one, train=True)
        X = prepare_features(self.ball.x_speed, self.ball.y_speed,
                             self.ball.y, self.paddle.y)
        self.move(nn(self.A, self.B, self.C, self.D, X))
Example #4
    def model(self):
        """
        It assumes that self.x is already in the required shape 
        (-1, img_size, img_size, img_ch)
        """

        # the forward model
        out, reg_loss = nn(self.measurements,
                           reuse=False,
                           TRAIN_FLAG=self.TRAIN_FLAG,
                           nchstart=64,
                           act_fn=tf.nn.leaky_relu)

        return out, reg_loss
Example #5
import numpy as np


def nn_config(filename, activation='RELU'):
    """
    Parse the network parameters from a text file.
    """
    # Read the trained parameters, one value per line, into res
    with open('nn/' + filename) as inputfile:
        lines = inputfile.readlines()
    length = len(lines)
    res = np.zeros(length)
    for i, text in enumerate(lines):
        # eval() executes whatever the line contains; see the safer
        # variant sketched after this example
        res[i] = eval(text)

    # set the neural network
    network = nn(res, activation)
    return network
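Because eval() executes whatever appears in the file, a safer parse is worth sketching. This is a minimal sketch, assuming each line holds a single numeric literal; nn_config_safe is a hypothetical name, not part of the original project:

    import numpy as np

    def nn_config_safe(filename, activation='RELU'):
        # float() accepts only numeric literals, so a malformed or
        # malicious line raises ValueError instead of executing code
        with open('nn/' + filename) as inputfile:
            res = np.array([float(line) for line in inputfile])
        return nn(res, activation)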
Example #6
    def build_model(self):

        # concat the projection op

        # first we reshape P to get channels last
        # P shape should be (batch_size, img_size, img_size, ntri)
        reshape_P = tf.transpose(tf.reshape(
            self.P, (-1, self.ntri, self.img_size, self.img_size)),
                                 perm=[0, 2, 3, 1],
                                 name='reshape_proj')

        # next we concat the projection matrix to the image
        concat = tf.concat((self.measurements, reshape_P),
                           axis=3,
                           name='concat_inp')

        self.out, self.reg_loss = nn(concat,
                                     reuse=False,
                                     TRAIN_FLAG=self.TRAIN_FLAG,
                                     nchstart=64,
                                     act_fn=tf.nn.leaky_relu)

        self.out = self.apply_projection(self.out)

        # get projection of actual image
        flat_out = tcl.flatten(self.true_img)
        flat_out = self.apply_projection(flat_out)

        self.proj_out = tf.reshape(flat_out,
                                   (-1, self.img_size, self.img_size))

        self.loss = tf.reduce_mean(
            tf.square(self.out - flat_out)) + self.reg_loss

        # update_ops required for batch_normalization
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        with tf.control_dependencies(update_ops):
            self.NNtrain = tf.train.AdamOptimizer(self.lr).minimize(self.loss)

        summary("nn_loss", self.loss)
        self.summary_op = tf.summary.merge_all()
        print('Model built!')

        return None
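Note on the update_ops block above: in TF1, batch normalization registers its moving-average updates in tf.GraphKeys.UPDATE_OPS rather than attaching them to the loss, so wrapping the Adam step in tf.control_dependencies(update_ops) is what makes every training step also refresh those statistics.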
Example #7
from model import nn
import yaml


def train(sr_model, opt, loss, tr_hr, tr_lr, vl_hr, vl_lr, epochs, batch_size,
          path_save):
    patch_gen(tr_hr, "train", path_save)
    patch_gen(vl_hr, "validation", path_save)
    with open(path_save + "validation.txt") as val_txt:
        val_patch_list = val_txt.readlines()
    with open(path_save + "train.txt") as tr_txt:
        tr_patch_list = tr_txt.readlines()
    if loss == "ssim_loss":
        loss = ssim_loss
    elif loss == "psnr_loss":
        loss = psnr_loss
    tr_generator = DataGenerator(tr_patch_list, batch_size, tr_hr, tr_lr)
    vl_generator = DataGenerator(val_patch_list, batch_size, vl_hr, vl_lr)
    sr_model.compile(optimizer=opt, loss=loss)
    sr_model.fit(tr_generator, epochs=epochs, validation_data=vl_generator)


if __name__ == '__main__':
    with open("config.yml", "r") as yamlfile:
        # safe_load avoids executing arbitrary YAML tags
        config = yaml.safe_load(yamlfile)
        print("Read successful")
    model = nn(config['input_shape'])
    train(model, config['opt'], config['loss'], config['tr_hr_path'],
          config['tr_lr_path'], config['val_hr_path'], config['val_lr_path'],
          config['epoch'], config['batch_size'], config['path_save'])
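A hypothetical config.yml matching the keys read above; every value is a placeholder, not taken from the project:

    input_shape: [64, 64, 3]
    opt: adam
    loss: ssim_loss
    tr_hr_path: data/train/hr/
    tr_lr_path: data/train/lr/
    val_hr_path: data/val/hr/
    val_lr_path: data/val/lr/
    epoch: 100
    batch_size: 16
    path_save: output/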
Example #8
"""
Created on Mon Feb 12 16:46:05 2018

@author: josharnold
"""

from data import preprocessing
from model import nn

# Load data
X_train, y_train, X_test, y_test = preprocessing.load_data(load_char_set=False,
                                                           pad=25,
                                                           file_name="9.smi")

# Define model
model = nn(X_train, y_train, X_test, y_test)

# Create model
model.create_model()

# Set num epochs
model.num_epochs = 50
model.batch_size = 512

# Train
model.train(show_loss=True)

# Save
model.save(force_overwrite=True, protocol=2)

# Predict
Example #9
File: nn.py Project: uuup111/acerta-abide
def run_finetuning(experiment,
                   X_train,
                   y_train,
                   X_valid,
                   y_valid,
                   X_test,
                   y_test,
                   model_path,
                   prev_model_1_path,
                   prev_model_2_path,
                   code_size_1=1000,
                   code_size_2=600):
    """

    Run the pre-trained NN for fine-tuning, using first and second autoencoders' weights

    """

    # Hyperparameters
    learning_rate = 0.0005
    dropout_1 = 0.6
    dropout_2 = 0.8
    initial_momentum = 0.1
    final_momentum = 0.9  # Increase momentum along epochs to avoid fluctuations
    saturate_momentum = 100

    training_iters = 100
    start_saving_at = 20
    batch_size = 10
    n_classes = 2

    if os.path.isfile(model_path) or \
       os.path.isfile(model_path + ".meta"):
        return

    # Convert output to one-hot encoding
    y_train = np.array([to_softmax(n_classes, y) for y in y_train])
    y_valid = np.array([to_softmax(n_classes, y) for y in y_valid])
    y_test = np.array([to_softmax(n_classes, y) for y in y_test])

    # Load pretrained encoder weights
    ae1 = load_ae_encoder(X_train.shape[1], code_size_1, prev_model_1_path)
    ae2 = load_ae_encoder(code_size_1, code_size_2, prev_model_2_path)

    # Initialize NN model with the encoder weights
    model = nn(X_train.shape[1], n_classes, [
        {
            "size": code_size_1,
            "actv": tf.nn.tanh
        },
        {
            "size": code_size_2,
            "actv": tf.nn.tanh
        },
    ], [
        {
            "W": ae1["W_enc"],
            "b": ae1["b_enc"]
        },
        {
            "W": ae2["W_enc"],
            "b": ae2["b_enc"]
        },
    ])

    # Place GD + momentum optimizer
    model["momentum"] = tf.placeholder("float32")
    optimizer = tf.train.MomentumOptimizer(
        learning_rate, model["momentum"]).minimize(model["cost"])

    # Compute accuracies
    correct_prediction = tf.equal(tf.argmax(model["output"], 1),
                                  tf.argmax(model["expected"], 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    # Initialize Tensorflow session
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        # Define model saver
        saver = tf.train.Saver(model["params"],
                               write_version=tf.train.SaverDef.V2)

        # Initialize with an absurd cost and accuracy for model selection
        prev_costs = np.array([9999999999] * 3)
        prev_accs = np.array([0.0] * 3)

        # Iterate Epochs
        for epoch in range(training_iters):

            # Break training set into batches
            batches = range(int(len(X_train) / batch_size))
            costs = np.zeros((len(batches), 3))
            accs = np.zeros((len(batches), 3))

            # Linearly ramp momentum from initial_momentum to final_momentum
            # over the first saturate_momentum epochs, then hold it
            alpha = min(float(epoch) / float(saturate_momentum), 1.)
            momentum = initial_momentum * (1 - alpha) + alpha * final_momentum

            for ib in batches:

                # Compute start and end of batch from training set data array
                from_i = ib * batch_size
                to_i = (ib + 1) * batch_size

                # Select current batch
                batch_xs, batch_ys = X_train[from_i:to_i], y_train[from_i:to_i]

                # Run optimization and retrieve training cost and accuracy
                _, cost_train, acc_train = sess.run(
                    [optimizer, model["cost"], accuracy],
                    feed_dict={
                        model["input"]: batch_xs,
                        model["expected"]: batch_ys,
                        model["dropouts"][0]: dropout_1,
                        model["dropouts"][1]: dropout_2,
                        model["momentum"]: momentum,
                    })

                # Compute validation cost and accuracy
                cost_valid, acc_valid = sess.run(
                    [model["cost"], accuracy],
                    feed_dict={
                        model["input"]: X_valid,
                        model["expected"]: y_valid,
                        model["dropouts"][0]: 1.0,
                        model["dropouts"][1]: 1.0,
                    })

                # Compute test cost and accuracy
                cost_test, acc_test = sess.run(
                    [model["cost"], accuracy],
                    feed_dict={
                        model["input"]: X_test,
                        model["expected"]: y_test,
                        model["dropouts"][0]: 1.0,
                        model["dropouts"][1]: 1.0,
                    })

                costs[ib] = [cost_train, cost_valid, cost_test]
                accs[ib] = [acc_train, acc_valid, acc_test]

            # Compute the average costs from all batches
            costs = costs.mean(axis=0)
            cost_train, cost_valid, cost_test = costs

            # Compute the average accuracy from all batches
            accs = accs.mean(axis=0)
            acc_train, acc_valid, acc_test = accs

            # Pretty print training info
            print(format_config(
                "Exp={experiment}, Model=mlp, Iter={epoch:5d}, Acc={acc_train:.6f} {acc_valid:.6f} {acc_test:.6f}, Momentum={momentum:.6f}",
                {
                    "experiment": experiment,
                    "epoch": epoch,
                    "acc_train": acc_train,
                    "acc_valid": acc_valid,
                    "acc_test": acc_test,
                    "momentum": momentum,
                }))

            # Save the model when validation accuracy improves, skipping
            # the first epochs because of early fluctuations
            if acc_valid > prev_accs[1] and epoch > start_saving_at:
                print("Saving better model")
                saver.save(sess, model_path)
                prev_accs = accs
                prev_costs = costs
            else:
                print("")
Example #10
File: nn.py Project: zfyyfz12/AIMAFE
def run_finetuning(experiment,
                   X_train,
                   y_train,
                   X_valid,
                   y_valid,
                   X_test,
                   y_test,
                   model_path,
                   prev_model_1_path,
                   prev_model_2_path,
                   prev_model_3_path,
                   code_size_1=2500,
                   code_size_2=1250,
                   code_size_3=625):
    learning_rate = 0.0005
    dropout_1 = 0.6
    dropout_2 = 0.8
    dropout_3 = 0.6
    initial_momentum = 0.1
    final_momentum = 0.9  # Increase momentum along epochs to avoid fluctuations
    saturate_momentum = 100

    training_iters = 100
    start_saving_at = 20
    batch_size = 10
    n_classes = 2

    if os.path.isfile(model_path) or \
       os.path.isfile(model_path + ".meta"):
        return

    y_train = np.array([to_softmax(n_classes, y) for y in y_train])
    y_valid = np.array([to_softmax(n_classes, y) for y in y_valid])
    y_test = np.array([to_softmax(n_classes, y) for y in y_test])

    ae1 = load_ae_encoder(X_train.shape[1], code_size_1, prev_model_1_path)
    ae2 = load_ae_encoder(code_size_1, code_size_2, prev_model_2_path)
    ae3 = load_ae_encoder(code_size_2, code_size_3, prev_model_3_path)

    model = nn(X_train.shape[1], n_classes, [
        {
            "size": code_size_1,
            "actv": tf.nn.tanh
        },
        {
            "size": code_size_2,
            "actv": tf.nn.tanh
        },
        {
            "size": code_size_3,
            "actv": tf.nn.tanh
        },
    ], [
        {
            "W": ae1["W_enc"],
            "b": ae1["b_enc"]
        },
        {
            "W": ae2["W_enc"],
            "b": ae2["b_enc"]
        },
        {
            "W": ae3["W_enc"],
            "b": ae3["b_enc"]
        },
    ])

    model["momentum"] = tf.placeholder("float32")
    optimizer = tf.train.MomentumOptimizer(
        learning_rate, model["momentum"]).minimize(model["cost"])

    # Compute accuracies
    correct_prediction = tf.equal(tf.argmax(model["output"], 1),
                                  tf.argmax(model["expected"], 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)

        # Define model saver
        saver = tf.train.Saver(model["params"],
                               write_version=tf.train.SaverDef.V2)
        prev_costs = np.array([9999999999] * 3)
        prev_accs = np.array([0.0] * 3)

        # Iterate Epochs
        for epoch in range(training_iters):

            batches = range(int(len(X_train) / batch_size))
            costs = np.zeros((len(batches), 3))
            accs = np.zeros((len(batches), 3))

            # Linearly ramp momentum from initial_momentum to final_momentum
            # over the first saturate_momentum epochs, then hold it
            alpha = min(float(epoch) / float(saturate_momentum), 1.)
            momentum = initial_momentum * (1 - alpha) + alpha * final_momentum

            for ib in batches:

                from_i = ib * batch_size
                to_i = (ib + 1) * batch_size
                batch_xs, batch_ys = X_train[from_i:to_i], y_train[from_i:to_i]
                _, cost_train, acc_train = sess.run(
                    [optimizer, model["cost"], accuracy],
                    feed_dict={
                        model["input"]: batch_xs,
                        model["expected"]: batch_ys,
                        model["dropouts"][0]: dropout_1,
                        model["dropouts"][1]: dropout_2,
                        model["dropouts"][2]: dropout_3,
                        model["momentum"]: momentum,
                    })
                cost_valid, acc_valid = sess.run(
                    [model["cost"], accuracy],
                    feed_dict={
                        model["input"]: X_valid,
                        model["expected"]: y_valid,
                        model["dropouts"][0]: 1.0,
                        model["dropouts"][1]: 1.0,
                        model["dropouts"][2]: 1.0,
                    })
                cost_test, acc_test = sess.run(
                    [model["cost"], accuracy],
                    feed_dict={
                        model["input"]: X_test,
                        model["expected"]: y_test,
                        model["dropouts"][0]: 1.0,
                        model["dropouts"][1]: 1.0,
                        model["dropouts"][2]: 1.0,
                    })

                costs[ib] = [cost_train, cost_valid, cost_test]
                accs[ib] = [acc_train, acc_valid, acc_test]
            costs = costs.mean(axis=0)
            cost_train, cost_valid, cost_test = costs

            accs = accs.mean(axis=0)
            acc_train, acc_valid, acc_test = accs
            print(format_config(
                "Exp={experiment}, Model=mlp, Iter={epoch:5d}, Acc={acc_train:.6f} {acc_valid:.6f} {acc_test:.6f}, Momentum={momentum:.6f}",
                {
                    "experiment": experiment,
                    "epoch": epoch,
                    "acc_train": acc_train,
                    "acc_valid": acc_valid,
                    "acc_test": acc_test,
                    "momentum": momentum,
                }))
            # Save the model when validation accuracy improves, skipping
            # the first epochs because of early fluctuations
            if acc_valid > prev_accs[1] and epoch > start_saving_at:
                print("Saving better model")
                saver.save(sess, model_path)
                prev_accs = accs
                prev_costs = costs
            else:
                print("123")
Example #11
File: main.py Project: dllmm/CS229-Notes
    y = np.matrix(yy).astype('float')
    return x, y


if __name__ == '__main__':
    x, y = read_file('iris.csv')
    # Hold out the last 10 samples of each of the three iris classes for
    # testing and train on the remaining 40 per class
    x_test = np.row_stack((x[40:50, :], x[90:100, :], x[140:, :]))
    y_test = np.row_stack((y[40:50, :], y[90:100, :], y[140:, :]))
    x = np.row_stack((x[0:40, :], x[50:90, :], x[100:140, :]))
    y = np.row_stack((y[0:40, :], y[50:90, :], y[100:140, :]))
    config = {}
    config['hidden_num'] = 2
    config['neuron_num'] = 5
    config['alpha'] = 4
    config['lamda'] = 1
    mynn = nn(config)
    theta = mynn.theta_init(x, y)
    ls, a, pre_y = mynn.loss(x, y, theta)
    for i in range(5000):
        theta = mynn.grad(x, y, a, theta)
        lss, a, pre_y = mynn.loss(x, y, theta)
        # Halve the learning rate whenever the loss increases
        if lss > ls:
            mynn.alpha = mynn.alpha / 2
        ls = lss
    y_pred = np.array(np.argmax(pre_y, axis=1) + 1)
    y = np.array(np.argmax(y, axis=1) + 1)
    correct = [1 if a == b else 0 for (a, b) in zip(y_pred, y)]
    accuracy = (sum(map(int, correct)) / float(len(correct)))
    print('train accuracy = {0}%'.format(accuracy * 100))
    lss_test, a_test, pre_y_test = mynn.loss(x_test, y_test, theta)
    print(pre_y_test)
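The snippet prints the raw test-set outputs but never scores them; a hypothetical follow-up, mirroring the training-accuracy computation above:

    y_test_pred = np.array(np.argmax(pre_y_test, axis=1) + 1)
    y_test_true = np.array(np.argmax(y_test, axis=1) + 1)
    test_correct = [1 if a == b else 0 for (a, b) in zip(y_test_pred, y_test_true)]
    print('test accuracy = {0}%'.format(
        sum(test_correct) / float(len(test_correct)) * 100))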