def train():
    model_version, model_name = get_latest_model()
    logger.info("Training on gathered game data, initializing from {}".format(
        model_name))
    new_model_name = generate(model_version + 1)
    logger.info("New model will be {}".format(new_model_name))
    save_file = os.path.join(PATHS.MODELS_DIR, new_model_name)

    try:
        logger.info("Getting tf_records")
        tf_records = sorted(
            gfile.Glob(os.path.join(PATHS.TRAINING_CHUNK_DIR,
                                    '*.tfrecord.zz')))
        tf_records = tf_records[-1 *
                                (GLOBAL_PARAMETER_STORE.WINDOW_SIZE //
                                 GLOBAL_PARAMETER_STORE.EXAMPLES_PER_RECORD):]

        print("Training from:", tf_records[0], "to", tf_records[-1])

        with timer("Training"):
            network.train(PATHS.ESTIMATOR_WORKING_DIR, tf_records,
                          model_version + 1)
            network.export_latest_checkpoint_model(PATHS.ESTIMATOR_WORKING_DIR,
                                                   save_file)

    except Exception:
        logger.info("Got an error training")
        logging.exception("Train error")
def train_network(dataPath, matPath, networkPath, epochSize, learningRate,
                  hiddenSize, batchSize):

    try:
        m_data = sio.loadmat(matPath)
        im_data = m_data['images']
        l_data = np.squeeze(m_data['labels'])
    except Exception:
        # cached .mat could not be loaded; rebuild it from the raw data
        print("\nPreprocessing train data\n")
        training_data = create_training_data(dataPath)

        imgs = []
        lbs = []

        for featureVector, label in training_data:
            imgs.append(featureVector)
            lbs.append(label)

        im_data = np.squeeze(imgs).transpose()
        l_data = np.asarray(lbs)

        sio.savemat(matPath, {'images': im_data, 'labels': l_data})

    trainset = network.ProcessedDataset(im_data, l_data)

    trainloader = torch.utils.data.DataLoader(trainset,
                                              batch_size=batchSize,
                                              shuffle=True,
                                              num_workers=0)

    net = network.Feedforward(hiddenSize)

    network.train(net, trainloader, networkPath, learningRate, epochSize)
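
The example above hands the cached arrays to a network.ProcessedDataset wrapper that is not shown. A minimal sketch of what such a torch Dataset might look like (the class name, the transposed layout, and the dtypes are assumptions based on how im_data is saved above):

import numpy as np
import torch
from torch.utils.data import Dataset

class ProcessedDataset(Dataset):
    """Hypothetical sketch: wraps the cached image/label arrays for a DataLoader."""

    def __init__(self, images, labels):
        # im_data is saved above as (features, n_samples), so transpose back
        # to one sample per row before wrapping it in tensors.
        self.images = torch.as_tensor(np.asarray(images).T, dtype=torch.float32)
        self.labels = torch.as_tensor(np.asarray(labels), dtype=torch.long)

    def __len__(self):
        return len(self.labels)

    def __getitem__(self, idx):
        return self.images[idx], self.labels[idx]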
Example #3
def train_network():
    """
    Train a neural net using the pipeline.
    """

    import network

    network.train()
    input("Press Enter to continue...")
Example #4
def run(arg, f1, f2):
    if arg == '--train':
        agent = objects.Agent(14, 2)
        fight_data = construct.fights('../xml_data/schedule.xml')
        write_file(fight_data, TRAIN_FILE_NAME)
        network.train(agent)
    elif arg == '--predict':
        agent = objects.Agent(14, 2)
        fight_data = construct.fights('../xml_data/schedule.xml')
        write_file(fight_data, FIGHT_FILE_NAME)
        network.predict(agent, f1.replace(" ", ""), f2.replace(" ", ""))
    print("Arguments Processed!")
def train_nets(inputs, outputs, training_rate, epochs, batch_size, outer_min,
               random_limit, layers, activations, d_activations, cost, d_cost,
               num_nets):
    minimum = 100
    minnet = 3
    while minimum > outer_min:
        error_sum = 0
        print('building networks')
        print("Random Limit :", random_limit)
        build_networks(layers, activations, d_activations, cost, d_cost,
                       num_nets, random_limit)
        for network in networks:
            output = network.train(inputs, outputs, training_rate, epochs,
                                   batch_size, False)
            print('finished a network')
            print("output error =", output)
            # Everything below is subject to change
            error_sum += output
            if output < minimum:
                minimum = output
                minnet = network
        print("Couldn't find anything")
        avg = error_sum / num_nets
        print(minimum, avg, random_limit)
        networks.clear()
        random_limit *= 10
    return minnet
Example #6
def train_net(self, comb):
    self.txt_resultsText.clear()
    if self.dataPath is not None:
        print("Loading Data...")
        if comb and len(self.app_combs) > 0:
            train_data, train_labels, test_data, test_labels = data.data_init_comb(
                self.dataPath,
                train_comb=self.app_combs,
                test_comb=self.app_combs)
            _, _, self.Net = network.train(train_data, train_labels,
                                           test_data, test_labels)
        if not comb:
            train_data, train_labels, test_data, test_labels = data.data_init_measured(
                self.dataPath)
            _, _, self.Net = network.train(train_data, train_labels,
                                           test_data, test_labels)
    self.btn_disaggComb.setEnabled(True)
Example #7
def train_network():
    print('BEGIN_TRAINING')
    images = normalize_images_255(read_images(TRAIN_IMAGE_FILE))
    labels = read_labels(TRAIN_LABEL_FILE)
    network = Network([784, 16, 16, 10])
    print('TRAINING')
    train(network, images, labels, 0.5, 20)
    training_values = []
    for i in range(len(images)):
        classify_outputs = classify_network(network, images[i])
        output_value = single_network_output(classify_outputs)
        training_values.append(output_value)
    accuracy = calculate_network_accuracy(training_values, labels)
    print('TRAIN_COMPLETE')
    print('Accuracy')
    print(accuracy)
    return network
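
calculate_network_accuracy is not defined in this snippet; a minimal sketch, under the assumption that training_values and labels are parallel sequences of predicted and true digit classes:

def calculate_network_accuracy(predicted_values, labels):
    # Fraction of predictions that match the true labels (assumed integer classes).
    correct = sum(1 for pred, label in zip(predicted_values, labels) if pred == label)
    return correct / len(labels)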
def base_experiment():
    n_train, n_valid = 55000, 5000
    num_pruning_iter = 15  # How many iterative pruning steps to perform

    print(
        "Running a base experiment with input_size:{}, hidden_size:{}, num_classes:{}, "
        "batch_size:{}, num_epochs:{}, num_pruning_iter:{}, pruning_rates:{}".
        format(network.input_size, network.hidden_size, network.num_classes,
               network.batch_size, network.num_epochs, num_pruning_iter,
               network.pruning_rates))

    train_loader, val_loader = dataset.init_data_mask_base_expt(
        n_train, n_valid)

    model = network.MultiLayerPerceptron().to(network.device)
    model.apply(weights_init)

    presets = {}
    for name, param in model.state_dict().items():
        presets[name] = param.clone()
    model.presets = presets

    early_stop_iteration = network.train(model, train_loader, val_loader)

    test_accuracy_history = []
    test_accuracy = network.test(model)
    test_accuracy_history.append(test_accuracy)
    early_stop_iteration_history = [early_stop_iteration]

    for iter in range(num_pruning_iter):
        # This is the percentage of weights remaining in the network after pruning
        print(
            "\tResults for pruning round {} with percentage of weights remaining {}"
            .format(iter + 1, 100 * 0.8**(iter + 1)))
        network.prune(model)
        network.reset_params(model)
        early_stop_iteration = network.train(model, train_loader, val_loader)
        early_stop_iteration_history.append(early_stop_iteration)
        test_accuracy = network.test(model)
        test_accuracy_history.append(test_accuracy)

    print('Test accuracy history {}'.format(test_accuracy_history))
    print(
        'Early stop iteration history {}'.format(early_stop_iteration_history))
Example #9
def train():
    """Train network for a number of steps."""
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)

        # Get images and labels for network.
        images, labels = network.distorted_inputs()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = network.inference(images)

        # Calculate loss.
        loss = network.loss(logits, labels)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = network.train(loss, global_step)

        # Create a saver.
        saver = tf.train.Saver(tf.all_variables())

        # Build the summary operation based on the TF collection of Summaries.
        #summary_op = tf.merge_all_summaries()

        # Build an initialization operation to run below.
        init = tf.initialize_all_variables()

        # Start running operations on the Graph.
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        #summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
        #                                        graph_def=sess.graph_def)

        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_input_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))
Example #10
def level(data, targets, layers, pattern, seed):

    # ---------------------------------------------------------------------------------------------------
    # set up dataset for nn

    active = organiser.remove_non_pfam(data)
    if pattern is None:
        active = organiser.reduce_none(targets, active, 20, seed)
    else:
        active = organiser.get_current(targets, active, pattern)
    inputs = data.dense_shape[1]
    outputs = network.get_outputs(targets, active, pattern)

    nodes = range(inputs, outputs, (inputs - outputs) // layers)

    nn = network.build(inputs, outputs, layers, "sigmoid", nodes, "rmsprop", "mse", ["accuracy"])

    network.train(nn, data, targets, active, 1000, 5, 0)
    return nn
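
The build call above ("sigmoid", "rmsprop", "mse", ["accuracy"]) reads like a thin wrapper around a Keras Sequential model; a hedged sketch of such a wrapper (the use of Keras and the exact layer layout are assumptions, not the original implementation):

from tensorflow import keras

def build(inputs, outputs, layers, activation, nodes, optimizer, loss, metrics):
    # Hypothetical sketch: stack `layers` dense layers of the given sizes
    # between the input and output dimensions, then compile the model.
    model = keras.Sequential()
    model.add(keras.layers.InputLayer(input_shape=(inputs,)))
    for n in list(nodes)[:layers]:
        model.add(keras.layers.Dense(int(n), activation=activation))
    model.add(keras.layers.Dense(outputs, activation="softmax"))
    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model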
Example #11
def train():
  """Train network for a number of steps."""
  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)

    # Get images and labels for network.
    images, labels = network.distorted_inputs()

    # Build a Graph that computes the logits predictions from the
    # inference model.
    logits = network.inference(images)

    # Calculate loss.
    loss = network.loss(logits, labels)

    # Build a Graph that trains the model with one batch of examples and
    # updates the model parameters.
    train_op = network.train(loss, global_step)

    # Create a saver.
    saver = tf.train.Saver(tf.all_variables())

    # Build the summary operation based on the TF collection of Summaries.
    #summary_op = tf.merge_all_summaries()

    # Build an initialization operation to run below.
    init = tf.initialize_all_variables()

    # Start running operations on the Graph.
    sess = tf.Session(config=tf.ConfigProto(
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)

    # Start the queue runners.
    tf.train.start_queue_runners(sess=sess)

    #summary_writer = tf.train.SummaryWriter(FLAGS.train_dir,
    #                                        graph_def=sess.graph_def)

    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time

      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

      if step % 10 == 0:
        num_examples_per_step = FLAGS.batch_input_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)

        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print (format_str % (datetime.now(), step, loss_value,
                             examples_per_sec, sec_per_batch))
Example #12
def train():
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)

        images, labels = network.distorted_inputs()

        logits = network.inference(images)

        loss = network.loss(logits, labels)

        train_op = network.train(loss, global_step)

        saver = tf.train.Saver(tf.all_variables())

        summary_op = tf.merge_all_summaries()

        init = tf.initialize_all_variables()

        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement))
        sess.run(init)

        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)

        for step in xrange(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_input_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                    'sec/batch)')
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))

            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Example #13
def reinforcement_learn(predictor, turns=1000):
    game = CookieClickerGame(verbose=False)
    game.cookies = 100
    state = State(game)
    # predictor = Predictor()
    # predictor = LinearPredictor(len(state.get_state()), state.get_action_space())

    for i in range(1000):
        # print(game.str_basic())
        # print(game)

        # pred = predictor.predict(state)
        inputs_tensor = torch.tensor(state.get_state(), dtype=torch.float)
        pred_tensor = predictor(inputs_tensor)

        avail_preds = [
            p * a for p, a in zip(pred_tensor.detach().numpy(),
                                  state.get_action_availability())
        ]
        # pred_idx = argmax(avail_preds)
        pred_idx = random.choices(range(len(avail_preds)), avail_preds, k=1)[0]

        action = state.prediction_to_action(pred_idx)
        reward = state.perform_action(action)
        # print(avail_preds, "->", pred_idx)
        # print("reward:", reward)

        desired_tensor = pred_tensor.clone()

        alpha = 0.01
        if reward > 0:
            desired_tensor[pred_idx] += alpha
            train(predictor, desired_tensor, pred_tensor)
        elif reward < 0:
            desired_tensor[pred_idx] -= alpha
            train(predictor, desired_tensor, pred_tensor)

    print(game.total_cookies, game.cpt)
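
The train(predictor, desired_tensor, pred_tensor) helper used in the loop above is not shown. A minimal sketch, assuming it nudges the network output toward the adjusted target with one plain gradient step (the MSE loss and the learning rate are assumptions):

import torch
import torch.nn.functional as F

def train(predictor, desired_tensor, pred_tensor, lr=0.01):
    # One gradient step pulling the prediction toward the adjusted target.
    loss = F.mse_loss(pred_tensor, desired_tensor.detach())
    predictor.zero_grad()
    loss.backward()
    with torch.no_grad():
        for p in predictor.parameters():
            if p.grad is not None:
                p -= lr * p.grad
    return loss.item()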
Example #14
def test_network():
    wine_attributes = ["alch","malic","ash","alcash","mag","phen","flav","nfphens","proant","color","hue","dil","prol"]
    columns = ["class","alch","malic","ash","alcash","mag","phen","flav","nfphens","proant","color","hue","dil","prol"]
    """
        0) class
    	1) Alcohol
        2) Malic acid
        3) Ash
        4) Alcalinity of ash  
        5) Magnesium
        6) Total phenols
        7) Flavanoids
        8) Nonflavanoid phenols
        9) Proanthocyanins
        10)Color intensity
        11)Hue
        12)OD280/OD315 of diluted wines
        13)Proline    
    """

    from Induction.IntGrad.integratedGradients import random_baseline_integrated_gradients, integrated_gradients
    exit()

    df = read_data_pd("../../Data/wine.csv", columns=columns)

    df.columns = columns  # Add columns to dataframe.
    #Cov.columns = ["Sequence", "Start", "End", "Coverage"]
    dataman = Datamanager.Datamanager(dataframe_train=df, classes=3, dataset="wine")

    model = network.NN_3_25("wine", in_dim=13, out_dim=3)
    print(model.input_type)
    optimizer = optim.Adam(model.parameters(), lr=0.01, betas=(0.9, 0.999), eps=1e-6)
    #loss = network.RootMeanSquareLoss()
    #loss = t_loss.L1Loss()
    loss = t_loss.MSELoss()
    network.train(model, dataman, validation=True, optimizer=optimizer,
                  loss_function=loss, batch=20, iterations=50)
def split_data_experiment():

    n_train1, n_train2 = 27500, 27500
    n_valid1, n_valid2 = 2500, 2500
    num_pruning_iter = 27  # How many iterative pruning steps to perform

    print(
        "Running a data split experiment with input_size:{}, hidden_size:{}, num_classes:{}, "
        "batch_size:{}, num_epochs:{}, num_pruning_iter:{}, pruning_rates:{}".
        format(network.input_size, network.hidden_size, network.num_classes,
               network.batch_size, network.num_epochs, num_pruning_iter,
               network.pruning_rates))

    train_loader1, val_loader1, train_loader2, val_loader2 = \
        dataset.init_data_mask_split_data_expt(n_train1, n_valid1, n_train2, n_valid2)

    model = network.MultiLayerPerceptron().to(network.device)
    model.apply(weights_init)

    presets = {}
    for name, param in model.state_dict().items():
        presets[name] = param.clone()
    model.presets = presets
    network.train(model, train_loader1, val_loader1)

    test_accuracy1 = network.test(model)
    test_accuracy_history1 = [test_accuracy1]
    test_accuracy_history2 = [test_accuracy1]

    for iter in range(num_pruning_iter):
        # This is the percentage of weights remaining in the network after pruning
        print(
            "Results for pruning round {} with percentage of weights remaining {}"
            .format(iter + 1, 100 * 0.8**(iter + 1)))
        # prune model after training with first half of data
        network.prune(model)
        # Reset, retrain on second half of data - perform testing
        network.reset_params(model)
        network.train(model, train_loader2, val_loader1)
        test_accuracy2 = network.test(model)
        test_accuracy_history2.append(test_accuracy2)
        # reset the model, retrain with first half of data - then perform testing
        network.reset_params(model)
        network.train(model, train_loader1, val_loader1)
        test_accuracy1 = network.test(model)
        test_accuracy_history1.append(test_accuracy1)

    print(
        'Test accuracy history after re-training with first half of the training dataset {}'
        .format(test_accuracy_history1))
    print(
        'Test accuracy history after re-training with second half of the training dataset {}'
        .format(test_accuracy_history2))
    visualize.plot_test_accuracy_coarse(test_accuracy_history1,
                                        test_accuracy_history2)
Example #16
def train():

  with tf.Graph().as_default():
    global_step = tf.Variable(0, trainable=False)
    # Get the image data and the corresponding label batch
    float_image, label = tfrecord.train_data_read(tfrecord_path=FLAGS.train_data)
    images, labels = tfrecord.create_batch(float_image,label,count_num=FLAGS.train_num)

    logits = network.inference(images)
    # Compute the loss
    loss = network.loss(logits, labels)
    # Train the model
    train_op = network.train(loss, global_step)
    # Save the model
    saver = tf.train.Saver(tf.global_variables())
    # Collect all summary operations
    summary_op = tf.summary.merge_all()
    # Initialize all variables.
    init = tf.initialize_all_variables()
    # Start running the computation graph
    sess = tf.Session(config=tf.ConfigProto(
        log_device_placement=FLAGS.log_device_placement))
    sess.run(init)
    # Start the queue runners
    tf.train.start_queue_runners(sess=sess)
    summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                            graph_def=sess.graph_def)
    for step in xrange(FLAGS.max_steps):
      start_time = time.time()
      _, loss_value = sess.run([train_op, loss])
      duration = time.time() - start_time
      assert not np.isnan(loss_value), 'Model diverged with loss = NaN'
      if step % 10 == 0:
        num_examples_per_step = FLAGS.batch_size
        examples_per_sec = num_examples_per_step / duration
        sec_per_batch = float(duration)
        format_str = ('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)')
        print (format_str % (datetime.now(), step, loss_value,
                             examples_per_sec, sec_per_batch))
      if step % 50 == 0:
        summary_str = sess.run(summary_op)
        summary_writer.add_summary(summary_str, step)
      # Save the model checkpoint.
      if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
        checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
        saver.save(sess, checkpoint_path, global_step=step)
def train(network, train_images, train_labels, batch_size, hm_epochs):
    for epoch in range(hm_epochs):
        batch_count = round(len(train_images) / batch_size)
        c_average = 0
        for batch in range(batch_count):
            c = network.train(
                train_images[batch * batch_size:batch * batch_size +
                             batch_size],
                train_labels[batch * batch_size:batch * batch_size +
                             batch_size])

            c_average += c * len(train_labels[batch * batch_size:batch *
                                              batch_size + batch_size])
            print('Batch {} out of {} finished'.format(batch + 1, batch_count))

        print('Epoch {} completed out of {}, cost:\t{}'.format(
            epoch + 1, hm_epochs, c_average / len(train_images)))
Example #18
def main():
    print("Extracting Features...")
    Training_DS, Testing_DS = network.getDataSet()
    print("Features Extracted")
    print("Building Network...")

    net = network.build(10)

    print("Network Built")
    print("Training...")

    trainer = network.train(net, Training_DS, epoch=3000)
    print("Trained")
    print("Testing..")
    test(net, Testing_DS, trainer)

    return net
Example #19
def train_nets(inputs, outputs, training_rate, epochs, batch_size, outer_min,
               random_limit, layers, activations, d_activations, cost, d_cost,
               num_nets):
    minimum = 100
    minnet = 3
    while minimum > outer_min:
        build_networks(layers, activations, d_activations, cost, d_cost,
                       num_nets, random_limit)
        for network in networks:
            output = network.train(inputs, outputs, training_rate, epochs,
                                   batch_size, False)
            if output < minimum:
                minimum = output
                minnet = network

        print("Couldn't find anything")
        print(minimum, random_limit)
        random_limit *= 10
        networks.clear()

    return minnet
def train(model):
    torch.manual_seed(2)
    landmarks = face_key_point.load_face_landmark_data(path=os.path.join(
        "data", "training.csv"),
                                                       batch_size=32)
    train_d = landmarks.data["train"]
    valid_d = landmarks.data["valid"]
    loss_fn = torch.nn.MSELoss()
    optimizer = optim.RMSprop(model.parameters(), lr=1e-3, weight_decay=1e-5)
    num_epochs = 500
    params = {
        'model': model,
        'train_loader': train_d,
        'test_loader': valid_d,
        'num_epochs': num_epochs,
        'loss_fn': loss_fn,
        'optimizer': optimizer,
    }

    loss_train, loss_test, model = network.train(**params)
    network.write_loss(loss_train, "train.txt")
    torch.save(model.state_dict(), 'model.pt')
Example #21
	def evaluateFitnessNN(self, nn):
		return(nn.train(self.train_train_data[0], self.train_train_data[1])/1797.0)
Example #22
import numpy as np
import matplotlib.pyplot as plt
import utilities
import preproc
import network
import data

test_comb = [
    '0b11100',
    '0b11011',
    '0b11111',
    '0b10101',
    '0b00100',
    '0b10111'
            ]

train_data_unscaled, train_labels, test_data_unscaled, test_labels = data.data_init_comb('data/real_world_new/', train_comb=test_comb, test_comb=test_comb)
_,_, NET = network.train(train_data_unscaled, train_labels, test_data_unscaled, test_labels)
def main():
    args = cfg.parse_args()
    torch.cuda.manual_seed(args.random_seed)

    # set visible GPU ids
    if len(args.gpu_ids) > 0:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu_ids

    # set TensorFlow environment for evaluation (calculate IS and FID)
    _init_inception()
    inception_path = check_or_download_inception('./tmp/imagenet/')
    create_inception_graph(inception_path)

    # the first GPU in visible GPUs is dedicated for evaluation (running Inception model)
    str_ids = args.gpu_ids.split(',')
    args.gpu_ids = []
    for id in range(len(str_ids)):
        if id >= 0:
            args.gpu_ids.append(id)
    if len(args.gpu_ids) > 1:
        args.gpu_ids = args.gpu_ids[1:]
    else:
        args.gpu_ids = args.gpu_ids

    # genotype G
    genotypes_root = os.path.join('exps', args.genotypes_exp, 'Genotypes')
    genotype_G = np.load(os.path.join(genotypes_root, 'latest_G.npy'))

    # import network from genotype
    basemodel_gen = eval('archs.' + args.arch + '.Generator')(args, genotype_G)
    gen_net = torch.nn.DataParallel(
        basemodel_gen, device_ids=args.gpu_ids).cuda(args.gpu_ids[0])
    basemodel_dis = eval('archs.' + args.arch + '.Discriminator')(args)
    dis_net = torch.nn.DataParallel(
        basemodel_dis, device_ids=args.gpu_ids).cuda(args.gpu_ids[0])

    # basemodel_gen = eval('archs.' + args.arch + '.Generator')(args=args)
    # gen_net = torch.nn.DataParallel(basemodel_gen, device_ids=args.gpu_ids).cuda(args.gpu_ids[0])
    # basemodel_dis = eval('archs.' + args.arch + '.Discriminator')(args=args)
    # dis_net = torch.nn.DataParallel(basemodel_dis, device_ids=args.gpu_ids).cuda(args.gpu_ids[0])

    # weight init
    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            if args.init_type == 'normal':
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            elif args.init_type == 'orth':
                nn.init.orthogonal_(m.weight.data)
            elif args.init_type == 'xavier_uniform':
                nn.init.xavier_uniform(m.weight.data, 1.)
            else:
                raise NotImplementedError('{} unknown inital type'.format(
                    args.init_type))
        elif classname.find('BatchNorm2d') != -1:
            nn.init.normal_(m.weight.data, 1.0, 0.02)
            nn.init.constant_(m.bias.data, 0.0)

    gen_net.apply(weights_init)
    dis_net.apply(weights_init)

    # set up data_loader
    dataset = datasets.ImageDataset(args)
    train_loader = dataset.train

    # epoch number for dis_net
    args.max_epoch_D = args.max_epoch_G * args.n_critic
    if args.max_iter_G:
        args.max_epoch_D = np.ceil(args.max_iter_G * args.n_critic /
                                   len(train_loader))
    max_iter_D = args.max_epoch_D * len(train_loader)

    # set optimizer
    gen_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, gen_net.parameters()), args.g_lr,
        (args.beta1, args.beta2))
    dis_optimizer = torch.optim.Adam(
        filter(lambda p: p.requires_grad, dis_net.parameters()), args.d_lr,
        (args.beta1, args.beta2))
    gen_scheduler = LinearLrDecay(gen_optimizer, args.g_lr, 0.0, 0, max_iter_D)
    dis_scheduler = LinearLrDecay(dis_optimizer, args.d_lr, 0.0, 0, max_iter_D)

    # fid stat
    if args.dataset.lower() == 'cifar10':
        fid_stat = 'fid_stat/fid_stats_cifar10_train.npz'
    elif args.dataset.lower() == 'stl10':
        fid_stat = 'fid_stat/stl10_train_unlabeled_fid_stats_48.npz'
    else:
        raise NotImplementedError(f'no fid stat for {args.dataset.lower()}')
    assert os.path.exists(fid_stat)

    # initial
    gen_avg_param = copy_params(gen_net)
    start_epoch = 0
    best_fid = 1e4

    # set writer
    if args.checkpoint:
        # resuming
        print(f'=> resuming from {args.checkpoint}')
        assert os.path.exists(os.path.join('exps', args.checkpoint))
        checkpoint_file = os.path.join('exps', args.checkpoint, 'Model',
                                       'checkpoint_best.pth')
        assert os.path.exists(checkpoint_file)
        checkpoint = torch.load(checkpoint_file)
        start_epoch = checkpoint['epoch']
        best_fid = checkpoint['best_fid']
        gen_net.load_state_dict(checkpoint['gen_state_dict'])
        dis_net.load_state_dict(checkpoint['dis_state_dict'])
        gen_optimizer.load_state_dict(checkpoint['gen_optimizer'])
        dis_optimizer.load_state_dict(checkpoint['dis_optimizer'])
        avg_gen_net = deepcopy(gen_net)
        avg_gen_net.load_state_dict(checkpoint['avg_gen_state_dict'])
        gen_avg_param = copy_params(avg_gen_net)
        del avg_gen_net

        args.path_helper = checkpoint['path_helper']
        logger = create_logger(args.path_helper['log_path'])
        logger.info(
            f'=> loaded checkpoint {checkpoint_file} (epoch {start_epoch})')
    else:
        # create new log dir
        assert args.exp_name
        args.path_helper = set_log_dir('exps', args.exp_name)
        logger = create_logger(args.path_helper['log_path'])

    logger.info(args)
    writer_dict = {
        'writer': SummaryWriter(args.path_helper['log_path']),
        'train_global_steps': start_epoch * len(train_loader),
        'valid_global_steps': start_epoch // args.val_freq,
    }

    # model size
    logger.info('Param size of G = %fMB', count_parameters_in_MB(gen_net))
    logger.info('Param size of D = %fMB', count_parameters_in_MB(dis_net))
    print_FLOPs(basemodel_gen, (1, args.latent_dim), logger)
    print_FLOPs(basemodel_dis, (1, 3, args.img_size, args.img_size), logger)

    # for visualization
    if args.draw_arch:
        from utils.genotype import draw_graph_G
        draw_graph_G(genotype_G,
                     save=True,
                     file_path=os.path.join(args.path_helper['graph_vis_path'],
                                            'latest_G'))
    fixed_z = torch.cuda.FloatTensor(
        np.random.normal(0, 1, (100, args.latent_dim)))

    # train loop
    for epoch in tqdm(range(int(start_epoch), int(args.max_epoch_D)),
                      desc='total progress'):
        lr_schedulers = (gen_scheduler,
                         dis_scheduler) if args.lr_decay else None
        train(args, gen_net, dis_net, gen_optimizer, dis_optimizer,
              gen_avg_param, train_loader, epoch, writer_dict, lr_schedulers)

        if epoch % args.val_freq == 0 or epoch == int(args.max_epoch_D) - 1:
            backup_param = copy_params(gen_net)
            load_params(gen_net, gen_avg_param)
            inception_score, std, fid_score = validate(args, fixed_z, fid_stat,
                                                       gen_net, writer_dict)
            logger.info(
                f'Inception score mean: {inception_score}, Inception score std: {std}, '
                f'FID score: {fid_score} || @ epoch {epoch}.')
            load_params(gen_net, backup_param)
            if fid_score < best_fid:
                best_fid = fid_score
                is_best = True
            else:
                is_best = False
        else:
            is_best = False

        # save model
        avg_gen_net = deepcopy(gen_net)
        load_params(avg_gen_net, gen_avg_param)
        save_checkpoint(
            {
                'epoch': epoch + 1,
                'model': args.arch,
                'gen_state_dict': gen_net.state_dict(),
                'dis_state_dict': dis_net.state_dict(),
                'avg_gen_state_dict': avg_gen_net.state_dict(),
                'gen_optimizer': gen_optimizer.state_dict(),
                'dis_optimizer': dis_optimizer.state_dict(),
                'best_fid': best_fid,
                'path_helper': args.path_helper
            }, is_best, args.path_helper['ckpt_path'])
        del avg_gen_net
Example #24
import network
import tool
if __name__ == '__main__':
    network.train()
    '''times = tool.loadList('./times.txt')
    accus = tool.loadList('./accus.txt')
    ttimes = []
    aaccus = []
    for i in xrange(len(times)):
        if i % 3 == 0:
            ttimes.append(times[i] / 3)
            aaccus.append(accus[i])
    tool.showXYData(ttimes, aaccus, './times_accus.png')'''
Example #25
        l.FullyConnectedLayer(height=10, init_func=f.glorot_uniform, act_func=f.softmax)
    ], f.categorical_crossentropy)
    optimizer = o.SGD(0.1)
    num_epochs = 2
    batch_size = 8
    return net, optimizer, num_epochs, batch_size


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("data", help="the path to the MNIST data set in .npz format (generated using utils.py)")
    parser.add_argument("func", help="the function name of the example to be run")
    args = parser.parse_args()

    np.random.seed(314)

    u.print("Loading '%s'..." % args.data, bcolor=u.bcolors.BOLD)
    trn_set, tst_set = u.load_mnist_npz(args.data)
    trn_set, vld_set = (trn_set[0][:50000], trn_set[1][:50000]), (trn_set[0][50000:], trn_set[1][50000:])

    u.print("Loading '%s'..." % args.func, bcolor=u.bcolors.BOLD)
    net, optimizer, num_epochs, batch_size = locals()[args.func]()
    u.print(inspect.getsource(locals()[args.func]).strip())

    u.print("Training network...", bcolor=u.bcolors.BOLD)
    n.train(net, optimizer, num_epochs, batch_size, trn_set, vld_set)

    u.print("Testing network...", bcolor=u.bcolors.BOLD)
    accuracy = n.test(net, tst_set)
    u.print("Test accuracy: %0.2f%%" % (accuracy*100))
Example #26
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "data",
        help=
        "the path to the MNIST data set in .npz format (generated using utils.py)"
    )
    parser.add_argument("func",
                        help="the function name of the example to be run")
    args = parser.parse_args()

    np.random.seed(314)

    u.print("Loading '%s'..." % args.data, bcolor=u.bcolors.BOLD)
    trn_set, tst_set = u.load_mnist_npz(args.data)
    trn_set, vld_set = (trn_set[0][:50000],
                        trn_set[1][:50000]), (trn_set[0][50000:],
                                              trn_set[1][50000:])

    u.print("Loading '%s'..." % args.func, bcolor=u.bcolors.BOLD)
    net, optimizer, num_epochs, batch_size = locals()[args.func]()
    u.print(inspect.getsource(locals()[args.func]).strip())

    u.print("Training network...", bcolor=u.bcolors.BOLD)
    n.train(net, optimizer, num_epochs, batch_size, trn_set, vld_set)

    u.print("Testing network...", bcolor=u.bcolors.BOLD)
    accuracy = n.test(net, tst_set)
    u.print("Test accuracy: %0.2f%%" % (accuracy * 100))
Example #27
]

#Settings
# number of samples in the data set
N_SAMPLES = 1000
# ratio between training and test sets
TEST_SIZE = 0.1

#Data set
X, y = make_moons(n_samples=N_SAMPLES, noise=0.2, random_state=100)
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=TEST_SIZE,
                                                    random_state=42)

plt.make_plot(X, y, "Dataset")

# Training
params_values, cost = net.train(
    np.transpose(X_train), np.transpose(y_train.reshape(
        (y_train.shape[0], 1))), NN_ARCHITECTURE, 10000, 0.01)

# Prediction
Y_test_hat, _ = net.full_forward_propagation(np.transpose(X_test),
                                             params_values, NN_ARCHITECTURE)

# Accuracy achieved on the test set
acc_test = co.get_accuracy_value(
    Y_test_hat, np.transpose(y_test.reshape((y_test.shape[0], 1))))
print("Test set accuracy: {:.2f} - David".format(acc_test))
Example #28
import network as nn
import data as dt

train_inputs, train_labels, test_inputs, test_labels = dt.prepare_data()

nn.train(train_inputs, train_labels, test_inputs, test_labels)

# Run TensorBoard with the command:
# tensorboard --logdir logs/fit
# at the command prompt
Example #29
weight0 = 2 * np.random.random((784, 50)) - 1
weight1 = 2 * np.random.random((50, 10)) - 1
"""Loading the MNIST data set into three lists two containg the training data
    and the third one containing the test data"""

tr_data, val_data, test_data = ml.load_data()
"""Fitting the 28*28 input image into a numpy array of 784*1 dimension"""

tr_inputs = [np.reshape(a, (784, 1)) for a in tr_data[0]]
"""Converting the single output into a numpy array of 10 dimensions with 1 at
    the index of the output an 0 elsewhere"""
tr_output = [ml.vectr_result(a) for a in tr_data[1]]
"""Loop to train the data taking an input of 10,000 images"""
for i in range(50000):

    weight0, weight1 = net.train(tr_inputs[i], tr_output[i], weight0, weight1)
    if (i % 500) == 0:
        br.progress(i, 50000)
br.progress(50000, 50000, cond=True)

print("\n")

print("Network Trained and ready to be operated")

te_inputs = [np.reshape(a, (784, 1)) for a in test_data[0]]
te_output = test_data[1]
"""Function to check the accuracy of our trained network by testing it on
    unchecked data of 10,000 images"""
tt.check(te_inputs, te_output, weight0, weight1)
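
The net.train step driving the loop above is not included; a minimal sketch, assuming one sample of plain backpropagation through the 784-50-10 sigmoid network with squared-error loss (the learning rate and the (10, 1) target shape are assumptions):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def train(x, y, weight0, weight1, lr=0.1):
    # Forward pass: x is (784, 1), weight0 is (784, 50), weight1 is (50, 10).
    hidden = sigmoid(weight0.T @ x)        # (50, 1)
    output = sigmoid(weight1.T @ hidden)   # (10, 1)
    # Backward pass with squared-error loss and sigmoid derivatives.
    delta_out = (output - y) * output * (1 - output)            # (10, 1)
    delta_hid = (weight1 @ delta_out) * hidden * (1 - hidden)   # (50, 1)
    weight1 = weight1 - lr * (hidden @ delta_out.T)             # (50, 10)
    weight0 = weight0 - lr * (x @ delta_hid.T)                  # (784, 50)
    return weight0, weight1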
Example #30
print("x_train.shape = ", x_train.shape)
print("y_train.shape = ", y_train.shape)
print("x_val.shape = ", x_val.shape)
print("y_val.shape = ", y_val.shape)

TRAIN_EPOCHS = args.TRAIN_EPOCHS
BATCH_SIZES = args.BATCH_SIZES
LEARNING_RATE = args.LEARNING_RATE
TOTAL_BATCHES = int(n_examples / BATCH_SIZES + 0.5)
DISPLAY_EPOCH = args.DISPLAY_EPOCH

X = network.get_placeholder_X(img_h, img_w, n_features)
Y = network.get_placeholder_Y(n_labels)
outputs, predictions = network.forward(X, is_training=True)
loss, train_op = network.train(LEARNING_RATE, outputs, Y)
correct_prediction, acc = network.accuracy(predictions, Y)
saver = network.save()

logs_path = args.LOG_DIR
tf.summary.scalar("loss", loss)
tf.summary.scalar("accuracy", acc)
merged_summary_op = tf.summary.merge_all()

print("\nStart Training!!!\n")

with tf.Session() as sess:
    sess.run(
        [tf.global_variables_initializer(),
         tf.local_variables_initializer()])
    summary_writer = tf.summary.FileWriter(logs_path,
Example #31
    theta0=test_set[:, 0] + test_set[:, 2],
    theta1=test_set[:, 1] + test_set[:, 3])

# Scale inputs to the range [-1, 1]
training_input[:, :2] /= np.sum(arm_length)
test_input[:, :2] /= np.sum(arm_length)
training_input[:, 2:] /= max_delta
test_input[:, 2:] /= max_delta

# Baseline MF input to PN: required to map network response to target range
pn_baselines = np.random.normal(1.2, 0.1, number_pn)
net = create_network(pn_baselines)

# Train with algorithm by Bouvier et al. 2018
print("Train network...")
training_errors, error_estimates, training_responses = train(
    net, training_input, training_targets, pn_baselines, arm_length)

# Test network performance
print("Test network performance...")
test_errors, test_responses, pn_activity = test(net, test_input, test_targets,
                                                pn_baselines, arm_length)

filename = time.strftime("%Y%m%d%H%M")
filename = filename + "_results.npz"
filename = os.path.join(sys.argv[2], filename)

np.savez_compressed(filename,
                    training_errors=training_errors,
                    error_estimates=error_estimates,
                    training_angles=training_set,
                    training_targets=training_targets,
Example #32
hdf5 = "/media/zac/easystore/dataset.hdf5"
# batches = readFromHDF5(hdf5, 100, 10)

# print(batches)

# file = tables.open_file(hdf5, mode='r')
network = CNN(.0000001, (224, 224), 140, 128)
# totalBatch = file.root.trainImg.shape[0] // network.batchSize
# testBatch = file.root.testImg.shape[0] // network.batchSize
# file.close()
# # # print(getClassfierCount("alligned_db/aligned_images_DB"))
gpu_options = tf.GPUOptions(allow_growth=True)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
    #178
    network.train(sess, 33, 33, 100)

# print(batches

# 10 faces
# 0.14424242424242426 0
# 0.14424242424242426 10
# 0.14454545454545453 20
# 0.14484848484848484 30
# 0.14484848484848484 40
# 0.14515151515151514 50
# 0.14515151515151514 60
# 0.14606060606060606 70
# 0.1478787878787879  90
# 0.14939393939393938 110
# 0.1481818181818182  130
Example #33
    file_source_lang='small_vocab_en',
    file_target_lang='small_vocab_fr',
    load_method=load_from_file,
)

# create mappings
x_sent_array, x_vocab_len, x_word_to_idx, x_idx_to_word = preprocess_data(
    x, conf['MAX_LEN'], conf['VOCAB_SIZE'], conf['NUM_SENT'])
y_sent_array, y_vocab_len, y_word_to_idx, y_idx_to_word = preprocess_data(
    y, conf['MAX_LEN'], conf['VOCAB_SIZE'], conf['NUM_SENT'])

# Find the length of the longest sequence
x_max_len = max([len(sentence) for sentence in x_sent_array])
y_max_len = max([len(sentence) for sentence in y_sent_array])

# Padding zeros to make all sequences have a same length with the longest one
print('Zero padding...')
X = pad_sequences(x_sent_array, maxlen=x_max_len, dtype='int32')
y = pad_sequences(y_sent_array, maxlen=y_max_len, dtype='int32')

# Creating the network model
print('Compiling model...')
model = create_model(x_vocab_len, x_max_len, y_vocab_len, y_max_len,
                     conf['HIDDEN_DIM'], conf['LAYER_NUM'])

# Finding trained weights of previous epoch if any
saved_weights = find_checkpoint_file('.')

saved_weights = []
train(X, y, y_word_to_idx, y_max_len, saved_weights, model, conf)