Example 1
def train(self):
    self.iterLineEdit.setText("training...")  # status text shown in the UI while training runs
    sample, label = get_train_data()
    sample = np.array(sample, dtype='float')
    sample = sample / 256.0  # scale raw pixel values into [0, 1)
    samp_num = len(sample)    # number of training samples
    inp_num = len(sample[0])  # input dimension (pixels per image)
    out_num = 10              # one output unit per digit class
    hid_num = 15              # hidden-layer width
    loss = 0
    self.BP = BPNetwork(inp_num, 20, hid_num, out_num, 0.1, 0.1, 0.1)
    for step in range(0, 3):
        # decay the learning rates after each full pass over the data
        if step == 1:
            self.BP.reduce_lr(0.01, 0.01, 0.01)
        elif step == 2:
            self.BP.reduce_lr(0.001, 0.001, 0.001)
        for i in range(0, samp_num):
            # one-hot encode the target digit
            train_label = np.zeros(out_num)
            train_label[label[i]] = 1
            self.BP.forward(sample[i])
            self.BP.backward(np.array(train_label))
            if i % 10000 == 0:
                print(str(i + 60000 * step))  # global sample counter (assumes 60000 samples per pass)
                error = self.BP.erro  # output-error vector stored by BPNetwork
                loss = sum(abs(e) for e in error)  # L1 norm of the output error
                print(loss)
    self.iterLineEdit.setText("ending")
    self.lossLineEdit.setText("ending")
Example 2
def train():
    start_time = time.time()
    checkpoint_file = os.path.join(FLAGS.log_dir, 'model.ckpt')

    with tf.Graph().as_default():
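        # build the TF1 graph: inputs, model, loss, train op, and summaries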
        with tf.name_scope('input'):
            x = tf.placeholder(tf.float32, [None, IMAGE_SIZE * IMAGE_SIZE],
                               name='x-input')
            y_ = tf.placeholder(tf.float32, [None, 4], name='y-input')
        with tf.name_scope('input_reshape'):
            image_shaped_input = tf.reshape(x, [-1, IMAGE_SIZE, IMAGE_SIZE, 1])
            tf.summary.image('input', image_shaped_input, 10)
        with tf.name_scope('dropout_keep_prob'):
            keep_prob = tf.placeholder(tf.float32)
        y = core.inference(image_shaped_input, keep_prob=keep_prob)
        loss = core.loss(y, y_)
        train_op = core.train(loss, FLAGS.learning_rate)
        accuracy = core.evaluation(y, y_)
        summary = tf.summary.merge_all()
        init = tf.global_variables_initializer()
        saver = tf.train.Saver()
        sess = tf.Session()
        summary_writer = tf.summary.FileWriter(FLAGS.log_dir, sess.graph)
        sess.run(init)
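        # 20 passes (epochs) over the training set, in 100-sample batches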
        for i in range(20):
            images, labels = input_data.get_train_data()

            image_num, _ = images.shape
            batch_num = image_num // 100  # number of 100-sample mini-batches
            random_index = random.sample(range(batch_num), batch_num)  # random batch order
            for j in range(batch_num):
                step = i * batch_num + j
                index = random_index[j]
                xs = images[index * 100:(index + 1) * 100]
                ys = labels[index * 100:(index + 1) * 100]
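                # every 50th step, log accuracy on this batch with dropout disabled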
                if step % 50 == 0:
                    feed_dict = {x: xs, y_: ys, keep_prob: 1.0}
                    summary_str, acc = sess.run([summary, accuracy],
                                                feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)
                    print("step %d, train accuracy %g" % (step, acc))
                else:
                    feed_dict = {
                        x: xs,
                        y_: ys,
                        keep_prob: FLAGS.dropout_keep_prob
                    }
                    summary_str, loss_value, _ = sess.run(
                        [summary, loss, train_op], feed_dict=feed_dict)
                    summary_writer.add_summary(summary_str, step)
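                # checkpoint every 1000 global steps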
                if (step + 1) % 1000 == 0:
                    saver.save(sess, checkpoint_file, global_step=step)
        saver.save(sess, checkpoint_file)
        duration = time.time() - start_time
        print('%d seconds' % int(duration))
Example 3
def run_eval(chkpt):
    global net
    net = imp.load_source('net', FLAGS.net_module)  # note: imp is deprecated in Python 3; importlib offers the same functionality
    with tf.Graph().as_default(), tf.device('/cpu:0'):
        train_phase = tf.constant(False, name='train_phase', dtype=tf.bool)

        t_images, t_labels = input_data.get_train_data(FLAGS.data_dir)
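        # per-pixel normalization statistics, reused by net.aug_eval below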
        aux = {
            'mean': np.mean(t_images, axis=0),
            'std': np.std(t_images, axis=0)
        }
        v_images, v_labels = input_data.get_validation_data(FLAGS.data_dir)

        images_ph = tf.placeholder(tf.float32,
                                   shape=[None] + list(t_images.shape[1:]),
                                   name='images_ph')
        labels_ph = tf.placeholder(tf.int32, shape=[None], name='labels_ph')

        images = net.aug_eval(images_ph, aux)
        with tf.device('/gpu:0'):
            with tf.name_scope('tower_0') as scope:
                loss, evaluation = tower_loss_and_eval(images, labels_ph,
                                                       train_phase)

        # apply() creates shadow (EMA) copies of the trainable variables so
        # variables_to_restore() can map checkpoint names onto them; the
        # returned update op itself is not needed at evaluation time
        variable_averages = tf.train.ExponentialMovingAverage(0.999)
        variables_averages_op = variable_averages.apply(
            tf.trainable_variables())

        saver = tf.train.Saver(tf.global_variables())
        ema_saver = tf.train.Saver(variable_averages.variables_to_restore())

        sess = tf.Session(config=tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement))

        # restore the raw weights, then overwrite them with their EMA shadows
        saver.restore(sess, chkpt)
        ema_saver.restore(sess, chkpt)
        sys.stdout.write('Checkpoint "%s" restored.\n' % chkpt)
        evaluate(sess, loss, evaluation, 'Train', images_ph, t_images,
                 labels_ph, t_labels)
        evaluate(sess, loss, evaluation, 'Validation', images_ph, v_images,
                 labels_ph, v_labels)
Example 4
def train(net, net_para, label, keep_prob, save_dir, log_dir):
    times_1000 = net_para.train_steps // 1000  # number of 1000-step chunks
    summary_writer = tf.summary.FileWriter(log_dir)
    for t in range(35, times_1000):  # resume from chunk 35; each chunk rebuilds the graph
        tf.reset_default_graph()
        graph = tf.Graph()
        with graph.as_default() as g:
            data_iterator = get_train_data(DATA_PATH, BATCH_SIZE)
            next_element = data_iterator.get_next()
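            # fresh dataset iterator for this 1000-step chunk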
            x = tf.placeholder(
                tf.float32,
                [BATCH_SIZE, net_para.image_size,
                 net_para.image_size, NUMBER_CHANNEL],
                name='input-x')
            y_ = tf.placeholder(tf.int64, [None], name='input-y')
            if NET_TYPE in ('mobilenet_0.5', 'mobilenet_0.75'):
                model = net(x, label, keep_prob, net_para.skip,
                            train_list=net_para.train_list,
                            depth_multiplier=net_para.depth_multiplier)
            else:
                model = net(x, label, keep_prob, net_para.skip,
                            train_list=net_para.train_list)
            
            y = model.get_prediction()
    
            global_step = tf.Variable(t * 1000, trainable=False)  # carry the step count across chunks

            # EMA shadow variables, refreshed together with train_step below
            variable_averages = tf.train.ExponentialMovingAverage(
                MOVING_AVERAGE_DECAY, global_step)
            variable_averages_op = variable_averages.apply(
                tf.trainable_variables())
    
            # raw class scores; softmax cross-entropy is shift-invariant,
            # so no constant offset on the logits is needed
            logits = y
            cross_entropy_mean = tf.reduce_mean(
                tf.nn.sparse_softmax_cross_entropy_with_logits(
                    logits=logits, labels=y_))
            with tf.name_scope('loss'):
                loss = cross_entropy_mean
                tf.summary.scalar('loss', loss)

            with tf.name_scope('accuracy'):
                correct_prediction = tf.equal(tf.argmax(y, axis=1), y_)
                correct_rate = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
                tf.summary.scalar('accuracy', correct_rate)

            # fine-/part-tuning a MobileNet variant updates only the variables
            # collected under 'train'; otherwise every variable is trained
            if (isinstance(model, (MobileNets, MobileNets_depth))
                    and TRAIN_MODEL in ('finetune', 'parttune')):
                train_step = tf.train.RMSPropOptimizer(
                    net_para.lr, net_para.lr_decay).minimize(
                        loss, global_step=global_step,
                        var_list=tf.get_collection('train'))
            else:
                train_step = tf.train.RMSPropOptimizer(
                    net_para.lr, net_para.lr_decay).minimize(
                        loss, global_step=global_step)

            with tf.control_dependencies([train_step, variable_averages_op]):
                train_op = tf.no_op(name='train')
    
            saver = tf.train.Saver()
    
            merged = tf.summary.merge_all()

            with tf.Session() as sess:
                summary_writer.add_graph(sess.graph, t*1000)

                # chunk 0 starts from the pretrained weights; later chunks
                # resume from the newest checkpoint in save_dir
                if t == 0:
                    sess.run(tf.global_variables_initializer())
                    model.loadModel(sess)
                else:
                    ckpt = tf.train.get_checkpoint_state(save_dir)
                    print('load model path is {0}'.format(ckpt.model_checkpoint_path))
                    saver.restore(sess, ckpt.model_checkpoint_path)

                xs, ys = next_element
                ys = tf.reshape(ys, [BATCH_SIZE])  # build the reshape op once, outside the loop
                for i in range(1000):
                    x_input, y_input = sess.run([xs, ys])
                    y_input -= 1  # shift 1-based labels to 0-based class indices
                    _, rate, loss_value, step, summary = sess.run(
                        [train_op, correct_rate, loss, global_step, merged],
                        feed_dict={x: x_input, y_: y_input})
                    summary_writer.add_summary(summary, step)

                    if i == 0:  # log once at the start of each 1000-step chunk
                        print("After {0:d} training step(s), loss on train batch {1:g}".format(step, loss_value))
                        print("After {0:d} training step(s), correct rate on train batch {1:g}".format(step, rate))
                
                saver.save(sess, os.path.join(save_dir, MODEL_NAME), global_step=global_step)

    summary_writer.close()
Example 5
def train(net, net_para, label, keep_prob, save_dir, log_dir):
    data_iterator = get_train_data(DATA_PATH, BATCH_SIZE)
    next_element = data_iterator.get_next()
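    # get_next() returns tensors; each sess.run on them pulls one batch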
    # x_mean = np.load('./Vehicle-Make-and-Model-CNN/data/'+MEAN_VALUE)
    x = tf.placeholder(
        tf.float32,
        [BATCH_SIZE, net_para.image_size, net_para.image_size, NUMBER_CHANNEL],
        name='input-x')
    y_ = tf.placeholder(tf.int64, [None], name='input-y')

    model = net(x,
                label,
                keep_prob,
                net_para.skip,
                train_list=net_para.train_list)
    y = model.get_prediction()

    global_step = tf.Variable(0, trainable=False)

    variable_averages = tf.train.ExponentialMovingAverage(
        MOVING_AVERAGE_DECAY, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())
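    # EMA shadow variables, refreshed on every step via the control
    # dependency grouped into train_op below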

    # one_hot_y = tf.one_hot(y_, LABEL)
    # cross_entropy = one_hot_y*tf.log(y+1e10)

    # cross_entropy_mean = tf.reduce_mean(cross_entropy)
    # raw class scores; softmax cross-entropy is shift-invariant,
    # so no constant offset on the logits is needed
    logits = y
    cross_entropy_mean = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits,
                                                       labels=y_))
    with tf.name_scope('loss'):
        loss = cross_entropy_mean
        tf.summary.scalar('loss', loss)

    with tf.name_scope('accuracy'):
        correct_prediction = tf.equal(tf.argmax(y, axis=1), y_)
        correct_rate = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        tf.summary.scalar('accuracy', correct_rate)

    # learning_rate = tf.train.exponential_decay(
    #     net_para.lr,
    #     global_step,
    #     net_para.train_steps / BATCH_SIZE,
    #     net_para.lr_decay
    # )

    if (isinstance(model, MobileNets)
            and TRAIN_MODEL in ('finetune', 'parttune')):
        train_step = tf.train.RMSPropOptimizer(
            net_para.lr,
            net_para.lr_decay).minimize(loss,
                                        global_step=global_step,
                                        var_list=tf.get_collection('train'))
        # train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step,
        #                                         var_list = tf.get_collection('train'))
    else:
        train_step = tf.train.RMSPropOptimizer(
            net_para.lr, net_para.lr_decay).minimize(loss,
                                                     global_step=global_step)
        # train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=global_step)

    with tf.control_dependencies([train_step, variable_averages_op]):
        train_op = tf.no_op(name='train')

    saver = tf.train.Saver()

    merged = tf.summary.merge_all()

    with tf.Session() as sess:
        summary_writer = tf.summary.FileWriter(log_dir, sess.graph)
        sess.run(tf.global_variables_initializer())
        model.loadModel(sess)

        xs, ys = next_element
        ys = tf.reshape(ys, [BATCH_SIZE])  # build the reshape op once, outside the loop
        for i in range(net_para.train_steps):
            x_input, y_input = sess.run([xs, ys])
            # x_input -= x_mean
            y_input -= 1  # shift 1-based labels to 0-based class indices
            _, rate, loss_value, step, summary = sess.run(
                [train_op, correct_rate, loss, global_step, merged],
                feed_dict={
                    x: x_input,
                    y_: y_input
                })

            # print('-- origin --')
            # print(y_input)
            # print('-- predict --')
            # print(np.argmax(y_pred,axis=1))
            # print('-- compare--')
            # print(np.equal(y_input, np.argmax(y_pred,axis=1)))
            # print('-- rate --')
            # pre_rate = np.sum(np.equal(y_input, np.argmax(y_pred,axis=1))) / BATCH_SIZE
            # print(pre_rate)
            # print('-- loss --')
            # print('loss is {0}'.format(loss_value))

            if i % 1000 == 0:
                print("After {0:d} training step(s), loss on train batch {1:g}"
                      .format(step, loss_value))
                print("After {0:d} training step(s), correct rate on train batch {1:g}"
                      .format(step, rate))
                saver.save(sess,
                           os.path.join(save_dir, MODEL_NAME),
                           global_step=global_step)

            summary_writer.add_summary(summary, i)

        summary_writer.close()