import numpy as np
import tensorflow as tf

# `setting`, `dataset`, and `inference_op` are project-local (hyperparameters,
# data helpers, and the model graph) and are assumed to be defined or imported
# elsewhere in this module. TensorFlow 1.x graph-mode API is used.


def train():
    x = tf.placeholder(tf.float32, [None, *setting.size, 3], 'input')
    label = tf.placeholder(tf.float32, [None, 1], 'label')
    keep_prob = tf.placeholder(tf.float32, name='keep_prob')

    loss, y = inference_op(x, label, keep_prob)
    trainer = tf.train.AdamOptimizer(setting.learnning_rate).minimize(
        tf.log(loss))

    labelarray, imagearray = dataset.loadimage(setting.datasetfromfile)
    trainlabel, trainimage, testlabel, testimage, validlabel, validimage = dataset.splitdataset(
        labelarray, imagearray, setting.datasetfromfile)

    saver = tf.train.Saver(max_to_keep=10)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())
    merged = tf.summary.merge_all()
    trainwriter = tf.summary.FileWriter("logs/train", sess.graph)
    testwriter = tf.summary.FileWriter("logs/test", sess.graph)
    for i in range(setting.epoch):
        for j in range(setting.step):
            trainbatchlabel, trainbatchimage, validbatchlabel, validbatchimage = dataset.next_batch(
                setting.batch_size, trainlabel, trainimage, validlabel,
                validimage)

            _, trainloss, out, rs = sess.run(
                [trainer, loss, y, merged], {
                    x: trainbatchimage,
                    label: trainbatchlabel,
                    keep_prob: setting.keep_prob,
                })
            if not j % 10:
                trainwriter.add_summary(rs, i * setting.step + j)
                trainwriter.flush()
                trainacc = np.equal(out > 0.5, trainbatchlabel).mean()
                validloss, rs, out = sess.run([loss, merged, y], {
                    x: validbatchimage,
                    label: validbatchlabel,
                    keep_prob: 1
                })
                validacc = np.equal(out > 0.5, validbatchlabel).mean()
                testwriter.add_summary(rs, i * setting.step + j)
                testwriter.flush()
                print('Epoch %d, step %d: train loss %f, valid loss %f, '
                      'train acc %f, valid acc %f' %
                      (i, j, trainloss, validloss, trainacc, validacc))
        saver.save(sess, 'model/steganalysismodel', i)
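
# A minimal sketch of the `dataset.next_batch` helper assumed by the loop
# above; the real implementation is not part of this snippet, so the random
# sampling strategy and the return order are assumptions inferred from the
# call site.
def next_batch(batch_size, trainlabel, trainimage, validlabel, validimage):
    # Sample one training batch and one validation batch of equal size
    # (assumed behaviour, not the project's actual code).
    train_idx = np.random.choice(len(trainimage), batch_size, replace=False)
    valid_idx = np.random.choice(len(validimage), batch_size, replace=False)
    return (trainlabel[train_idx], trainimage[train_idx],
            validlabel[valid_idx], validimage[valid_idx])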
Example #2
    train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)

    # Define the accuracy metric
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # Create the session and initialize variables
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())

    # Train for 2000 steps
    for i in range(2000):
        batch_x, batch_labels = ds.next_batch(train_x, train_y_onehot, 50)

        # Report accuracy on the current training batch every 10 steps
        if i % 10 == 0:
            train_accuracy = accuracy.eval(feed_dict={x: batch_x, y_: batch_labels, keep_prob: 1.0})
            # print(batch_x.shape)       # (1080, 64, 64, 3)
            # print(batch_labels.shape)  # (1080, 6)

            print("step %d, training accuracy %f" % (i, train_accuracy))
        train_step.run(feed_dict={x: batch_x, y_: batch_labels, keep_prob: 0.5})

    # After training, report accuracy on the test set
    # print("test accuracy %g" % accuracy.eval(feed_dict={
Example #3
        ]

    summary_writer = tf.summary.FileWriter(
        "{}/{}-{}".format(args.logdir, timestamp, experiment_name),
        flush_secs=10)

    # Train
    print('Training')
    sys.stdout.flush()
    for epoch in range(args.epochs):
        dataset.reset_batch_pointer()
        for batch_ind in range(dataset.num_batches):
            step_number = epoch * dataset.num_batches + batch_ind

            start = time.time()
            input_sentences, sentence_lens, target_sentences = dataset.next_batch()
            network.train(input_sentences, sentence_lens, target_sentences,
                          args.keep_prob)
            end = time.time()

            if step_number % args.log_every == 0:
                eval_time_start = time.time()
                string_summary = "{}/{}, epoch: {}, time/batch = {:.3f}".format(
                    step_number, args.epochs * dataset.num_batches, epoch,
                    end - start)
                for eval_set_name in evaluation_sets.keys():
                    print('Evaluating {}'.format(eval_set_name))
                    string_summary += "\n  {}".format(eval_set_name)
                    eval_set_input_sentences, eval_set_sentence_lens, eval_set_target_sentences = evaluation_sets[
                        eval_set_name]
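                    # Hedged sketch of how the loop might continue; the fragment
                    # ends here and `network.evaluate` is a hypothetical method
                    # assumed to return a scalar loss for the given set.
                    eval_set_loss = network.evaluate(eval_set_input_sentences,
                                                     eval_set_sentence_lens,
                                                     eval_set_target_sentences)
                    string_summary += ": loss = {:.4f}".format(eval_set_loss)
                print(string_summary)
                sys.stdout.flush()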
Example #4
import sklearn.metrics
import torch.nn as nn

# `OneLinePrint`, `Timer`, `SentRunner`, `next_batch`, `optimzier`, and `logger`
# are project-local helpers assumed to be imported elsewhere in this module.


def train(model, vocab, train_loader, val_loader, hps):
    '''
    Args:
        model (torchutils.Model)
        vocab (Vocab)
        train_loader (torch.utils.data.DataLoader)
        val_loader (torch.utils.data.DataLoader)
        hps: training hyperparameters / config namespace
    '''
    olp = OneLinePrint()
    timer = Timer()

    # keep only parameters with requires_grad == True
    model_params = list(
        filter(lambda p: p.requires_grad, model.model.parameters()))
    model.addopt(optimzier(hps.opt, model_params, lr=hps.init_lr))

    if hps.restore:
        init_step, ckpt_name = model.restore(hps.restore)
        logger.info('Restored from %s' % ckpt_name)
    else:
        init_step = hps.start_step

    runner = SentRunner(model, vocab)

    # summary writer for logging (optional)
    if hps.store_summary:
        writer = model.make_writer()

    t_batcher = next_batch(train_loader)

    logger.info('----Start training: %s----' % model.name)
    timer.start()
    loss_sum = 0
    for step in range(init_step, hps.num_iters + 1):
        model.train()

        model.opt.zero_grad()
        batch = next(t_batcher)
        loss, _, _ = runner.step(batch)
        loss.backward()

        global_norm = nn.utils.clip_grad_norm_(model_params, hps.clip)
        model.opt.step()
        loss_sum += loss.item()

        olp.write('step %s train loss: %f' % (step, loss.item()))

        # save checkpoint
        if step % hps.ckpt_steps == 0:
            model.save(step, loss.item())
            olp.write('save checkpoint (step=%d)\n' % step)
        olp.flush()

        # store summary
        if hps.store_summary and (step - 1) % hps.summary_steps == 0:
            writer.add_scalar('loss', loss, step)
            writer.add_scalar('global_norm', global_norm, step)
            # average time
            if step - 1 != 0:
                lap_time, _ = timer.lap('summary')
                steps = hps.summary_steps
                writer.add_scalar('avg time/step', lap_time / steps, step)

        if step % hps.check_steps == 0:
            logger.info('\nstep:%d~%d avg loss: %f', step - hps.check_steps,
                        step, loss_sum / hps.check_steps)
            loss_sum = 0

            # validation
            model.eval()
            preds = []
            tgts = []
            for v_batch in val_loader:
                v_outputs = runner.run(v_batch)
                _, pred = v_outputs.max(1)
                pred = pred.cpu().data.tolist()
                preds.extend(pred)
                tgts.extend(v_batch['label'])
                if len(preds) > hps.val_num:
                    break

            assert len(preds) == len(tgts)
            f1 = sklearn.metrics.f1_score(tgts, preds, average='macro')
            precision = sklearn.metrics.precision_score(
                tgts, preds, average='macro')
            recall = sklearn.metrics.recall_score(tgts, preds, average='macro')

            if f1 is None:
                continue
            if hps.store_summary:
                writer.add_scalar('F1', f1, step)
                writer.add_scalar('Precision', precision, step)
                writer.add_scalar('Recall', recall, step)

            logger.info('F1: %.3f, P: %.3f, R: %.3f' % (f1, precision, recall))

    if hps.store_summary:
        writer.close()
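
# A minimal sketch of the `next_batch` helper used above: the loop treats it as
# an infinite generator over the training DataLoader, so this cycling
# implementation is an assumption, not the project's actual code.
def next_batch(loader):
    # Yield batches forever, restarting the loader at the end of each pass.
    while True:
        for batch in loader:
            yield batch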