Example #1
def train_model_epochs(num_epochs, gpu=True):
    """ Trains the model for a given number of epochs on the training set. """
    for epoch in range(num_epochs):
        length = max(1, len(train_loader) // 10)  # report roughly ten times per epoch

        running_loss = 0.0
        for i, data in enumerate(train_loader, 0):
            images, labels = data

            if gpu:
                images = images.to(device)
                labels = labels.to(device)

            # Zero the parameter gradients, i.e. reset them from the previous
            # iteration. By default, gradients accumulate!
            optimizer.zero_grad()

            # Passing inputs to the model calls the forward() function of
            # the Module class, and the outputs value contains the return value
            # of forward()
            outputs = model(images)

            # Compute the loss based on the true labels
            loss = criterion(outputs, labels)

            # Backpropagate: compute gradients of the loss w.r.t. the parameters
            loss.backward()

            # Updates the parameters based on current gradients and update rule;
            # in this case, defined by SGD()
            optimizer.step()

            # Print our loss
            running_loss += loss.item()
            if i % length == length - 1:  # print roughly ten times per epoch
                print('Epoch / Batch [%d / %d] - Loss: %.3f' %
                      (epoch + 1, i + 1, running_loss / length))
                running_loss = 0.0

        # Validation pass (gradients are not needed here)
        running_loss_val = 0.0
        with torch.no_grad():
            for data_val in validation_loader:
                images_val, labels_val = data_val

                images_val = images_val.to(device)
                labels_val = labels_val.to(device)

                outputs_val = model(images_val)

                loss_val = criterion(outputs_val, labels_val)
                running_loss_val += loss_val.item()

                _, predicted = torch.max(outputs_val.data, 1)

        print('- val_loss:%.3f' % (running_loss_val / len(validation_loader)))
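A minimal setup sketch for driving the function above; model, criterion, optimizer, device and the two loaders are assumed globals, and every name and value below is illustrative rather than taken from the original project.

import torch
import torch.nn as nn
from torch.utils.data import DataLoader, TensorDataset

# Toy stand-ins for the globals train_model_epochs relies on
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
train_loader = DataLoader(TensorDataset(torch.randn(640, 20), torch.randint(0, 3, (640,))),
                          batch_size=32, shuffle=True)
validation_loader = DataLoader(TensorDataset(torch.randn(160, 20), torch.randint(0, 3, (160,))),
                               batch_size=32)
model = nn.Linear(20, 3).to(device)  # placeholder classifier
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.9)

train_model_epochs(num_epochs=2, gpu=torch.cuda.is_available())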
Example #2
    def run_iter(batch, is_training):
        model.train(is_training)

        sent = batch['sent'].to(DEVICE)
        tag = batch['tag'].to(DEVICE)
        pos1 = batch['pos1'].to(DEVICE)
        pos2 = batch['pos2'].to(DEVICE)
        length = batch['length'].to(DEVICE)

        label = batch['label'].to(DEVICE)
        id = batch['id']
        scope = batch['scope']

        logits = model(sent, tag, length)

        loss = criterion(input=logits, target=label)

        label_pred = logits.max(1)[1]

        if is_training:
            optimizer.zero_grad()
            loss.backward()
            clip_grad_norm_(parameters=params, max_norm=5)
            optimizer.step()

        return loss, label_pred.cpu()
Example #3
def train(model, dataset, optimizer):
    net = model()
    net.compile(
        optimizer=optimizer,
        loss=loss_fn,
        metrics=['accuracy'])
    net.fit(
        x_train, y_train,  # training inputs and labels
        batch_size=BATCH_SIZE,  # mini-batch size
        epochs=EPOCHS,  # number of training epochs
        validation_split=0.2,  # hold out 20% of the training data for validation
        validation_freq=20  # run validation every 20 epochs
    )
    for x, y in dataset:
        with tf.GradientTape() as tape:
            prediction = net(x, training=True)
            # loss_fn follows the Keras (y_true, y_pred) convention used by compile() above
            loss = loss_fn(y, prediction)
        gradients = tape.gradient(loss, net.trainable_variables)
        optimizer.apply_gradients(zip(gradients, net.trainable_variables))
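A hedged usage sketch for the function above; the toy data, the make_model helper and the hyperparameters are assumptions standing in for the project's real globals (x_train, y_train, BATCH_SIZE, EPOCHS, loss_fn).

import numpy as np
import tensorflow as tf

# Illustrative stand-ins for the globals train() assumes
BATCH_SIZE, EPOCHS = 32, 2
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
x_train = np.random.rand(640, 8).astype('float32')
y_train = np.random.randint(0, 3, size=(640,))
dataset = tf.data.Dataset.from_tensor_slices((x_train, y_train)).batch(BATCH_SIZE)

def make_model():
    # Tiny classifier standing in for the project's real architecture
    return tf.keras.Sequential([tf.keras.layers.Dense(3)])

train(make_model, dataset, tf.keras.optimizers.Adam())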
Example #4
    def run_iter(batch):
        sent = batch['sent'].to(DEVICE)
        tag = batch['tag'].to(DEVICE)

        length = batch['length'].to(DEVICE)
        scope = batch['scope']

        logit = model(sent, tag, length, scope)

        return logit.cpu()
Example #5
def main(_):
    FLAGS.agent = model(params=FLAGS)
    FLAGS.environment = get_env(FLAGS)
    FLAGS.act = action()

    FLAGS.step_max = FLAGS.environment.data_len()
    FLAGS.train_freq = 40
    FLAGS.update_q_freq = 50
    FLAGS.gamma = 0.97
    FLAGS.show_log_freq = 5
    FLAGS.memory = []  #Experience(FLAGS.memory_size)

    init = tf.global_variables_initializer()
    saver = tf.train.Saver()

    # Create the directory used to save model checkpoints
    if not os.path.exists(FLAGS.model_dir):
        os.makedirs(FLAGS.model_dir)
    start = time.time()

    with tf.Session() as sess:
        sess.run(init)
        eval = evaluation(FLAGS, sess)
        ckpt = tf.train.get_checkpoint_state(FLAGS.model_dir)
        if ckpt:
            print('Loading Model...')
            saver.restore(sess, ckpt.model_checkpoint_path)
        total_step = 1
        print('\t'.join(
            map(str, [
                "epoch", "epsilon", "total_step", "rewardPerEpoch", "profits",
                "lossPerBatch", "elapsed_time"
            ])))
        for epoch in range(FLAGS.epoch_num):
            avg_loss_per_batch, total_reward, total_step, profits = run_epch(
                FLAGS, sess, total_step)
            # total_rewards.append(total_reward)
            # total_losses.append(total_loss)

            if (epoch + 1) % FLAGS.show_log_freq == 0:
                # log_reward = sum(total_rewards[((epoch+1)-FLAGS.show_log_freq):])/FLAGS.show_log_freq
                # log_loss = sum(total_losses[((epoch+1)-FLAGS.show_log_freq):])/FLAGS.show_log_freq
                elapsed_time = time.time() - start
                #print('\t'.join(map(str, [epoch+1, FLAGS.act.epsilon, total_step, log_reward, log_loss, elapsed_time])))
                print('\t'.join(
                    map(str, [
                        epoch + 1, FLAGS.act.epsilon, total_step, total_reward,
                        profits, avg_loss_per_batch, elapsed_time
                    ])))
                start = time.time()

                saver.save(
                    sess,
                    os.path.join(FLAGS.model_dir, 'model-' + str(epoch + 1) + '.ckpt'))
                eval.eval()
Example #6
File: train.py  Project: qualiaa/dreyeve
def train():
    tb_path = Path(c.TB_DIR, settings.run_name())
    if tb_path.exists():
        print("TensorBoard logdir", tb_path, "already exists. Overwrite [y/N]")
        r = input()
        if r.strip().lower().startswith("y"):
            shutil.rmtree(str(tb_path))
        else:
            return

    print("Loading model...")
    model = network.model()
    opts = tf.RunOptions(report_tensor_allocations_upon_oom=True)
    model.compile(optimizer='adam', loss=settings.loss(), options=opts)

    video_folders = list(Path(c.DATA_DIR).glob("[0-9][0-9]"))

    train_split = int(c.TRAIN_SPLIT * len(video_folders))
    validation_split = int(c.VALIDATION_SPLIT * train_split)

    train_folders = video_folders[:train_split][:-validation_split]
    validation_folders = video_folders[:train_split][-validation_split:]

    seq = lambda x: KerasSequenceWrapper(DreyeveExamples, c.BATCH_SIZE, x)

    train_examples = seq(train_folders)

    validation_examples = seq(validation_folders)

    callbacks = [
        TerminateOnNaN(),
        ModelCheckpoint(c.CHECKPOINT_DIR + "/" + settings.run_name() +
                        "_epoch_{epoch:02d}_loss_{val_loss:.2f}.h5",
                        save_weights_only=True,
                        period=2),
        TensorBoard(
            log_dir=str(tb_path),
            #histogram_freq=10,
            batch_size=c.BATCH_SIZE,
            write_images=True,
            write_graph=True)
    ]

    history = model.fit_generator(train_examples,
                                  steps_per_epoch=c.TRAIN_STEPS,
                                  epochs=c.EPOCHS,
                                  callbacks=callbacks,
                                  validation_data=validation_examples,
                                  validation_steps=c.VALIDATION_STEPS,
                                  use_multiprocessing=c.USE_MULTIPROCESSING,
                                  workers=c.WORKERS)

    print("Saving weights and history")
    model.save_weights("weights_" + settings.run_name() + ".h5")
    pkl_xz.save(history.history, "history_" + settings.run_name() + ".pkl.xz")
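Note that recent tf.keras releases deprecate fit_generator; a keras.utils.Sequence can be passed straight to fit. A sketch of the equivalent call, reusing the same objects as above and assuming a TF 2.x Keras where fit still accepts workers/use_multiprocessing:

history = model.fit(train_examples,
                    steps_per_epoch=c.TRAIN_STEPS,
                    epochs=c.EPOCHS,
                    callbacks=callbacks,
                    validation_data=validation_examples,
                    validation_steps=c.VALIDATION_STEPS,
                    use_multiprocessing=c.USE_MULTIPROCESSING,
                    workers=c.WORKERS)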
Example #7
    def run_iter(batch):
        sent = batch['sent'].to(DEVICE)
        tag = batch['tag'].to(DEVICE)

        length = batch['length'].to(DEVICE)
        scope = batch['scope']

        logit, word_attn, tree_order, sent_attn = model(sent,
                                                        tag,
                                                        length,
                                                        scope,
                                                        verbose_output=True)

        return logit.cpu(), word_attn.cpu(), tree_order, sent_attn
Example #8
    def run_iter(batch):
        sent = batch['sent'].to(DEVICE)
        tag = batch['tag'].to(DEVICE)

        length = batch['length'].to(DEVICE)

        label = batch['label']
        id = batch['id']
        scope = batch['scope']

        logits = model(sent, tag, length, scope)
        logits = F.softmax(logits, dim=1)
        label_pred = logits.max(1)[1]

        return id, label, logits.detach().cpu(), label_pred.detach().cpu()
Example #9
    def run_iter(batch):
        sent = batch['sent'].to(DEVICE)
        tag = batch['tag'].to(DEVICE)
        pos1 = batch['pos1'].to(DEVICE)
        pos2 = batch['pos2'].to(DEVICE)
        length = batch['length'].to(DEVICE)

        label = batch['label']
        id = batch['id']
        scope = batch['scope']

        logits = model(sent, tag, length)
        label_pred = logits.max(1)[1]

        return label_pred.cpu()
Example #10
# Model

if FLAGS.vgg:
    print('\nEncoder model from VGG16.\n')
    x = tf.stack([x, x, x], axis=3)
    x = tf.reshape(x, [FLAGS.batch_size, sx, sy, 3])
    x_aug = tf.stack([x_aug, x_aug, x_aug], axis=3)
    x_aug = tf.reshape(x_aug, [FLAGS.batch_size, sx, sy, 3])
    with tf.variable_scope("siamese") as scope:
        yab, vgg16_conv_layers = network.model_vgg(x, True, True)
        scope.reuse_variables()
        yab_s, vgg16_conv_layers_s = network.model_vgg(x_aug, True, True)
else:
    print('\nEncoder model ala Izuka et al.\n')
    with tf.variable_scope("siamese") as scope:
        yab = network.model(x, True, True)
        scope.reuse_variables()
        yab_s = network.model(x_aug, True, True)

print('Model size = %d weights\n' % network.count_all_vars())

# Compose final image
ya, yb = tf.unstack(yab, axis=3)
ya = tf.reshape(ya, [FLAGS.batch_size, int(sx / 2), int(sy / 2), 1])
yb = tf.reshape(yb, [FLAGS.batch_size, int(sx / 2), int(sy / 2), 1])
yabf = tf.image.resize_images(yab,
                              size=[sx, sy],
                              method=tf.image.ResizeMethod.BILINEAR)
yaf, ybf = tf.unstack(yabf, axis=3)
ylab = color_transform.deprocess_lab(y_l, yaf, ybf)
y = color_transform.lab_to_rgb(ylab)
Example #11
                        type=str,
                        default="../checkpoints/chair_15.pth",
                        help='checkpoint path')
    parser.add_argument('--save_dir',
                        type=str,
                        default="104256_meta_handle",
                        help='save dir')
    parser.add_argument('--num_basis',
                        type=int,
                        default=15,
                        help='number of basis vectors')
    return parser.parse_args()


opt = parse_args()
net = network.model(opt.num_basis).cuda()
net = torch.nn.DataParallel(net)
try:
    checkpoint = torch.load(opt.checkpoint)
    net.load_state_dict(checkpoint['model_state_dict'])
    print('Use pretrain model')
except:
    print('Error! No existing model!')
    exit(-1)

net.eval()

src_pc, _, _ = load_obj("%s/pc_4096.obj" % opt.src_shape)
tar_pc, _, _ = load_obj("%s/pc_4096.obj" % opt.tar_shape)
key_pts, _, _ = load_obj("%s/key_point_50.obj" % opt.src_shape)
_, src_faces, _ = load_obj("%s/manifold.obj" % opt.src_shape)
Example #12
def runInference(input_data, input_labels, input_file=None):
    ACC_DIFF = 0.00001

    input_data = torch.from_numpy(input_data)
    input_data = input_data.type(torch.FloatTensor)

    input_labels = torch.from_numpy(input_labels)
    input_labels = input_labels.type(torch.FloatTensor)

    # Load networks
    MODELS_DIR = os.path.join('../', 'models/')

    network1 = network.model()
    network1.load_state_dict(
        torch.load(os.path.join(MODELS_DIR, 'network1.torch')))

    network2 = network.model()
    network2.load_state_dict(
        torch.load(os.path.join(MODELS_DIR, 'network2.torch')))

    network3 = network.model()
    network3.load_state_dict(
        torch.load(os.path.join(MODELS_DIR, 'network3.torch')))

    network4 = network.model()
    network4.load_state_dict(
        torch.load(os.path.join(MODELS_DIR, 'network4.torch')))

    network5 = network.model()
    network5.load_state_dict(
        torch.load(os.path.join(MODELS_DIR, 'network5.torch')))

    network6 = network.model()
    network6.load_state_dict(
        torch.load(os.path.join(MODELS_DIR, 'network6.torch')))

    correct = np.zeros((6, 1))

    predicted = np.empty((0, 6))
    labels = np.empty((0, 6))

    for data, label in zip(input_data, input_labels):
        output1 = network1(data)
        output2 = network2(data)
        output3 = network3(data)
        output4 = network4(data)
        output5 = network5(data)
        output6 = network6(data)

        predicted = np.append(predicted,
                              np.array([[
                                  output1.item(),
                                  output2.item(),
                                  output3.item(),
                                  output4.item(),
                                  output5.item(),
                                  output6.item()
                              ]]),
                              axis=0)
        labels = np.append(labels,
                           np.array([[
                               label[0], label[1], label[2], label[3],
                               label[4], label[5]
                           ]]),
                           axis=0)

        # Count a prediction as correct when its absolute error is within ACC_DIFF
        outputs = (output1, output2, output3, output4, output5, output6)
        for j, output in enumerate(outputs):
            if abs(output.item() - label[j]) < ACC_DIFF:
                correct[j] += 1

    np.savetxt('../inference_results/test_predictions.csv',
               predicted,
               fmt='%10.10f',
               delimiter=',')
    np.savetxt('../inference_results/test_labels.csv',
               labels,
               fmt='%10.10f',
               delimiter=',')
    print('Inference Results: {}'.format(
        np.divide(correct, len(input_data)) * 100))
Example #13
# Single frame
frames = [FLAGS.im_dir]

# If directory is supplied, get names of all files in the path
if os.path.isdir(FLAGS.im_dir):
    frames = [os.path.join(FLAGS.im_dir, name)
              for name in sorted(os.listdir(FLAGS.im_dir))
              if os.path.isfile(os.path.join(FLAGS.im_dir, name))]

# Placeholder for image input
x = tf.placeholder(tf.float32, shape=[1, sy, sx, 3])

# HDR reconstruction autoencoder model
print_("Network setup:\n")
net = network.model(x)

# The CNN prediction (this also includes blending with input image x)
y = network.get_final(net, x)

# TensorFlow session for running inference
sess = tf.InteractiveSession()

# Load trained CNN weights
print_("\nLoading trained parameters from '%s'..." % FLAGS.params)
load_params = tl.files.load_npz(name=FLAGS.params)
tl.files.assign_params(sess, load_params, net)
print_("\tdone\n")

if not os.path.exists(FLAGS.out_dir):
    os.makedirs(FLAGS.out_dir)
Example #14
def main():
	# images=tf.placeholder(tf.float32,[None,64,64,3])
	# labels=tf.placeholder(tf.int32,[None])

	net=model()
	net.train()
Example #15
y_b = tf.image.resize_images(y_b, size=[int(sx/2),int(sy/2)], method=tf.image.ResizeMethod.BILINEAR)
x = tf.reshape(y_l, [FLAGS.batch_size, sy, sx, 1])

# HDR reconstruction autoencoder model
print_("Network setup:\n")

if FLAGS.vgg:
  print_('\nEncoder model from VGG16.\n')
  x = tf.stack([x,x,x], axis=3)
  x = tf.reshape(x, [FLAGS.batch_size,sy,sx,3])
  with tf.variable_scope("siamese") as scope:
    yab = network.model_vgg(x, False, True)
else:
  print_('\nEncoder model ala Izuka et al.\n')
  with tf.variable_scope("siamese") as scope:
    yab = network.model(x, False, True)
    scope.reuse_variables()
    yab_s = network.model(x, False, True)
  #yab = network.model(x, False, True)

# Compose final image
ya, yb = tf.unstack(yab, axis=3)
ya = tf.reshape(ya, [FLAGS.batch_size, int(sy/2), int(sx/2), 1])
yb = tf.reshape(yb, [FLAGS.batch_size, int(sy/2), int(sx/2), 1])
yabf = tf.image.resize_images(yab, size=[sy,sx], method=tf.image.ResizeMethod.BILINEAR)
yaf, ybf = tf.unstack(yabf, axis=3)
ylab = color_transform.deprocess_lab(y_l, yaf, ybf)
y = color_transform.lab_to_rgb(ylab)

saver = tf.train.Saver()
Example #16
    for x, y in dataset:
        with tf.GradientTape() as tape:
            prediction = model(x, training=True)
            loss = loss_fn(prediction, y)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(zip(gradients, model.trainable_variables))


if __name__ == '__main__':
    model_save_path = ""
    train_data_path = "train_data.txt"
    print("Loading data...")
    X, label = get_data(train_data_path, )
    print(type(X))
    print(X)
    print(type(label))
    print(label)
    print("Data loaded")
    net = model()
    net.build(input_shape=(None, 256, 256, 3))  # build() expects a batch dimension (None here)
    print(net.summary())
    # history = net.fit_generator(train_loader,
    #                             steps_per_epoch=trainX.shape[0] / batch_size,
    #                             validation_data=val_generator,
    #                             epochs=EPOCHS,
    #                             validation_steps=valX.shape[0] / batch_size,
    #                             callbacks=[checkpointer, reduce],
    #                             verbose=1,
    #                             shuffle=True)
    net.save(model_save_path)
Example #17
                                           name=None)
NUM_EPOCHS = 999999
DATA_SIZE = 4000
BATCH_SIZE = 1
LABELS = list(range(DATA_SIZE))
oldLoss = 999999
totalLoss = 0
pastLoss = 999999

# IMG-LDR
curr_LDR = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 320, 320, 3])
# IMG-HDR
curr_HDR = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 320, 320, 3])

y_input, y_actual = getTransform(curr_LDR, curr_HDR)
my_net = network.model(y_input)
y_pred = network.get_final(my_net, y_input)

# Loss functions
meanSquareLoss = tf.losses.mean_squared_error(labels=y_actual,
                                              predictions=y_pred)
absDevLoss = tf.losses.absolute_difference(labels=y_actual, predictions=y_pred)
huberLoss = tf.losses.huber_loss(labels=y_actual, predictions=y_pred)

train_step = tf.train.AdamOptimizer(LEARNING_RATE).minimize(huberLoss)
correct_prediction = tf.equal(tf.argmax(y_actual, 1), tf.argmax(y_pred, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Initialize the saver to save the model
saver = tf.train.Saver()
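The snippet builds the graph, losses and optimizer but does not show the session loop; a rough sketch of how it might be driven, where next_batch is a hypothetical helper returning numpy arrays of shape [BATCH_SIZE, 320, 320, 3].

# Hypothetical training driver for the graph above (next_batch is an assumed helper)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(NUM_EPOCHS):
        totalLoss = 0
        for step in range(DATA_SIZE // BATCH_SIZE):
            ldr_batch, hdr_batch = next_batch(BATCH_SIZE)
            _, batch_loss = sess.run([train_step, huberLoss],
                                     feed_dict={curr_LDR: ldr_batch, curr_HDR: hdr_batch})
            totalLoss += batch_loss
        saver.save(sess, 'checkpoints/model.ckpt', global_step=epoch)  # path is illustrative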
Example #18
enqueue_op_frames = q_frames.enqueue([input_frame])
dequeue_op_frames = q_frames.dequeue()

# For multi-threaded queueing of training images
input_data = tf.placeholder(tf.float32, shape=[sy, sx, 3])
input_target = tf.placeholder(tf.float32, shape=[sy, sx, 3])
q_train = tf.FIFOQueue(FLAGS.buffer_size, [tf.float32, tf.float32],
                       shapes=[[sy, sx, 3], [sy, sx, 3]])
enqueue_op_train = q_train.enqueue([input_target, input_data])
y_, x = q_train.dequeue_many(FLAGS.batch_size)

#=== Network ==================================================================

# Setup the network
print("Network setup:\n")
net, vgg16_conv_layers = network.model(x, FLAGS.batch_size, True)

y = net.outputs
train_params = net.all_params

# The TensorFlow session to be used
sess = tf.InteractiveSession()

#=== Loss function formulation ================================================

# For masked loss, only using information near saturated image regions
thr = 0.05  # Threshold for blending
msk = tf.reduce_max(y_, reduction_indices=[3])
msk = tf.minimum(1.0, tf.maximum(0.0, msk - 1.0 + thr) / thr)
msk = tf.reshape(msk, [-1, sy, sx, 1])
msk = tf.tile(msk, [1, 1, 1, 3])
Example #19
def main(args):
    # Create output directories
    output_path = "output_test/"+args.name
    txt_path = 'data/txt'+str(args.cv_index)
    result_path = os.path.join(output_path, "result")
    best_path = os.path.join(output_path, "best_weights")
    log_path = os.path.join(result_path, "Log.txt")
    check_dir(best_path)
    check_dir(output_path)
    check_dir(result_path)
    # Save the parser settings
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    fh = logging.FileHandler((os.path.join(result_path, 'Setting_Log.txt')), mode='w')
    logger.addHandler(fh)
    logger.info(args)
    # Load the data
    train_set = MySet(txt_path, mode="train", is_debug=args.smoke_test)
    val_set = MySet(txt_path,  mode="val", is_debug=args.smoke_test)
    train_loader = DataLoader(train_set, batch_size=args.batch_size, shuffle=True,
                              num_workers=20 if not args.smoke_test else 0)
    val_loader = DataLoader(val_set, batch_size=args.batch_size, num_workers=20 if not args.smoke_test else 0)
    # Define the model
    net = model().cuda()
    # Multi-GPU setup
    if len(args.gpu_id) > 1:
        torch.distributed.init_process_group(
            backend="nccl", init_method='tcp://localhost:8000', rank=0, world_size=1)
        net = torch.nn.parallel.DistributedDataParallel(net)

    # Loss and optimizer
    cost = torch.nn.CrossEntropyLoss()
    if args.optimizer == "Adam":
        optimizer = torch.optim.Adam(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    if args.optimizer == "SGD":
        optimizer = torch.optim.SGD(net.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Track training results
    train_loss_list, train_acc_list, val_loss_list, val_acc_list = [], [], [], []
    save = savebest_weights(args.num_model_to_save, best_path)
    t0 = time.time()
    # Epoch loop
    for epoch in trange(args.num_epoch):
        train_loss, train_acc = train(net, train_loader, optimizer, cost)
        val_loss, val_acc = val(net, val_loader, cost)

        train_loss_list.append(train_loss)
        train_acc_list.append(train_acc)
        val_loss_list.append(val_loss)
        val_acc_list.append(val_acc)

        save.save_weight(net, val_acc, epoch)

        # Plot training/validation loss and accuracy curves
        plot(train_loss_list, 'train_loss', val_loss_list, 'val_loss',
             x_label="epoch", y_label="loss", title="Loss Curve-epoch", save_path=result_path)
        plot(train_acc_list, 'train_acc', val_acc_list, 'val_acc',
             x_label="epoch", y_label="acc", title="Acc Curve-epoch", save_path=result_path)

        info = [str(epoch).zfill(3), train_loss, val_acc]
        logtxt = open(log_path, "a")
        logtxt.write("Epoch: {} | Train Loss: {:.4f} Val ACC: {:.4f}\n".format(*info))
        print("\rEpoch: {} | Train Loss: {:.4f} Val ACC: {:.4f}".format(*info))
        logtxt.close()

    t2 = time.time()
    print("Optimization Finished!  Cost time:{:.1f} minutes".format((t2-t0)/60))
    print("Start test in best val model...")
    test_acc_list = []
    tp_list, tn_list, fp_list, fn_list = [], [], [], []
    test_set = MySet(txt_path, mode="test", is_debug=args.smoke_test)
    test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=20 if not args.smoke_test else 0)
    best_weight = os.listdir(best_path)
    for i in range(args.num_model_to_save):
        # Model
        net.load_state_dict(torch.load(os.path.join(best_path, best_weight[i])))
        test_acc, tn, fp, fn, tp = test(net, test_loader)
        test_acc_list.append(test_acc)
        tp_list.append(tp)
        tn_list.append(tn)
        fp_list.append(fp)
        fn_list.append(fn)
    tp = tp_list[np.argmax(test_acc_list)]
    tn = tn_list[np.argmax(test_acc_list)]
    fp = fp_list[np.argmax(test_acc_list)]
    fn = fn_list[np.argmax(test_acc_list)]
    precision = tp/(tp+fp)
    sensitive = tp/(tp+fn)
    specificity = tn/(tn+fp)
    F1score = 2*tp/(2*tp+fp+fn)
    logtxt = open(log_path, "a")
    logtxt.write("Test ACC: {} | best: {:.4f} | sensitive: {:.2f} | specificity: {:.2f} | precision: {:.2f} | F1score: {:.2f} | best weight: {}\n".format(
        test_acc_list, np.max(test_acc_list), sensitive*100, specificity*100, precision*100, F1score*100, best_weight[np.argmax(test_acc_list)]))
    print("The test acc: {} | best: {:.4f} | sensitive: {:.2f} | specificity: {:.2f} | precision: {:.2f} | F1score: {:.2f} | best weight: {}".format(
        test_acc_list, np.max(test_acc_list), sensitive*100, specificity*100, precision*100, F1score*100, best_weight[np.argmax(test_acc_list)]))
    logtxt.close()
Example #20
        data = data.cuda()
        y = net(data)
        predict = y.data.cpu().numpy()
        label = label.data.numpy()
        predicts.extend(np.argmax(predict, axis=1))
        labels.extend(label)
    acc = accuracy_score(labels, predicts)
    tn, fp, fn, tp = confusion_matrix(labels, predicts).ravel()
    return acc, tn, fp, fn, tp


name = ''
txt_path = 'data/txt'
os.environ['CUDA_VISIBLE_DEVICES'] = '4'
# Model
output_path = "output_test/"+name
best_path = os.path.join(output_path, "best_weights")
best_weight = os.path.join(best_path, '15_0_2_0.9074.pth.gz')
net = model().cuda()
net.load_state_dict(torch.load(best_weight))
# Data
test_set = MySet(txt_path, mode="test")
test_loader = DataLoader(test_set, batch_size=10, num_workers=20)
# Run the test
test_acc, tn, fp, fn, tp = test(net, test_loader)
precision = tp/(tp+fp)
sensitive = tp/(tp+fn)
specificity = tn/(tn+fp)
F1score = 2*tp/(2*tp+fp+fn)
print("The test acc:{}".format(test_acc))
print("The precision:{}|sensitive:{}|specificity:{}|F1score:{}".format(precision, sensitive, specificity, F1score))
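For reference, the same metrics can be obtained directly from the label and prediction lists with sklearn; a small self-contained sketch with toy binary values.

import numpy as np
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score

# Toy labels/predictions standing in for the test-set outputs
labels = np.array([0, 1, 1, 0, 1, 0, 1, 1])
predicts = np.array([0, 1, 0, 0, 1, 1, 1, 1])

tn, fp, fn, tp = confusion_matrix(labels, predicts).ravel()
precision = precision_score(labels, predicts)   # tp / (tp + fp)
sensitive = recall_score(labels, predicts)      # tp / (tp + fn)
specificity = tn / (tn + fp)
F1score = f1_score(labels, predicts)            # 2*tp / (2*tp + fp + fn)
print(precision, sensitive, specificity, F1score)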
Example #21
def trainNetworks(train_data, train_labels, epochs=5, learning_rate=0.001):
    """
    Trains 6 networks on the data using the hyperparameters provided and saves the
    final models in the models directory.

    The input is the same for all 6 networks; each network predicts the value of its
    corresponding joint, so each label frame holds 6 target values.

    :param train_data: Numpy array with training data points
    :param train_labels: Numpy array with training labels (6 values per frame)
    :param epochs: Number of training epochs
    :param learning_rate: Learning rate used for training
    """

    on_gpu = False
    if torch.cuda.is_available():
        on_gpu = True

    network1 = network.model()
    network2 = network.model()
    network3 = network.model()
    network4 = network.model()
    network5 = network.model()
    network6 = network.model()

    data_length = len(train_data)

    train_data = torch.from_numpy(train_data)
    train_data = train_data.type(torch.FloatTensor)
    train_labels = torch.from_numpy(train_labels)
    train_labels = train_labels.type(torch.FloatTensor)

    if on_gpu:
        network1 = network.model().cuda()
        network2 = network.model().cuda()
        network3 = network.model().cuda()
        network4 = network.model().cuda()
        network5 = network.model().cuda()
        network6 = network.model().cuda()

        #train_data = train_data.cuda()
        #train_labels = train_labels.cuda()

    optimiser1 = torch.optim.Adam(network1.parameters(), learning_rate)
    optimiser2 = torch.optim.Adam(network2.parameters(), learning_rate)
    optimiser3 = torch.optim.Adam(network3.parameters(), learning_rate)
    optimiser4 = torch.optim.Adam(network4.parameters(), learning_rate)
    optimiser5 = torch.optim.Adam(network5.parameters(), learning_rate)
    optimiser6 = torch.optim.Adam(network6.parameters(), learning_rate)

    criterion = torch.nn.MSELoss()

    avg_train_loss = np.empty((0, 6))
    avg_val_loss = np.empty((0, 6))

    print("Starting training!!!")
    # SGD Implementation
    for epoch in range(epochs):
        index = 0

        train_loss = np.empty((0, 6))
        val_loss = np.empty((0, 6))
    
        # Shuffle train_data randomly
        random_perm = np.random.permutation(data_length)
        train_data1 = train_data[random_perm]
        train_labels1 = train_labels[random_perm]

        # Split into train and val
        val_data = train_data1[:int(0.2*data_length),:]
        val_labels = train_labels1[:int(0.2*data_length),:]

        if on_gpu:
            val_data = val_data.cuda()
            val_labels = val_labels.cuda()

        train_data1 = train_data1[int(0.2*data_length):,:]
        train_labels1 = train_labels1[int(0.2*data_length):,:]

        for data, label in zip(train_data1, train_labels1):
            # data = torch.from_numpy(data)
            # data = data.type(torch.FloatTensor)
            # label = torch.from_numpy(label)
            # label = label.type(torch.FloatTensor)
            #print(data)

            if on_gpu:
                data = data.cuda()
                label = label.cuda()

            output1 = network1(data)
            output2 = network2(data)
            output3 = network3(data)
            output4 = network4(data)
            output5 = network5(data)
            output6 = network6(data)

            # Each label frame consists of 6 values corresponding to each joint
            loss1 = criterion(output1, label[0])
            loss2 = criterion(output2, label[1])
            loss3 = criterion(output3, label[2])
            loss4 = criterion(output4, label[3])
            loss5 = criterion(output5, label[4])
            loss6 = criterion(output6, label[5])

            # Backpropagate loss through each network and take gradient step
            optimiser1.zero_grad()
            loss1.backward()
            optimiser1.step()

            optimiser2.zero_grad()
            loss2.backward()
            optimiser2.step()

            optimiser3.zero_grad()
            loss3.backward()
            optimiser3.step()

            optimiser4.zero_grad()
            loss4.backward()
            optimiser4.step()

            optimiser5.zero_grad()
            loss5.backward()
            optimiser5.step()

            optimiser6.zero_grad()
            loss6.backward()
            optimiser6.step()

            with torch.no_grad():
                print("Progress: {}/{}".format(index+1, len(train_data1)), end="\r", flush=True)
                index += 1

                train_loss = np.append(train_loss, np.array([[loss1.item(), loss2.item(), loss3.item(), loss4.item(), loss5.item(), loss6.item()]]), axis=0)

        
        for data, label in zip(val_data, val_labels):
            output1 = network1(data)
            output2 = network2(data)
            output3 = network3(data)
            output4 = network4(data)
            output5 = network5(data)
            output6 = network6(data)

            loss1 = criterion(output1, label[0])
            loss2 = criterion(output2, label[1])
            loss3 = criterion(output3, label[2])
            loss4 = criterion(output4, label[3])
            loss5 = criterion(output5, label[4])
            loss6 = criterion(output6, label[5])
            
            val_loss = np.append(val_loss, np.array([[loss1.item(), loss2.item(), loss3.item(), loss4.item(), loss5.item(), loss6.item()]]), axis=0)
        # Log progress
        print('Epoch: {}'.format(epoch+1))

        mean_train_loss = np.mean(train_loss, axis=0)
        mean_val_loss = np.mean(val_loss, axis=0)

        avg_train_loss = np.append(avg_train_loss, np.array([mean_train_loss]), axis=0)
        avg_val_loss = np.append(avg_val_loss, np.array([mean_val_loss]), axis=0)
        print('Avg Train Loss: \n{}'.format(mean_train_loss))
        print('Avg Val Loss : \n{}\n'.format(mean_val_loss))
                  
    # Save the models after training
    MODELS_DIR = os.path.join('..' + '/models')
    print('Saving models to: {}'.format(MODELS_DIR))

    np.savetxt('../train_results/avg_train_loss.csv', avg_train_loss, fmt='%10.10f', delimiter=',')
    np.savetxt('../train_results/avg_val_loss.csv', avg_val_loss, fmt='%10.10f', delimiter=',')
    
    torch.save(network1.state_dict(), os.path.join(MODELS_DIR, 'network1.torch'))
    torch.save(network2.state_dict(), os.path.join(MODELS_DIR, 'network2.torch'))
    torch.save(network3.state_dict(), os.path.join(MODELS_DIR, 'network3.torch'))
    torch.save(network4.state_dict(), os.path.join(MODELS_DIR, 'network4.torch'))
    torch.save(network5.state_dict(), os.path.join(MODELS_DIR, 'network5.torch'))
    torch.save(network6.state_dict(), os.path.join(MODELS_DIR, 'network6.torch'))
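Since all six joints go through identical steps, the per-network bookkeeping above can be collapsed into lists. A hedged sketch of the inner training loop only, assuming the same network.model interface and the surrounding variables (on_gpu, train_data1, train_labels1, criterion, learning_rate) from the function above.

# Sketch only: the same inner loop with networks and optimisers held in lists
networks = [network.model() for _ in range(6)]
if on_gpu:
    networks = [net.cuda() for net in networks]
optimisers = [torch.optim.Adam(net.parameters(), learning_rate) for net in networks]

for data, label in zip(train_data1, train_labels1):
    if on_gpu:
        data, label = data.cuda(), label.cuda()
    for j, (net, opt) in enumerate(zip(networks, optimisers)):
        output = net(data)                 # one prediction per joint network
        loss = criterion(output, label[j])
        opt.zero_grad()
        loss.backward()
        opt.step()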
Example #22
# Load the properties of your dataset
char2int = pickle.load(open("char2int.pickle", "rb"))
int2char = pickle.load(open("int2char.pickle", "rb"))

# The sentence you want to start with
seed = "i have no idea how to"

# Build the model (its architecture must match the one used for training)
sequence_length = 100
n_unique_chars = len(char2int)
model = model(n_unique_chars, sequence_length)

# Load the trained weights from the checkpoint
model.load_weights("results/netCheckpoint-v1-1.27.h5")

# generate 1000 characters
generated = ""
for i in tqdm.trange(1000):
    # make the input sequence
    X = np.zeros((1, sequence_length, n_unique_chars))
    for t, char in enumerate(seed):
        #Convert the input into numeric values
        X[0, (sequence_length - len(seed)) + t, char2int[char]] = 1
    #prediction
    predicted = model.predict(X, verbose=0)[0]
    # take the index of the most likely next character
    next_index = np.argmax(predicted)
    # map the index back to its character
    next_char = int2char[next_index]
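    # The snippet ends here; a typical continuation (an assumption, not part of the
    # original) accumulates the output and slides the seed window forward:
    generated += next_char
    seed = (seed + next_char)[-sequence_length:]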
Example #23
    model_train = model.fit(digits_train,
                            encoded_titles_train,
                            validation_data=(digits_train_cross,
                                             encoded_titles_train_cross),
                            epochs=epochs,
                            batch_size=batch_size,
                            verbose=verbose)

    # Final evaluation of the model
    scores = model.evaluate(digits_test, encoded_titles_test, verbose=verbose)
    print("Large CNN Error: %.2f%%" % (100 - scores[1] * 100))

else:
    print("Training CNN model")
    # Build the model
    model = model(pixels, classes, dropout)

    # Train the model; batch_size, epochs and verbose come from the settings defined earlier
    model_train = model.fit(digits_train,
                            encoded_titles_train,
                            validation_data=(digits_train_cross,
                                             encoded_titles_train_cross),
                            batch_size=batch_size,
                            epochs=epochs,
                            verbose=verbose)

print("\nSaving model for webapp")
model_save_path = "output"
tensorflowjs.converters.save_keras_model(model, model_save_path)

# training the model and saving metrics in history
Example #24
File: train.py  Project: jwkvam/im2latex
#!/usr/bin/env python
# -*- coding: utf-8 -*-

import network
import joblib
import numpy as np

model = network.model()


features = joblib.load('features.jbl')
labels = joblib.load('labels.jbl').astype(int)

features = features.reshape(features.shape[0], 1,
                            features.shape[1],
                            features.shape[2])

print('feature shape = {}'.format(features.shape))
print('labels shape = {}'.format(labels.shape))

exp_labels = np.zeros((labels.shape[0], 10, 3))
exp_labels[np.arange(exp_labels.shape[0])[:, None], labels, np.arange(3)[None, :]] = 1
exp_labels = exp_labels.transpose((0, 2, 1))

print(features.shape)
print(exp_labels.shape)
#
model.fit(features, exp_labels, batch_size=2, verbose=True)
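The fancy-indexed one-hot construction above is compact but easy to misread; a tiny self-contained check of the shape it produces, using toy labels rather than the real data.

import numpy as np

labels = np.array([[0, 2, 9],
                   [1, 1, 3]])                  # shape (N, 3): one digit index per position
exp_labels = np.zeros((labels.shape[0], 10, 3))
exp_labels[np.arange(labels.shape[0])[:, None], labels, np.arange(3)[None, :]] = 1
exp_labels = exp_labels.transpose((0, 2, 1))    # -> shape (N, 3, 10): one-hot over 10 classes
print(exp_labels.shape)                         # (2, 3, 10)
print(exp_labels[0, 0])                         # one-hot vector for digit 0 in position 0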
Example #25
data = pd.read_csv(
    'C:\\Users\\jaide\\OneDrive\\Documents\\VSCODE\\NeuralNets\\Housing.csv')

columns = [
    'CRIM', 'ZN', 'INDUS', 'CHAS', 'NOX', 'RM', 'AGE', 'DIS', 'RAD', 'TAX',
    'PTR', 'B', 'LSTAT', 'MEDB'
]

data = data.values

features = []
labels = []

for x in data:
    lt = []
    for y in x[0].split(' '):
        if y != '':
            lt.append(float(y))

    features.append(lt[:-1])
    labels.append([lt[13]])

features = np.array(features)
labels = np.array(labels)

x_train, y_train = features[:400], labels[:400]
x_test, y_test = features[400:], labels[400:]

model(x_train, y_train, x_test, y_test)
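Since the file is whitespace-separated data packed into a single CSV column, it can also be parsed in one step; a hedged alternative that assumes the same path, the column names defined above, and no header row in the file.

# Sketch: read the whitespace-separated file directly (header-less file assumed)
data = pd.read_csv(
    'C:\\Users\\jaide\\OneDrive\\Documents\\VSCODE\\NeuralNets\\Housing.csv',
    sep=r'\s+', header=None, names=columns)
features = data[columns[:-1]].to_numpy()
labels = data[[columns[-1]]].to_numpy()
x_train, y_train = features[:400], labels[:400]
x_test, y_test = features[400:], labels[400:]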