Example #1
def get_data(self, video_paths, label_paths, gt_paths, clips, mode):
    print("create generator...")
    batch_gen = process_data.get_batch(video_paths,
                                       label_paths,
                                       gt_paths,
                                       clips=clips,
                                       batch_size=self.batch_size,
                                       mode=mode,
                                       width=self.width,
                                       height=self.height)
    return batch_gen
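
process_data.get_batch is not shown in this example; a minimal sketch of a
generator in the same role (yielding fixed-size mini-batches indefinitely)
might look like the following. batch_generator, its parameters, and the
in-memory arrays are illustrative assumptions, not the actual process_data API.

import numpy as np

def batch_generator(inputs, targets, batch_size):
    # Yield (inputs, targets) mini-batches forever, reshuffling on each pass.
    # inputs and targets are assumed to be indexable NumPy arrays.
    n = len(inputs)
    while True:
        order = np.random.permutation(n)
        for start in range(0, n - batch_size + 1, batch_size):
            idx = order[start:start + batch_size]
            yield inputs[idx], targets[idx]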
Example #2
def evaluate(model, data_src):
    model.eval()
    total_loss = 0
    src_mask = model.gen_square_subsequent_mask(bptt).to(device)
    with torch.no_grad():
        for ii in range(0, data_src.size(0) - 1, bptt):
            data, targets = get_batch(data_src, ii)
            if data.size(0) != bptt:
                # The final batch can be shorter than bptt; rebuild the mask to match.
                src_mask = model.gen_square_subsequent_mask(
                    data.size(0)).to(device)
            # The forward pass and loss accumulation run for every batch,
            # not only when the mask is rebuilt.
            output = model(data, src_mask)
            output_flat = output.view(-1, ntokens)
            total_loss += len(data) * criterion(output_flat,
                                                targets).item()
    return total_loss / (len(data_src) - 1)
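
This evaluation loop mirrors the PyTorch transformer language-modelling
tutorial, where get_batch slices bptt-long windows out of a batchified corpus.
Presumably something close to the tutorial's version is in scope (bptt is a
module-level constant, as above):

def get_batch(source, i):
    # source: (sequence_length, batch_size) tensor produced by batchify().
    # Returns a bptt-long input window and the targets shifted by one token.
    seq_len = min(bptt, len(source) - 1 - i)
    data = source[i:i + seq_len]
    target = source[i + 1:i + 1 + seq_len].reshape(-1)
    return data, target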
Example #3
def train(model):
    # Turn on the train mode
    model.train()
    total_loss = 0
    log_interval = 200
    start_time = time.time()
    src_mask = model.gen_square_subsequent_mask(bptt).to(device)

    def _gen_train_info():
        return ('| epoch {:3d} | {:5d}/{:5d} batches | ' +
                'lr {:02.2f} | ms/batch {:5.2f} | ' +
                'loss {:5.2f} | ppl {:8.2f}').format(
                    epoch, batch,
                    len(train_data) // bptt,
                    scheduler.get_last_lr()[0], elapsed * 1000 / log_interval,
                    cur_loss, math.exp(cur_loss))

    for batch, ii in enumerate(range(0, train_data.size(0) - 1, bptt)):
        data, targets = get_batch(train_data, ii)

        optimizer.zero_grad()
        if data.size(0) != bptt:
            src_mask = model.gen_square_subsequent_mask(
                data.size(0)).to(device)
        output = model(data, src_mask)
        loss = criterion(output.view(-1, ntokens), targets)
        loss.backward()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.5)
        optimizer.step()

        total_loss += loss.item()
        if batch % log_interval == 0 and batch > 0:
            cur_loss = total_loss / log_interval
            elapsed = time.time() - start_time
            print(_gen_train_info())
            total_loss = 0
            start_time = time.time()
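
For completeness, a typical driver for train() and evaluate() in this setup
might look as follows; epochs, val_data, and scheduler are assumed to be
defined elsewhere, so this is a sketch rather than the original code.

best_val_loss = float("inf")
for epoch in range(1, epochs + 1):
    train(model)
    val_loss = evaluate(model, val_data)
    print("| end of epoch {:3d} | valid loss {:5.2f} |".format(epoch, val_loss))
    best_val_loss = min(best_val_loss, val_loss)
    scheduler.step()  # decay the learning rate once per epoch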
Example #4
print(y_conv)
# The tf.nn.softmax_cross_entropy_with_logits function can define the
# cross-entropy loss directly on the logits; it is written as
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=y_, logits=y_conv))

train_op = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct = tf.equal(tf.argmax(y_, axis=1), tf.argmax(y_conv, axis=1))
acc = tf.reduce_mean(tf.cast(correct, tf.float32))

saver = tf.train.Saver()
ck_path = "./model/hand"
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(100):
        batch = get_batch(50)
        if i % 10 == 0:
            # I originally wrote acc = sess.run(acc, feed_dict={x: batch[0], y_: batch[1], keep_drop: 1.0});
            # that rebinds acc to a plain number, so the next fetch of acc fails.
            acc_v = sess.run(acc,
                             feed_dict={
                                 x_image: batch[0],
                                 y_: batch[1],
                                 keep_drop: 1.0
                             })
            print(acc_v)
            print("save")
            saver.save(sess, ck_path, write_meta_graph=True)
        sess.run(train_op,
                 feed_dict={
                     x_image: batch[0],
                     y_: batch[1],
                     keep_drop: 0.5  # training keep probability; assumed value
                 })
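
get_batch(50) above returns an (images, one-hot labels) pair but is not
defined in the snippet. An illustrative stand-in over placeholder in-memory
arrays (not the original helper, and with made-up data shapes) might be:

import numpy as np

rng = np.random.default_rng(0)
train_images = rng.random((1000, 28, 28, 1), dtype=np.float32)  # placeholder data
train_labels = np.eye(10, dtype=np.float32)[rng.integers(0, 10, 1000)]

def get_batch(batch_size):
    # Draw a random mini-batch; batch[0] is images, batch[1] is one-hot labels.
    idx = rng.integers(0, len(train_images), size=batch_size)
    return train_images[idx], train_labels[idx]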
Example #5

total = rep(data)

sc = MinMaxScaler((-0.95, 0.95))
total = sc.fit_transform(total)

#arr = total[60:60+batch_size, :]
#_, l = sess.run([training_op, loss], feed_dict = {X_in: arr, Y: arr, rate: 0.2})
#print("Batch loss: " + str(l))

epoch_loss = []
for epoch in range(epochs):
    print("Epoch: " + str(epoch))
    # Collect losses across the whole epoch; resetting this inside the
    # inner loop would make the epoch mean cover only the last batch.
    batch_loss = []
    for i in range(len(total) // batch_size):
        batch = process_data.get_batch(i, batch_size, total)
        _, l = sess.run([training_op, loss],
                        feed_dict={
                            X_in: batch,
                            Y: batch,
                            rate: 0.2
                        })
        batch_loss.append(l)

    print("Curr Epoch: " + str(epoch))
    print("Batch Loss: " + str(np.mean(batch_loss)))
    epoch_loss.append(np.mean(batch_loss))

print("Epoch Avg Loss: " + str(np.mean(epoch_loss)))
save_path = saver.save(sess, "./Model/model1.ckpt")
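
Here process_data.get_batch(i, batch_size, total) appears to take sequential
slices of the scaled array; a plausible equivalent (illustrative only, not the
original helper) is:

def get_batch(i, batch_size, data):
    # Return the i-th contiguous window of batch_size rows.
    return data[i * batch_size:(i + 1) * batch_size]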