Example #1
# Start training
batch_time = []
batch_loss = []
train_start = timer.perf_counter()  # start of training
for i in range(batches):
    torch.cuda.synchronize()  # block until pending CUDA work finishes so batch_start is accurate
    batch_start = timer.perf_counter()

    bXt = Variable(torch.from_numpy(bX).cuda())
    bYt = Variable(torch.from_numpy(bY).cuda())
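    # Note: torch.autograd.Variable is deprecated since PyTorch 0.4; on current
    # versions the .cuda() tensors can be passed to the model directly.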

    optimizer.zero_grad()
    output = net(bXt)
    loss = criterion(output, bYt.long())
    loss.backward()
    optimizer.step()

    torch.cuda.synchronize()  # block again so batch_end covers the GPU work of this batch
    batch_end = timer.perf_counter()
    batch_time.append(batch_end - batch_start)
    batch_loss.append(float(loss.data.cpu().numpy()))
train_end = timer.perf_counter()  # end of training

# Write results
print_results(batch_time)
check_results(batch_loss, batch_time, train_start, train_end)
write_results(script_name=os.path.basename(__file__),
              bench=bench,
              experiment=experiment,
              parameters=params,
              run_time=batch_time,
              version=version)
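
The synchronize-then-time pattern in Example #1 can be factored into a reusable
helper. The sketch below is hypothetical (cuda_timed is not part of the
benchmark script) and assumes `timer` is Python's time module:

import time as timer
from contextlib import contextmanager

import torch

@contextmanager
def cuda_timed(results):
    # Wait for pending CUDA kernels so the clock starts on an idle device.
    torch.cuda.synchronize()
    start = timer.perf_counter()
    yield
    # Wait again so the measurement includes the GPU work launched inside.
    torch.cuda.synchronize()
    results.append(timer.perf_counter() - start)

# Usage inside the loop above: with cuda_timed(batch_time): <training step>
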
Example #2
params = 0
for variable in tf.trainable_variables():
    variable_parameters = 1
    for dim in variable.get_shape():
        variable_parameters *= dim.value
    params += variable_parameters
print('# network parameters: ' + str(params))
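# An equivalent one-liner for the count above (sketch; assumes numpy imported as np):
#   params = sum(np.prod(v.get_shape().as_list()) for v in tf.trainable_variables())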

# Start training
with tf.Session(config=config) as sess:
    sess.run(init)
    time = []
    for i in range(batches):
        print('Batch {}/{}'.format(i, batches))
        start = timer.perf_counter()
        _, output = sess.run([train_step, h3],
                             feed_dict={
                                 x: bX,
                                 y: bY,
                                 x_len: b_lenX
                             })
        end = timer.perf_counter()
        time.append(end - start)
        assert output.shape == (batch_size, classes)
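        # sess.run() blocks until the fetched tensors are ready, so no explicit
        # device synchronization is needed around the timer calls here.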

# Write results
write_results(script_name=os.path.basename(__file__),
              bench=bench,
              experiment=experiment,
              parameters=params,
              run_time=time,
              version=version)
print_results(time)
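
Both examples rely on print_results, check_results, and write_results from the
benchmark's support module, which is not shown here. A minimal stand-in for
print_results, assuming it only reports summary statistics over the recorded
per-batch times, could look like:

import numpy as np

def print_results(run_time):
    # Summarize per-batch wall-clock times in seconds. Sketch only: the real
    # support module may compute and report different fields.
    times = np.asarray(run_time)
    print('batches measured:  {}'.format(times.size))
    print('mean batch time:   {:.4f} s'.format(times.mean()))
    print('median batch time: {:.4f} s'.format(np.median(times)))
    print('std of batch time: {:.4f} s'.format(times.std()))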