Code example #1
def train(cur_iter, total_iter, data_loader, model, criterion, optimizer,
          scheduler, opt):
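    # Assumed to be imported/defined in the enclosing module (not shown here):
    # time, os, torch, Variable (torch.autograd), AverageMeter, calculate_accuracy.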
    model.train()  # put the model in training mode

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    end_time = time.time()
    i = cur_iter
    while i < total_iter:
        for _, (inputs, targets) in enumerate(data_loader):

            if not opt.no_cuda:
                targets = targets.cuda(non_blocking=True)

            targets = Variable(targets)
            inputs = Variable(inputs)

            outputs = model(inputs)

            loss = criterion(outputs, targets)

            acc = calculate_accuracy(outputs, targets)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step(loss.item())

            print('Iter:{} Loss_conf:{} acc:{} lr:{}'.format(
                i + 1, loss.item(), acc, optimizer.param_groups[0]['lr']),
                  flush=True)
            i += 1

            if i % 2000 == 0:
                save_file_path = os.path.join(opt.result_dir,
                                              'model_iter{}.pth'.format(i))
                print("save to {}".format(save_file_path))
                states = {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }
                torch.save(states, save_file_path)
            if i >= total_iter:
                break

    save_file_path = os.path.join(opt.result_dir, 'model_final.pth')
    print("save to {}".format(save_file_path))
    states = {
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    torch.save(states, save_file_path)
Code example #2
def train(cur_iter, iter_per_epoch, epoch, data_loader, model, criterion, optimizer, scheduler, opt):
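    # Assumed to be imported/defined in the enclosing module (not shown here): time, os,
    # json, datetime, torch, SummaryWriter (torch.utils.tensorboard or tensorboardX),
    # AverageMeter, Configure, calculate_accuracy.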
    # 19.3.14. add
    # print("device : ", torch.cuda.get_device_name(0), flush=True)
    # torch.set_default_tensor_type('torch.cuda.DoubleTensor')
    # 19.5.10. revision
    for i in range(opt.gpu_num):
        print("device {} : {}".format(i, torch.cuda.get_device_name(i)), flush=True)

    # batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    i = cur_iter
    total_acc, epoch_acc, avg_acc = dict(), dict(), dict()
    if opt.loss_type == 'multiloss':
        keys = ['loc', 'conf']
    else:
        keys = ['conf']

    for key in keys:
        total_acc[key] = [0.0] * epoch
        epoch_acc[key] = 0.0
        avg_acc[key] = 0.0

    # for debug
    # print(opt.cuda) : True

    total_iter = epoch * iter_per_epoch
    save_timing = int(iter_per_epoch / 5)
    if opt.use_save_timing:
        if save_timing < 2000:
            save_timing = 2000
        elif save_timing > 5000:
            save_timing = 5000
    epoch_time = time.time()

    writer = SummaryWriter('runs/')
    c = Configure(sample_duration=opt.sample_duration, data_type=opt.train_data_type, policy=opt.layer_policy)
    default = c.get_default_bar()
    print('\n====> Training Start', flush=True)
    while i < total_iter:
        start_time = time.time()
        for _, (inputs, targets) in enumerate(data_loader):
            # for check size
            # print(sys.getsizeof(inputs))

            # 19.3.7 add
            # if opt.cuda:
            #     targets = targets.cuda(async=True)
            #     inputs = inputs.cuda(async=True)

            # 19.3.8. revision
            # 19.7.16
            # .cuda > .to
            # with torch.no_grad() for target
            # remove 'if opt.cuda:'
            inputs = inputs.to(opt.device, non_blocking=True)
            with torch.no_grad():
                targets = targets.to(opt.device, non_blocking=True)

            # 19.7.16. no use
            # targets = Variable(targets)
            # inputs = Variable(inputs)

            outputs = model(inputs)

            # if opt.loss_type in ['normal', 'KDloss']:
            #     targets = targets[:, -1].to(torch.long)

            loss_loc, loss_conf = criterion(outputs, targets)
            alpha = 0.5
            loss = loss_loc * alpha + loss_conf * (1. - alpha)

            if opt.loss_type in ['KDloss']:
                outputs = outputs[1]

            acc = calculate_accuracy(outputs, targets, opt.sample_duration, default, opt.device)
            for key in keys:
                avg_acc[key] += acc[key] / 10
                epoch_acc[key] += acc[key] / iter_per_epoch

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step(loss.item())

            i += 1

            if i % 10 == 0:
                batch_time = time.time() - start_time
                lr = optimizer.param_groups[0]['lr']
                print('Iter:{} Loss(per 10 batch):{} lr:{} batch_time:{:.3f}s'.format(
                    i, loss.item(), lr, batch_time), flush=True)

                writer.add_scalar('loss/loss_loc', loss_loc.item(), i)
                writer.add_scalar('loss/loss_conf', loss_conf.item(), i)
                writer.add_scalar('loss/loss', loss.item(), i)
                writer.add_scalar('learning_rate', lr, i)
                writer.add_scalar('acc_loss/loss_loc', loss_loc.item(), i)
                writer.add_scalar('acc_loss/loss_conf', loss_conf.item(), i)
                writer.add_scalar('acc_loss/loss', loss.item(), i)

                for key in keys:
                    print('avg_acc:{:.5f}({}) epoch_acc:{:.9f}({})'.format(
                        avg_acc[key], key, epoch_acc[key], key), end=' ', flush=True)
                    writer.add_scalar('accuracy/acc_{}'.format(key), avg_acc[key], i)
                    writer.add_scalar('acc_loss/acc_{}'.format(key), avg_acc[key], i)
                    avg_acc[key] = 0.0
                print(flush=True)
                start_time = time.time()

            if i % save_timing == 0:
                save_file_path = os.path.join(opt.result_dir, 'model_iter{}.pth'.format(i))
                print("save to {}".format(save_file_path))
                states = {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }
                torch.save(states, save_file_path)
            if i % iter_per_epoch == 0 and i != 0:
                print("epoch {} accuracy : ".format(i / iter_per_epoch), end='', flush=True)
                for key in keys:
                    print('{}({}) '.format(epoch_acc[key], key), end='', flush=True)
                    total_acc[key][int(i / iter_per_epoch)-1] = float(epoch_acc[key])
                    epoch_acc[key] = 0.0
                print(flush=True)
            if i >= total_iter:
                break

    total_time = time.time() - epoch_time
    total_time = datetime.timedelta(seconds=total_time)
    print("Training Time : {}".format(total_time), flush=True)
    total_acc['Training_Time'] = str(total_time)

    save_file_path = os.path.join(opt.result_dir, 'model_final.pth')
    print("save to {}".format(save_file_path), flush=True)
    states = {
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    torch.save(states, save_file_path)

    json.dump(total_acc, open(os.path.join(opt.result_dir, 'epoch_accuracy_and_total_time.json'), 'w'))
    writer.close()
Code example #3
def train(network_architecture, learning_rate=0.001,
          batch_size=100, training_epochs=10, display_step=1, test_batches=[]):
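    # Assumed globals/helpers from the enclosing script (not shown here): tf (TensorFlow 1.x),
    # n_samples, trX, generate_batches, VariationalAutoencoder, calculate_accuracy.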
    vae = VariationalAutoencoder(network_architecture,
                                 learning_rate=learning_rate,
                                 transfer_fct=tf.nn.softmax,
                                 batch_size=batch_size)

    total_batch = int(n_samples / batch_size)
    # train_batches = np.array_split(trX, total_batch)
    train_batches = list(generate_batches(trX.values, batch_size))


    with tf.Session() as sess:

        #Tensorboard Initialization

        sess.run(tf.global_variables_initializer())
        writer_1 = tf.summary.FileWriter("./tensorboard_summary/final_vae_501")

        log_var = tf.Variable(0.0)
        log_var2 = tf.Variable(0.0)
        tf.summary.scalar("accuracy", log_var)
        tf.summary.scalar("loss",log_var2)

        write_op = tf.summary.merge_all()

        # Training cycle
        for epoch in range(training_epochs):
            avg_cost = 0.

            # Loop over all batches
            for i in range(total_batch):
                batch_xs = train_batches[i]
                # Fit training using batch data
                cost = vae.partial_fit(batch_xs)
                # Compute average loss
                avg_cost += cost / n_samples * batch_size

            # Display logs per epoch step
            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch + 1),
                      "cost=", "{:.9f}".format(avg_cost))


            # Write the epoch's reconstruction accuracy to TensorBoard
            train_accuracy = 0.

            if test_batches:  # guard against the empty default
                for x_batch in test_batches:
                    x_reconstr = vae.reconstruct(x_batch)
                    train_accuracy += calculate_accuracy(x_batch, x_reconstr)
                train_accuracy /= float(len(test_batches))
            print(train_accuracy)

            if epoch % 5 == 0 and epoch != 0:
                summary = sess.run(write_op, {log_var: train_accuracy, log_var2: float(int(avg_cost))})
                writer_1.add_summary(summary, epoch)
                writer_1.flush()

    return vae
Code example #4
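# Driver fragment: assumes time, network_architecture, test_batches, calculate_accuracy,
# and the train() function from code example #3 are already defined/imported.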
start_time = time.time()
print("--- %s seconds ---" % (time.time() - start_time))

vae = train(network_architecture, training_epochs=1000, learning_rate=1e-3,test_batches=test_batches)

print("--- %s seconds ---" % (time.time() - start_time))



avg_accuracy = 0.

for x_batch in test_batches:
    x_reconstr = vae.reconstruct(x_batch)
    avg_accuracy += calculate_accuracy(x_batch, x_reconstr)

avg_accuracy = avg_accuracy / float(len(test_batches))
print(avg_accuracy)



'''
correct = tf.equal(tf.argmax(x_reconstr, 1), tf.argmax(vae.x, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({vae.x: teX[:100]}))
'''
'''x_reconstr = vae.reconstruct(teX[:100])
correct = tf.equal(tf.argmax(x_reconstr, 1), tf.argmax(vae.x, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:', accuracy.eval({vae.x: teX[:100]}))'''
Code example #5
# print("train data size: %d, test data size: %d" % (len(dataset.train.data), len(dataset.test.data)))
# print("train labels size: %d, test labels size: %d" % (len(dataset.train.labels), len(dataset.test.labels)))
#
# batch_size = 100
# iter_count = 10
# for i in range(iter_count):
#     data, labels = dataset.train.next_batch(batch_size)
#     assert len(data) == len(labels) == batch_size, "Batch size is incorrect"
#
#     batch = dataset.train.next_batch(batch_size)
#     assert len(batch[0]) == len(batch[1]) == batch_size, "Batch size is incorrect"
#
#     print("batch %d of length %d" % (i+1, len(data)))
#
# batch_size_to_print = 10
# print()
#for d, l in list(zip(*dataset.train.next_batch(batch_size_to_print))):
#    print(d, "->", l)
#
#print()
#for d, l in list(zip(*dataset.test.next_batch(batch_size_to_print))):
#    print(d, "->", l)
#
# dataset.save_all()

from lib.utils import calculate_accuracy

lst1 = [[1, 2, 3], [4, 5, 6], [1, 2, 3]]
lst2 = [[1, 2, 3], [4, 4, 6], [1, 4, 3]]
print('acc = %.2f' % calculate_accuracy(lst1, lst2))
Code example #6
def train_neural_network(network_architecture,
                         learning_rate=1e-3,
                         batch_size=100,
                         epochs=100,
                         activation_fct=tf.nn.sigmoid):
    ''' Choosing the model Architecture '''
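    # Assumed globals/helpers from the enclosing script (not shown here): tf (TensorFlow 1.x),
    # ceil (from math), trX, teX, dataset, API_Names, corrupt_rate, generate_batches,
    # DenoisingAutoencoder, calculate_accuracy, prepare_mashup, save_reduced_dimensions.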

    autoencoder = DenoisingAutoencoder(network_architecture,
                                       learning_rate=learning_rate,
                                       batch_size=batch_size,
                                       activation_fct=activation_fct)

    #autoencoder = StackedAutoencoder(network_architecture, learning_rate=learning_rate, batch_size=batch_size,
    #                  activation_fct=activation_fct)

    #autoencoder = Autoencoder(n_layers=network_architecture['layers'],
    #                     transfer_function = tf.nn.softplus,
    #                    optimizer = tf.train.AdamOptimizer(learning_rate = 0.001))

    # autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=trX.shape[1],
    #                                              n_z=int(50 * trX.shape[1] / 100),
    #                                             transfer_function=tf.nn.sigmoid,
    #                                            optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
    #                                           scale=0.001)

    # autoencoder = StackedSparseAutoencoder(n_input=trX.shape[1],
    #                                        network_architecture=network_architecture,
    #                                        n_z=network_architecture['n_z'],
    #                                        transfer_function=tf.nn.sigmoid,
    #                                        optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
    #                                        scale=0.001)

    n_batches = int(ceil(len(trX) / batch_size))
    train_batches = list(generate_batches(trX.values, batch_size))

    with tf.Session() as sess:

        sess.run(tf.global_variables_initializer())
        '''
       train_accuracy_variable = tf.Variable(0.0, name="accuracy")

        merge_summary = tf.summary.merge_all()
        writer = tf.summary.FileWriter("./tensorboard_summary/5")
        writer.add_graph(sess.graph) 
        '''

        writer_1 = tf.summary.FileWriter("./tensorboard_summary/602")

        log_var = tf.Variable(0.0)
        log_var2 = tf.Variable(0.0)
        tf.summary.scalar("accuracy", log_var)
        tf.summary.scalar("loss", log_var2)

        write_op = tf.summary.merge_all()

        for epoch in range(epochs):
            epoch_loss = 0
            avg_cost = 0.

            for i in range(n_batches):
                x_batch = train_batches[i]  # trX, trY = mnist.train.next_batch(batch)
                # sess.run([SAE.optimizer, SAE.cost], feed_dict={SAE.x: x_batch})
                _, c = autoencoder.partial_fit(x_batch, corrupt_rate=corrupt_rate)

                epoch_loss += c
                # Compute average loss
                avg_cost += c / n_batches * batch_size

            print("Epoch:", (epoch + 1), ' Loss: ', epoch_loss, "cost=",
                  "{:.9f}".format(avg_cost))

            x_hat = autoencoder.reconstructor(teX.values,
                                              corrupt_rate=corrupt_rate)
            train_accuracy = calculate_accuracy(teX.values, x_hat)
            print('Train accuracy: ', train_accuracy)
            print('')
            '''
            with tf.name_scope('accuracy'):
            tf.summary.scalar("Training Accuracy", train_accuracy_variable)

            summary = sess.run(merge_summary, feed_dict={train_accuracy_variable: train_accuracy})
            writer.add_summary(summary, epoch)
            writer.flush()
            
            '''
            if epoch % 10 == 0 and epoch != 0:
                summary = sess.run(write_op, {
                    log_var: train_accuracy,
                    log_var2: float(int(avg_cost))
                })
                writer_1.add_summary(summary, epoch)
                writer_1.flush()

        x_hat = autoencoder.reconstructor(teX.values,
                                          corrupt_rate=corrupt_rate)
        print(len(x_hat))
        print(len(teX))
        accuracy = calculate_accuracy(teX.values, x_hat)
        print('Accuracy: ', accuracy)

        # correct = tf.equal(tf.argmax(SAE.x_hat, 1), tf.argmax(SAE.x, 1))
        # accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        # print('Old Accuracy :', accuracy.eval({SAE.x: trX}))
        """ Replace API Services """
        z = autoencoder.transformer(dataset.values, corrupt_rate=corrupt_rate)

        data_dict = dict()
        for i, api_name in enumerate(API_Names):
            if api_name in data_dict:
                pass  #print(api_name)
            else:
                data_dict[api_name] = z[i]

        print(len(data_dict.keys()))

        prepare_mashup(data_dict)
        save_reduced_dimensions(data_dict)
Code example #7
def train_neural_network(network_architecture, learning_rate=1e-3, batch_size=100, epochs=100,
                         activation_fct=tf.nn.sigmoid):
    ''' Choosing the model Architecture '''
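    # Assumed globals/helpers as in code example #6 (tf, ceil, trX, teX, dataset, API_Names,
    # generate_batches, StackedAutoencoder, calculate_accuracy, prepare_mashup,
    # save_reduced_dimensions); corrupt_rate is not used by this variant.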

    #autoencoder = DenoisingAutoencoder(network_architecture, learning_rate=learning_rate, batch_size=batch_size,
    #                                  activation_fct=activation_fct)

    autoencoder = StackedAutoencoder(network_architecture, learning_rate=learning_rate, batch_size=batch_size,
                       activation_fct=activation_fct)

    # autoencoder = AdditiveGaussianNoiseAutoencoder(n_input=trX.shape[1],
    #                                              n_z=int(50 * trX.shape[1] / 100),
    #                                             transfer_function=tf.nn.sigmoid,
    #                                            optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
    #                                           scale=0.001)

    # autoencoder = StackedSparseAutoencoder(n_input=trX.shape[1],
    #                                        network_architecture=network_architecture,
    #                                        n_z=network_architecture['n_z'],
    #                                        transfer_function=tf.nn.sigmoid,
    #                                        optimizer=tf.train.AdamOptimizer(learning_rate=0.001),
    #                                        scale=0.001)

    n_batches = int(ceil(len(trX) / batch_size))
    train_batches = list(generate_batches(trX.values, batch_size))

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        for epoch in range(epochs):
            epoch_loss = 0
            avg_cost = 0.

            for i in range(n_batches):
                x_batch = train_batches[i]  # trX, trY = mnist.train.next_batch(batch)
                _, c = autoencoder.partial_fit(x_batch) #,corrupt_rate=corrupt_rate)  # sess.run([SAE.optimizer, SAE.cost], feed_dict={SAE.x: x_batch})

                epoch_loss += c
                # Compute average loss
                avg_cost += c / n_batches * batch_size

            print("Epoch:", (epoch + 1), ' Loss: ', epoch_loss, "cost=", "{:.9f}".format(avg_cost))

        x_hat = autoencoder.reconstructor(teX.values) #,corrupt_rate=corrupt_rate)
        print(len(x_hat))
        print(len(teX))
        accuracy = calculate_accuracy(teX.values, x_hat)
        print('Accuracy: ', accuracy)

        # correct = tf.equal(tf.argmax(SAE.x_hat, 1), tf.argmax(SAE.x, 1))
        # accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        # print('Old Accuracy :', accuracy.eval({SAE.x: trX}))

        """ Replace API Services """
        z = autoencoder.transformer(dataset.values)#,corrupt_rate=corrupt_rate)

        data_dict = dict()
        for i, api_name in enumerate(API_Names):
            if api_name in data_dict:
                print(api_name)
            else:
                data_dict[api_name] = z[i]

        print(len(data_dict.keys()))

        prepare_mashup(data_dict)
        save_reduced_dimensions(data_dict)
Code example #8
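    # Fragment from inside a larger function: assumes keras, rnn_lstm, load_data,
    # calculate_accuracy, filename, seq_len, service_size, and Epochos are defined
    # in the enclosing scope.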
    X_train, Y_train, X_test, Y_test = load_data(filename,
                                                 seq_len,
                                                 service_size=service_size)

    print('> Data Loaded. Compiling...')

    model = rnn_lstm.build_model(
        [service_size, seq_len, 2 * seq_len, service_size])

    #Tensorboard
    tbCallBack = keras.callbacks.TensorBoard(
        log_dir='./tensorboard_summary/LSTM/5',
        histogram_freq=10,
        write_graph=True,
        write_images=True)

    model.fit(X_train,
              Y_train,
              batch_size=10,
              epochs=Epochos,
              validation_split=0.05,
              callbacks=[tbCallBack])

    Y_predicted = rnn_lstm.predict_point_by_point(model, X_test)

    print(calculate_accuracy(Y_test, Y_predicted))
    for i in range(10):
        print('Desired output', Y_test[i])
        print('Predicted output', Y_predicted[i])
Code example #9
def train(cur_iter, total_iter, data_loader, model, criterion, optimizer,
          scheduler, opt, device):
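    # Assumed to be imported/defined in the enclosing module (not shown here):
    # time, os, torch, Variable (torch.autograd), AverageMeter, calculate_accuracy.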
    model.train()  # put the model in training mode

    # 19.3.14. add
    print("device : ", torch.cuda.get_device_name(0))
    # torch.set_default_tensor_type('torch.cuda.DoubleTensor')
    model.to(device)

    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    accuracies = AverageMeter()

    end_time = time.time()
    i = cur_iter

    # for debug
    # print(not(opt.no_cuda)) : True
    print('\n====> Training Start')
    while i < total_iter:
        for _, (inputs, targets) in enumerate(data_loader):

            # 19.3.7 add
            # if not opt.no_cuda:
            #     targets = targets.cuda(async=True)
            #     inputs = inputs.cuda(async=True)

            targets = Variable(targets)
            inputs = Variable(inputs)

            # 19.3.8. revision
            if not opt.no_cuda:
                targets = targets.to(device)
                inputs = inputs.to(device)

            outputs = model(inputs)

            loss = criterion(outputs, targets)

            acc = calculate_accuracy(outputs, targets)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            scheduler.step(loss.item())

            print('Iter:{} Loss_conf:{} acc:{} lr:{}'.format(
                i + 1, loss.item(), acc, optimizer.param_groups[0]['lr']),
                  flush=True)
            i += 1

            if i % 2000 == 0:
                save_file_path = os.path.join(opt.result_dir,
                                              'model_iter{}.pth'.format(i))
                print("save to {}".format(save_file_path))
                states = {
                    'state_dict': model.state_dict(),
                    'optimizer': optimizer.state_dict(),
                }
                torch.save(states, save_file_path)
            if i >= total_iter:
                break

    save_file_path = os.path.join(opt.result_dir, 'model_final.pth')
    print("save to {}".format(save_file_path))
    states = {
        'state_dict': model.state_dict(),
        'optimizer': optimizer.state_dict(),
    }
    torch.save(states, save_file_path)