Example #1
def train_net(config):
    theano.config.on_unused_input = 'warn'

    image_sz = 227

    x = T.ftensor4('x')
    y = T.lvector('y')

    ## BUILD NETWORK ##
    model = AlexNet(config)
    layers = model.layers
    batch_size = model.batch_size
    errors = model.errors

    ## COMPILE FUNCTIONS ##
    (train_model, validate_model, train_error, learning_rate, shared_x,
     shared_y, rand_arr, vels) = compile_models(model, config, flag_top_5=True)

    images = np.random.rand(batch_size, 3, image_sz,
                            image_sz).astype(np.float32)
    labels = np.random.randint(0, 1000, size=batch_size).astype(np.int32)

    shared_x.set_value(images)
    shared_y.set_value(labels)

    time_theano_run(validate_model, 'Forward')
    time_theano_run(train_model, 'Forward-Backward')
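The `time_theano_run` helper used above is not part of this listing. A minimal sketch, assuming it simply calls the compiled Theano function a fixed number of times and prints the average wall-clock time (the iteration count and message format are assumptions):

import time

def time_theano_run(func, info_string, num_iters=10):
    # Call once outside the timed loop so one-off overhead is not measured.
    func()
    start = time.time()
    for _ in range(num_iters):
        func()
    elapsed = (time.time() - start) / num_iters
    print('%s: %.4f s per iteration' % (info_string, elapsed))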
Example #2
def time_alexnet():
    config = Config()

    # Build network
    model = AlexNet(config)
    batch_size = model.batch_size

    # Compile forward and forward-backward functions
    (forward_step, forward_backward_step, shared_x, shared_y) = \
        compile_models(model, config)

    num_warmup_iters = config.num_warmup_iters
    num_timing_iters = config.num_timing_iters
    forward_times = np.zeros(num_timing_iters)
    forward_backward_times = np.zeros(num_timing_iters)
    num_iterations = num_warmup_iters + num_timing_iters
    for minibatch_index in range(num_iterations):
        if num_warmup_iters <= minibatch_index < num_warmup_iters + num_timing_iters:
            forward_time, forward_backward_time = \
                time_model(
                    forward_step, forward_backward_step, shared_x,
                    shared_y, config, batch_size)
            forward_times[minibatch_index - num_warmup_iters] = forward_time

            forward_backward_times[minibatch_index-num_warmup_iters] = \
                forward_backward_time

        else:  # Do not measure time for the warmup iterations.
            time_model(forward_step, forward_backward_step, shared_x, shared_y,
                       config, batch_size)

    # print forward_times
    # print forward_backward_times
    print("forward time %.4f +- %.4f ms batch_size %d" %
          (np.mean(forward_times), np.std(forward_times), batch_size))
    print("gradient computation time %.4f +- %.4f ms batch_size %d" %
          (np.mean(forward_backward_times), np.std(forward_backward_times),
           batch_size))
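The `time_model` helper is likewise not shown. A minimal sketch under the assumption that it fills the shared variables with one random minibatch, runs the compiled forward and forward-backward steps once each, and returns both wall-clock times in milliseconds (the 227x227 input size and 1000 classes are assumed AlexNet defaults):

import time
import numpy as np

def time_model(forward_step, forward_backward_step, shared_x, shared_y,
               config, batch_size):
    # Fill the shared variables with one random minibatch (shapes are assumptions).
    images = np.random.rand(batch_size, 3, 227, 227).astype(np.float32)
    labels = np.random.randint(0, 1000, size=batch_size).astype(np.int32)
    shared_x.set_value(images)
    shared_y.set_value(labels)

    start = time.time()
    forward_step()
    forward_time = (time.time() - start) * 1000.0  # milliseconds

    start = time.time()
    forward_backward_step()
    forward_backward_time = (time.time() - start) * 1000.0

    return forward_time, forward_backward_time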
Example #3
def train_net(config):

    # UNPACK CONFIGS
    (flag_para_load, train_filenames, val_filenames, train_labels, val_labels,
     img_mean) = unpack_configs(config)

    # pycuda set up
    drv.init()
    dev = drv.Device(int(config['gpu'][-1]))
    ctx = dev.make_context()

    if flag_para_load:
        #  zmq set up
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://*****:*****')

        # ... (set-up code and the training loop are elided in the source listing) ...

                print 'training @ iter = ', num_iter
                print 'training cost:', cost_ij
                if config['print_train_error']:
                    print 'training error rate:', train_error()

            if flag_para_load and (count < len(minibatch_range)):
                load_send_queue.put('calc_finished')

        ############### Test on Validation Set ##################

        DropoutLayer.SetDropoutOff()

        this_validation_error, this_validation_loss = get_val_error_loss(
            rand_arr,
            shared_x,
            shared_y,
            val_filenames,
            val_labels,
            flag_para_load,
            img_mean,
            batch_size,
            validate_model,
            send_queue=load_send_queue,
            recv_queue=load_recv_queue)

        print('epoch %i: validation loss %f ' % (epoch, this_validation_loss))
        print('epoch %i: validation error %f %%' %
              (epoch, this_validation_error * 100.))
        val_record.append([this_validation_error, this_validation_loss])
        np.save(config['weights_dir'] + 'val_record.npy', val_record)

        DropoutLayer.SetDropoutOn()
        ############################################

        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx, val_record,
                                        learning_rate)

        # Save weights
        if epoch % config['snapshot_freq'] == 0:
            save_weights(layers, config['weights_dir'], epoch)
            np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
                    learning_rate.get_value())
            save_momentums(vels, config['weights_dir'], epoch)

    print('Optimization complete.')
Example #4
def validate_performance(config):

    # UNPACK CONFIGS
    (flag_para_load, train_filenames, val_filenames, train_labels, val_labels,
     img_mean) = unpack_configs(config)

    if flag_para_load:
        # pycuda and zmq set up
        drv.init()
        dev = drv.Device(int(config['gpu'][-1]))
        ctx = dev.make_context()
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://localhost:{0}'.format(config['sock_data']))

        load_send_queue = config['queue_t2l']
        load_recv_queue = config['queue_l2t']
    else:
        load_send_queue = None
        load_recv_queue = None

    import theano.sandbox.cuda
    theano.sandbox.cuda.use(config['gpu'])
    import theano
    theano.config.on_unused_input = 'warn'

    from layers import DropoutLayer
    from alex_net import AlexNet, compile_models

    import theano.misc.pycuda_init
    import theano.misc.pycuda_utils

    # # BUILD NETWORK ##
    model = AlexNet(config)
    layers = model.layers
    batch_size = model.batch_size

    # # COMPILE FUNCTIONS ##
    (train_model, validate_model, train_error, learning_rate, shared_x,
     shared_y, rand_arr, vels) = compile_models(model, config, flag_top_5=True)

    print '... training'

    if flag_para_load:
        # pass ipc handle and related information
        gpuarray_batch = theano.misc.pycuda_utils.to_gpuarray(
            shared_x.container.value)
        h = drv.mem_get_ipc_handle(gpuarray_batch.ptr)
        sock.send_pyobj((gpuarray_batch.shape, gpuarray_batch.dtype, h))
        load_send_queue.put(img_mean)

    load_epoch = config['load_epoch']
    load_weights(layers, config['weights_dir'], load_epoch)

    DropoutLayer.SetDropoutOff()


    this_validation_error, this_validation_error_top_5, this_validation_loss = \
        get_val_error_loss(rand_arr, shared_x, shared_y,
                           val_filenames, val_labels,
                           flag_para_load,img_mean,
                           batch_size, validate_model,
                           send_queue=load_send_queue,
                           recv_queue=load_recv_queue,
                           flag_top_5=True)

    print('validation error %f %%' % (this_validation_error * 100.))
    print('top 5 validation error %f %%' %
          (this_validation_error_top_5 * 100.))
    print('validation loss %f ' % (this_validation_loss))

    return this_validation_error, this_validation_loss
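Helpers such as `adjust_learning_rate`, `save_weights`, `load_weights`, and `get_val_error_loss` come from the surrounding project and are not shown here. As an illustration, a minimal step-decay `adjust_learning_rate` consistent with how it is called above; the config keys `lr_step` and `lr_decay_factor` are assumptions, and `learning_rate` is the Theano shared variable baked into the update rule:

import numpy as np

def adjust_learning_rate(config, epoch, step_idx, val_record, learning_rate):
    # Step decay: when the epoch reaches the next scheduled step, scale the
    # shared learning rate down and advance the step index.
    if step_idx < len(config['lr_step']) and epoch >= config['lr_step'][step_idx]:
        new_lr = learning_rate.get_value() * config['lr_decay_factor']
        learning_rate.set_value(np.float32(new_lr))
        step_idx += 1
        print('learning rate changed to %f' % new_lr)
    return step_idx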
Example #5
from alex_net import AlexNet
import tensorflow as tf
import numpy as np
import pic_to_csv
'''
train=np.random.normal(size=(100,224,224,3))
print(train.shape)

labels=np.random.random(size=100)*4
labels=labels.astype(np.int)
'''
x, labels = pic_to_csv.getDataSet()
print(x.shape)
print(labels.shape)

model = AlexNet()
model.fit(x, labels, epochs=10, batch_size=5)
Example #6
from alex_net import AlexNet
import tensorflow as tf
import numpy as np

train = np.random.normal(size=(100, 224, 224, 3))
print(train.shape)

labels = np.random.random(size=100) * 4
labels = labels.astype(np.int64)  # np.int was removed from NumPy; use a concrete integer dtype

model = AlexNet()
model.fit(train, labels, epochs=10, batch_size=10)
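In Examples 5 and 6, `AlexNet` behaves like a Keras-style model exposing `fit`. The class itself is not shown; a minimal sketch of such a wrapper built on `tf.keras` (the layer sizes follow the classic AlexNet layout, but the exact architecture, optimizer, and the 1000-class default are assumptions):

import tensorflow as tf

class AlexNet:
    def __init__(self, num_classes=1000):
        # Classic AlexNet-style stack for 224x224x3 inputs (sizes are assumptions).
        self.model = tf.keras.Sequential([
            tf.keras.layers.Conv2D(96, 11, strides=4, activation='relu',
                                   input_shape=(224, 224, 3)),
            tf.keras.layers.MaxPooling2D(3, strides=2),
            tf.keras.layers.Conv2D(256, 5, padding='same', activation='relu'),
            tf.keras.layers.MaxPooling2D(3, strides=2),
            tf.keras.layers.Conv2D(384, 3, padding='same', activation='relu'),
            tf.keras.layers.Conv2D(384, 3, padding='same', activation='relu'),
            tf.keras.layers.Conv2D(256, 3, padding='same', activation='relu'),
            tf.keras.layers.MaxPooling2D(3, strides=2),
            tf.keras.layers.Flatten(),
            tf.keras.layers.Dense(4096, activation='relu'),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(4096, activation='relu'),
            tf.keras.layers.Dropout(0.5),
            tf.keras.layers.Dense(num_classes, activation='softmax'),
        ])
        self.model.compile(optimizer='adam',
                           loss='sparse_categorical_crossentropy',
                           metrics=['accuracy'])

    def fit(self, x, y, epochs=10, batch_size=32):
        # Delegate training to the underlying Keras model.
        return self.model.fit(x, y, epochs=epochs, batch_size=batch_size)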
Example #7
def train_net(config, private_config):

    # UNPACK CONFIGS
    (flag_para_load, flag_datalayer, train_filenames, val_filenames,
     train_labels, val_labels, img_mean) = \
        unpack_configs(config, ext_data=private_config['ext_data'],
                       ext_label=private_config['ext_label'])

    gpu_send_queue = private_config['queue_gpu_send']
    gpu_recv_queue = private_config['queue_gpu_recv']

    # pycuda and zmq set up
    drv.init()
    dev = drv.Device(int(private_config['gpu'][-1]))
    ctx = dev.make_context()

    sock_gpu = zmq.Context().socket(zmq.PAIR)
    if private_config['flag_client']:
        sock_gpu.connect('tcp://*****:*****')

        # ... (set-up code and the training loop are elided in the source listing) ...

                    print 'training @ iter = ', num_iter
                    print 'training cost:', cost_ij

                if config['print_train_error']:
                    error_ij = train_error()

                    gpu_send_queue.put(error_ij)
                    that_error = gpu_recv_queue.get()
                    error_ij = (error_ij + that_error) / 2.

                    if private_config['flag_verbose']:
                        print 'training error rate:', error_ij

            if flag_para_load and (count < len(minibatch_range)):
                load_send_queue.put('calc_finished')

        ############### Test on Validation Set ##################

        DropoutLayer.SetDropoutOff()

        this_val_error, this_val_loss = get_val_error_loss(
            rand_arr,
            shared_x,
            shared_y,
            val_filenames,
            val_labels,
            flag_datalayer,
            flag_para_load,
            batch_size,
            validate_model,
            send_queue=load_send_queue,
            recv_queue=load_recv_queue)

        # report validation stats
        gpu_send_queue.put(this_val_error)
        that_val_error = gpu_recv_queue.get()
        this_val_error = (this_val_error + that_val_error) / 2.

        gpu_send_queue.put(this_val_loss)
        that_val_loss = gpu_recv_queue.get()
        this_val_loss = (this_val_loss + that_val_loss) / 2.

        if private_config['flag_verbose']:
            print('epoch %i: validation loss %f ' % (epoch, this_val_loss))
            print('epoch %i: validation error %f %%' %
                  (epoch, this_val_error * 100.))
        val_record.append([this_val_error, this_val_loss])

        if private_config['flag_save']:
            np.save(config['weights_dir'] + 'val_record.npy', val_record)

        DropoutLayer.SetDropoutOn()
        ############################################

        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx, val_record,
                                        learning_rate)

        # Save Weights, only one of them will do
        if private_config['flag_save']:
            if epoch % config['snapshot_freq'] == 0:
                save_weights(layers, config['weights_dir'], epoch)
                np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
                        learning_rate.get_value())
                save_momentums(vels, config['weights_dir'], epoch)

    print('Optimization complete.')
Example #8
def train_net(config):
    # UNPACK CONFIGS
    (flag_para_load, train_filenames, val_filenames,
     train_labels, val_labels, img_mean) = unpack_configs(config)
    if flag_para_load:
        #  zmq set up
        sock = zmq.Context().socket(zmq.PAIR)
        sock.connect('tcp://*****:*****')

        # ... (set-up code and the training loop are elided in the source listing) ...

            print "training @iter " + str(count)
            if count == 1:
                s = time.time()
            if count == 20:
                e = time.time()
                print "time per 20 iter:", (e - s)
                logger.info("time per 20 iter: %f" % (e - s)) 
            cost_ij = train_model_wrap(train_model, shared_x,
                                       shared_y, rand_arr, img_mean,
                                       count, minibatch_index,
                                       minibatch_range, batch_size,
                                       train_filenames, train_labels,
                                       flag_para_load,
                                       config['batch_crop_mirror'],
                                       send_queue=load_send_queue,
                                       recv_queue=load_recv_queue)

            if num_iter % config['print_freq'] == 0:
                #print 'training @ iter = ', num_iter
                #print 'training cost:', cost_ij
		logger.info("training @ iter = %i" % (num_iter)) 
		logger.info("training cost: %lf" % (cost_ij)) 
                if config['print_train_error']:
                    logger.info('training error rate: %lf' % train_error())
                    #print 'training error rate:', train_error()

            if flag_para_load and (count < len(minibatch_range)):
                load_send_queue.put('calc_finished')

        ############### Test on Validation Set ##################

        #"""
        DropoutLayer.SetDropoutOff()

        # result_list = [ this_validation_error, this_validation_error_top5, this_validation_loss ]
        # or
        # result_list = [ this_validation_error, this_validation_loss ]
        result_list = get_val_error_loss(
        #this_validation_error, this_validation_loss = get_val_error_loss(
            rand_arr, shared_x, shared_y,
            val_filenames, val_labels,
            flag_para_load, img_mean,
            batch_size, validate_model,
            send_queue=load_send_queue, 
            recv_queue=load_recv_queue,
            flag_top_5=flag_top5)


        logger.info(('epoch %i: validation loss %f ' %
              (epoch, result_list[-1])))
        #print('epoch %i: validation loss %f ' %
        #      (epoch, this_validation_loss))
        if flag_top5:
            logger.info(('epoch %i: validation error (top 1) %f %%, (top5) %f %%' %
                (epoch,  result_list[0] * 100., result_list[1] * 100.)))
        else:
            logger.info(('epoch %i: validation error %f %%' %
                (epoch, result_list[0] * 100.)))
        #print('epoch %i: validation error %f %%' %
        #      (epoch, this_validation_error * 100.))
        val_record.append(result_list)
        #val_record.append([this_validation_error, this_validation_loss])
        np.save(config['weights_dir'] + 'val_record.npy', val_record)

        DropoutLayer.SetDropoutOn()
        ############################################

        # Adapt Learning Rate
        step_idx = adjust_learning_rate(config, epoch, step_idx,
                                        val_record, learning_rate)

        # Save weights
        if epoch % config['snapshot_freq'] == 0:
            save_weights(layers, config['weights_dir'], epoch)
            np.save(config['weights_dir'] + 'lr_' + str(epoch) + '.npy',
                       learning_rate.get_value())
            save_momentums(vels, config['weights_dir'], epoch)
        #"""

    print('Optimization complete.')
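The `logger` used throughout this example is configured elsewhere in the project. A minimal sketch of a typical setup (handler and format are assumptions):

import logging

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
_handler = logging.StreamHandler()
_handler.setFormatter(logging.Formatter('%(asctime)s %(message)s'))
logger.addHandler(_handler)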
Example #9
def get_optimizer(opt, model):
    if opt == 'MomentumSGD':
        optimizer = optim.SGD(model.parameters(), lr=0.01, momentum=0.9)
    elif opt == "Adam":
        optimizer = optim.Adam(model.parameters())
    return optimizer


def train(model, optimizer, train_iter, log_interval, start_time, gpu=False):
    model.train()
    for iteration, batch in enumerate(tqdm(train_iter, desc='this epoch'), 1):
        image, pose, visibility = Variable(batch[0]), Variable(
            batch[1]), Variable(batch[2])
        if gpu:
            image, pose, visibility = image.cuda(), pose.cuda(
            ), visibility.cuda()
        optimizer.zero_grad()
        output = model(image)
        loss = mean_squared_error(output, pose, visibility)
        loss.backward()
        optimizer.step()


#model
model = AlexNet()
optimizer = get_optimizer('MomentumSGD', model)

#train
images, poses, visibilities = load_dataset(path)

start_time = time.time()
train(model, optimizer, train_iter, log_interval, start_time)
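The loss helper `mean_squared_error(output, pose, visibility)` is not shown. A minimal sketch, assuming it computes a mean squared error over joint coordinates masked by the per-joint visibility flags (the masking convention is an assumption):

import torch

def mean_squared_error(output, target, visibility):
    # Zero out joints that are not visible and average the squared error
    # over the visible coordinates only.
    mask = visibility.float()
    if mask.dim() < output.dim():  # e.g. an (N, J) mask for (N, J, 2) coordinates
        mask = mask.unsqueeze(-1).expand_as(output)
    diff = (output - target) * mask
    n_visible = mask.sum().clamp(min=1.0)
    return diff.pow(2).sum() / n_visible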
Example #10
File: test.py Project: sshnan7/nlos
    for i in range(batch_size):
        plt.scatter(label[i, :, 0].numpy() + i * im_size + (i + 1) * grid_border_size,
                    label[i, :, 1].numpy() + grid_border_size, s=20, marker='.', c='r')
        plt.scatter(output[i, :, 0].detach().numpy() + i * im_size + (i + 1) * grid_border_size,
                    output[i, :, 1].detach().numpy() + grid_border_size, s=20, marker='.', c='b')
        plt.title('Batch from dataloader')


if __name__ == '__main__':
    
    PATH = './weights/'
    result_path = './results/'
    print("save_path = ", PATH )

    #model
    model = AlexNet(14)
    optimizer = get_optimizer('Adam', model)
    criterion = nn.MSELoss()
    running_loss = 0.0

    #train
    #images, poses, visibilities = load_dataset(path)
    test_data = LspDataset('./lsp_dataset/joints.mat', './lsp_dataset/images', mode='test')
    dataloader = DataLoader(test_data, batch_size=2, shuffle=False, num_workers=4)
    
    model.load_state_dict(torch.load(PATH +'deeppose_state_dict.pt')) 
    model.eval()
    for i_batch, sample_batched in enumerate(dataloader):
        print(i_batch, sample_batched[0].size(), sample_batched[1].size())  
        # torch.Size([1,3,202, 202]) torch.Size([1, 3, 14])
        img = sample_batched[0]
Example #11
def get_optimizer(opt, model):
    if opt == 'MomentumSGD':
        optimizer = optim.SGD(model.parameters(), lr=0.0001, momentum=0.1)
    elif opt == "Adam":
        optimizer = optim.Adam(model.parameters())
    return optimizer


if __name__ == '__main__':

    PATH = './weights/'
    print("save_path = ", PATH)

    #model
    model = AlexNet(14)
    #model = resnet.ResNet50()
    optimizer = get_optimizer('Adam', model)
    criterion = nn.MSELoss()
    running_loss = 0.0

    #train

    train_data = LspDataset('./lspet/joints.mat',
                            './lspet/images',
                            mode='train')
    #train_data = LspDataset('./lsp_dataset/joints.mat', './lsp_dataset/images', mode='train')
    #test_data = LspDataset('./lsp_dataset/joints.mat', './lsp_dataset/images', mode='test')

    batch = 4
    ### training part
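The listing stops at the training part. A minimal sketch of the loop that would plausibly follow, reusing the objects defined above; the DataLoader settings, epoch count, and batch layout are assumptions:

from torch.utils.data import DataLoader

train_loader = DataLoader(train_data, batch_size=batch, shuffle=True, num_workers=4)

for epoch in range(10):  # epoch count is an assumption
    for i_batch, sample_batched in enumerate(train_loader):
        img, target = sample_batched[0], sample_batched[1]
        optimizer.zero_grad()
        output = model(img)
        loss = criterion(output, target)  # MSE between predicted and target joints
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print('epoch %d: loss %.4f' % (epoch, running_loss / len(train_loader)))
    running_loss = 0.0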
Example #12
#import sys
#sys.path.append('./lib')
from alex_net import AlexNet
import yaml
#import scipy.misc
#import numpy as np

#THEANO_FLAGS='mode=FAST_COMPILE' python testAlexNet.py

with open('config.yaml', 'r') as f:
    config = yaml.load(f)

alexnetModel = AlexNet(config, True)

x = alexnetModel.forward(['cat.jpg'])
print x[0].shape
print type(x[0])
print 'done 1'

x = alexnetModel.forward(['cat.jpg', 'cat.jpg'])
print x[0].shape
print type(x[0])
print 'done 2'
#y = alexnetModel.forward(['cat.jpg','cat.jpg'])
#print x[0] == y[0]
#print x[0]
#print y[0]
"""
import theano.tensor as T
import theano, numpy as np
Example #13
#import sys
#sys.path.append('./lib')
from alex_net import AlexNet
import yaml
#import scipy.misc
#import numpy as np


#THEANO_FLAGS='mode=FAST_COMPILE' python testAlexNet.py 

with open('config.yaml', 'r') as f:
    config = yaml.load(f)


alexnetModel = AlexNet(config, True)


x = alexnetModel.forward(['cat.jpg'])
print x[0].shape
print type(x[0])
print 'done 1'



x = alexnetModel.forward(['cat.jpg','cat.jpg'])
print x[0].shape
print type(x[0])
print 'done 2'
#y = alexnetModel.forward(['cat.jpg','cat.jpg'])
#print x[0] == y[0]
#print x[0]
Example #14
config = proc_configs(config)

# UNPACK CONFIGS
(train_file, test_file, img_mean) = unpack_configs(config)

import theano.sandbox.cuda
theano.sandbox.cuda.use(config['gpu'])
import theano
theano.config.on_unused_input = 'warn'

from layers import DropoutLayer
from alex_net import AlexNet, compile_models

## BUILD NETWORK ##
model = AlexNet(config)
layers = model.layers
batch_size = model.batch_size

## LOAD DATASET.
with Timer('loading training data'):
    train_set = load_lmdb(train_file, img_mean)

with Timer('loading test data'):
    test_set = load_lmdb(test_file, img_mean)

## COMPILE FUNCTIONS ##
(train_model, validate_model, train_error, learning_rate, shared_x, shared_y,
 rand_arr, vels) = compile_models(model, config)

######################### TRAIN MODEL ################################
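`proc_configs`, `load_lmdb`, and `Timer` come from the surrounding project and are not shown. A minimal sketch of a `Timer` context manager consistent with how it is used above (the message format is an assumption):

import time

class Timer(object):
    def __init__(self, name):
        self.name = name

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        print('%s took %.2f s' % (self.name, time.time() - self.start))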