Example #1
    def __init__(self, enable_controller=[0, 3, 4]):
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller  # Default setting : "Pong"

        print "Initializing DQN..."
        print "CUDA init"
        cuda.init()

        print "Model Building"
        self.model = FunctionSet(
            l1=F.Convolution2D(4, 16, ksize=8, stride=4, wscale=np.sqrt(2)),
            l2=F.Convolution2D(16, 32, ksize=4, stride=2, wscale=np.sqrt(2)),
            l3=F.Linear(2592, 256),
            q_value=F.Linear(256, self.num_of_actions,
                             initialW=np.zeros((self.num_of_actions, 256),
                                               dtype=np.float32))
        ).to_gpu()

        print "Initizlizing Optimizer"
        self.optimizer = optimizers.RMSpropGraves(lr=0.0002, alpha=0.3, momentum=0.2)
        self.optimizer.setup(self.model.collect_parameters())

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
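        # NOTE: self.data_size (the replay-memory capacity) is assumed to be
        # set elsewhere in this class; the snippet is an excerpt.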
        self.D = [np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
                  np.zeros(self.data_size, dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.int8),
                  np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.bool)]
def main():
    if params.gpu_flag is not False:
        cuda.init(params.gpu_flag)
    print 'fetching data ...'
    fetcher = cifar(norm=False)

    bo = BO(train_model,
            {'wscale1'   : (-5, 0),
             'wscale2'   : (-5, 0),
             'wscale3'   : (-5, 0),
             'wscale4'   : (-5, 0),
             'wscale5'   : (-5, 0),
             'lr'        : (-4, -2),
             'batchsize' : (30, 300),
             'momentum'  : (0.5, 1.0),
             'decay'     : (-4, -2)
            })
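    # The (lo, hi) tuples above appear to be log10 exponents for the wscale*,
    # lr, and decay parameters (compare the commented explore() values below),
    # while batchsize and momentum are searched on their natural scales.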

    """
    bo.explore({'wscale1'   : [-4],
                'wscale2'   : [-2],
                'wscale3'   : [-2],
                'wscale4'   : [-1],
                'wscale5'   : [-1],
                'lr'        : [-3],
                'batchsize' : [100],
                'momentum'  : [0.9],
                'decay'     : [-3]
                })
    """

    bo.add_f_args('fetcher', fetcher)
    bo.maximize(init_points=params.opt_init_points, n_iter=params.opt_iter)
    print bo.res['max']
Example #3
    def __init__(self, enable_controller=[0, 3, 4]):
        self.num_of_actions = len(enable_controller)
        self.enable_controller = enable_controller  # Default setting : "Pong"

        print "Initializing DQN..."
        print "CUDA init"
        cuda.init()

        print "Model Building"
        self.model = FunctionSet(
            l1=F.Convolution2D(4, 32, ksize=8, stride=4, nobias=False, wscale=np.sqrt(2)),
            l2=F.Convolution2D(32, 64, ksize=4, stride=2, nobias=False, wscale=np.sqrt(2)),
            l3=F.Convolution2D(64, 64, ksize=3, stride=1, nobias=False, wscale=np.sqrt(2)),
            l4=F.Linear(3136, 512, wscale=np.sqrt(2)),
            q_value=F.Linear(512, self.num_of_actions,
                             initialW=np.zeros((self.num_of_actions, 512),
                                               dtype=np.float32))
        ).to_gpu()

        self.model_target = copy.deepcopy(self.model)

        print "Initizlizing Optimizer"
        self.optimizer = optimizers.RMSpropGraves(lr=0.00025, alpha=0.95, momentum=0.95, eps=0.0001)
        self.optimizer.setup(self.model.collect_parameters())

        # History Data :  D=[s, a, r, s_dash, end_episode_flag]
        self.D = [np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
                  np.zeros(self.data_size, dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.int8),
                  np.zeros((self.data_size, 4, 84, 84), dtype=np.uint8),
                  np.zeros((self.data_size, 1), dtype=np.bool)]
def main():
    if P.use_mean_var:
        conv6_output = 126
    else:
        conv6_output = 128

    if P.model_name is None:
        model = FunctionSet(
            conv1 = F.Convolution2D( 1, 128, 3, stride=1),
            conv2 = F.Convolution2D(128, 128, 3, stride=1),
            conv3 = F.Convolution2D(128, 128, 3, stride=1),
            conv4 = F.Convolution2D(128, 128, 3, stride=1),
            conv5 = F.Convolution2D(128, 128, 3, stride=1),
            conv6 = F.Convolution2D(128, conv6_output, 3, stride=1),
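            # (With use_mean_var, conv6 emits 126 maps; the remaining 2 inputs
            # expected by conv7 are presumably appended mean/var channels.)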
            conv7 = F.Convolution2D(128, 128, 1, stride=1),
            conv8 = F.Convolution2D(128, 1, 1, stride=1)
            )
        if P.gpu >= 0:
            cuda.init(P.gpu)
            model.to_gpu()
    else:
        if P.gpu >= 0:
            cuda.init(P.gpu)
        model = pickle.load(open(os.path.join(P.model_dir, P.model_name), 'rb'))

    optimizer = optimizers.MomentumSGD(lr=P.lr, momentum=P.momentum)
    optimizer.setup(model.collect_parameters())

    train(model, optimizer)
    return
Example #5
 def __init__(self, hidden, action_count):
     cuda.init()
     self.hidden = hidden
     self.action_count = action_count
     super(Q, self).__init__(
         l1=L.Linear(self.D, hidden, wscale=np.sqrt(hidden)).to_gpu(),
         l2=L.Linear(hidden, hidden, wscale=np.sqrt(hidden)).to_gpu(),
         l3=L.Linear(hidden, action_count, wscale=np.sqrt(action_count)).to_gpu()
     )
 def create_model(self, input_size, output_size):
     self.model = chainer.FunctionSet(
         l1=Function.Linear(input_size, self.layer1),
         l2=Function.Linear(self.layer1, self.layer2),
         l3=Function.Linear(self.layer2, output_size),
     )
     if self.cuda:
         print "change cuda mode"
         cuda.init()
         self.model.to_gpu()
def main():
    if params.gpu_flag is not False:
        cuda.init(params.gpu_flag)
    if params.model_name is False:
        model = init_model()
    else:
        model = load_model(params.model_name)
    optimizer = init_optimizer(model)
    print 'fetching data ...'
    fetcher = cifar(norm=False)
    print 'done'
    train_and_val(model, optimizer, fetcher)
Example #8
def main():
    args = parse_args()

    trace('making vocabulary ...')
    vocab, num_lines, num_words = make_vocab(args.corpus, args.vocab)

    trace('initializing CUDA ...')
    cuda.init()

    trace('start training ...')
    model = make_rnnlm_model(args.vocab, args.embed, args.hidden)

    for epoch in range(args.epoch):
        trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
        log_ppl = 0.0
        trained = 0
        
        opt = optimizers.SGD()
        opt.setup(model)

        for batch in generate_batch(args.corpus, args.minibatch):
            batch = [[vocab[x] for x in words] for words in batch]
            K = len(batch)
            L = len(batch[0]) - 1

            opt.zero_grads()
            s_h = zeros((K, args.hidden))

            for l in range(L):
                s_x = make_var([batch[k][l] for k in range(K)], dtype=np.int32)
                s_t = make_var([batch[k][l + 1] for k in range(K)], dtype=np.int32)

                s_e = functions.tanh(model.w_xe(s_x))
                s_h = functions.tanh(model.w_eh(s_e) + model.w_hh(s_h))
                s_y = model.w_hy(s_h)

                loss = functions.softmax_cross_entropy(s_y, s_t)
                loss.backward()
            
                log_ppl += get_data(loss).reshape(()) * K

            opt.update()
            trained += K
            trace('  %d/%d' % (trained, num_lines))
            
        log_ppl /= float(num_words)
        trace('  log(PPL) = %.10f' % log_ppl)
        trace('  PPL      = %.10f' % math.exp(log_ppl))

        trace('  writing model ...')
        save_rnnlm_model(args.model + '.%d' % (epoch + 1), args.vocab, args.embed, args.hidden, vocab, model)

    trace('training finished.')
Example #9
 def __init__(self, model, optimizer, dropout_ratio, corruption_level, gpu):
     self.model = model
     #self.layers = []
     self.dropout_ratio = dropout_ratio
     self.corruption_level = corruption_level
     self.optimizer = optimizer
     self.optimizer.setup(self.model.collect_parameters())
     self.rng = np.random.RandomState(1)
     self.gpu = gpu
     if gpu>=0:
         cuda.init(gpu)
         model.to_gpu()
Example #10
 def _layer_setup(self):
     # Setup chainer layers for NN.
     layers = {}
     # Encoding Steps
     encode_layer_pairs = [(self.img_len, self.encode_sizes[0])]
     encode_layer_pairs += zip(self.encode_sizes[:-1], self.encode_sizes[1:])
     encode_layer_pairs += [(self.encode_sizes[-1], self.latent_dim * 2)]
     for i, (n_in, n_out) in enumerate(encode_layer_pairs):
         layers["encode_%i" % i] = F.Linear(n_in, n_out)
     # Decoding Steps
     decode_layer_pairs = [(self.latent_dim, self.decode_sizes[0])]
     decode_layer_pairs += zip(self.decode_sizes[:-1], self.decode_sizes[1:])
     decode_layer_pairs += [(self.decode_sizes[-1], self.img_len)]
     for i, (n_in, n_out) in enumerate(decode_layer_pairs):
         layers["decode_%i" % i] = F.Linear(n_in, n_out)
     model = chainer.FunctionSet(**layers)
     if self.flag_gpu:
         cuda.init()
         model.to_gpu()
     return model
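
A worked example of the layer-pair construction above, with hypothetical sizes:

# Assuming img_len=784, encode_sizes=[512, 256], latent_dim=32, decode_sizes=[256, 512]:
#   encode_layer_pairs -> [(784, 512), (512, 256), (256, 64)]  # 64 = latent_dim * 2 (presumably mean and log-variance)
#   decode_layer_pairs -> [(32, 256), (256, 512), (512, 784)]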
Example #11
  def Init(self):
    TFunctionApprox.Init(self)
    L= self.Locate
    if self.Params['nn_data_x'] != None:
      self.DataX= np.array(pickle.load(open(L(self.Params['nn_data_x']), 'rb')), np.float32)
    else:
      self.DataX= np.array([],np.float32)
    if self.Params['nn_data_y'] != None:
      self.DataY= np.array(pickle.load(open(L(self.Params['nn_data_y']), 'rb')), np.float32)
    else:
      self.DataY= np.array([],np.float32)

    self.CreateNNs()

    if self.Params['nn_params'] != None:
      #self.model.copy_parameters_from(map(lambda e:np.array(e,np.float32),self.Params['nn_params']))
      self.model.copy_parameters_from(map(lambda e:np.array(e,np.float32),pickle.load(open(L(self.Params['nn_params']), 'rb')) ))
      self.is_predictable= True
    else:
      if self.Options['init_bias_randomly']:
        self.InitBias(m='mean')

    if self.Params['nn_params_err'] != None:
      #self.model_err.copy_parameters_from(map(lambda e:np.array(e,np.float32),self.Params['nn_params_err']))
      self.model_err.copy_parameters_from(map(lambda e:np.array(e,np.float32),pickle.load(open(L(self.Params['nn_params_err']), 'rb')) ))
    else:
      if self.Options['init_bias_randomly']:
        self.InitBias(m='error')

    if self.Options['gpu'] >= 0:
      cuda.init(self.Options['gpu'])
      self.model.to_gpu()
      self.model_err.to_gpu()

    self.optimizer= optimizers.AdaDelta(rho=self.Options['AdaDelta_rho'])
    self.optimizer.setup(self.model.collect_parameters())
    self.optimizer_err= optimizers.AdaDelta(rho=IfNone(self.Options['AdaDelta_rho_err'], self.Options['AdaDelta_rho']))
    self.optimizer_err.setup(self.model_err.collect_parameters())
Example #12
	def __init__(self, epsilon=1.0, frames_per_action=4):
		super(ChainerAgent, self).__init__()
		cuda.init()
		self.epsilon = epsilon
		self.gamma = 0.99
		self.iterations = 0
		
		self.model = FunctionSet(
			l1 = F.Linear(9 * frames_per_action, 256),
			l2 = F.Linear(256, 256),
			l3 = F.Linear(256, 256),
			l4 = F.Linear(256, 2),
		).to_gpu()

		self.optimizer = optimizers.RMSprop(lr=1e-5)
		self.optimizer.setup(self.model)
		self.update_target()

		self.num_frames = 0
		self.frames_per_action = frames_per_action
		self.prev_reward = 0.0

		self.history = ChainHistory(state_len=(9 * frames_per_action))
Example #13
    def fit(self, x_data, y_data):
        self.n_samples  = x_data.shape[0]
        self.n_features = x_data.shape[1]
        self.converge   = False
        if self.gpu >= 0:
            cuda.init()

        self.setup_network(self.n_features)
        if self.gpu >= 0:
            self.network.to_gpu()
        self.setup_optimizer()

        score = 0.0
        prev_time = time.time()
        batch_num = self.n_samples / self.batch_size
        for epoch in xrange(self.epochs):
            if not self.converge:
                for i in xrange(batch_num):
                    x_batch, y_batch = self.make_batch(x_data, y_data, i)
                    if self.gpu >= 0:
                        x = Variable(cuda.to_gpu(x_batch))
                        t = Variable(cuda.to_gpu(y_batch))
                    else:
                        x = Variable(x_batch)
                        t = Variable(y_batch)

                    loss = self.forward_train(x, t)
                    self.fit_update(loss, i)

                    if (i % batch_num) == 0:
                        self.fit_report(epoch, loss, score)

        elapsed = time.time() - prev_time
        print "{1} sec ({2} batch/sec) \n{0}\n".format(self,
                                                       elapsed,
                                                       batch_num / elapsed)
        return self
Example #14
def get_model_optimizer(result_dir, args):
    model_fn = os.path.basename(args.model)
    model_name = model_fn.split('.')[0]
    module = imp.load_source(model_fn.split('.')[0], args.model)
    Net = getattr(module, model_name)

    dst = '%s/%s' % (result_dir, model_fn)
    if not os.path.exists(dst):
        shutil.copy(args.model, dst)

    dst = '%s/%s' % (result_dir, os.path.basename(__file__))
    if not os.path.exists(dst):
        shutil.copy(__file__, dst)

    # prepare model
    model = Net()
    if args.restart_from is not None:
        if args.gpu >= 0:
            cuda.init(args.gpu)
        model = pickle.load(open(args.restart_from, 'rb'))
    if args.gpu >= 0:
        cuda.init(args.gpu)
        model.to_gpu()

    # prepare optimizer
    if args.opt == 'AdaGrad':
        optimizer = optimizers.AdaGrad(lr=0.0005)
    elif args.opt == 'MomentumSGD':
        optimizer = optimizers.MomentumSGD(lr=0.0005, momentum=0.9)
    elif args.opt == 'Adam':
        optimizer = optimizers.Adam()
    else:
        raise Exception('No optimizer is selected')
    optimizer.setup(model.collect_parameters())

    return model, optimizer
Example #15
	def __init__(self, frames_per_action=4):
		super(ConvQAgent, self).__init__()
		cuda.init()
		self.epsilon = 1.0
		self.gamma = 0.99
		self.iterations = 0
		
		self.model = FunctionSet(
			l1 = F.Convolution2D(frames_per_action, 32, ksize=8, stride=4, nobias=False, wscale=np.sqrt(2)),
			l2 = F.Convolution2D(32, 64, ksize=4, stride=2, nobias=False, wscale=np.sqrt(2)),
			l3 = F.Convolution2D(64, 64, ksize=3, stride=1, nobias=False, wscale=np.sqrt(2)),
			l4 = F.Linear(64 * 7 * 7, 512),
			l5 = F.Linear(512, 2)
		).to_gpu()

		self.optimizer = optimizers.RMSprop(lr=1e-5)
		self.optimizer.setup(self.model)
		self.update_target()

		self.num_frames = 0
		self.frames_per_action = frames_per_action
		self.prev_reward = 0.0

		self.history = ConvHistory((frames_per_action, 84, 84))
def get_img_label(img, model_type, model, mean, gpu):

    print('Loading Caffe model file %s...' % model, file=sys.stderr)
    func = caffe.CaffeFunction(model)
    print('Loaded', file=sys.stderr)
    if gpu >= 0:
        cuda.init(gpu)
        func.to_gpu()

    if model_type == 'alexnet' or model_type == 'caffenet':
        in_size = 227
        mean_image = np.load(mean)

        def forward(x, t):
            y, = func(inputs={'data': x}, outputs=['fc8'], train=False)
            return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
        def predict(x):
            y, = func(inputs={'data': x}, outputs=['fc8'], train=False)
            return F.softmax(y)
    elif model_type == 'googlenet':
        in_size = 224
        # Constant mean over spatial pixels
        mean_image = np.ndarray((3, 256, 256), dtype=np.float32)
        mean_image[0] = 104
        mean_image[1] = 117
        mean_image[2] = 123

        def forward(x, t):
            y, = func(inputs={'data': x}, outputs=['loss3/classifier'],
                      disable=['loss1/ave_pool', 'loss2/ave_pool'],
                      train=False)
            return F.softmax_cross_entropy(y, t), F.accuracy(y, t)
        def predict(x):
            y, = func(inputs={'data': x}, outputs=['loss3/classifier'],
                      disable=['loss1/ave_pool', 'loss2/ave_pool'],
                      train=False)
            return F.softmax(y)
    else:
        raise ValueError('unknown model type: %s' % model_type)

    cropwidth = 256 - in_size
    start = cropwidth // 2
    stop = start + in_size
    mean_image = mean_image[:, start:stop, start:stop].copy()
    target_shape = (256, 256)
    output_side_length = 256

    image = cv2.imread(img)
    height, width, depth = image.shape
    new_height = output_side_length
    new_width = output_side_length
    if height > width:
        new_height = output_side_length * height / width
    else:
        new_width = output_side_length * width / height
    resized_img = cv2.resize(image, (new_width, new_height))
    height_offset = (new_height - output_side_length) / 2
    width_offset = (new_width - output_side_length) / 2
    image = resized_img[height_offset:height_offset + output_side_length,
                        width_offset:width_offset + output_side_length]

    image = image.transpose(2, 0, 1)
    image = image[:, start:stop, start:stop].astype(np.float32)
    image -= mean_image
    x_batch = np.ndarray((1, 3, in_size, in_size), dtype=np.float32)
    x_batch[0] = image

    if gpu >= 0:
        x_batch = cuda.to_gpu(x_batch)
    x = chainer.Variable(x_batch, volatile=True)
    score = predict(x)

    if gpu >= 0:
        score = cuda.to_cpu(score.data)
    else:
        score = score.data

    categories = np.loadtxt("labels.txt", str, delimiter="\t")
    top_k = 20
    prediction = zip(score[0].tolist(), categories)
    prediction.sort(cmp=lambda x, y: cmp(x[0], y[0]), reverse=True)
    flag = False
    for rank, (score, name) in enumerate(prediction[:top_k], start=1):
        #get top score label(ad hoc)
        if flag is False:
            predicted_label = name
            flag = True
        print('#%d | %s | %4.1f%%' % (rank, name, score * 100))
    return(predicted_label)
Example #17
import re
import os
import glob
import argparse
from chainer import cuda
import cPickle as pickle
import numpy as np
import cv2 as cv

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0)
    parser.add_argument('--model_file_dir', type=str, default='.')
    args = parser.parse_args()

    if args.gpu >= 0:
        cuda.init()

    for model_file in glob.glob('%s/*.chainermodel' % args.model_file_dir):
        epoch = int(re.search(ur'epoch_([0-9]+)', model_file).groups()[0])
        model = pickle.load(open(model_file, 'rb'))
        model.to_cpu()

        conv1 = model.parameters[0]
        conv1 = conv1.transpose((0, 2, 3, 1))

        n, h, w, c = conv1.shape
        side = int(np.ceil(np.sqrt(n)))
        pad = 2

        canvas1 = np.zeros(
            (h * side + pad * (side + 1), w * side + pad * (side + 1), c))
Example #18
 def test_init_unavailable(self):
     if not cuda.available:
         with self.assertRaises(RuntimeError):
             cuda.init()
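
For reference, almost every snippet on this page gates GPU setup behind the same check. A minimal sketch (hypothetical helper; in Chainer 1.x a negative device id conventionally means CPU):

from chainer import cuda

def maybe_to_gpu(model, gpu):
    # Hypothetical helper mirroring the common pattern: only touch CUDA
    # when a non-negative device id was requested.
    if gpu >= 0:
        cuda.init(gpu)   # initialize CUDA on the selected device
        model.to_gpu()   # move the model parameters onto it
    return model
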
    def train(self, epoch):        
        #################
        ####Prepare Data#### 
        #################
        print '=================================================='
        print '========This file will use the model file convnet_trial.py========='
        print '=================================================='

        cropsize = self.crop
        epoch_size = epoch
        resolution =  self.resol
        
        prefix = '/home/koyama-m/Research/membrane_CNN/'
        train_path  = prefix + 'data/training_dataset/256_training_dataset_crop' + str(cropsize) + '/'
        test_path = prefix + 'data/test_dataset/256_test_dataset_crop' + str(cropsize) + '/'
        models_path = prefix + 'models/' 
        data_path = prefix + 'data/'
        pkldata_path = data_path + 'data_pklformat_for_training/'
        sys.path.append(prefix)
        sys.path.append(train_path)
        sys.path.append(test_path)
        sys.path.append(models_path)
        sys.path.append(data_path)

        #######################
        ###### Setup############
        #######################
        print 'importing the required modules ... '

        import sys,pickle
        import numpy as np
        from chainer import cuda, Function, FunctionSet, gradient_check, Variable, optimizers
        import chainer.functions as F
        from matplotlib import pyplot as plt
        import logistic_reg
        import convnet_trial as convnet

        from pickle_preprocessed_dataset import  make_dataset
        import os
        reload(logistic_reg)
        reload(convnet)


        print 'COMPLETE \n' 
        ###### load dataset ######
        print 'Loading the preprocessed test data and training data ... '


        file_path = pkldata_path+'256_membrane%s.pkl' %str(cropsize)
        #If the filepath does not exist, create it. 
        if not os.path.exists(file_path):
            print('pkl formatted test and train data do not exist for these parameters ... creating the pkl file...')
            save_destination = pkldata_path
            train_path = data_path + 'training_dataset/%s_training_dataset_crop%s/'%(str(resolution),str(cropsize) )
            test_path = data_path + 'test_dataset/%s_test_dataset_crop%s/'%(str(resolution),str(cropsize))
            make_dataset(train_path, test_path, save_destination, patchsize = cropsize)
        else:
            print('pkl data found. Loading the pkl data...')

        dataset = pickle.load(open(file_path))
        x_train0 = dataset['x_train']/255.
        y_train0 = dataset['y_train']
        x_test = dataset['x_test']/255.
        y_test = dataset['y_test']

        print 'COMPLETE \n '

        ##### Validation Set and  the rest #### 

        print 'Preparing the Validation data... '

        neg_index = np.where(y_train0 == 0)[0]
        pos_index = np.where(y_train0 == 1)[0]
        neg_pos_prop = [neg_index.shape[0], pos_index.shape[0] ]
        print 'Pos Neg proportion in training data ' + str(neg_pos_prop)

        validate_index = np.arange(150000, y_train0.shape[0],1)
        train_index  = np.arange(0,150000,1)

        x_valid = x_train0[validate_index]
        y_valid = y_train0[validate_index]
        x_train = x_train0[train_index]
        y_train = y_train0[train_index]
        print 'COMPLETE \n '

        ##### True if the model is a CNN #####
        print 'Reshaping the dataset '


        model_is_cnn = True

        ##### reshape x for cnn  #####
        if(model_is_cnn==True):
            x_train = x_train.reshape((x_train.shape[0],1,x_train.shape[1],x_train.shape[1]))
            x_test = x_test.reshape((x_test.shape[0],1,x_test.shape[1],x_test.shape[1]))
            x_valid = x_valid.reshape((x_valid.shape[0],1,x_valid.shape[1],x_valid.shape[1]))
        ######### init GPU status #######
        cuda.init()

        #FXN MUST BE DEFINED BEFORE INITIALIZATION 
        ######## init models ########
        if(model_is_cnn==True):
            model_cpu_ver = convnet.convnet_trial(patchsize=x_train.shape[2])
            model =  convnet.convnet_trial(patchsize=x_train.shape[2]).to_gpu()
        else:
            model =  logistic_reg.logistic_r(patchsize=x_train.shape[1]).to_gpu()
        print 'COMPLETE \n '

        ######## init optimizer #######
        print 'Initializing the optimizer...\n '

        optimizer = optimizers.Adam()
        optimizer.setup(model.collect_parameters())
        optimizer.zero_grads()

        print 'Initiating the Training Sequence...'

        #######################
        ######Training###########
        #######################

        import time

        trainsize = x_train.shape[0]
        validsize = x_valid.shape[0]

        #Data Augmentation
        x_traintr = np.transpose(x_train, (0,1,3,2))
        x_trainlr  = x_train[:,:,:,::-1]
        x_trainud  = x_train[:,:,::-1,:]
        x_train_udtr = np.transpose(x_trainud, (0,1,3,2))

        start_time = time.time()

        minibatchsize = 50
        for epoch in xrange(epoch_size):

            elapsed_time = time.time() - start_time
            print 'Elapsed time is ' + str(elapsed_time)
            start_time = time.time()


            indexes = np.random.permutation(trainsize)
            n_batch = indexes.shape[0]/minibatchsize
            sum_loss = 0
            sum_accuracy = 0
            for i in xrange(0, trainsize, minibatchsize):

                x_batch_orig = x_train[indexes[i : i + minibatchsize]]
                y_batch_orig = y_train[indexes[i : i + minibatchsize]]

                x_batch_tr = x_traintr[indexes[i : i + minibatchsize]]
                y_batch_tr = y_train[indexes[i : i + minibatchsize]]        

                x_batch_lr = x_trainlr[indexes[i : i + minibatchsize]]
                y_batch_lr = y_train[indexes[i : i + minibatchsize]]

                x_batch_ud = x_trainud[indexes[i : i + minibatchsize]]
                y_batch_ud = y_train[indexes[i : i + minibatchsize]]

                x_batch_udtr = x_train_udtr[indexes[i : i + minibatchsize]]
                y_batch_udtr = y_train[indexes[i : i + minibatchsize]]        

                pre_x_batch = np.concatenate((x_batch_orig,x_batch_tr, x_batch_lr, x_batch_ud, x_batch_udtr), axis=0)
                pre_y_batch = np.concatenate((y_batch_orig,y_batch_tr, y_batch_lr, y_batch_ud,y_batch_udtr), axis=0)        

                x_batch = cuda.to_gpu(pre_x_batch)
                y_batch = cuda.to_gpu(pre_y_batch)
                optimizer.zero_grads()

                loss, accuracy,pred = model.forward(x_batch, y_batch)

                sum_loss += loss.data*minibatchsize
                sum_accuracy += accuracy.data*minibatchsize
                loss.backward()
                optimizer.update()
                #print 'train loss:' + str(loss.data)
                #print 'train accuracy(%)' + str(accuracy.data)

            sum_val_loss = 0
            sum_val_accuracy = 0
            for i in xrange(0,validsize,minibatchsize):
                x_batch = cuda.to_gpu(x_valid[i : i + minibatchsize])
                y_batch = cuda.to_gpu(y_valid[i : i + minibatchsize])
                loss, accuracy,pred = model.forward(x_batch, y_batch,False)
                sum_val_loss += loss.data*minibatchsize
                sum_val_accuracy += accuracy.data*minibatchsize

            print 'epoch ', epoch
            print 'train loss:' + str(sum_loss/trainsize)
            print 'train accuracy(%):' + str(sum_accuracy/trainsize*100)
            print 'validation loss:' + str(sum_val_loss/validsize)
            print 'validation accuracy(%):' + str(sum_val_accuracy/validsize*100)


            print type(model)
            modelname = 'trained_model%s_crop%sepoch%s.pkl' %(str(resolution),  str(cropsize), str(epoch_size))
            print modelname
            pickle.dump(model, open(models_path+ modelname,'wb'),-1)

        elapsed_time = time.time() - start_time
        print elapsed_time
        print 'Training sequence COMPLETE'  


        print 'Initiating the Testing Sequence...'

        #######################
        ###### Testing ###########
        #######################
        testsize = x_test.shape[0]
        minibatchsize = 1000
        sum_loss = 0
        sum_accuracy = 0
        confusion_matrix = np.zeros((2,2))
        for i in xrange(0, testsize, minibatchsize):
                x_batch = cuda.to_gpu(x_test[i : i + minibatchsize])
                y_batch = cuda.to_gpu(y_test[i : i + minibatchsize])
                loss, accuracy, prob = model.forward(x_batch, y_batch,train=False)
                sum_loss += loss.data*minibatchsize
                sum_accuracy += accuracy.data*x_batch.shape[0]
                #pred = cuda.to_cpu(prob.data)[:,0]>threshold
                pred = np.argmax(cuda.to_cpu(prob.data),axis=1)

                #calc confusion matrix
                for j in xrange(x_batch.shape[0]):
                    confusion_matrix[cuda.to_cpu(y_batch)[j],pred[j]] += 1

        print 'Testing sequence COMPLETE... saving the log... '  
        txtname = 'trained_model%s_crop%sepoch%s_log.txt' %(str(resolution),  str(cropsize), str(epoch_size))
        sys.stdout = open(models_path+ txtname,"w")                        
        print 'test loss:' + str(sum_loss/testsize)

        print 'chance level (accuracy): ' + str((np.sum(confusion_matrix[0,:])/np.sum(confusion_matrix)))
        print 'test accuracy(%)' + str((confusion_matrix[0,0]+confusion_matrix[1,1])/np.sum(confusion_matrix))

        print 'confusion_matrix:'
        print confusion_matrix

        sys.stdout.close()
 def __init__(self, modelpath, meanpath, gpu=-1):
    model = loader.load_model(modelpath)
    super(ModelVisualizer, self).__init__(meanpath, model, gpu)
    self.gpu = gpu
    if self.gpu >= 0:
       cuda.init(self.gpu)
Example #21
def test(args):
    # augmentation setting
    trans = Transform(padding=[args.crop_pad_inf, args.crop_pad_sup],
                      flip=args.flip,
                      size=args.size,
                      shift=args.shift,
                      lcn=args.lcn)

    # test data
    test_fn = '%s/test_joints.csv' % args.datadir
    test_dl = np.array([l.strip() for l in open(test_fn).readlines()])

    # load model
    if args.gpu >= 0:
        cuda.init(args.gpu)
    model = load_model(args)
    if args.gpu >= 0:
        model.to_gpu()
    else:
        model.to_cpu()

    # create output dir
    epoch = int(re.search(ur'epoch_([0-9]+)', args.param).groups()[0])
    result_dir = os.path.dirname(args.param)
    out_dir = '%s/test_%d' % (result_dir, epoch)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)
    out_log = '%s.log' % out_dir
    fp = open(out_log, 'w')

    mean_error = 0.0
    N = len(test_dl)
    for i in range(0, N, args.batchsize):
        lines = test_dl[i:i + args.batchsize]
        input_data, labels = load_data(trans, args, lines)

        if args.gpu >= 0:
            input_data = cuda.to_gpu(input_data.astype(np.float32))
            labels = cuda.to_gpu(labels.astype(np.float32))

        _, preds = model.forward(input_data, labels, train=False)

        if args.gpu >= 0:
            preds = cuda.to_cpu(preds.data)
            input_data = cuda.to_cpu(input_data)
            labels = cuda.to_cpu(labels)

        for n, line in enumerate(lines):
            img_fn = line.split(',')[args.fname_index]
            img = input_data[n].transpose((1, 2, 0))
            pred = preds[n]
            img_pred, pred = trans.revert(img, pred)

            # turn label data into image coordinates
            label = labels[n]
            img_label, label = trans.revert(img, label)

            # calc mean_error
            error = np.linalg.norm(pred - label) / len(pred)
            mean_error += error

            # create pred, label tuples
            img_pred = np.array(img_pred.copy())
            img_label = np.array(img_label.copy())
            pred = [tuple(p) for p in pred]
            label = [tuple(p) for p in label]

            # all limbs
            img_label = draw_joints(
                img_label, label, args.draw_limb, args.text_scale)
            img_pred = draw_joints(
                img_pred, pred, args.draw_limb, args.text_scale)

            msg = '{:5}/{:5} {}\terror:{}\tmean_error:{}'.format(
                i + n, N, img_fn, error, mean_error / (i + n + 1))
            print(msg, file=fp)
            print(msg)

            fn, ext = os.path.splitext(img_fn)
            tr_fn = '%s/%d-%d_%s_pred%s' % (out_dir, i, n, fn, ext)
            la_fn = '%s/%d-%d_%s_label%s' % (out_dir, i, n, fn, ext)
            cv.imwrite(tr_fn, img_pred)
            cv.imwrite(la_fn, img_label)
Example #22
def run_nn_vae(q,optimizer_nm,train_x,train_real, train_y,test_x,test_y,cross_val,nn_n_hidden,
               vae_n_hidden, n_z, n_batch, nn_n_epochs,vae_n_epochs,n_epochs_tuning,activation,grad_clip,noise_nm,gpu=-1):
    
    # np.random.seed(123)  # fix the random seed
    
    n_x = train_x.shape[1]
    n_real = train_real.shape[1]
    n_y = train_y.shape[1]

    nn_n_layers = len(nn_n_hidden)
    
    vae_n_hidden_recog = vae_n_hidden
    vae_n_hidden_gen   = vae_n_hidden[::-1]
    vae_n_layers_recog = len(vae_n_hidden_recog)
    vae_n_layers_gen   = len(vae_n_hidden_gen)
    
    
    """NN pre_train"""

    layers = {}

    # Recognition model.
    nn_layer_sizes = [(n_x,nn_n_hidden[0])]
    if nn_n_layers >1:
        nn_layer_sizes += zip(nn_n_hidden[:-1], nn_n_hidden[1:])
    nn_layer_sizes += [(nn_n_hidden[-1], n_real)]

    for i, (n_incoming, n_outgoing) in enumerate(nn_layer_sizes):
        layers['nn_layer_%i' % i] = F.Linear(n_incoming, n_outgoing)

    """VAE pre_train"""

    # Recognition model.
    vae_rec_layer_sizes = [(n_real, vae_n_hidden_recog[0])]
    if vae_n_layers_recog >1:
        vae_rec_layer_sizes += zip(vae_n_hidden_recog[:-1], vae_n_hidden_recog[1:])
    vae_rec_layer_sizes += [(vae_n_hidden_recog[-1], n_z)]

    for i, (n_incoming, n_outgoing) in enumerate(vae_rec_layer_sizes):
        layers['vae_recog_%i' % i] = F.Linear(n_incoming, n_outgoing)

    layers['log_sigma'] = F.Linear(vae_n_hidden_recog[-1], n_z)

    # Generating model.
    vae_gen_layer_sizes = [(n_z, vae_n_hidden_gen[0])]
    if vae_n_layers_gen > 1:
        vae_gen_layer_sizes += zip(vae_n_hidden_gen[:-1], vae_n_hidden_gen[1:])
    vae_gen_layer_sizes += [(vae_n_hidden_gen[-1], n_real)]

    for i, (n_incoming, n_outgoing) in enumerate(vae_gen_layer_sizes):
        layers['vae_gen_%i' % i] = F.Linear(n_incoming, n_outgoing)
        
    layers['output'] = F.Linear(n_z, n_y)

    model = NN_VAE(**layers)

    if gpu >= 0:
        cuda.init(gpu)
        model.to_gpu()
    
    # use Adam
    optimizers_dict = {
        "Adam":optimizers.Adam(),"AdaDelta":optimizers.AdaDelta(),
        "AdaGrad":optimizers.AdaGrad(),"MomentumSGD":optimizers.MomentumSGD(),
        "NesterovAG":optimizers.NesterovAG(),"RMSprop":optimizers.RMSprop(),
        "SGD":optimizers.SGD()
    }
    
    optimizer = optimizers_dict[optimizer_nm]
    optimizer.setup(model.collect_parameters())

    total_nn_losses = []
    
    if cross_val >=0:
        print('{}s pre-train start ...'.format(cross_val))
        
    # pre_train_NN start

    for epoch in xrange(1, nn_n_epochs + 1):
        t1 = time.time()
        
        # np.random.seed(123)
        indexes = np.random.permutation(train_x.shape[0])
        
        nn_total_loss = 0.0
        nn_out_list = np.zeros(train_real.shape)
        noisy_train_x = np.array(noisy(noise_nm,train_x),dtype = np.float32)
        for i in xrange(0, train_x.shape[0], n_batch):
            noisy_x_batch = noisy_train_x[indexes[i : i + n_batch]]
            real_batch = train_real[indexes[i : i + n_batch]]

            if gpu >= 0:
                noisy_x_batch = cuda.to_gpu(noisy_x_batch)

            optimizer.zero_grads()

            loss, nn_out = model.nn_forward(
                noisy_x_batch, real_batch, nn_n_layers,nonlinear=activation, gpu=-1,train=True
            )
            
            nn_total_loss += float(loss.data) * len(noisy_x_batch)
            loss.backward()
            optimizer.clip_grads(grad_clip)
            optimizer.update()
            nn_out_list[indexes[i : i + n_batch]] = nn_out.data

        total_nn_losses.append(nn_total_loss / train_x.shape[0])
        
    #  pre_train_VAE start
    
    total_vae_losses = []
    
    if cross_val >=0:
        print('{}s VAE pre-train start ...'.format(cross_val))
        
    nn_out_list = np.array(nn_out_list, dtype =np.float32)
    noisy_nn_out_list = np.array(noisy(noise_nm,nn_out_list),dtype = np.float32)
    

    for epoch in xrange(1, vae_n_epochs + 1):
        # np.random.seed(123)
        indexes = np.random.permutation(train_x.shape[0])
        total_loss = 0.0
        noisy_nn_out_list = np.array(noisy(noise_nm,nn_out_list),dtype = np.float32)
        for i in xrange(0, train_x.shape[0], n_batch):
            noisy_nn_out_list_batch = noisy_nn_out_list[indexes[i : i + n_batch]]
            nn_out_list_batch =nn_out_list[indexes[i : i + n_batch]]
            real_batch = train_real[indexes[i : i + n_batch]]
            
            if gpu >= 0:
                noisy_nn_out_list_batch = cuda.to_gpu(noisy_nn_out_list_batch)

            optimizer.zero_grads()

            rec_loss, kl_loss, output = model.vae_forward(
                noisy_nn_out_list_batch, real_batch, vae_n_layers_recog,
                vae_n_layers_gen, nonlinear_q=activation, nonlinear_p=activation,train=True)
            loss = rec_loss + kl_loss
            total_loss += float(loss.data) * len(noisy_nn_out_list_batch)
            loss.backward()
            optimizer.clip_grads(grad_clip)
            optimizer.update()
        total_vae_losses.append(total_loss /  train_x.shape[0])
    
    #  train_test_NN_VAE start

    total_nn_vae_losses = []
    total_test_losses = []
    total_train_losses = []
    if cross_val >=0:
        print('{}s tuning start ...'.format(cross_val))

    for epoch in xrange(1, n_epochs_tuning + 1):
        noisy_train_x = np.array(noisy(noise_nm,train_x),dtype = np.float32)
        #np.random.seed(123)
        indexes = np.random.permutation(train_x.shape[0])
        total_loss = 0.0
        for i in xrange(0, train_x.shape[0], n_batch):
            noisy_x_batch = noisy_train_x[indexes[i : i + n_batch]]
            y_batch = train_y[indexes[i : i + n_batch]]

            if gpu >= 0:
                noisy_x_batch = cuda.to_gpu(noisy_x_batch)
                y_batch = cuda.to_gpu(y_batch)

            optimizer.zero_grads()

            loss, predict_score = model.nn_vae_tuning(
                noisy_x_batch, y_batch, nn_n_layers,vae_n_layers_recog,
                nonlinear_q=activation,train=True
            )
            loss = loss ** 0.5
            total_loss += float(loss.data) * len(noisy_x_batch)
            loss.backward()
            optimizer.clip_grads(grad_clip)
            optimizer.update()
        total_nn_vae_losses.append(total_loss /  train_x.shape[0])
        
        # test
        
        sum_loss_train = 0
        
        for i in xrange(0, train_x.shape[0], n_batch):
            x_batch = train_x[indexes[i : i + n_batch]]
            y_batch = train_y[indexes[i : i + n_batch]]

            if gpu >= 0:
                x_batch = cuda.to_gpu(x_batch)
                y_batch = cuda.to_gpu(y_batch)

            loss, predict_score = model.nn_vae_tuning(
                x_batch, y_batch, nn_n_layers,vae_n_layers_recog,
                nonlinear_q=activation,train=False
            )
            loss = loss ** 0.5
            sum_loss_train += float(loss.data) * len(x_batch)
        total_train_losses.append(sum_loss_train/train_x.shape[0])

        x_batch = test_x
        y_batch = test_y

        loss, predict_score =  model.nn_vae_tuning(
            x_batch, y_batch, nn_n_layers,vae_n_layers_recog,
            nonlinear_q=activation, train=False
        )
        loss = loss ** 0.5
        total_test_losses.append(loss.data)
    q.put([total_nn_losses,total_vae_losses,total_nn_vae_losses,total_train_losses,total_test_losses])
Example #23
def Main():
  import argparse
  import numpy as np
  from chainer import cuda, Variable, FunctionSet, optimizers
  import chainer.functions  as F

  parser = argparse.ArgumentParser(description='Chainer example: regression')
  parser.add_argument('--gpu', '-g', default=-1, type=int,
                      help='GPU ID (negative value indicates CPU)')
  args = parser.parse_args()

  batchsize = 10
  n_epoch   = NEpoch
  n_units   = 300  #TEST

  # Prepare dataset
  data_x, data_y = LoadData()
  batchsize= max(1,min(batchsize, len(data_y)/20))  #TEST: adjust batchsize
  #dx2,dy2=GenData(300, noise=0.0); data_x.extend(dx2); data_y.extend(dy2)
  data = np.array(data_x).astype(np.float32)
  target = np.array(data_y).astype(np.int32)  #DIFF_REG

  N= len(data) #batchsize * 30
  x_train= data
  y_train= target

  #For test:
  mi,ma,me= GetStat(data_x)
  f_reduce=lambda xa:[xa[0],xa[1]]
  f_repair=lambda xa:[xa[0],xa[1]]
  nt= 20+1
  N_test= nt*nt
  x_test= np.array(sum([[f_repair([x1,x2]) for x2 in FRange1(f_reduce(mi)[1],f_reduce(ma)[1],nt)] for x1 in FRange1(f_reduce(mi)[0],f_reduce(ma)[0],nt)],[])).astype(np.float32)
  y_test= np.array([0.0 for x in x_test]).astype(np.int32)  #DIFF_REG
  #No true test data (just for plotting)

  print 'Num of samples for train:',len(y_train),'batchsize:',batchsize
  # Dump data for plot:
  DumpData('/tmp/nn/smpl_train.dat', x_train, [[y] for y in y_train], f_reduce)  #DIFF_REG

  # Prepare multi-layer perceptron model
  model = FunctionSet(l1=F.Linear(2, n_units),
                      l2=F.Linear(n_units, n_units),
                      l3=F.Linear(n_units, 3))
  #TEST: Random bias initialization
  #, bias=Rand()
  #model.l1.b[:]= [Rand() for k in range(n_units)]
  #model.l2.b[:]= [Rand() for k in range(n_units)]
  #model.l3.b[:]= [Rand() for k in range(1)]
  #print model.l2.__dict__
  if args.gpu >= 0:
    cuda.init(args.gpu)
    model.to_gpu()

  # Neural net architecture
  def forward(x_data, y_data, train=True):
    #train= False  #TEST: Turn off dropout
    dratio= 0.2  #0.5  #TEST: Dropout ratio
    x, t = Variable(x_data), Variable(y_data)
    h1 = F.dropout(F.relu(model.l1(x)),  ratio=dratio, train=train)
    h2 = F.dropout(F.relu(model.l2(h1)), ratio=dratio, train=train)
    #h1 = F.dropout(F.leaky_relu(model.l1(x),slope=0.2),  ratio=dratio, train=train)
    #h2 = F.dropout(F.leaky_relu(model.l2(h1),slope=0.2), ratio=dratio, train=train)
    #h1 = F.dropout(F.sigmoid(model.l1(x)),  ratio=dratio, train=train)
    #h2 = F.dropout(F.sigmoid(model.l2(h1)), ratio=dratio, train=train)
    #h1 = F.dropout(F.tanh(model.l1(x)),  ratio=dratio, train=train)
    #h2 = F.dropout(F.tanh(model.l2(h1)), ratio=dratio, train=train)
    #h1 = F.dropout(model.l1(x),  ratio=dratio, train=train)
    #h2 = F.dropout(model.l2(h1), ratio=dratio, train=train)
    #h1 = F.relu(model.l1(x))
    #h2 = F.relu(model.l2(h1))
    #h1 = model.l1(x)
    #h2 = model.l2(h1)
    y  = model.l3(h2)
    #return F.mean_squared_error(y, t), y
    return F.softmax_cross_entropy(y, t), F.softmax(y)  #DIFF_REG

  # Setup optimizer
  optimizer = optimizers.AdaDelta(rho=0.9)
  #optimizer = optimizers.AdaGrad(lr=0.5)
  #optimizer = optimizers.RMSprop()
  #optimizer = optimizers.MomentumSGD()
  #optimizer = optimizers.SGD(lr=0.8)
  optimizer.setup(model.collect_parameters())

  # Learning loop
  for epoch in xrange(1, n_epoch+1):
    print 'epoch', epoch

    # training
    perm = np.random.permutation(N)
    sum_loss = 0

    for i in xrange(0, N, batchsize):
      x_batch = x_train[perm[i:i+batchsize]]
      y_batch = y_train[perm[i:i+batchsize]]
      if args.gpu >= 0:
        x_batch = cuda.to_gpu(x_batch)
        y_batch = cuda.to_gpu(y_batch)

      optimizer.zero_grads()
      loss, pred = forward(x_batch, y_batch)
      loss.backward()  #Computing gradients
      optimizer.update()

      sum_loss += float(cuda.to_cpu(loss.data)) * batchsize

    print 'train mean loss={}'.format(
        sum_loss / N)


    if epoch%10==0:
      #'''
      # testing all data
      preds = []
      x_batch = x_test[:]
      y_batch = y_test[:]
      if args.gpu >= 0:
        x_batch = cuda.to_gpu(x_batch)
        y_batch = cuda.to_gpu(y_batch)
      loss, pred = forward(x_batch, y_batch, train=False)
      preds = cuda.to_cpu(pred.data)
      sum_loss = float(cuda.to_cpu(loss.data)) * len(y_test)
      #'''

      print 'test  mean loss={}'.format(
          sum_loss / N_test)

      # Dump data for plot:
      y_pred= [[y.index(max(y))]+y for y in preds.tolist()]  #DIFF_REG
      DumpData('/tmp/nn/nn_test%04i.dat'%epoch, x_test, y_pred, f_reduce, lb=nt+1)
Example #24
def main():
    args = parse_args()

    trace('making vocabulary ...')
    vocab, num_lines, num_words = make_vocab(args.corpus, args.vocab)

    trace('initializing CUDA ...')
    cuda.init()

    trace('start training ...')
    if args.model == 0:
        model = BasicRnnLM(args.embed, args.hidden, args.vocab)
        model.reset()
    elif args.model == 1:
        model = LSTMRnn(args.embed, args.hidden, args.vocab)
        model.reset()
    elif args.model == 2:
        model = AttentionLM(args.embed, args.hidden, args.vocab)
        model.reset()
    model.to_gpu()

    for epoch in range(args.epoch):
        trace('epoch %d/%d: ' % (epoch + 1, args.epoch))
        log_ppl = 0.0
        trained = 0
        
        opt = optimizers.AdaGrad(lr = 0.01)
        opt.setup(model)
        opt.add_hook(optimizer.GradientClipping(5))

        for batch in generate_batch(args.corpus, args.minibatch):
            K = len(batch)
            opt.zero_grads()
            loss, perplexity = forward(batch, model)
            loss.backward()
            log_ppl += perplexity 
            opt.update()
            trained += K
            model.reset()

        trace('  %d/%d' % (trained, num_lines))      
        log_ppl /= float(num_words)
        trace('Train  log(PPL) = %.10f' % log_ppl)
        trace('Train  PPL      = %.10f' % math.exp(log_ppl))

        log_ppl = 0.0

        for batch in generate_batch(args.valid, args.minibatch):
            K = len(batch)
            loss, perplexity= forward(batch, model)
            log_ppl += perplexity 
            model.reset()

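        # NOTE: unlike the training pass above, log_ppl is not normalized by a
        # validation word count here before being exponentiated.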
        trace('Valid  log(PPL) = %.10f' % log_ppl)
        trace('Valid  PPL      = %.10f' % math.exp(log_ppl))

        trace('  writing model ...')
        prefix = 'RNNLM-' + str(args.model) + '.%03d' % (epoch + 1)
        save_vocab(prefix + '.srcvocab', vocab)
        model.save_spec(prefix + '.spec')
        serializers.save_hdf5(prefix + '.weights', model)

    trace('training finished.')
def main():
    if P.gpu >= 0:
        cuda.init(P.gpu)
    test_and_save()
    return

import numpy as np
import six
import cv2
import os
import six.moves.cPickle as pickle

import chainer
from chainer import computational_graph as c
from chainer import cuda
import chainer.functions as F
from chainer import optimizers

# Initialize CUDA
cuda.init(0)

# Load the model
model = pickle.load(open('model15','rb'))

# Character names
chara_name = ['unknown', 'kyoko', 'akari', 'yui', 'chinatsu']

# Set up the forward pass
def forward(x_data):
    x = chainer.Variable(x_data, volatile=False)
    h = F.max_pooling_2d(F.relu(model.conv1(x)), ksize = 5, stride = 2, pad =2)
    h = F.max_pooling_2d(F.relu(model.conv2(h)), ksize = 5, stride = 2, pad =2)
    h = F.dropout(F.relu(model.l3(h)), train=False)
    y = model.l4(h)
    return F.softmax(y)  # assumed completion; the original snippet ends without a return
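
A minimal usage sketch for the classifier above (hypothetical helper: assumes the pickled model's parameters live on the GPU, as the cuda.init(0) call suggests, and that the input image is already sized to what the model expects):

def predict_character(image_path):
    # Hypothetical: build a single-image float32 batch and name the argmax class.
    img = cv2.imread(image_path).transpose(2, 0, 1).astype(np.float32)
    x_batch = cuda.to_gpu(img[np.newaxis])
    y = forward(x_batch)
    return chara_name[int(np.argmax(cuda.to_cpu(y.data)))]

print predict_character('sample.png')  # hypothetical input path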
Example #27
training_size = int(9.0 / 10 * data_size)
perm = np.random.permutation(data_size)
data = data[perm]
data12 = data12[perm]
target = target[perm]
x_train, x_test = np.split(data, [training_size])
x_train12, x_test12 = np.split(data12, [training_size])
y_train, y_test = np.split(target, [training_size])

test_size = data_size - training_size

## Load Network Architecture
model = facenet.FaceNet24()

if args.gpu >= 0:
    cuda.init(args.gpu)
    model.to_gpu()

## Set optimizer
optimizer = optimizers.MomentumSGD(lr=0.001, momentum=0.9)
optimizer.setup(model.collect_parameters())

## function for test phase


def forward_valid(x_test, y_test, batchsize):
    sum_accuracy = 0
    sum_loss = 0
    for i in range(0, test_size, batchsize):
        x_batch = x_test[i:i + batchsize]
        x_batch12 = x_test12[i:i + batchsize]
Example #28
def Main():
    import argparse
    import os, re
    import numpy as np
    import six.moves.cPickle as pickle

    #from fk_test import TFKTester

    parser = argparse.ArgumentParser(description='Chainer example: regression')
    parser.add_argument('--gpu',
                        '-g',
                        default=-1,
                        type=int,
                        help='GPU ID (negative value indicates CPU)')
    parser.add_argument('--dof', '-D', default='3', type=str, help='DoF code')
    parser.add_argument(
        '--sdof',
        '-SD',
        default='',
        type=str,
        help='DoF code of samples. Blank uses the same one as --dof.')
    parser.add_argument(
        '--mdof',
        '-MD',
        default='',
        type=str,
        help='DoF code of model file. Blank uses the same one as --dof.')
    args = parser.parse_args()

    batchsize = 10  #TEST; 20
    n_epoch = NEpoch
    n_units = 200  #TEST
    n_units2 = 20
    n_units3 = 50
    #n_units   = 500
    #batchsize = 20

    #dof = 3
    dofc = args.dof
    sdofc = args.sdof if args.sdof != '' else dofc
    mdofc = args.mdof if args.mdof != '' else dofc
    dof = int(re.search('^[0-9]+', dofc).group())
    data_x = LoadData('datak/chain%s_q.dat' % sdofc)
    if dofc not in ModelCodesWithXAll:
        data_y1 = LoadData('datak/chain%s_x.dat' % sdofc, c1=0, c2=3)
        data_y2 = LoadData('datak/chain%s_x.dat' % sdofc, c1=3, c2=None)
    else:
        data_y = LoadData('datak/chain%s_xall.dat' % sdofc,
                          c1=7)  #Skip the first pose.
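    # NOTE: this branch assigns data_y, but only data_y1/data_y2 are consumed
    # below, so the xall path presumably needs its own split into two targets.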
    # Prepare dataset
    batchsize = max(1, min(batchsize,
                           len(data_x) / 20))  #TEST: adjust batchsize
    data = np.array(data_x).astype(np.float32)
    target1 = np.array(data_y1).astype(np.float32)
    target2 = np.array(data_y2).astype(np.float32)

    N = len(data)  #batchsize * 30
    x_train = data
    y1_train = target1
    y2_train = target2

    print 'Num of samples for train:', len(x_train)

    Dx = len(data_x[0])
    Dy1 = len(data_y1[0])
    Dy2 = len(data_y2[0])
    model1, model2 = CreateModel(dofc, Dx, Dy1, Dy2, n_units, n_units2,
                                 n_units3)
    if args.gpu >= 0:
        cuda.init(args.gpu)
        model1.to_gpu()
        model2.to_gpu()

    # Neural net architecture
    def forward(x_data, y1_data, y2_data, train=True):
        return ForwardModel(dofc, model1, model2, x_data, y1_data, y2_data,
                            train)

    # Predict for a single query x
    def predict(x):
        x_batch = np.array([x]).astype(np.float32)
        y1_batch = np.array([[0.0] * Dy1]).astype(np.float32)  #Dummy
        y2_batch = np.array([[0.0] * Dy2]).astype(np.float32)  #Dummy
        if args.gpu >= 0:
            x_batch = cuda.to_gpu(x_batch)
            y1_batch = cuda.to_gpu(y1_batch)
            y2_batch = cuda.to_gpu(y2_batch)
        loss1, loss2, pred1, pred2 = forward(x_batch,
                                             y1_batch,
                                             y2_batch,
                                             train=False)
        y1 = cuda.to_cpu(pred1.data)[0]
        y2 = cuda.to_cpu(pred2.data)[0]
        y = np.concatenate((y1, y2))
        return y

    # Setup optimizer
    optimizer1 = optimizers.AdaDelta(rho=0.9)
    optimizer2 = optimizers.AdaDelta(rho=0.9)
    #optimizer = optimizers.AdaGrad(lr=0.5)
    #optimizer = optimizers.RMSprop()
    #optimizer = optimizers.MomentumSGD()
    #optimizer = optimizers.SGD(lr=0.8)
    optimizer1.setup(model1.collect_parameters())
    optimizer2.setup(model2.collect_parameters())

    #tester= TFKTester(3)

    file_names = {
        'l': 'result/fk2log%s.dat' % mdofc,
        'm': 'result/fk2nn%s.dat' % mdofc
    }
    if os.path.exists(file_names['l']) or os.path.exists(file_names['m']):
        print 'File(s) already exists.'
        print 'Check:', file_names
        return

    fp_log = open(file_names['l'], 'w')

    # Learning loop
    for epoch in xrange(1, n_epoch + 1):
        print 'epoch', epoch

        # training
        perm = np.random.permutation(N)
        sum_loss = 0

        for i in xrange(0, N, batchsize):
            x_batch = x_train[perm[i:i + batchsize]]
            y1_batch = y1_train[perm[i:i + batchsize]]
            y2_batch = y2_train[perm[i:i + batchsize]]
            if args.gpu >= 0:
                x_batch = cuda.to_gpu(x_batch)
                y1_batch = cuda.to_gpu(y1_batch)
                y2_batch = cuda.to_gpu(y2_batch)

            optimizer1.zero_grads()
            optimizer2.zero_grads()
            loss1, loss2, pred1, pred2 = forward(x_batch, y1_batch, y2_batch)
            loss1.backward()  #Computing gradients
            loss2.backward()  #Computing gradients
            optimizer1.update()
            optimizer2.update()

            loss = loss1 + loss2
            bloss = float(cuda.to_cpu(loss.data))
            sum_loss += bloss * batchsize
            fp_log.write('%f %f\n' %
                         (epoch - 1.0 + float(i) / float(N), bloss))

        print 'train mean loss={}'.format(sum_loss / N)
        fp_log.write('%f %f %f\n' % (float(epoch), bloss, sum_loss / N))

        print predict(np.array([0.0] * dof).astype(np.float32))
        #if epoch in TestNEpochs:
        #tester.Test(f_fwdkin=predict, n_samples=100)

        model = {'model1': model1, 'model2': model2}
        pickle.dump(model, open(file_names['m'], 'wb'), -1)
        print 'Model file is dumped to:', file_names['m']
        '''
    if epoch in TestNEpochs:
      # testing all data
      preds = []
      x_batch = x_test[:]
      y_batch = y_test[:]
      if args.gpu >= 0:
        x_batch = cuda.to_gpu(x_batch)
        y_batch = cuda.to_gpu(y_batch)
      loss, pred = forward(x_batch, y_batch, train=False)
      preds = cuda.to_cpu(pred.data)
      sum_loss = float(cuda.to_cpu(loss.data)) * len(y_test)

      print 'test  mean loss={}'.format(
          sum_loss / N_test)

      # Dump data for plot:
      DumpData('/tmp/nn_test%04i.dat'%epoch, x_test, preds, f_reduce, lb=nt+1)
    #'''

    #tester.Cleanup()
    fp_log.close()
    def train(self, epochsize = 10, cooling_rate = 0.95, initial_temperature= 1.5, boringsize = 0):
        
        #################
        # Turn this on if it is necessary to make the data   SOMETHING IS WRONG WITH DATA LOADING
        makedata = True

         
        #################
        # export paths
        parentpath= '/home/koyama-m/Research/membrane_CNN/'
        models_path= parentpath + 'models/'
        logs_path = 'logs/'
        datapath_train = parentpath +  'data/training_dataset/256_training_dataset_crop%s/' %str(self.cropsize)
        datapath_test = parentpath + 'data/test_dataset/256_test_dataset_crop%s/' %str(self.cropsize)
        label_datapath_train = parentpath + 'data/training_dataset/label_256_training_dataset_crop%s/'%str(self.cropsize)
        label_datapath_test = parentpath +  'data/test_dataset/label_256_test_dataset_crop%s/'%str(self.cropsize)

        trainingfilepath_prefix = '256_training_image_'   #256_test_image_%3d%3d%3d.tif %(slice_index, row, column)
        label_filepath_prefix = 'label_256_training_image_' 
        sys.path.append(parentpath)
        sys.path.append(models_path)
        sys.path.append(datapath_train)
        sys.path.append(datapath_test)
        sys.path.append(label_datapath_train)
        sys.path.append(label_datapath_test)
        sys.path.append(trainingfilepath_prefix)
        sys.path.append(label_filepath_prefix)    
        
        
        #################
        ####Prepare Data#### 
        #################


        
        print '=================================================='
        print '========This file will use the model file convnet_conditional.py========='
        print '=================================================='     
        
        
          
        print 'Cropsize :%d,  Resolution :%d,  Epoch : %d, Boringsize : %d' %(self.cropsize, self.resolution, epochsize, boringsize)
        print  'cooling_rate:%s, initial temperature:%s, final temperature:%s' %\
         (str(cooling_rate), str(initial_temperature), str(initial_temperature*(cooling_rate**epochsize)) )
        

        
  
        #######################
        ###### Setup############
        #######################
        print 'importing the required modules ... '


        import logistic_reg
        import convnet_conditional as convnet
        import load_preprocessed_data_set as loadt 
        reload(logistic_reg)
        reload(convnet)


        print 'COMPLETE \n' 
        ###### load dataset ######
        print 'Loading the preprocessed test data and training data ... '

        if makedata:
            dataslice_test = loadt.make_data_set(datapath_test, patchsize=self.cropsize,
                                                 label_data_path=label_datapath_test, is_training=False, boring_size=boringsize)
            dataslice_train = loadt.make_data_set(datapath_train, patchsize=self.cropsize,
                                                  label_data_path=label_datapath_train, is_training=True, boring_size=boringsize)

            pickle.dump({'test': dataslice_test, 'train': dataslice_train},
                        open('/home/koyama-m/Research/membrane_CNN/data/temp_data/temp_dataset_with_hole.pkl', 'wb'), pickle.HIGHEST_PROTOCOL)

        dataset = pickle.load(open('/home/koyama-m/Research/membrane_CNN/data/temp_data/temp_dataset_with_hole.pkl', 'rb'))
        dataslice_test = dataset['test']
        dataslice_train = dataset['train']
        
        x_train, x_test, x_valid, y_train, y_test, y_valid = self.reshape_for_chainer_cnn(dataslice_train,  dataslice_test)        
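        # reshape_for_chainer_cnn presumably returns (N, channels, H, W) float32
        # image arrays and integer label vectors, the layout chainer's
        # Convolution2D and classification losses expect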

        
        ######### init GPU status #######
        cuda.init()

        # The network definition must be imported before the models are instantiated.
        ######## init models ########
        model_cpu_ver = convnet.convnet_cdd(patchsize=x_train.shape[2])
        model = convnet.convnet_cdd(patchsize=x_train.shape[2]).to_gpu()

        ######## init optimizer #######
        print 'Initializing the optimizer...\n '

        optimizer = optimizers.Adam()
        optimizer.setup(model.collect_parameters())
        optimizer.zero_grads()

        print 'Initiating the Training Sequence...'

        #######################
        ######Training###########
        #######################

        import time

        trainsize = x_train.shape[0]
        validsize = x_valid.shape[0]
        print 'validsize: ', validsize
        start_time = time.time()
        temperature = initial_temperature
        
        minibatchsize = 50
        for epoch in xrange(epochsize):

            print 'current_temperature: ', temperature

            elapsed_time = time.time() - start_time
            print 'Elapsed time is ' + str(elapsed_time)
            start_time = time.time()


            indexes = np.random.permutation(trainsize)
            n_batch = indexes.shape[0] / minibatchsize  # (unused)
            sum_loss = 0
            sum_accuracy = 0
            for i in xrange(0, trainsize, minibatchsize):

                batchrange = indexes[i : i + minibatchsize]
                x_train_batch = x_train[batchrange]
                # apply the temperature-dependent filter to channel 1
                # (presumably modified in place by filter_batch)
                self.filter_batch(x_train_batch[:, 1, :, :], temperature)

                y_train_batch = y_train[batchrange]

                pre_x_batch, pre_y_batch = self.augment_data_batch(x_train_batch, y_train_batch)

                x_batch = cuda.to_gpu(pre_x_batch)
                y_batch = cuda.to_gpu(pre_y_batch)

                optimizer.zero_grads()

                loss, accuracy, pred = model.forward(x_batch, y_batch)

                sum_loss += loss.data * minibatchsize
                sum_accuracy += accuracy.data * minibatchsize
                loss.backward()
                optimizer.update()


            sum_val_loss = 0
            sum_val_accuracy = 0
            for i in xrange(0, validsize, minibatchsize):

                x_valid_batch = x_valid[i : i + minibatchsize]
                y_valid_batch = y_valid[i : i + minibatchsize]
                pre_x_batch, pre_y_batch = self.augment_data_batch(x_valid_batch, y_valid_batch)

                x_batch = cuda.to_gpu(pre_x_batch)
                y_batch = cuda.to_gpu(pre_y_batch)

                loss, accuracy, pred = model.forward(x_batch, y_batch, False)
                sum_val_loss += loss.data * minibatchsize
                sum_val_accuracy += accuracy.data * minibatchsize

            print 'epoch ', epoch
            print 'train loss: ' + str(sum_loss / trainsize)
            print 'train accuracy(%): ' + str(sum_accuracy / trainsize * 100)
            print 'validation loss: ' + str(sum_val_loss / validsize)
            print 'validation accuracy(%): ' + str(sum_val_accuracy / validsize * 100)

            print type(model)
            modelname = 'hole%s_cool_rate%sconditional_distr_trained_model%s_crop%sepoch%s.pkl' % (str(boringsize), str(cooling_rate), str(self.resolution), str(self.cropsize), str(epochsize))
            print modelname
            # checkpoint the (GPU) model after every epoch, then cool the temperature
            pickle.dump(model, open(models_path + modelname, 'wb'), -1)
            temperature = temperature * cooling_rate
        elapsed_time = time.time() - start_time
        print 'Elapsed time is ' + str(elapsed_time)
        print 'Training sequence COMPLETE'


        print 'Initiating the Testing Sequence...'

        #######################
        ###### Testing ###########
        #######################
        testsize = x_test.shape[0]
        sum_loss = 0
        sum_accuracy = 0
        confusion_matrix = np.zeros((2,2))
        for i in xrange(0, testsize, minibatchsize):

            if i % 10000 == 0:
                # i already counts samples, since the loop steps by minibatchsize
                print "currently computing the set :", i, " ~ ", i + minibatchsize
            x_test_batch = x_test[i : i + minibatchsize]
            y_test_batch = y_test[i : i + minibatchsize]
            pre_x_batch, pre_y_batch = self.augment_data_batch(x_test_batch, y_test_batch)

            x_batch = cuda.to_gpu(pre_x_batch)
            y_batch = cuda.to_gpu(pre_y_batch)
            loss, accuracy, prob = model.forward(x_batch, y_batch, train=False)
            # weight by the actual batch size: the last batch may be smaller
            sum_loss += loss.data * x_batch.shape[0]
            sum_accuracy += accuracy.data * x_batch.shape[0]
            #pred = cuda.to_cpu(prob.data)[:,0]>threshold
            pred = np.argmax(cuda.to_cpu(prob.data), axis=1)

            # accumulate the confusion matrix: rows = true label, cols = prediction
            y_cpu = cuda.to_cpu(y_batch)
            for j in xrange(x_batch.shape[0]):
                confusion_matrix[y_cpu[j], pred[j]] += 1
                    
                    
        print 'Testing sequence COMPLETE... saving the log... '
        txtname = 'hole%scool_rate%sconditional_distr_trained_model%s_crop%sepoch%s_log.txt' % (str(boringsize), str(cooling_rate), str(self.resolution), str(self.cropsize), str(epochsize))
        # redirect stdout to the log file, and restore it when done
        stdout_orig = sys.stdout
        sys.stdout = open(models_path + logs_path + txtname, "w")
        print 'test loss: ' + str(sum_loss / testsize)

        print 'chance level (accuracy): ' + str(np.sum(confusion_matrix[0, :]) / np.sum(confusion_matrix))
        print 'test accuracy(%): ' + str((confusion_matrix[0, 0] + confusion_matrix[1, 1]) / np.sum(confusion_matrix) * 100)

        print 'confusion_matrix:'
        print confusion_matrix
        print 'Done with the model \n'
        print modelname
        print str(datetime.datetime.now())
        sys.stdout.close()
        sys.stdout = stdout_orig
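
The testing block above reduces everything to overall accuracy plus a chance level read off the confusion matrix. For reference, a minimal NumPy sketch of other summaries that fall out of the same 2x2 matrix (layout as above: rows = true label, columns = prediction; treating class 1 as the positive class is an assumption here, not something the original code fixes):

import numpy as np

def summarize(cm):
    # cm: 2x2 confusion matrix, rows = true label, cols = prediction
    tn, fp = cm[0, 0], cm[0, 1]   # class 0 taken as negative
    fn, tp = cm[1, 0], cm[1, 1]   # class 1 taken as positive
    accuracy = (tp + tn) / cm.sum()
    precision = tp / (tp + fp) if (tp + fp) > 0 else 0.0
    recall = tp / (tp + fn) if (tp + fn) > 0 else 0.0
    return accuracy, precision, recall

print summarize(np.array([[50., 10.], [5., 35.]], dtype=np.float64))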
Example #30
0
mnist = data.load_mnist_data()
mnist['data'] = mnist['data'].astype(np.float32)
mnist['data'] /= 255
mnist['target'] = mnist['target'].astype(np.int32)

N = 60000
x_train, x_test = np.split(mnist['data'],   [N])
y_train, y_test = np.split(mnist['target'], [N])
N_test = y_test.size

# Prepare multi-layer perceptron model
model = chainer.FunctionSet(l1=F.Linear(784, n_units),
                            l2=F.Linear(n_units, n_units),
                            l3=F.Linear(n_units, 10))
if args.gpu >= 0:
    cuda.init(args.gpu)
    model.to_gpu()

# Neural net architecture


def forward(x_data, y_data, train=True):
    x, t = chainer.Variable(x_data), chainer.Variable(y_data)
    h1 = F.dropout(F.relu(model.l1(x)),  train=train)
    h2 = F.dropout(F.relu(model.l2(h1)), train=train)
    y = model.l3(h2)
    return F.softmax_cross_entropy(y, t), F.accuracy(y, t)

# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model)
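
The example stops right after the optimizer setup; for completeness, a minimal epoch loop in the same style might look like the sketch below (batchsize and n_epoch are illustrative values, not part of the original example):

batchsize = 100   # illustrative
n_epoch = 20      # illustrative

for epoch in xrange(1, n_epoch + 1):
    perm = np.random.permutation(N)
    sum_loss = 0
    for i in xrange(0, N, batchsize):
        x_batch = x_train[perm[i:i + batchsize]]
        y_batch = y_train[perm[i:i + batchsize]]
        if args.gpu >= 0:
            x_batch = cuda.to_gpu(x_batch)
            y_batch = cuda.to_gpu(y_batch)

        optimizer.zero_grads()
        loss, acc = forward(x_batch, y_batch)
        loss.backward()
        optimizer.update()

        sum_loss += float(cuda.to_cpu(loss.data)) * batchsize

    print 'epoch {}: train mean loss={}'.format(epoch, sum_loss / N)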
Example #31
0
def Main():
    import argparse
    import numpy as np
    from sklearn.datasets import load_diabetes
    from chainer import cuda, Variable, FunctionSet, optimizers
    import chainer.functions as F

    parser = argparse.ArgumentParser(description='Chainer example: regression')
    parser.add_argument('--gpu',
                        '-g',
                        default=-1,
                        type=int,
                        help='GPU ID (negative value indicates CPU)')
    args = parser.parse_args()

    batchsize = 13
    n_epoch = 100
    n_units = 30

    # Prepare dataset
    print 'fetch diabetes dataset'
    diabetes = load_diabetes()
    data = diabetes['data'].astype(np.float32)
    target = diabetes['target'].astype(np.float32).reshape(
        len(diabetes['target']), 1)

    N = batchsize * 30  #Number of training data
    x_train, x_test = np.split(data, [N])
    y_train, y_test = np.split(target, [N])
    N_test = y_test.size

    print 'Num of samples for train:', len(y_train)
    print 'Num of samples for test:', len(y_test)
    # Dump data for plot:
    fp1 = file('/tmp/smpl_train.dat', 'w')
    for x, y in zip(x_train, y_train):
        fp1.write('%s #%i# %s\n' %
                  (' '.join(map(str, x)), len(x) + 1, ' '.join(map(str, y))))
    fp1.close()
    # Dump data for plot:
    fp1 = file('/tmp/smpl_test.dat', 'w')
    for x, y in zip(x_test, y_test):
        fp1.write('%s #%i# %s\n' %
                  (' '.join(map(str, x)), len(x) + 1, ' '.join(map(str, y))))
    fp1.close()

    # Prepare multi-layer perceptron model
    model = FunctionSet(l1=F.Linear(10, n_units),
                        l2=F.Linear(n_units, n_units),
                        l3=F.Linear(n_units, 1))
    if args.gpu >= 0:
        cuda.init(args.gpu)
        model.to_gpu()

    # Neural net architecture
    def forward(x_data, y_data, train=True):
        x, t = Variable(x_data), Variable(y_data)
        h1 = F.dropout(F.relu(model.l1(x)), train=train)
        h2 = F.dropout(F.relu(model.l2(h1)), train=train)
        y = model.l3(h2)
        return F.mean_squared_error(y, t), y

    # Setup optimizer
    optimizer = optimizers.AdaDelta(rho=0.9)
    optimizer.setup(model.collect_parameters())

    # Learning loop
    for epoch in xrange(1, n_epoch + 1):
        print 'epoch', epoch

        # training
        perm = np.random.permutation(N)
        sum_loss = 0

        for i in xrange(0, N, batchsize):
            x_batch = x_train[perm[i:i + batchsize]]
            y_batch = y_train[perm[i:i + batchsize]]
            if args.gpu >= 0:
                x_batch = cuda.to_gpu(x_batch)
                y_batch = cuda.to_gpu(y_batch)

            optimizer.zero_grads()
            loss, pred = forward(x_batch, y_batch)
            loss.backward()
            optimizer.update()

            sum_loss += float(cuda.to_cpu(loss.data)) * batchsize

        print 'train mean loss={}'.format(sum_loss / N)
        '''
    # testing per batch
    sum_loss     = 0
    preds = []
    for i in xrange(0, N_test, batchsize):
      x_batch = x_test[i:i+batchsize]
      y_batch = y_test[i:i+batchsize]
      if args.gpu >= 0:
        x_batch = cuda.to_gpu(x_batch)
        y_batch = cuda.to_gpu(y_batch)

      loss, pred = forward(x_batch, y_batch, train=False)
      preds.extend(cuda.to_cpu(pred.data))
      sum_loss     += float(cuda.to_cpu(loss.data)) * batchsize
    pearson = np.corrcoef(np.asarray(preds).reshape(len(preds),), np.asarray(y_test).reshape(len(preds),))
    #'''

        #'''
        # testing all data
        preds = []
        x_batch = x_test[:]
        y_batch = y_test[:]
        if args.gpu >= 0:
            x_batch = cuda.to_gpu(x_batch)
            y_batch = cuda.to_gpu(y_batch)
        loss, pred = forward(x_batch, y_batch, train=False)
        preds = cuda.to_cpu(pred.data)
        sum_loss = float(cuda.to_cpu(loss.data)) * len(y_test)
        pearson = np.corrcoef(
            np.asarray(preds).reshape(len(preds), ),
            np.asarray(y_test).reshape(len(preds), ))
        #'''

        print 'test  mean loss={}, corrcoef={}'.format(sum_loss / N_test,
                                                       pearson[0][1])

        # Dump data for plot:
        fp1 = file('/tmp/nn_test%04i.dat' % epoch, 'w')
        for x, y in zip(x_test, preds):
            fp1.write(
                '%s #%i# %s\n' %
                (' '.join(map(str, x)), len(x) + 1, ' '.join(map(str, y))))
        fp1.close()
Example #32
0
def getNeuralCode(directory, layer="conv5_1", gpu=-1):

  model = "illust2vec_ver200.caffemodel"
  #use illust2vec_ver200
  print('illust2vec_ver200 is being loaded!')
  #calculate load time
  timeMemory = time()
  func = caffe.CaffeFunction(model)
  print('illust2vec_ver200 was loaded!')
  print('It took ' + str(int(time() - timeMemory)) + " secondes")
  
  #gpu mode
  if gpu >= 0:
    cuda.init(gpu)
    func.to_gpu()
  
  in_size = 224
  # Constant mean over spatial pixels
  mean_image = np.load("illust2vec_image_mean.npy")
  print("neural code is extraced from layer " + layer)
  def neuralCode(x): #推測関数
    y, = func(inputs={'data': x}, outputs=[layer],
              train=False)
    return y.data[0]
  
  cropwidth = 256 - in_size
  start = cropwidth // 2
  stop = start + in_size
  mean_image = mean_image[:, start:stop, start:stop].copy()
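  # cropwidth = 256 - 224 = 32, so the 256x256 mean image is center-cropped
  # to the 224x224 network input (start = 16, stop = 240)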
  target_shape = (256, 256)
  output_side_length=256
  
  numPic = 0
  #count pictures
  for folderPath in directory:
    #search pictures
    picturePath = [picture for picture in os.listdir(folderPath)
                   if re.findall(r"\.png$|\.jpg$|\.JPG$|\.PNG$|\.JPEG$",picture)]
    print("you have " + str(len(picturePath)) + " pictures in " + folderPath)
    numPic = numPic + len(picturePath)
  
  print("you have totally " + str(numPic) + " pictures")
  count = 0
  answer = {}
  for folderPath in directory:
    #search pictures
    picturePath = [picture for picture in os.listdir(folderPath)
                   if re.findall(r"\.png$|\.jpg$|\.JPG$|\.PNG$|\.JPEG$",picture)]
  
    for picture in picturePath:
      timeMemory = time()
      count = count + 1
      #load image file
      image = cv2.imread(folderPath + "/" + picture)
      #resize and crop
      height, width, depth = image.shape
      new_height = output_side_length
      new_width = output_side_length
      if height > width:
          new_height = output_side_length * height / width
      else:
          new_width = output_side_length * width / height
      resized_img = cv2.resize(image, (new_width, new_height))
      height_offset = (new_height - output_side_length) / 2
      width_offset = (new_width - output_side_length) / 2
      image= resized_img[height_offset:height_offset + output_side_length,
                         width_offset:width_offset + output_side_length]
  
      #subtract mean image
      image = image.transpose(2, 0, 1)
      image = image[:, start:stop, start:stop].astype(np.float32)
      image -= mean_image
  
      x_batch = np.ndarray(
          (1, 3, in_size,in_size), dtype=np.float32)
      x_batch[0]=image
  
      if gpu >= 0:
          x_batch=cuda.to_gpu(x_batch)
  
      #get neural code
      x = chainer.Variable(x_batch, volatile=True)
      answer[folderPath + "/" +  picture] = neuralCode(x)
      sen = overwrite.bar(count,numPic)
      overwrite.overwrite(sen)
  return answer
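
A usage sketch for the function above (the folder name is illustrative, not from the original):

# collect conv5_1 codes for every image in one folder, on CPU
codes = getNeuralCode(["./illust_images"], layer="conv5_1", gpu=-1)
for path in codes:
    print(path + " -> " + str(codes[path].shape))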
"""
CNNsのモデルを実行するスクリプト
今回使用するデータセットはMNIST
"""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from mnist_classification import CNN
from chainer import cuda
import numpy as np
from sklearn.datasets import fetch_mldata

# use the GPU
cuda.init(0)

print('load MNIST digit dataset')
mnist = fetch_mldata('MNIST original', data_home=".")
mnist.data = mnist.data.astype(np.float32)
mnist.data /= 255
mnist.target = mnist.target.astype(np.int32)

output_dim = 10

print('create CNNs model')
cnn = CNN(data=mnist.data,
          target=mnist.target,
          gpu=0,
          output_dim=output_dim)
Example #34
0
import unittest

import numpy

from chainer import cuda
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer import utils

if cuda.available:
    cuda.init()


class TestWalkerAlias(unittest.TestCase):
    def setUp(self):
        self.ps = [5, 3, 4, 1, 2]
        self.sampler = utils.WalkerAlias(self.ps)

    def check_sample(self):
        counts = numpy.zeros(len(self.ps), numpy.float32)
        for _ in range(1000):
            vs = self.sampler.sample((4, 3))
            numpy.add.at(counts, cuda.to_cpu(vs), 1)
        counts /= (1000 * 12)
        counts *= sum(self.ps)
        gradient_check.assert_allclose(self.ps, counts, atol=0.1, rtol=0.1)

    def test_sample_cpu(self):
        self.check_sample()
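
utils.WalkerAlias implements Walker's alias method, which draws from a discrete distribution in O(1) per sample after an O(n) table build. A minimal NumPy sketch of the idea (illustrative, not chainer's implementation):

import numpy as np

def build_alias(ps):
    # Walker's alias method: O(n) table construction, O(1) per sample.
    p = np.asarray(ps, dtype=np.float64)
    p = p / p.sum() * len(p)                     # scale so the average bucket mass is 1
    prob = np.zeros(len(p))
    alias = np.zeros(len(p), dtype=np.int64)
    small = [i for i, v in enumerate(p) if v < 1.0]
    large = [i for i, v in enumerate(p) if v >= 1.0]
    while small and large:
        s, l = small.pop(), large.pop()
        prob[s], alias[s] = p[s], l              # bucket s: keep s w.p. p[s], else take alias l
        p[l] -= 1.0 - p[s]                       # the mass that filled bucket s came from l
        (small if p[l] < 1.0 else large).append(l)
    for i in small + large:                      # leftovers are (numerically) full buckets
        prob[i] = 1.0
    return prob, alias

def sample(prob, alias, shape):
    i = np.random.randint(0, len(prob), size=shape)
    return np.where(np.random.random(shape) < prob[i], i, alias[i])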
Example #35
0
    #to check the current N and M, run the following
    #[x.data.shape for x in [conv1_1F, conv2_1F, conv3_1F, conv4_1F, conv5_1F]]
    L_style = (Fu.mean_squared_error(conv1_1G, conv1_1A)/(4*64*64*50176*50176)
               + Fu.mean_squared_error(conv2_1G, conv2_1A)/(4*128*128*12544*12544)
               + Fu.mean_squared_error(conv3_1G, conv3_1A)/(4*256*256*3136*3136)
               + Fu.mean_squared_error(conv4_1G, conv4_1A)/(4*512*512*784*784)
               )/4  # equal weighting of the per-layer terms E_l
    #
    ratio = 0.001  #alpha/beta
    loss = ratio * L_content + L_style
    return loss
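
The normalization constants above follow the style loss of Gatys et al.: each term is E_l = (1 / (4 * N_l^2 * M_l^2)) * sum((G_l - A_l)^2), where N_l is the number of filters and M_l the number of spatial positions in layer l (e.g. conv1_1: N = 64, M = 224*224 = 50176). The Gram matrices G and A that the mean_squared_error terms compare would be computed along these lines (a NumPy sketch, not the chainer code used above):

import numpy as np

def gram_matrix(feat):
    # feat: one conv layer's feature map, shape (N_l, H, W)
    n, h, w = feat.shape
    f = feat.reshape(n, h * w)    # N_l x M_l
    return f.dot(f.T)             # N_l x N_l Gram matrix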


#main

cuda.init(3)  # the argument is the GPU ID!!

p = readimage('satoshi_fb.png')  #read a content image
a = readimage('style.png')  #read a style image

#download a pretrained caffe model from here: https://gist.github.com/ksimonyan/3785162f95cd2d5fee77#file-readme-md
func = caffe.CaffeFunction(
    'VGG_ILSVRC_19_layers.caffemodel')  #it takes some time.
func.to_gpu()

x_data = np.random.randn(1, 3, 224, 224).astype(np.float32)
x = Variable(cuda.to_gpu(x_data))

x = readimage('imge230.png')  # if you want to start from an existing image

savedir = "satoshi_fb_adam"