Example 1
def main(config):
    prepare_dirs_and_logger(config)
    save_config(config)

    if config.is_train:
        from trainer import Trainer
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager

        batch_manager = BatchManager(config)
        trainer = Trainer(config, batch_manager)
        trainer.train()
    else:
        from tester import Tester
        if config.dataset == 'line':
            from data_line import BatchManager
        elif config.dataset == 'ch':
            from data_ch import BatchManager
        elif config.dataset == 'kanji':
            from data_kanji import BatchManager
        elif config.dataset == 'baseball' or\
             config.dataset == 'cat':
            from data_qdraw import BatchManager
        
        batch_manager = BatchManager(config)
        tester = Tester(config, batch_manager)
        tester.test()
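
A note on the dispatch in Example 1: the dataset-to-module mapping is duplicated in both branches, so a table-driven variant is a natural refactor. A minimal sketch, assuming the same module and class names as above (load_batch_manager itself is illustrative, not part of the original code):

import importlib

# dataset name -> module that provides a BatchManager class
DATA_MODULES = {'line': 'data_line', 'ch': 'data_ch', 'kanji': 'data_kanji',
                'baseball': 'data_qdraw', 'cat': 'data_qdraw'}

def load_batch_manager(config):
    module = importlib.import_module(DATA_MODULES[config.dataset])
    return module.BatchManager(config)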
Example 2
    def test_trainingOnSentences(self):

        c = Corpus(self.txt)
        rnn = RNN(100, c.V, 50)

        trainer = Trainer(c, rnn, nepochs=50, alpha=1.8)
        trainer.train()
Example 3
class NutTrainer(object):

    def __init__(self):
        self.collection = Collection('./data/')
        self.collection.importFromGProtocolBuffer('gProtoBuf')

    def command(self, argv):
        try:
            opts, args = getopt.getopt(argv, "hmt:", ["help", "topic="])
        except getopt.GetoptError as err:
            print(err)
            self.usage()
            sys.exit(2)
        for opt, arg in opts:
            if opt == "-m":
                self.modify = True
            elif opt in ("-t", "--topic"):
                if hasattr(self, 'modify'):
                    self.collection.modifyCollection(arg)
                else:
                    self.trainer = Trainer()
                    if arg in self.collection.topics:
                        self.trainer.start(self.collection.topics[arg])
                    else:
                        print("Error")
            elif opt in ("-h", "--help"):
                self.usage()
                sys.exit()
            else:
                assert False, "unhandled option"
        if len(args) == 1 and hasattr(self,'topic'):
            print(args)

    def usage(self):
        print('remtrainer.py -t <topic>')
Example 4
class Test_Trainer(unittest.TestCase):

    def setUp(self):
        self.class_number = 21
        self.input_shape = (300, 300, 3)
        self.model = SSD300v2(self.input_shape, num_classes=self.class_number)

    def test_train(self):
        base_lr = 3e-4
        self.trainer = Trainer(class_number=self.class_number,
                               input_shape=self.input_shape,
                               priors_file='prior_boxes_ssd300.pkl',
                               train_file='VOC2007_test.pkl',
                               path_prefix='./VOCdevkit/VOC2007/JPEGImages/',
                               model=self.model,
                               weight_file='weights_SSD300.hdf5',
                               freeze=('input_1', 'conv1_1', 'conv1_2', 'pool1',
                                       'conv2_1', 'conv2_2', 'pool2',
                                       'conv3_1', 'conv3_2', 'conv3_3', 'pool3'),
                               save_weight_file='./checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5',  # noqa
                               optim=keras.optimizers.Adam(lr=base_lr),
                               )
        self.trainer.train(nb_epoch=1)

    def tearDown(self):
        try:
            subprocess.check_call("rm -rf " + self.trainer.log_dir, shell=True)
        except subprocess.CalledProcessError as cpe:
            print(str(cpe))
Example 5
def main(_):
  prepare_dirs_and_logger(config)

  if not config.task.lower().startswith('tsp'):
    raise Exception("[!] Task should start with TSP")

  if config.max_enc_length is None:
    config.max_enc_length = config.max_data_length
  if config.max_dec_length is None:
    config.max_dec_length = config.max_data_length

  rng = np.random.RandomState(config.random_seed)
  tf.set_random_seed(config.random_seed)

  trainer = Trainer(config, rng)
  save_config(config.model_dir, config)

  if config.is_train:
    trainer.train()
  else:
    if not config.load_path:
      raise Exception("[!] You should specify `load_path` to load a pretrained model")
    trainer.test()

  tf.logging.info("Run finished.")
Example 6
def plot_stats(X,Y,model,costs):
	#two plots, the decision fcn and points and the cost over time
	y_onehot = Trainer.class_to_onehot(Y)
	f,(p1,p2) = plot.subplots(1,2)
	p2.plot(range(len(costs)),costs)
	p2.set_title("Cost over time")
	
	#plot points/centroids/decision fcn
	cls_ct = y_onehot.shape[1]
	y_cls = Trainer.onehot_to_int(y_onehot)
	colors = get_cmap("RdYlGn")(np.linspace(0,1,cls_ct))
	
	#model_cents = model.c.get_value()
	#p1.scatter(model_cents[:,0], model_cents[:,1], c='black', s=81)
	for curclass,curcolor in zip(range(cls_ct),colors):
		inds = [i for i,yi in enumerate(y_cls) if yi==curclass]
		p1.scatter(X[inds,0], X[inds,1], c=curcolor)
		
	nx,ny = 200, 200
	x = np.linspace(X[:,0].min()-1,X[:,0].max()+1,nx)
	y = np.linspace(X[:,1].min()-1,X[:,1].max()+1,ny)
	xv,yv = np.meshgrid(x,y)
	
	Z = np.c_[xv.ravel(), yv.ravel()]
	Zp = Trainer.onehot_to_int(np.array(model.probability(Z)))
	Zp = Zp.reshape(xv.shape)
	p1.imshow(Zp, interpolation='nearest', 
				extent=(xv.min(), xv.max(), yv.min(), yv.max()),
				origin = 'lower', cmap=get_cmap("Set1"))
	
	p1.set_title("Decision boundaries and centroids")
	f.tight_layout()
	plot.show()					
Example 7
def train(args):
    debug = args.debug
    logger.info(
        "Start training in {} model".format('debug' if debug else 'normal'))
    num_bins, config_dict = parse_yaml(args.config)
    reader_conf = config_dict["spectrogram_reader"]
    loader_conf = config_dict["dataloader"]
    dcnnet_conf = config_dict["dcnet"]

    batch_size = loader_conf["batch_size"]
    logger.info(
        "Training in {}".format("per utterance" if batch_size == 1 else
                                '{} utterance per batch'.format(batch_size)))

    train_loader = uttloader(
        config_dict["train_scp_conf"]
        if not debug else config_dict["debug_scp_conf"],
        reader_conf,
        loader_conf,
        train=True)
    valid_loader = uttloader(
        config_dict["valid_scp_conf"]
        if not debug else config_dict["debug_scp_conf"],
        reader_conf,
        loader_conf,
        train=False)
    checkpoint = config_dict["trainer"]["checkpoint"]
    logger.info("Training for {} epoches -> {}...".format(
        args.num_epoches, "default checkpoint"
        if checkpoint is None else checkpoint))

    dcnet = DCNet(num_bins, **dcnnet_conf)
    trainer = Trainer(dcnet, **config_dict["trainer"])
    trainer.run(train_loader, valid_loader, num_epoches=args.num_epoches)
Example 8
    def __init__(self, module, dataset=None, learningrate=0.01, lrdecay=1.0,
                 momentum=0., verbose=False, batchlearning=False,
                 weightdecay=0.):
        """Create a BackpropTrainer to train the specified `module` on the
        specified `dataset`.

        The learning rate gives the ratio of which parameters are changed into
        the direction of the gradient. The learning rate decreases by `lrdecay`,
        which is used to to multiply the learning rate after each training
        step. The parameters are also adjusted with respect to `momentum`, which
        is the ratio by which the gradient of the last timestep is used.

        If `batchlearning` is set, the parameters are updated only at the end of
        each epoch. Default is False.

        `weightdecay` corresponds to the weightdecay rate, where 0 is no weight
        decay at all.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.verbose = verbose
        self.batchlearning = batchlearning
        self.weightdecay = weightdecay
        self.epoch = 0
        self.totalepochs = 0
        # set up gradient descender
        self.descent = GradientDescent()
        self.descent.alpha = learningrate
        self.descent.momentum = momentum
        self.descent.alphadecay = lrdecay
        self.descent.init(module.params)
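
For context, here is a minimal usage sketch of this constructor, assuming the standard PyBrain helpers (buildNetwork, SupervisedDataSet) and a toy XOR dataset:

from pybrain.tools.shortcuts import buildNetwork
from pybrain.datasets import SupervisedDataSet
from pybrain.supervised.trainers import BackpropTrainer

net = buildNetwork(2, 3, 1)  # 2 inputs, 3 hidden units, 1 output
ds = SupervisedDataSet(2, 1)
for sample, target in [((0, 0), (0,)), ((0, 1), (1,)),
                       ((1, 0), (1,)), ((1, 1), (0,))]:
    ds.addSample(sample, target)

trainer = BackpropTrainer(net, ds, learningrate=0.01, lrdecay=1.0,
                          momentum=0.1, weightdecay=0.0)
for _ in range(100):
    err = trainer.train()  # one epoch; returns the epoch's average error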
Example 9
def pre_train(data, das, nep = 600):
    x = data
    for ec, dc in das:
        dc.x(ec.y)
        tr = Trainer(ec.x, dc.y, src = x, dst = x, lrt = 0.005)
        tr.tune(nep, npt = 10)
        ec.x(x)
        x = ec.y().eval()
    del x
Example 10
    def train(self,
              training_set_x,
              training_set_y,
              hyper_parameters,
              regularization_methods,
              activation_method,
              top=50,
              print_verbose=False,
              validation_set_x=None,
              validation_set_y=None):

        #need to convert the input into tensor variable
        training_set_x = shared(training_set_x, 'training_set_x', borrow=True)
        training_set_y = shared(training_set_y, 'training_set_y', borrow=True)

        symmetric_double_encoder = StackedDoubleEncoder(hidden_layers=[],
                                                        numpy_range=self._random_range,
                                                        input_size_x=training_set_x.get_value(borrow=True).shape[1],
                                                        input_size_y=training_set_y.get_value(borrow=True).shape[1],
                                                        batch_size=hyper_parameters.batch_size,
                                                        activation_method=activation_method)

        #In this phase we train the stacked encoder one layer at a time
        #once a layer was added, weights not belonging to the new layer are
        #not changed
        for layer_size in hyper_parameters.layer_sizes:

            self._add_cross_encoder_layer(layer_size,
                                          symmetric_double_encoder,
                                          hyper_parameters.method_in,
                                          hyper_parameters.method_out)


        params = []
        for layer in symmetric_double_encoder:
            params.append(layer.Wx)
            params.append(layer.bias_x)
            params.append(layer.bias_y)

        params.append(symmetric_double_encoder[0].bias_x_prime)
        params.append(symmetric_double_encoder[-1].bias_y_prime)
        params.append(symmetric_double_encoder[-1].Wy)

        Trainer.train(train_set_x=training_set_x,
                      train_set_y=training_set_y,
                      hyper_parameters=hyper_parameters,
                      symmetric_double_encoder=symmetric_double_encoder,
                      params=params,
                      regularization_methods=regularization_methods,
                      print_verbose=print_verbose,
                      validation_set_x=validation_set_x,
                      validation_set_y=validation_set_y)

        return symmetric_double_encoder
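
The comments in Example 10 describe greedy layer-wise training: each new layer is fitted while previously trained weights stay frozen. Below is a self-contained sketch of that scheme in PyTorch; every name is illustrative and unrelated to the StackedDoubleEncoder API:

import torch
import torch.nn as nn
import torch.nn.functional as F

def train_next_layer(encoders, in_dim, out_dim, data, epochs=5, lr=1e-3):
    # freeze every encoder trained so far
    for enc in encoders:
        for p in enc.parameters():
            p.requires_grad = False
    # the codes produced by the frozen stack become the new layer's input
    with torch.no_grad():
        h = data
        for enc in encoders:
            h = torch.tanh(enc(h))
    enc, dec = nn.Linear(in_dim, out_dim), nn.Linear(out_dim, in_dim)
    opt = torch.optim.Adam(list(enc.parameters()) + list(dec.parameters()), lr=lr)
    for _ in range(epochs):
        opt.zero_grad()
        loss = F.mse_loss(dec(torch.tanh(enc(h))), h)  # reconstruct the codes
        loss.backward()
        opt.step()
    encoders.append(enc)

encoders = []
data = torch.randn(64, 32)
for in_dim, out_dim in [(32, 16), (16, 8)]:
    train_next_layer(encoders, in_dim, out_dim, data)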
Example 11
def run_customization(image_loader, feature_extractor):
    logging.info("Start customize svm")
    logging.info("Generate sample")
    data = get_class_data(params.first_class_params, params.sample_size/2) + get_class_data(params.second_class_params, params.sample_size/2)
    random.shuffle(data)
    trainer = Trainer(image_loader, feature_extractor)
    c_range = [10 ** i for i in xrange(-5, 10)]
    gamma_range = [10 ** i for i in xrange(-5, 5)]
    results = trainer.svm_params_customization(data, params.svm_params, c_range, gamma_range)
    return results
Example 12
def run_cross_validation(image_loader, feature_extractor):
    logging.info("Start 5-fold cross validation")
    logging.info("For cat and dogs")
    logging.info(params.svm_params)
    logging.info("Generate sample")
    data = get_class_data(params.first_class_params, params.sample_size / 2) + get_class_data(
        params.second_class_params, params.sample_size / 2)
    random.shuffle(data)
    trainer = Trainer(image_loader, feature_extractor)
    return trainer.k_fold_cross_validation(5, data, params.svm_params, params.labels)
Example 13
def theano_perf(model, Xnew, ynew):
	#Xnew,ynew = gaussian_data_gen()
	#Xnew,ynew = exotic_data_gen()
	ynew_onehot = Trainer.class_to_onehot(ynew)
	yhat = np.array(model.predict(Xnew))
	yhat = Trainer.onehot_to_int(yhat)
	errs = 0
	for yh,t in zip(yhat,ynew):
		errs += 1 if yh != t else 0
	err_rate = 100*float(errs)/ynew.shape[0]
	print 'Accuracy:',100-err_rate,'Errors:',errs
Example 14
def train(*args):
    """
    trains the model based on files in the input folder
    """
    input_folder = args[0][0]
    if not input_folder:
        print "Must specify a directory of models"
        return

    trainer = Trainer(input_folder, options.output)
    trainer.train()
Example 15
def fine_tune(data, das, nep = 600):
    x = data

    ## re-wire encoders and decoders
    ecs, dcs = zip(*das)
    sda = list(ecs) + list(reversed(dcs))
    for i, j in zip(sda[:-1], sda[1:]):
        j.x(i.y) # lower output -> higher input

    tr = Trainer(sda[0].x, sda[-1].y, src = data, dst = data, lrt = 0.0005)
    tr.tune(nep, npt = 10)
    return tr
Example 16
class Recognizer():
    def __init__(self):
        self.trainer = None

    def train(self, dataFileName):
        self.trainer = Trainer(dataFileName)
        self.trainer.trainAll()
        self.trainer.dump()

    def load(self):
        trainer = Trainer()
        trainer.load()
        self.trainer = trainer

    def classify(self, X, label1, label2):
        '''
        Given input vector X, predict its class between label1 and label2.
        '''
        positiveLabel = min(label1, label2)
        negativeLabel = max(label1, label2)
        svm = self.trainer.getSvmInstance(positiveLabel, negativeLabel)
        y = svm.predict(X)
        if y == 1:
            return positiveLabel
        elif y == -1:
            return negativeLabel
        else:
            raise ValueError('unexpected SVM prediction: %r' % y)

    def predict(self, X):
        count_dict = {}  # {label: times}
        for i in range(10):
            for j in range(i + 1, 10):
                label = self.classify(X, i, j)
                if label in count_dict:
                    count_dict[label] += 1
                else:
                    count_dict[label] = 1

        maxTime = -1
        maxLabel = -1
        for label in count_dict:
            time = count_dict[label]
            if time > maxTime:
                maxTime = time
                maxLabel = label
        return maxLabel
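
The predict method above is one-vs-one voting: a binary SVM is consulted for every pair of labels and the label with the most wins is returned. The same rule, distilled into a standalone sketch where classify_pair stands in for the pairwise SVMs:

from collections import Counter
from itertools import combinations

def one_vs_one_predict(x, classify_pair, labels=range(10)):
    # classify_pair(x, i, j) must return either i or j
    votes = Counter(classify_pair(x, i, j) for i, j in combinations(labels, 2))
    return votes.most_common(1)[0][0]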
Example 17
    def setUp(self):
        from trainer import Trainer
        from database import TrainingDataBase,WordDataBase,WordRecord

        self.tr_empty = Trainer(WordDataBase(), TrainingDataBase())

        wdb = WordDataBase()
        wdb.addWord(WordRecord("aaa"))
        wdb.addWord(WordRecord("bbb"))
        wdb.addWord(WordRecord("ccc"))
        tdb = TrainingDataBase()
        tdb.add([WordRecord("aaa"),WordRecord("bbb"),WordRecord("ccc")],[WordRecord("ccc"),WordRecord("bbb")])
        tdb.add([WordRecord("aaa"),WordRecord("ccc")],[WordRecord("ccc"),WordRecord("ccc")])

        self.tr_notempty = Trainer(wdb,tdb)
Example 18
    def __init__(self, vc, opts):
        self.vc = vc
        ret, im = vc.read()
        self.numGestures = opts.num
        self.imHeight, self.imWidth, self.channels = im.shape
        self.trainer = Trainer(numGestures=opts.num,
                               numFramesPerGesture=opts.frames,
                               minDescriptorsPerFrame=opts.desc,
                               numWords=opts.words,
                               descType=opts.type,
                               kernel=opts.kernel,
                               numIter=opts.iter,
                               parent=self)
        self.tester = Tester(numGestures=opts.num,
                             minDescriptorsPerFrame=opts.desc,
                             numWords=opts.words,
                             descType=opts.type,
                             numPredictions=7,
                             parent=self)
Example 19
    def test_trainSvm(self):
        return  # test currently disabled
        sample_file = os.path.join('..', 'data', 'sample')
        trainer = Trainer(sample_file)
        t_svm = trainer._trainSvm(5, 8)

        dataSet = DigitDataSet()
        dataSet.load(sample_file).map(5, 8)
        svm = SVM()
        svm.train(dataSet, 2, 0.0001)
        m,n = dataSet.shape()
        for i in range(m):
            X = dataSet.getData(i)
            t_y = t_svm.predict(X)
            y = svm.predict(X)
            self.assertTrue(t_y == y)
Example 20
    def command(self, argv):
        try:
            opts, args = getopt.getopt(argv, "hmt:", ["help", "topic="])
        except getopt.GetoptError as err:
            print(err)
            self.usage()
            sys.exit(2)
        for opt, arg in opts:
            if opt == "-m":
                self.modify = True
            elif opt in ("-t", "--topic"):
                if hasattr(self, 'modify'):
                    self.collection.modifyCollection(arg)
                else:
                    self.trainer = Trainer()
                    if arg in self.collection.topics:
                        self.trainer.start(self.collection.topics[arg])
                    else:
                        print("Error")
            elif opt in ("-h", "--help"):
                self.usage()
                sys.exit()
            else:
                assert False, "unhandled option"
        if len(args) == 1 and hasattr(self, 'topic'):
            print(args)
Example 21
def train_and_test(image_loader, feature_extractor):
    """
    Simple implementation of train and test function
    :param image_loader:
    :param feature_extractor:
    """
    first_class_train_data, first_class_test_data = get_train_and_test_data(params.first_class_params)
    second_class_train_data, second_class_test_data = get_train_and_test_data(params.second_class_params)

    train_data = list(first_class_train_data) + list(second_class_train_data)
    random.shuffle(train_data)
    trainer = Trainer(image_loader, feature_extractor)
    solve_container = trainer.train(train_data, params.svm_params)

    test_data = list(first_class_test_data) + list(second_class_test_data)
    tester = Tester(image_loader, solve_container)
    return tester.test(test_data)
Example 22
    def train(self):
        """
            Trains Jarvis brain

            Input:
            Nothing

            Returns:
            Nothing
        """
        from trainer import Trainer

        if self._word_db is None: raise JarvisException("Don't have dictionary.")
        if self._traning_db is None: raise JarvisException("Don't have training database.")

        trainer = Trainer(self._word_db, self._traning_db)
        trainer.train(self._brain)
Example 23
def get_train_and_test_data(class_params):
    """
    Method of generation names of test images for class
    :param class_params:
    :return: tuple of list of train and test data
    """
    sample = get_class_data(class_params, class_params["train_count"] + class_params["test_count"])
    return Trainer.split_data(sample, [class_params["train_count"], class_params["test_count"]])
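
Trainer.split_data itself is not shown on this page; a plausible implementation, assuming it slices the sample into consecutive chunks of the requested sizes:

def split_data(sample, counts):
    chunks, start = [], 0
    for count in counts:
        chunks.append(sample[start:start + count])
        start += count
    return tuple(chunks)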
Example 24
    def __init__(self, module, ds_train=None, ds_val=None, gtol=1e-05,
                 norm=inf, verbose=False, **kwargs):
        """
        Create a BFGSTrainer to train the specified `module` on the
        specified `dataset`.
        """
        Trainer.__init__(self, module)
        self.setData(ds_train)
        self.ds_val = ds_val
        self.verbose = verbose
        self.epoch = 0
        self.totalepochs = 0
        self.train_errors = []
        self.test_errors = []
        self.optimal_params = None
        self.optimal_epoch = 0

        self.module = module
Example 25
    def test_getSvm(self):
        return  # test currently disabled
        trainer = Trainer()
        trainer.load()
        svm1 = trainer.getSvmInstance(6, 7)

        dataSet = DigitDataSet()
        dataSet.load(os.path.join('..', 'data', 'sample')).map(6, 7)

        svm2 = SVM()
        svm2.train(dataSet, 2, 0.0001)

        m,n = dataSet.shape()
        for i in range(m):
            X = dataSet.getData(i)
            y1 = svm1.predict(X)
            y2 = svm2.predict(X)
            self.assertTrue(y1 == y2)
Example 26
def fine_tune(stk, dat, rate = 0.01, epoch = 50):
    """
    fine-tune the whole stack of autoencoders
    """
    import time
    from trainer import Trainer
    tm = time.clock()
    
    print 'fine-tune:', stk.dim; sys.stdout.flush()
    x = dat
    dpt = len(stk)

    ## training should be slower when there are more parameters
    t = Trainer(stk, src = x, dst = x, lrt = rate / (2 * dpt))

    ## fine tune requires more steps when network goes deeper
    t.tune(epoch * 2 * dpt, epoch)
    tm = time.clock() - tm
    print 'ran for {:.2f}m\n'.format(tm/60.);  sys.stdout.flush()
Example 27
def pre_train(stk, dat, rate = 0.01, epoch = 1000):
    """
    pre-train each auto encoder in the stack
    """
    import time
    from trainer import Trainer
    tm = time.clock()

    print 'pre-train:', stk.dim; sys.stdout.flush()
    x = dat
    r = rate
    for ae in stk.sa:
        print ae.dim
        t = Trainer(ae, src = x, dst = x, lrt = r)
        t.tune(epoch, 20)
        x = ae.ec(x).eval()
        r = r * 2.0
    tm = time.clock() - tm
    print 'ran for {:.2f}m\n'.format(tm/60.); sys.stdout.flush()
Example 28
    def __init__(self, module, dataset, totalIterations=100,
                 xPrecision=finfo(float).eps, fPrecision=finfo(float).eps,
                 init_scg=True, **kwargs):
        """Create a SCGTrainer to train the specified `module` on the
        specified `dataset`.
        """
        Trainer.__init__(self, module)
        self.setData(dataset)
        self.input_sequences = self.ds.getField('input')
        self.epoch = 0
        self.totalepochs = 0
        self.module = module
        #self.tmp_module = module.copy()
        if init_scg:
            self.scg = SCG(self.module.params, self.f, self.df, self,
                           totalIterations, xPrecision, fPrecision,
                           evalFunc=lambda x: str(x / self.ds.getLength()))
        else:
            print "Warning: SCG trainer not initialized!"
Example 29
class GeneralizedBoltzmann(GeneralizedModel):
    attrs_ = ['trainfn', 'n', 'batch_size', 'epochs', 'learn_rate', 'beta', 'momentum', 'verbose']
    
    def __init__(self, trainfn='cdn', n=1, batch_size=10, epochs=1, learn_rate=0.1, 
                 beta=0.0001, momentum=0., verbose=False):
        self.trainfn = trainfn
        self.epochs = epochs
        self.n = n
        self.learn_rate = learn_rate
        self.beta = beta
        self.batch_size = batch_size
        self.momentum = momentum
        self.trainer = Trainer()
        self.verbose = verbose
        
    def gibbs_hvh(self, h, mf=False, **args):
        v_samples = self.propdown(h, **args)
        v = v_samples[0][1] if mf else v_samples[0][0]
        h_samples = self.propup(v, **args)
        return v_samples, h_samples
    
    def gibbs_vhv(self, v, mf=False, **args):
        h_samples = self.propup(v, **args)
        h = h_samples[-1][1] if mf else h_samples[-1][0]
        v_samples = self.propdown(h, **args)
        return v_samples, h_samples
    
    def cost(self, v):
        if len(np.shape(v)) == 1: v.shape = (1,len(v))
        use_fw = self.trainfn == 'fpcd'
        use_persist = use_fw or self.trainfn == 'pcd'
        num_points = v.shape[0]
        # positive phase
        pos_h_samples = self.propup(v)
        # negative phase
        nh0 = self.p[:num_points] if use_persist else pos_h_samples[-1][0]
        for i in range(self.n):
            neg_v_samples, neg_h_samples = self.gibbs_hvh(nh0, fw=use_fw)
            nh0 = neg_h_samples[-1][0]
        # compute gradients
        grads = self.grad(v, pos_h_samples, neg_v_samples, neg_h_samples)
        self.p[:num_points] = nh0
        # compute reconstruction error
        if self.trainfn=='cdn':
            cost = np.sum(np.square(v - neg_v_samples[0][1])) / self.batch_size
        else:
            cost = np.sum(np.square(v - self.gibbs_vhv(v)[0][0][1])) / self.batch_size
        return cost, grads
        
    def train(self, data, max_iter=1):
        args = { 'epochs': self.epochs,
                 'batch_size': self.batch_size,
                 'max_iter': max_iter,
                 'verbose': self.verbose }
        return self.trainer.train(self, data, **args)
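
A hypothetical usage sketch for this class, assuming a concrete subclass (the name below is made up) supplies propup, propdown, grad, and the persistent chain state p:

rbm = SomeConcreteBoltzmann(trainfn='pcd', n=1, batch_size=10,
                            epochs=5, verbose=True)
final_cost = rbm.train(data, max_iter=100)  # delegates to Trainer.train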
Example 30
    def __init__(self, trainfn='cdn', n=1, batch_size=10, epochs=1, learn_rate=0.1,
                 beta=0.0001, momentum=0., verbose=False):
        self.trainfn = trainfn
        self.epochs = epochs
        self.n = n
        self.learn_rate = learn_rate
        self.beta = beta
        self.batch_size = batch_size
        self.momentum = momentum
        self.trainer = Trainer()
        self.verbose = verbose
Example 31
    eval_split_size=10,
    print_step=25,
    print_eval=False,
    mixed_precision=False,
    lr_gen=1e-4,
    lr_disc=1e-4,
    data_path=os.path.join(output_path, "../thorsten-de/wavs/"),
    output_path=output_path,
)

# download dataset if not already present
if not os.path.exists(config.data_path):
    print("Downloading dataset")
    download_path = os.path.abspath(os.path.join(os.path.abspath(config.data_path), "../../"))
    download_thorsten_de(download_path)

# init audio processor
ap = AudioProcessor(**config.audio.to_dict())

# load training samples
eval_samples, train_samples = load_wav_data(config.data_path, config.eval_split_size)

# init model
model = GAN(config, ap)

# init the trainer and 🚀
trainer = Trainer(
    TrainerArgs(), config, output_path, model=model, train_samples=train_samples, eval_samples=eval_samples
)
trainer.fit()
Example 32
def main_worker(args):
    global best_prec1, dtype
    best_prec1 = 0
    dtype = torch_dtypes.get(args.dtype)
    torch.manual_seed(args.seed)
    time_stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
    if args.evaluate:
        args.results_dir = '/tmp'
    # args.save = ''
    if args.save == '':
        args.save = time_stamp
    save_path = path.join(args.results_dir, args.save)

    args.distributed = args.local_rank >= 0 or args.world_size > 1

    if args.distributed:
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_init,
                                world_size=args.world_size, rank=args.local_rank)
        args.local_rank = dist.get_rank()
        args.world_size = dist.get_world_size()
        if args.dist_backend == 'mpi':
            # If using MPI, select all visible devices
            args.device_ids = list(range(torch.cuda.device_count()))
        else:
            args.device_ids = [args.local_rank]

    if not (args.distributed and args.local_rank > 0):
        if not path.exists(save_path):
            makedirs(save_path)
        export_args_namespace(args, path.join(save_path, 'config.json'))

    setup_logging(path.join(save_path, 'log.txt'),
                  resume=args.resume != '',
                  dummy=args.distributed and args.local_rank > 0)

    results_path = path.join(save_path, 'results')
    results = ResultsLog(results_path,
                         title='Training Results - %s' % args.save)

    logging.info("saving to %s", save_path)
    logging.debug("run arguments: %s", args)
    logging.info("creating model %s", args.model)

    if 'cuda' in args.device and torch.cuda.is_available():
        torch.cuda.manual_seed_all(args.seed)
        torch.cuda.set_device(args.device_ids[0])
        cudnn.benchmark = True
    else:
        args.device_ids = None

    # create model
    model = models.__dict__[args.model]
    model_config = {'dataset': args.dataset}
    model_config['fp8_dynamic'] = True  # moran

    if args.model_config != '':
        model_config = dict(model_config, **literal_eval(args.model_config))

    model = model(**model_config)
    if args.sync_bn:
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
    logging.info("created model with configuration: %s", model_config)
    num_parameters = sum([l.nelement() for l in model.parameters()])
    logging.info("number of parameters: %d", num_parameters)

    # optionally resume from a checkpoint
    if args.evaluate:
        if not path.isfile(args.evaluate):
            parser.error('invalid checkpoint: {}'.format(args.evaluate))
        checkpoint = torch.load(args.evaluate, map_location="cpu")
        # Override configuration with checkpoint info
        args.model = checkpoint.get('model', args.model)
        args.model_config = checkpoint.get('config', args.model_config)
        # load checkpoint
        model.load_state_dict(checkpoint['state_dict'])
        logging.info("loaded checkpoint '%s' (epoch %s)",
                     args.evaluate, checkpoint['epoch'])

    if args.resume:
        checkpoint_file = args.resume
        if path.isdir(checkpoint_file):
            results.load(path.join(checkpoint_file, 'results.csv'))
            checkpoint_file = path.join(
                checkpoint_file, 'model_best.pth.tar')
        if path.isfile(checkpoint_file):
            logging.info("loading checkpoint '%s'", args.resume)
            checkpoint = torch.load(checkpoint_file, map_location="cpu")
            if args.start_epoch < 0:  # not explicitly set
                args.start_epoch = checkpoint['epoch']
            best_prec1 = checkpoint['best_prec1']
            model.load_state_dict(checkpoint['state_dict'])
            optim_state_dict = checkpoint.get('optim_state_dict', None)
            logging.info("loaded checkpoint '%s' (epoch %s)",
                         checkpoint_file, checkpoint['epoch'])
        else:
            logging.error("no checkpoint found at '%s'", args.resume)
    else:
        optim_state_dict = None

    # define loss function (criterion) and optimizer
    loss_params = {}
    if args.label_smoothing > 0:
        loss_params['smooth_eps'] = args.label_smoothing
    criterion = getattr(model, 'criterion', CrossEntropyLoss)(**loss_params)
    criterion.to(args.device, dtype)
    model.to(args.device, dtype)

    # Batch-norm should always be done in float
    if 'half' in args.dtype:
        FilterModules(model, module=is_bn).to(dtype=torch.float)

    # optimizer configuration
    optim_regime = getattr(model, 'regime', [{'epoch': 0,
                                              'optimizer': args.optimizer,
                                              'lr': args.lr,
                                              'momentum': args.momentum,
                                              'weight_decay': args.weight_decay}])

    optimizer = optim_regime if isinstance(optim_regime, OptimRegime) \
        else OptimRegime(model, optim_regime, use_float_copy='half' in args.dtype)

    if optim_state_dict is not None:
        optimizer.load_state_dict(optim_state_dict)

    trainer = Trainer(model, criterion, optimizer,
                      device_ids=args.device_ids, device=args.device, dtype=dtype, print_freq=args.print_freq,
                      distributed=args.distributed, local_rank=args.local_rank, mixup=args.mixup, cutmix=args.cutmix,
                      loss_scale=args.loss_scale, grad_clip=args.grad_clip,  adapt_grad_norm=args.adapt_grad_norm, enable_input_grad_statistics=True)
    if args.tensorwatch:
        trainer.set_watcher(filename=path.abspath(path.join(save_path, 'tensorwatch.log')),
                            port=args.tensorwatch_port)

    # Evaluation Data loading code
    args.eval_batch_size = args.eval_batch_size if args.eval_batch_size > 0 else args.batch_size
    val_data = DataRegime(getattr(model, 'data_eval_regime', None),
                          defaults={'datasets_path': args.datasets_dir, 'name': args.dataset, 'split': 'val', 'augment': False,
                                    'input_size': args.input_size, 'batch_size': args.eval_batch_size, 'shuffle': False,
                                    'num_workers': args.workers, 'pin_memory': True, 'drop_last': False})

    if args.evaluate:
        results = trainer.validate(val_data.get_loader())
        logging.info(results)
        return

    # Training Data loading code
    train_data_defaults = {'datasets_path': args.datasets_dir, 'name': args.dataset, 'split': 'train', 'augment': True,
                           'input_size': args.input_size,  'batch_size': args.batch_size, 'shuffle': True,
                           'num_workers': args.workers, 'pin_memory': True, 'drop_last': True,
                           'distributed': args.distributed, 'duplicates': args.duplicates, 'autoaugment': args.autoaugment,
                           'cutout': {'holes': 1, 'length': 16} if args.cutout else None}

    if hasattr(model, 'sampled_data_regime'):
        sampled_data_regime = model.sampled_data_regime
        probs, regime_configs = zip(*sampled_data_regime)
        regimes = []
        for config in regime_configs:
            defaults = {**train_data_defaults}
            defaults.update(config)
            regimes.append(DataRegime(None, defaults=defaults))
        train_data = SampledDataRegime(regimes, probs)
    else:
        train_data = DataRegime(
            getattr(model, 'data_regime', None), defaults=train_data_defaults)

    logging.info('optimization regime: %s', optim_regime)
    logging.info('data regime: %s', train_data)
    args.start_epoch = max(args.start_epoch, 0)
    trainer.training_steps = args.start_epoch * len(train_data)
    for epoch in range(args.start_epoch, args.epochs):
        trainer.epoch = epoch
        train_data.set_epoch(epoch)
        val_data.set_epoch(epoch)
        logging.info('\nStarting Epoch: {0}\n'.format(epoch + 1))

        # train for one epoch
        train_results, _ = trainer.train(train_data.get_loader(),
                                         chunk_batch=args.chunk_batch)

        # evaluate on validation set

        if args.calibrate_bn:
            train_data = DataRegime(None, defaults={'datasets_path': args.datasets_dir, 'name': args.dataset,
                                                    'split': 'train', 'augment': True,
                                                    'input_size': args.input_size, 'batch_size': args.batch_size,
                                                    'shuffle': True, 'num_workers': args.workers, 'pin_memory': True,
                                                    'drop_last': False})
            trainer.calibrate_bn(train_data.get_loader(), num_steps=200)

        val_results, _ = trainer.validate(val_data.get_loader())

        if args.distributed and args.local_rank > 0:
            continue

        # remember best prec@1 and save checkpoint
        is_best = val_results['prec1'] > best_prec1
        best_prec1 = max(val_results['prec1'], best_prec1)

        if args.drop_optim_state:
            optim_state_dict = None
        else:
            optim_state_dict = optimizer.state_dict()

        save_checkpoint({
            'epoch': epoch + 1,
            'model': args.model,
            'config': args.model_config,
            'state_dict': model.state_dict(),
            'optim_state_dict': optim_state_dict,
            'best_prec1': best_prec1
        }, is_best, path=save_path, save_all=args.save_all)

        logging.info('\nResults - Epoch: {0}\n'
                     'Training Loss {train[loss]:.4f} \t'
                     'Training Prec@1 {train[prec1]:.3f} \t'
                     'Training Prec@5 {train[prec5]:.3f} \t'
                     'Validation Loss {val[loss]:.4f} \t'
                     'Validation Prec@1 {val[prec1]:.3f} \t'
                     'Validation Prec@5 {val[prec5]:.3f} \t\n'
                     .format(epoch + 1, train=train_results, val=val_results))

        values = dict(epoch=epoch + 1, steps=trainer.training_steps)
        values.update({'training ' + k: v for k, v in train_results.items()})
        values.update({'validation ' + k: v for k, v in val_results.items()})
        results.add(**values)

        results.plot(x='epoch', y=['training loss', 'validation loss'],
                     legend=['training', 'validation'],
                     title='Loss', ylabel='loss')
        results.plot(x='epoch', y=['training error1', 'validation error1'],
                     legend=['training', 'validation'],
                     title='Error@1', ylabel='error %')
        results.plot(x='epoch', y=['training error5', 'validation error5'],
                     legend=['training', 'validation'],
                     title='Error@5', ylabel='error %')
        if 'grad' in train_results.keys():
            results.plot(x='epoch', y=['training grad'],
                         legend=['gradient L2 norm'],
                         title='Gradient Norm', ylabel='value')
        results.save()

    return results
Example 33
                        type=int,
                        default=1111,
                        help="Seed for initializing training. (default:1111)")
    parser.add_argument(
        "--device",
        default="",
        help="device id i.e. `0` or `0,1` or `cpu`. (default: ````).")
    args = parser.parse_args()

    print("##################################################\n")
    print("Run Training Engine.\n")
    print(args)

    create_folder("runs")
    create_folder("runs/hr")
    create_folder("runs/sr")
    create_folder("weights")

    logger.info("TrainingEngine:")
    print("\tAPI version .......... 0.1.1")
    print("\tBuild ................ 2020.11.30-1116-0c5adc7e")

    logger.info("Creating Training Engine")
    trainer = Trainer(args)

    logger.info("Staring training model")
    trainer.run()
    print("##################################################\n")

    logger.info("All training has been completed successfully.\n")
Example 34
lr = 0.01
epoch = 100

out_model_fn = './model/%s/' % (saveName)
if not os.path.exists(out_model_fn):
    os.makedirs(out_model_fn)

use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")

t_kwargs = {'batch_size': batch_size, 'pin_memory': True}
tr_loader = torch.utils.data.DataLoader(Data2Torch(device, 'tr'),
                                        shuffle=True,
                                        **t_kwargs)
va_loader = torch.utils.data.DataLoader(Data2Torch(device, 'te'),
                                        shuffle=True,
                                        **t_kwargs)

model = Net().to(device)
model.apply(model_init)

mp = np.array([
    794532, 230484, 99407, 99132, 24426, 14954, 11468, 8696, 8310, 4914, 3006
])
mmp = mp.astype(np.float32) / mp.sum()
cc = ((mmp.mean() / mmp) * ((1 - mmp) / (1 - mmp.mean())))**0.3
inverse_feq = torch.from_numpy(cc)
print(inverse_feq)

Trer = Trainer(model, lr, epoch, out_model_fn, 1, 1)
Trer.fit(tr_loader, inverse_feq, device)
Example 35
import torch

import utility
import data
import model
import loss
from option import args
from trainer import Trainer

torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)

if checkpoint.ok:
    loader = data.Data(args)
    model = model.Model(args, checkpoint)
    loss = loss.Loss(args, checkpoint) if not args.test_only else None
    t = Trainer(args, loader, model, loss, checkpoint)
    while not t.terminate():
        t.train()
        t.test()

    checkpoint.done()


Example 36
    "T[-1].lower",
    "T[0].lower",
    "T[1].lower",
    "T[2].lower",
    "T[-2,-1].lower",
    "T[-1,0].lower",
    "T[0,1].lower",
    "T[1,2].lower",

    # "T[-1].isdigit", "T[0].isdigit", "T[1].isdigit",
    #
    # "T[-2].istitle", "T[-1].istitle", "T[0].istitle", "T[1].istitle", "T[2].istitle",
    # "T[0,1].istitle", "T[0,2].istitle",

    # "T[-2].is_in_dict", "T[-1].is_in_dict", "T[0].is_in_dict", "T[1].is_in_dict", "T[2].is_in_dict",
    # "T[-2,-1].is_in_dict", "T[-1,0].is_in_dict", "T[0,1].is_in_dict", "T[1,2].is_in_dict",
    # "T[-2,0].is_in_dict", "T[-1,1].is_in_dict", "T[0,2].is_in_dict",
]
tagger = CRFSequenceTagger(features)
trainer = Trainer(tagger, corpus)

params = {
    'c1': 1.0,  # coefficient for L1 penalty
    'c2': 1e-3,  # coefficient for L2 penalty
    'max_iterations': 1000,
    # include transitions that are possible, but not observed
    'feature.possible_transitions': True,
    'feature.possible_states': True,
}
trainer.train(params)
Example 37
                                                   shuffle=True,
                                                   pin_memory=False)
    testloader = LoadDataset(mode='test', **dataloader_params)

    dataloader_test = torch.utils.data.DataLoader(testloader,
                                                  hparams.batch_size,
                                                  shuffle=True,
                                                  pin_memory=False)
    now = time.time()
    noised_signal, signal = next(iter(dataloader_train))
    print("Batch uploading time : {}".format(time.time() - now))
    signal_length = noised_signal.size(-1)

    net = Denoiser(hparams.num_wavelet_levels,
                   hparams.wavelet_size,
                   hparams.sigma,
                   thresholding_algorithm=hparams.thresholding_algorithm,
                   threshold_mode=hparams.threshold_mode,
                   signal_length=signal_length,
                   thresholding_parameter=hparams.thresholding_parameter,
                   wavelet_name=hparams.wavelet_name)

    trainer = Trainer(net,
                      hparams.batch_size,
                      hparams.log_path,
                      wavelet_reg=hparams.wavelet_reg,
                      net_reg=hparams.net_reg,
                      lr=hparams.lr)

    trainer.train(dataloader_train, dataloader_test, hparams.epochs)
Example 38
                                                     )
        print("Training set size:", self.train_loader.dataset.__len__())
        print("Test set size:", self.test_loader.dataset.__len__())
        # snapshot
        self.snapshot_interval = 100000
        self.save_dir = os.path.join(snapshot_root, 'models/')
        self.result_dir = os.path.join(snapshot_root, 'results/')
        self.tboard_dir = tensorboard_root

        # evaluate
        self.evaluate_interval = 2
        self.evaluate_metric = ChamferLoss()

        self.check_args()

    def check_args(self):
        """checking arguments"""
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)
        if not os.path.exists(self.result_dir):
            os.makedirs(self.result_dir)
        if not os.path.exists(self.tboard_dir):
            os.makedirs(self.tboard_dir)
        return self


if __name__ == '__main__':
    args = Args()
    trainer = Trainer(args)
    trainer.train()
Example 39
def main():
    args = parse_args()
    args.input_dim, args.mem_dim = 300, 150
    args.hidden_dim, args.num_classes = 50, 5
    args.cuda = args.cuda and torch.cuda.is_available()
    if args.sparse and args.wd != 0:
        print('Sparsity and weight decay are incompatible, pick one!')
        exit()
    print(args)
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    numpy.random.seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.benchmark = True
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    train_dir = os.path.join(args.data, 'train/')
    dev_dir = os.path.join(args.data, 'dev/')
    test_dir = os.path.join(args.data, 'test/')

    # write unique words from all token files
    sick_vocab_file = os.path.join(args.data, 'sick.vocab')
    if not os.path.isfile(sick_vocab_file):
        token_files_a = [
            os.path.join(split, 'a.toks')
            for split in [train_dir, dev_dir, test_dir]
        ]
        token_files_b = [
            os.path.join(split, 'b.toks')
            for split in [train_dir, dev_dir, test_dir]
        ]
        token_files = token_files_a + token_files_b
        sick_vocab_file = os.path.join(args.data, 'sick.vocab')
        build_vocab(token_files, sick_vocab_file)

    # get vocab object from vocab file previously written
    vocab = Vocab(filename=sick_vocab_file,
                  data=[
                      Constants.PAD_WORD, Constants.UNK_WORD,
                      Constants.BOS_WORD, Constants.EOS_WORD
                  ])
    print('==> SICK vocabulary size : %d ' % vocab.size())

    # load SICK dataset splits
    train_file = os.path.join(args.data, 'sick_train.pth')
    if os.path.isfile(train_file):
        train_dataset = torch.load(train_file)
    else:
        train_dataset = SICKDataset(train_dir, vocab, args.num_classes)
        torch.save(train_dataset, train_file)
    print('==> Size of train data   : %d ' % len(train_dataset))
    dev_file = os.path.join(args.data, 'sick_dev.pth')
    if os.path.isfile(dev_file):
        dev_dataset = torch.load(dev_file)
    else:
        dev_dataset = SICKDataset(dev_dir, vocab, args.num_classes)
        torch.save(dev_dataset, dev_file)
    print('==> Size of dev data     : %d ' % len(dev_dataset))
    test_file = os.path.join(args.data, 'sick_test.pth')
    if os.path.isfile(test_file):
        test_dataset = torch.load(test_file)
    else:
        test_dataset = SICKDataset(test_dir, vocab, args.num_classes)
        torch.save(test_dataset, test_file)
    print('==> Size of test data    : %d ' % len(test_dataset))

    # initialize model, criterion/loss_function, optimizer
    model = SimilarityTreeLSTM(args.cuda, vocab.size(), args.input_dim,
                               args.mem_dim, args.hidden_dim, args.num_classes,
                               args.sparse)
    criterion = nn.KLDivLoss()
    if args.cuda:
        model.cuda(), criterion.cuda()
    if args.optim == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.wd)
    elif args.optim == 'adagrad':
        optimizer = optim.Adagrad(model.parameters(),
                                  lr=args.lr,
                                  weight_decay=args.wd)
    elif args.optim == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              weight_decay=args.wd)
    metrics = Metrics(args.num_classes)

    # for words common to dataset vocab and GLOVE, use GLOVE vectors
    # for other words in dataset vocab, use random normal vectors
    emb_file = os.path.join(args.data, 'sick_embed.pth')
    if os.path.isfile(emb_file):
        emb = torch.load(emb_file)
    else:
        # load glove embeddings and vocab
        glove_vocab, glove_emb = load_word_vectors(
            os.path.join(args.glove, 'glove.840B.300d'))
        print('==> GLOVE vocabulary size: %d ' % glove_vocab.size())
        emb = torch.Tensor(vocab.size(),
                           glove_emb.size(1)).normal_(-0.05, 0.05)
        # zero out the embeddings for padding and other special words if they are absent in vocab
        for idx, item in enumerate([
                Constants.PAD_WORD, Constants.UNK_WORD, Constants.BOS_WORD,
                Constants.EOS_WORD
        ]):
            emb[idx].zero_()
        for word in vocab.labelToIdx.keys():
            if glove_vocab.get_index(word):
                emb[vocab.get_index(word)] = glove_emb[glove_vocab.get_index(
                    word)]
        torch.save(emb, emb_file)
    # plug these into embedding matrix inside model
    if args.cuda:
        emb = emb.cuda()
    model.childsumtreelstm.emb.state_dict()['weight'].copy_(emb)

    # create trainer object for training and testing
    trainer = Trainer(args, model, criterion, optimizer)

    best = -float('inf')
    for epoch in range(args.epochs):
        _ = trainer.train(train_dataset)
        train_loss, train_pred = trainer.test(train_dataset)
        dev_loss, dev_pred = trainer.test(dev_dataset)
        test_loss, test_pred = trainer.test(test_dataset)

        train_pearson = metrics.pearson(train_pred, train_dataset.labels)
        train_mse = metrics.mse(train_pred, train_dataset.labels)
        print('==> Train    Loss: {}\tPearson: {}\tMSE: {}'.format(
            train_loss, train_pearson, train_mse))
        dev_pearson = metrics.pearson(dev_pred, dev_dataset.labels)
        dev_mse = metrics.mse(dev_pred, dev_dataset.labels)
        print('==> Dev      Loss: {}\tPearson: {}\tMSE: {}'.format(
            dev_loss, dev_pearson, dev_mse))
        test_pearson = metrics.pearson(test_pred, test_dataset.labels)
        test_mse = metrics.mse(test_pred, test_dataset.labels)
        print('==> Test     Loss: {}\tPearson: {}\tMSE: {}'.format(
            test_loss, test_pearson, test_mse))

        if best < test_pearson:
            best = test_pearson
            checkpoint = {
                'model': trainer.model.state_dict(),
                'optim': trainer.optimizer,
                'pearson': test_pearson,
                'mse': test_mse,
                'args': args,
                'epoch': epoch
            }
            print('==> New optimum found, checkpointing everything now...')
            torch.save(checkpoint,
                       '%s.pt' % os.path.join(args.save, args.expname))
Example 40
import time

from trainer import Trainer
import training_config

if __name__ == '__main__':
    #start_time = time.time()
    print("Retraining best model...", flush=True)
    trainer = Trainer()

    # Load best model
    trainer.load_best_model()

    # Load data
    trainer.load_data()

    # Train data
    trainer.train_load_models()

    # Print stats (for debugging purposes)
    trainer.print_model_info()

    #print("Sleeping for {} hours before retraining again...\n".format(training_config.BEST_RETRAIN_WAIT/(60*60)), flush=True)
    #cur_time = time.time()
    # Remove training time from the wait time so that it retrains at approximately the same time every day
    #wait = start_time + training_config.BEST_RETRAIN_WAIT - cur_time
    #time.sleep(wait)
Example 41
t1 = time.time()

for i, weight_scale in enumerate(weight_scales):
    print('Running weight scale %d / %d' % (i + 1, len(weight_scales)))
    bn_model = FullyConnectedNet(hidden_dims=hidden,
                                 weight_scale=weight_scale,
                                 use_batchnorm=True)
    model = FullyConnectedNet(hidden_dims=hidden,
                              weight_scale=weight_scale,
                              use_batchnorm=False)

    bn_trainer = Trainer(bn_model,
                         small_data,
                         num_epochs=10,
                         batch_size=50,
                         update_rule='adam',
                         updater_config={
                             'learning_rate': 3e-3,
                         },
                         verbose=False,
                         print_every=200)
    bn_trainer.train()
    bn_trainers[weight_scale] = bn_trainer

    trainer = Trainer(model,
                      small_data,
                      num_epochs=10,
                      batch_size=50,
                      update_rule='adam',
                      updater_config={
                          'learning_rate': 3e-3,
                      },
Example 42
    regularization = Objective(['policy_reg_error'], lambda reg: reg,
                               weight=args.Q_sub)
    reference_loss = Objective(['Y_pred', 'Rf'], F.mse_loss, weight=args.Q_r)
    observation_lower_bound_penalty = Objective(['Y_pred', 'Y_minf'], lambda x, xmin: torch.mean(F.relu(-x + -xmin)),
                                                weight=args.Q_con_y)
    observation_upper_bound_penalty = Objective(['Y_pred', 'Y_maxf'], lambda x, xmax: torch.mean(F.relu(x - xmax)),
                                                weight=args.Q_con_y)
    inputs_lower_bound_penalty = Objective(['U_pred', 'U_minf'], lambda x, xmin: torch.mean(F.relu(-x + -xmin)),
                                           weight=args.Q_con_u)
    inputs_upper_bound_penalty = Objective(['U_pred', 'U_maxf'], lambda x, xmax: torch.mean(F.relu(x - xmax)),
                                           weight=args.Q_con_u)

    objectives = [regularization, reference_loss]
    constraints = [observation_lower_bound_penalty, observation_upper_bound_penalty,
                   inputs_lower_bound_penalty, inputs_upper_bound_penalty]

    ##########################################
    ########## OPTIMIZE SOLUTION ############
    ##########################################
    model = Problem(objectives, constraints, components).to(device)
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.lr)
    visualizer = VisualizerClosedLoop(dataset, dynamics_model, plot_keys, args.verbosity)
    emulator = emulators.systems[args.system]() if args.system_data == 'emulator' \
        else dynamics_model if args.system_data == 'datafile' else None
    simulator = ClosedLoopSimulator(model=model, dataset=dataset, emulator=emulator)
    trainer = Trainer(model, dataset, optimizer, logger=logger, visualizer=visualizer,
                      simulator=simulator, epochs=args.epochs)
    best_model = trainer.train()
    trainer.evaluate(best_model)
    logger.clean_up()
Example 43
def main():
    global args
    args = parse_args()
    # global logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        "[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
    # file logger
    fh = logging.FileHandler(os.path.join(args.save, args.expname) + '.log',
                             mode='w')
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    # console logger
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)
    # argument validation
    args.cuda = args.cuda and torch.cuda.is_available()
    if args.sparse and args.wd != 0:
        logger.error('Sparsity and weight decay are incompatible, pick one!')
        exit()
    logger.debug(args)
    args.data = 'learning/treelstm/data/lc_quad/'
    args.save = 'learning/treelstm/checkpoints/'
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.benchmark = True
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    train_dir = os.path.join(args.data, 'train/')
    dev_dir = os.path.join(args.data, 'dev/')
    test_dir = os.path.join(args.data, 'test/')

    # write unique words from all token files

    dataset_vocab_file = os.path.join(args.data, 'dataset.vocab')
    if not os.path.isfile(dataset_vocab_file):
        token_files_a = [
            os.path.join(split, 'a.toks')
            for split in [train_dir, dev_dir, test_dir]
        ]
        token_files_b = [
            os.path.join(split, 'b.toks')
            for split in [train_dir, dev_dir, test_dir]
        ]
        token_files = token_files_a + token_files_b
        dataset_vocab_file = os.path.join(args.data, 'dataset.vocab')
        build_vocab(token_files, dataset_vocab_file)

    # get vocab object from vocab file previously written
    vocab = Vocab(filename=dataset_vocab_file,
                  data=[
                      Constants.PAD_WORD, Constants.UNK_WORD,
                      Constants.BOS_WORD, Constants.EOS_WORD
                  ])
    logger.debug('==> Dataset vocabulary size : %d ' % vocab.size())

    # load dataset splits
    train_file = os.path.join(args.data, 'dataset_train.pth')
    if os.path.isfile(train_file):
        train_dataset = torch.load(train_file)
    else:
        train_dataset = QGDataset(train_dir, vocab, args.num_classes)
        torch.save(train_dataset, train_file)
    logger.debug('==> Size of train data   : %d ' % len(train_dataset))
    dev_file = os.path.join(args.data, 'dataset_dev.pth')
    if os.path.isfile(dev_file):
        dev_dataset = torch.load(dev_file)
    else:
        dev_dataset = QGDataset(dev_dir, vocab, args.num_classes)
        torch.save(dev_dataset, dev_file)
    logger.debug('==> Size of dev data     : %d ' % len(dev_dataset))
    test_file = os.path.join(args.data, 'dataset_test.pth')
    if os.path.isfile(test_file):
        test_dataset = torch.load(test_file)
    else:
        test_dataset = QGDataset(test_dir, vocab, args.num_classes)
        torch.save(test_dataset, test_file)
    logger.debug('==> Size of test data    : %d ' % len(test_dataset))

    similarity = DASimilarity(args.mem_dim, args.hidden_dim, args.num_classes)
    # if args.sim == "cos":
    #     similarity = CosSimilarity(1)
    # else:
    #     similarity = DASimilarity(args.mem_dim, args.hidden_dim, args.num_classes, dropout=True)

    # initialize model, criterion/loss_function, optimizer
    model = SimilarityTreeLSTM(vocab.size(), args.input_dim, args.mem_dim,
                               similarity, args.sparse)
    criterion = nn.KLDivLoss()  # nn.HingeEmbeddingLoss()

    if args.cuda:
        model.cuda()
        criterion.cuda()
    else:
        torch.set_num_threads(4)
    logger.info("number of available cores: {}".format(
        torch.get_num_threads()))
    if args.optim == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.wd)
    elif args.optim == 'adagrad':
        optimizer = optim.Adagrad(model.parameters(),
                                  lr=args.lr,
                                  weight_decay=args.wd)
    elif args.optim == 'sgd':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.lr,
                              weight_decay=args.wd)
    metrics = Metrics(args.num_classes)

    scheduler = torch.optim.lr_scheduler.StepLR(optimizer,
                                                step_size=2,
                                                gamma=0.25)

    # for words in the dataset vocab, use fastText vectors;
    # for words fastText cannot embed, fall back to random uniform vectors
    emb_file = os.path.join(args.data, 'dataset_embed.pth')
    if os.path.isfile(emb_file):
        emb = torch.load(emb_file)
    else:
        EMBEDDING_DIM = 300
        emb = torch.zeros(vocab.size(), EMBEDDING_DIM, dtype=torch.float)
        fasttext_model = load_model("data/fasttext/wiki.en.bin")
        print('Use Fasttext Embedding')
        for word in vocab.labelToIdx.keys():
            word_vector = fasttext_model.get_word_vector(word)
            if word_vector is not None and len(word_vector) == EMBEDDING_DIM:
                emb[vocab.getIndex(word)] = torch.Tensor(word_vector)
            else:
                emb[vocab.getIndex(word)] = torch.Tensor(
                    EMBEDDING_DIM).uniform_(-1, 1)
        # # load glove embeddings and vocab
        # args.glove = 'learning/treelstm/data/glove/'
        # print('Use Glove Embedding')
        # glove_vocab, glove_emb = load_word_vectors(os.path.join(args.glove, 'glove.840B.300d'))
        # logger.debug('==> GLOVE vocabulary size: %d ' % glove_vocab.size())
        # emb = torch.Tensor(vocab.size(), glove_emb.size(1)).normal_(-0.05, 0.05)
        # # zero out the embeddings for padding and other special words if they are absent in vocab
        # for idx, item in enumerate([Constants.PAD_WORD, Constants.UNK_WORD, Constants.BOS_WORD, Constants.EOS_WORD]):
        #     emb[idx].zero_()
        # for word in vocab.labelToIdx.keys():
        #     if glove_vocab.getIndex(word):
        #         emb[vocab.getIndex(word)] = glove_emb[glove_vocab.getIndex(word)]
        torch.save(emb, emb_file)
    # plug these into embedding matrix inside model
    if args.cuda:
        emb = emb.cuda()
    model.emb.weight.data.copy_(emb)

    checkpoint_filename = '%s.pt' % os.path.join(args.save, args.expname)
    if args.mode == "test":
        checkpoint = torch.load(checkpoint_filename)
        model.load_state_dict(checkpoint['model'])
        args.epochs = 1

    # create trainer object for training and testing
    trainer = Trainer(args, model, criterion, optimizer)

    for epoch in range(args.epochs):
        if args.mode == "train":
            trainer.train(train_dataset)
            # step the LR scheduler after the epoch's optimizer updates
            # (the ordering required since PyTorch 1.1)
            scheduler.step()
            train_loss, train_pred = trainer.test(train_dataset)
            logger.info('==> Epoch {}, Train \tLoss: {} {}'.format(
                epoch, train_loss, metrics.all(train_pred,
                                               train_dataset.labels)))
            checkpoint = {
                'model': trainer.model.state_dict(),
                'optim': trainer.optimizer,
                'args': args,
                'epoch': epoch,
                'scheduler': scheduler
            }
            checkpoint_filename = '%s.pt' % os.path.join(
                args.save, args.expname +
                ',epoch={},train_loss={}'.format(epoch + 1, train_loss))
            torch.save(checkpoint, checkpoint_filename)

        dev_loss, dev_pred = trainer.test(dev_dataset)
        test_loss, test_pred = trainer.test(test_dataset)
        logger.info('==> Epoch {}, Dev \tLoss: {} {}'.format(
            epoch, dev_loss, metrics.all(dev_pred, dev_dataset.labels)))
        logger.info('==> Epoch {}, Test \tLoss: {} {}'.format(
            epoch, test_loss, metrics.all(test_pred, test_dataset.labels)))
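
# Minimal sketch of resuming from the checkpoint saved above: 'model' holds a
# state_dict, while 'optim' and 'scheduler' were stored as whole objects, so
# they can be reused directly; assumes the model was rebuilt exactly as in main().
def resume_from_checkpoint(path, model):
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['model'])
    optimizer = checkpoint['optim']
    scheduler = checkpoint['scheduler']
    start_epoch = checkpoint['epoch'] + 1
    return model, optimizer, scheduler, start_epoch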
Esempio n. 44
0
def main():
    ### set device
    if not args.cuda:
        cfg.TRAIN.DEVICE = torch.device('cpu')
    else:
        assert torch.cuda.is_available(), "CUDA is not available"
        #assert d < torch.cuda.device_count(), "Not enough GPU"
        os.environ["CUDA_VISIBLE_DEVICES"] = ",".join(
            [str(ids) for ids in args.device_ids])
        torch.backends.cudnn.benchmark = True
        cfg.CUDA = True
        cfg.TRAIN.DEVICE = torch.device('cuda:0')
        print("Let's use", torch.cuda.device_count(), "GPUs!")

    ### set config
    cfg_from_file(args.cfg_file)
    if args.set_cfgs is not None:
        cfg_from_list(args.set_cfgs)

    cfg_from_args(args)
    print_cfg(cfg)

    # assert_and_infer_cfg()

    if not cfg.TRAIN.NO_SAVE:

        run_folder = create_folder_for_run(cfg.TRAIN.RUNS_FOLDER)
        logging.basicConfig(level=logging.INFO,
                            format='%(message)s',
                            handlers=[
                                logging.FileHandler(
                                    os.path.join(run_folder,
                                                 f'{cfg.TRAIN.NAME}.log')),
                                logging.StreamHandler(sys.stdout)
                            ])

        with open(os.path.join(run_folder, 'config_and_args.pkl'), 'wb') as f:
            blob = {'cfg': yaml.dump(cfg), 'args': args}
            pickle.dump(blob, f, pickle.HIGHEST_PROTOCOL)

        with open(os.path.join(run_folder, 'args.txt'), 'w') as f:
            for item in vars(args):
                f.write(item + ":" + str(getattr(args, item)) + '\n')
        logging.info('×' * 40)

        shutil.copy(args.cfg_file,
                    os.path.join(run_folder, cfg.TRAIN.NAME) + '_cfg')
        logging.info('save config and args in runs folder:\n %s' % run_folder)
        # if args.use_tfboard:
        #     tblogger = SummaryWriter(run_folder)

    else:
        logging.basicConfig(level=logging.INFO)
        # logger = logging.getLogger(__name__)
    # print('args:')
    # logging.info(pprint.pformat(vars(args)))
    # print('cfg:')
    # logging.info(yaml.dump(cfg.TRAIN))

    # from test_parallel import parallel_test
    # parallel_test()

    # trainloader, testloader = get_DataLoader()
    # imshow_Loader(trainloader)

    # loader = get_CIFARLoader()
    # model = get_test_Model()
    # loss = get_loss()

    # model = test_Hidden_modeling()
    # test_Hidden_Trainer(loader, model, loss)
    # ???
    # loader = get_DataLoader()

    loader = get_DataLoader()
    model = get_Model()
    model = nn.DataParallel(model)
    model.to(cfg.TRAIN.DEVICE)
    # loss = get_loss()

    logging.info(model)

    Trainer(loader, model)
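
# Note on the multi-GPU setup above: nn.DataParallel replicates the model on
# every device made visible via CUDA_VISIBLE_DEVICES and splits each input
# batch along dim 0; the underlying module stays accessible as model.module.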
Esempio n. 45
0
def main(config, resume, is_runtime, max_points, max_points_epoch, vis,
         not_save):

    adapted_config = copy.deepcopy(config)
    train_logger = Logger()

    transf_list_train = []
    transf_list_valid = []

    for transform_config in adapted_config['train_transform']:
        transf_list_train.append(get_instance_list(transform,
                                                   transform_config))
    for transform_config in adapted_config['valid_transform']:
        transf_list_valid.append(get_instance_list(transform,
                                                   transform_config))

    checker = []
    for checker_config in adapted_config['sample_checker']:
        checker.append(get_instance_list(sample_checker, checker_config))

    adapted_config['train_dataset']['args']['transform'] = transforms.Compose(
        transf_list_train)
    adapted_config['valid_dataset']['args']['transform'] = transforms.Compose(
        transf_list_valid)

    adapted_config['train_dataset']['args']['sample_checker'] = checker

    if not args.eval:
        train_dataset = get_instance(dataset, 'train_dataset', adapted_config)
        adapted_config['train_data_loader']['args']['dataset'] = train_dataset
        if adapted_config['n_gpu'] > 1:
            train_data_loader = DataListLoader(
                **adapted_config['train_data_loader']['args'])
        else:
            train_data_loader = GraphLevelDataLoader(
                **adapted_config['train_data_loader']['args'])

        train_data_loader.n_samples = lambda: len(train_data_loader.dataset)

    valid_dataset = get_instance(dataset, 'valid_dataset', adapted_config)
    adapted_config['valid_data_loader']['args']['dataset'] = valid_dataset
    if adapted_config['n_gpu'] > 1:
        valid_data_loader = DataListLoader(
            **adapted_config['valid_data_loader']['args'])
    else:
        valid_data_loader = GraphLevelDataLoader(
            **adapted_config['valid_data_loader']['args'])

    valid_data_loader.n_samples = lambda: len(valid_data_loader.dataset)

    model = get_instance(architectures, 'arch', adapted_config)
    print(model)
    loss = get_instance(module_loss, 'loss', adapted_config)

    # build the optimizer and learning-rate scheduler; delete every line
    # containing lr_scheduler to disable scheduling
    trainable_params = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = get_instance(torch.optim, 'optimizer', adapted_config,
                             trainable_params)

    lr_scheduler = get_instance(torch.optim.lr_scheduler, 'lr_scheduler',
                                adapted_config, optimizer)

    trainer = Trainer(model,
                      loss,
                      optimizer,
                      resume=resume,
                      config=config,
                      data_loader=train_data_loader if not args.eval else None,
                      valid_data_loader=valid_data_loader,
                      lr_scheduler=lr_scheduler,
                      train_logger=train_logger,
                      eval_mode=args.eval,
                      is_runtime=is_runtime,
                      max_points=max_points,
                      max_points_epoch=max_points_epoch,
                      vis=vis,
                      n_gpu=adapted_config['n_gpu'],
                      s3dis_gt_pcd=args.s3dis_gt_pcd,
                      not_save=not_save)

    trainer.train()
Esempio n. 46
0
args.n_GPUs = 2
args.test_only = False
# saving and loading models
args.save_every = 50
args.save_models = True  # saves all intermediate models
# file name to save; if '.', the name is date+time
args.save = args.loss
args.save_results = True
loader = dataloader.StereoMSIDatasetLoader(args)
checkpoint = utility.checkpoint(args, loader)
debug_dir = os.path.join(checkpoint.dir, "debug_results")
os.makedirs(debug_dir, exist_ok=True)  # tolerate re-runs
my_loss = loss.Loss(args, checkpoint) if not args.test_only else None
my_model = network.Model(args, checkpoint)
my_model.apply(weight_init)
t = Trainer(args, loader, my_model, my_loss, checkpoint)
i = True
#args.test_only = True
#t.test_model()
#if args.test_only==True:
#    t.test_model()
while not t.terminate():
    t.train()  # train model
    if t.epoch() % args.save_every == 0:
        t.test()
        if (t.epoch() % 50 == 0):
            imsave(debug_dir + "/sr_epoch_{}.png".format(t.epoch()),
                   normalise01(np.float64(t.sr_valid)))
            if i:
                # one-time save of the low-resolution input; the original
                # snippet is truncated here, so the imsave argument is assumed
                imsave(debug_dir + "/lr.png",
                       normalise01(np.float64(t.lr_valid)))
                i = False
Esempio n. 47
0
import torch

import data
import loss
import model
import utility
from option import args
from trainer import Trainer
from videotester import VideoTester  # assumed module for the video branch below

torch.manual_seed(args.seed)
checkpoint = utility.checkpoint(args)

if args.data_test == "video":
    model = model.Model(args, checkpoint)
    t = VideoTester(args, model, checkpoint)
    t.test()
else:
    if checkpoint.ok:
        loader = data.Data(args)
        model = model.Model(args, checkpoint)
        loss = loss.Loss(args, checkpoint) if not args.test_only else None
        t = Trainer(args, loader, model, loss, checkpoint)
        while not t.terminate():
            t.train()
            t.test()

        checkpoint.done()
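
# A plausible sketch of the terminate() contract the loop above relies on
# (the actual Trainer may differ): run one test pass in test-only mode,
# otherwise stop once the configured number of epochs has been reached.
def should_stop(trainer, args):
    if args.test_only:
        trainer.test()
        return True
    return trainer.epoch() >= args.epochs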
Esempio n. 48
0
'''
	gripper_memory_buffer.load_memory(csv_path.replace("logger_{:03}".format(run), "logger_{:03}".format(run-1))+"gripper_memory.pkl")
	suction_1_memory_buffer.load_memory(csv_path.replace("logger_{:03}".format(run), "logger_{:03}".format(run-1))+"suction_1_memory.pkl")
	suction_2_memory_buffer.load_memory(csv_path.replace("logger_{:03}".format(run), "logger_{:03}".format(run-1))+"suction_2_memory.pkl")
'''
if suction_1_memory != "":
    suction_1_memory_buffer.load_memory(suction_1_memory)
if suction_2_memory != "":
    suction_2_memory_buffer.load_memory(suction_2_memory)
if gripper_memory != "":
    gripper_memory_buffer.load_memory(gripper_memory)

# trainer
if testing:
    learning_rate = 5e-5
    densenet_lr = 1e-5
trainer = Trainer(reward, discount_factor, use_cpu, learning_rate, densenet_lr)

# Load model if provided
if model_str != "":
    print "[%f]: Loading provided model..." % (time.time())
    trainer.behavior_net.load_state_dict(torch.load(model_str))
    trainer.target_net.load_state_dict(trainer.behavior_net.state_dict())

if model_str == "":
    torch.save(trainer.behavior_net.state_dict(), model_path + "initial.pth")

# Service clients
vacuum_pump_control = rospy.ServiceProxy(
    "/vacuum_pump_control_node/vacuum_control", SetBool)
check_suck_success = rospy.ServiceProxy(
    "/vacuum_pump_control_node/check_suck_success", SetBool)
Esempio n. 49
0
    TEXT = torchtext.data.Field(sequential=True,
                                tokenize=tokenizer_with_preprocessing,
                                use_vocab=True,
                                lower=True,
                                include_lengths=True,
                                batch_first=True,
                                fix_length=args.max_length,
                                init_token="<cls>",
                                eos_token="<eos>")
    LABEL = torchtext.data.Field(sequential=False,
                                 use_vocab=False,
                                 dtype=torch.float)

    train_ds = torchtext.data.TabularDataset.splits(path=NEWS_PATH,
                                                    train='text_train.tsv',
                                                    format='tsv',
                                                    fields=[('Text1', TEXT),
                                                            ('Text2', TEXT),
                                                            ('Label', LABEL)])
    train_ds = train_ds[0]  # splits() returns a tuple even for a single split

    TEXT.build_vocab(train_ds, min_freq=10)
    # TEXT.vocab.freqs holds the raw token counts (inspection only)

    train_dl = torchtext.data.Iterator(train_ds,
                                       batch_size=args.batch_size,
                                       train=True)

    trainer = Trainer(args, TEXT, train_dl)
    trainer.run()
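
    # Minimal sketch of consuming the Iterator built above: batch attributes
    # follow the ('Text1', 'Text2', 'Label') field names, and
    # include_lengths=True makes each TEXT attribute a (token_ids, lengths) pair.
    for batch in train_dl:
        text1, text1_lengths = batch.Text1
        text2, text2_lengths = batch.Text2
        labels = batch.Label
        break  # inspect a single batch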
Esempio n. 50
0

for i in [7]:
    INPUT_WINDOW = i
    # important for network specific stuff
    layers = [(INPUT_WINDOW + OUTPUT_SEQUENCE_LENGTH+NUM_LINES*CAPACITY_LINES)*(KIND_CARS+1),128,128,128,NUM_LINES*NUM_LINES]
    net = Network(layers)
    # create name
    name = 'HRLCP_I:' + str(INPUT_WINDOW) + "_O:" + str(OUTPUT_SEQUENCE_LENGTH) + "_N:"
    for layer in layers:
        name += str(layer) + "-"
    name = name[:-1]
    name += "_NL:" + str(NUM_LINES) + "_CL:" + str(CAPACITY_LINES) + "_W:" + str(INPUT_SEQUENCE_LENGTH) + "_KC:" + str(KIND_CARS)
    # initialize training


# In[8]:


env = Environment(INPUT_SEQUENCE_LENGTH, KIND_CARS, NUM_LINES, CAPACITY_LINES, OUTPUT_SEQUENCE_LENGTH, INPUT_WINDOW, 0, initial_ratio = initial_ratio)
cpw = CPW(env)
agent = Agent(net, BUFFER_SIZE, BATCH_SIZE, UPDATE_EVERY, GAMMA, TAU, LR, SEED)
trainer = Trainer("hrl", KIND_CARS, GAMMA)


# In[ ]:


scores = trainer.train(cpw, agent, 250, n_episodes=50000, eps_start=1.0, eps_end=0.001, eps_decay=0.9998, show_picture= True, valid_actions_only = False, load_param = False)

Esempio n. 51
0
def main(args):
    # configs path to load data & save model
    from pathlib import Path
    if not Path(args.root_dir).exists():
        Path(args.root_dir).mkdir()

    p = Path(args.save_path).parent
    if not p.exists():
        p.mkdir()

    device = "cuda" if (torch.cuda.is_available() and args.use_cuda) else "cpu"
    import sys
    print(sys.version)
    print(f"Using {device}")
    print("Loading Data...")
    (src, trg), (train, valid, test), (train_loader, valid_loader, test_loader) = get_data(args)
    src_vocab_len = len(src.vocab.stoi)
    trg_vocab_len = len(trg.vocab.stoi)
    # check vocab size
    print(f"SRC vocab {src_vocab_len}, TRG vocab {trg_vocab_len}")
    enc_max_seq_len = args.max_length
    dec_max_seq_len = args.max_length
    pad_idx = src.vocab.stoi.get("<pad>") if args.pad_idx is None else args.pad_idx
    enc_sos_idx = src.vocab.stoi.get("<s>") if args.enc_sos_idx is None else args.enc_sos_idx
    enc_eos_idx = src.vocab.stoi.get("</s>") if args.enc_eos_idx is None else args.enc_eos_idx
    dec_sos_idx = trg.vocab.stoi.get("<s>") if args.dec_sos_idx is None else args.dec_sos_idx
    dec_eos_idx = trg.vocab.stoi.get("</s>") if args.dec_eos_idx is None else args.dec_eos_idx
    pos_pad_idx = 0 if args.pos_pad_idx is None else args.pos_pad_idx

    print("Building Model...")
    model = Transformer(src = src,
                        trg = trg,
                        enc_vocab_len=src_vocab_len,
                        enc_max_seq_len=enc_max_seq_len,
                        dec_vocab_len=trg_vocab_len,
                        dec_max_seq_len=dec_max_seq_len,
                        n_layer=args.n_layer,
                        n_head=args.n_head,
                        d_model=args.d_model,
                        d_k=args.d_k,
                        d_v=args.d_v,
                        d_f=args.d_f,
                        pad_idx=pad_idx,
                        pos_pad_idx=pos_pad_idx,
                        drop_rate=args.drop_rate,
                        use_conv=args.use_conv,
                        linear_weight_share=args.linear_weight_share,
                        embed_weight_share=args.embed_weight_share).to(device)

    # attempt at enabling multi-GPU training for runs on the server machine
    # model = nn.DataParallel(model)
    # model.cuda()

    if args.load_path is not None:
        print(f"Load Model {args.load_path}")
        model.load_state_dict(torch.load(args.load_path))

    # build loss function using LabelSmoothing
    loss_function = LabelSmoothing(trg_vocab_size=trg_vocab_len,
                                   pad_idx=args.pad_idx,
                                   eps=args.smooth_eps)

    optimizer = WarmUpOptim(warmup_steps=args.warmup_steps,
                            d_model=args.d_model,
                            optimizer=optim.Adam(model.parameters(),
                                             betas=(args.beta1, args.beta2),
                                             eps=10e-9))
    trainer = Trainer(optimizer=optimizer,
                      train_loader=train_loader,
                      valid_loader=valid_loader,
                      test_loader=test_loader,
                      n_step=args.n_step,
                      device=device,
                      save_path=args.save_path,
                      enc_sos_idx=enc_sos_idx,
                      enc_eos_idx=enc_eos_idx,
                      dec_sos_idx=dec_sos_idx,
                      dec_eos_idx=dec_eos_idx,
                      metrics_method=args.metrics_method,
                      verbose=args.verbose)

    print("Start Training...")
    trainer.main(model=model, loss_function=loss_function)
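
# Sketch of the label-smoothing target assumed above (the snippet's
# LabelSmoothing class may differ in details): put 1 - eps on the gold token,
# spread eps over the remaining non-pad tokens, and train with KL divergence
# against the model's log-probabilities.
def smoothed_targets(gold, vocab_size, pad_idx, eps=0.1):
    # gold: (batch,) LongTensor of gold token indices
    dist = torch.full((gold.size(0), vocab_size), eps / (vocab_size - 2))
    dist.scatter_(1, gold.unsqueeze(1), 1.0 - eps)
    dist[:, pad_idx] = 0.0  # never assign probability mass to padding
    return dist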
Esempio n. 52
0
def main():
    global args 
    args = parse_args()

    # global logger
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    
    formatter = logging.Formatter("[%(asctime)s] %(levelname)s:%(name)s:%(message)s")
    # file logger
    fh = logging.FileHandler(os.path.join(args.save, args.expname)+'.log', mode='w')
    fh.setLevel(logging.INFO)
    fh.setFormatter(formatter)
    logger.addHandler(fh)
    # console logger
    ch = logging.StreamHandler()
    ch.setLevel(logging.DEBUG)
    ch.setFormatter(formatter)
    logger.addHandler(ch)

    if not torch.cuda.is_available() and args.cuda:
        args.cuda = False
        logger.info("CUDA is unavailable, convert to cpu mode")

    if args.sparse and args.wd != 0:
        logger.error('Sparsity and weight decay are incompatible, pick one!')
        exit()

    logger.debug(args)
    torch.manual_seed(args.seed)
    random.seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)
        torch.backends.cudnn.benchmark = True
    if not os.path.exists(args.save):
        os.makedirs(args.save)

    # set directory
    train_dir = os.path.join(args.data, 'train/')
    dev_dir = os.path.join(args.data, 'dev/')
    test_dir = os.path.join(args.data, 'test/')

    # load vocabulary
    vocab_path = os.path.join(args.data, "vocab.npy")
    vocab = Vocab(
        filename=vocab_path, 
        labels=[constants.PAD_WORD, constants.UNK_WORD, constants.BOS_WORD, constants.EOS_WORD]
    )
    logger.debug('==> vocabulary size : %d ' % len(vocab))

    # load train dataset
    train_file = os.path.join(train_dir, "ERdata.pt")
    if os.path.isfile(train_file):
        train_dataset = torch.load(train_file)
    else:
        train_dataset = ERDataset(train_dir, vocab, 2)
        torch.save(train_dataset, train_file)
    logger.debug('==> train data size: %d' % len(train_dataset))

    # load dev dataset
    dev_file = os.path.join(dev_dir, "ERdata.pt")
    if os.path.isfile(dev_file):
        dev_dataset = torch.load(dev_file)
    else:
        dev_dataset = ERDataset(dev_dir, vocab, 2)
        torch.save(dev_dataset, dev_file)
    logger.debug('==> dev data size: %d' % len(dev_dataset))

    # load test dataset   
    test_file = os.path.join(test_dir, "ERdata.pt")
    if os.path.isfile(test_file):
        test_dataset = torch.load(test_file)
    else:
        test_dataset = ERDataset(test_dir, vocab, 2)
        torch.save(test_dataset, test_file)
    logger.debug('==> test data size: %d' % len(test_dataset))

    # trainer: 
    # tree model
    model = TreeModel(
        len(vocab),
        args.input_dim,
        args.mem_dim,
        2,  # 0-1 prediction
        args.sparse,
        args.freeze_embed
    )

    # criterion
    criterion = nn.KLDivLoss()
    if args.cuda:
        model.cuda()
        criterion.cuda()

    # optimizer
    if args.optim == 'adam':
        optimizer = optim.Adam(
            filter(lambda p: p.requires_grad, model.parameters()), 
            lr=args.lr, weight_decay=args.wd
        )
    elif args.optim == 'adagrad':
        optimizer = optim.Adagrad(
            filter(lambda p: p.requires_grad, model.parameters()), 
            lr=args.lr, weight_decay=args.wd
        )
    elif args.optim == 'sgd':
        optimizer = optim.SGD(
            filter(lambda p: p.requires_grad, model.parameters()), 
            lr=args.lr, weight_decay=args.wd
        )
    else:
        raise Exception("Unknown optimizer")

    # metrics
    metrics = Metrics(2)  # 0-1 prediction

    # embeddings
    sent_emb_path = os.path.join(args.data, "sent_emb.pt")
    raw_sent_emb_path = os.path.join(args.glove, 'glove.840B.300d.txt')

    sent_emb = load_word_vectors(sent_emb_path, vocab, raw_sent_emb_path)
    
    logger.debug('==> sentence embedding size: %d * %d' % (sent_emb.size()[0], sent_emb.size()[1]))
    if args.cuda:
        sent_emb = sent_emb.cuda()  # .cuda() returns a copy, so reassign it
    model.sent_emb.weight.data.copy_(sent_emb)

    trainer = Trainer(args, model, criterion, optimizer)

    # train and test
    best = float("-inf")
    for epoch in range(args.epochs):
        train_loss = trainer.train(train_dataset)

        train_loss, train_pred = trainer.test(train_dataset)
        dev_loss, dev_pred = trainer.test(dev_dataset)
        test_loss, test_pred = trainer.test(test_dataset)

        train_pearson = metrics.pearson(train_pred, train_dataset.labels)
        train_mse = metrics.mse(train_pred, train_dataset.labels)
        logger.info('==> Epoch {}, Train \tLoss: {}\tPearson: {}\tMSE: {}'.format(epoch, train_loss, train_pearson, train_mse))
        
        dev_pearson = metrics.pearson(dev_pred, dev_dataset.labels)
        dev_mse = metrics.mse(dev_pred, dev_dataset.labels)
        logger.info('==> Epoch {}, Dev \tLoss: {}\tPearson: {}\tMSE: {}'.format(epoch, dev_loss, dev_pearson, dev_mse))

        test_pearson = metrics.pearson(test_pred, test_dataset.labels)
        test_mse = metrics.mse(test_pred, test_dataset.labels)
        logger.info('==> Epoch {}, Test \tLoss: {}\tPearson: {}\tMSE: {}'.format(epoch, test_loss, test_pearson, test_mse))

        if best < dev_pearson:
            best = dev_pearson
            checkpoint = {
                'model': trainer.model.state_dict(), 
                'optim': trainer.optimizer,
                'pearson': dev_pearson, 'mse': dev_mse,
                'args': args, 'epoch': epoch
                }
            logger.debug('==> New optimum found, checkpointing everything now...')
            torch.save(checkpoint, '%s.pt' % os.path.join(args.save, args.expname))
Esempio n. 53
0
def parse_dataset(dataset_name, batch_size):
    # (reconstructed opening; 'DATASETS' is an assumed registry name,
    # the original snippet is truncated here)
    try:
        train_loader, test_loader, shape, num_classes = DATASETS[
            dataset_name](batch_size=batch_size)
    except KeyError:
        raise NameError("%s doesn't exist." % dataset_name)
    return train_loader, test_loader, shape, num_classes


if __name__ == '__main__':
    args = parser.parse_args()
    if args.json_file is None:
        print('Starting manual run')
        train_loader, test_loader, shape, num_classes = parse_dataset(
            args.dataset_name, args.batch_size)
        model = parse_model(args.model_name, shape, num_classes)
        trainer = Trainer(model,
                          train_loader,
                          test_loader,
                          logs_dir=args.output,
                          device=args.device,
                          run_id=args.run_id)
        trainer.train()
    else:
        print('Automatized experiment schedule enabled using', args.json_file)
        config_dict = json.load(open(args.json_file, 'r'))
        thresholds = config_dict.get('threshs', [.99])
        downsampling = config_dict.get('downsampling', [None])
        dss = config_dict['dataset'] if isinstance(
            config_dict['dataset'], list) else [config_dict['dataset']]
        optimizer = config_dict['optimizer']
Esempio n. 54
0
import datasets
from config import cfg
from importlib import import_module
#------------prepare environment------------
seed = cfg.SEED
if seed is not None:
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)

gpus = cfg.GPU_ID
if len(gpus) == 1:
    torch.cuda.set_device(gpus[0])

torch.backends.cudnn.benchmark = True


#------------prepare data loader------------
data_mode = cfg.DATASET
datasetting = import_module(f'datasets.setting.{data_mode}')
cfg_data = datasetting.cfg_data


#------------Prepare Trainer------------
from trainer import Trainer

#------------Start Training------------
pwd = os.path.split(os.path.realpath(__file__))[0]
cc_trainer = Trainer(cfg_data, pwd)
cc_trainer.forward()
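
# Note on the seeding above: with cudnn.benchmark enabled, kernels are chosen
# for speed and runs are not bit-reproducible; a fully deterministic (slower)
# setup would instead set torch.backends.cudnn.benchmark = False and
# torch.backends.cudnn.deterministic = True.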
Esempio n. 55
0
                                                                num_workers=dann_config.NUM_WORKERS,
                                                                device=device)

    train_gen_t, val_gen_t, test_gen_t = create_data_generators(dann_config.DATASET,
                                                                dann_config.TARGET_DOMAIN,
                                                                batch_size=dann_config.BATCH_SIZE,
                                                                infinite_train=True,
                                                                image_size=dann_config.IMAGE_SIZE,
                                                                num_workers=dann_config.NUM_WORKERS,
                                                                device=device)

    model = DANNModel().to(device)
    acc = AccuracyScoreFromLogits()

    scheduler = LRSchedulerSGD(blocks_with_smaller_lr=dann_config.BLOCKS_WITH_SMALLER_LR)
    tr = Trainer(model, loss_DANN)
    tr.fit(train_gen_s, train_gen_t,
           n_epochs=dann_config.N_EPOCHS,
           validation_data=[val_gen_s, val_gen_t],
           metrics=[acc],
           steps_per_epoch=dann_config.STEPS_PER_EPOCH,
           val_freq=dann_config.VAL_FREQ,
           opt='sgd',
           opt_kwargs={'lr': 0.01, 'momentum': 0.9},
           lr_scheduler=scheduler,
           callbacks=[print_callback(watch=["loss", "domain_loss", "val_loss",
                                            "val_domain_loss", 'trg_metrics', 'src_metrics']),
                      ModelSaver('DANN', dann_config.SAVE_MODEL_FREQ),
                      WandbCallback(),
                      HistorySaver('log_with_sgd', dann_config.VAL_FREQ, path='_log/DANN_Resnet_sgd',
                                   extra_losses={'domain_loss': ['domain_loss', 'val_domain_loss'],
Esempio n. 56
0
    # Build the model
    model = lstm.LSTMClassifier(
        None,
        embedding_dim=EMBEDDING_DIM,
        hidden_dim=HIDDEN_DIM,
        output_dim=OUTPUT_DIM,
        n_layers=N_LAYERS,
        dropout=DROPOUT,
        bidirectional=BIDIRECTIONAL,
        pad_idx=train_dataset.input_pad_idx,
    )

    model.to(device)
    optimizer = optim.Adam(model.parameters(),
                           weight_decay=WEIGHT_DECAY,
                           lr=LR)
    loss_fn = torch.nn.CrossEntropyLoss(
        ignore_index=train_dataset.output_pad_idx,
        reduction="sum",
    )
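    # ignore_index=train_dataset.output_pad_idx makes padded positions
    # contribute zero to the summed loss, so its magnitude reflects real
    # tokens only.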

    trainer = Trainer(model, optimizer, loss_fn, device, log_every_n=5)
    labels = {0: "REFUTES", 1: "NOT ENOUGH INFO", 2: "SUPPORT"}

    trainer.fit(
        train_loader=train_loader,
        valid_loader=test_loader,
        labels=labels,
        n_epochs=N_EPOCHS,
    )
Esempio n. 57
0
                               comp_precision=comp_precision,
                               update_precision=update_precision,
                               initial_range=initial_range,
                               max_overflow=max_overflow)

        trainer = Trainer(rng=rng,
                          load_path=None,
                          save_path=None,
                          train_set=train_set,
                          valid_set=valid_set,
                          test_set=test_set,
                          model=model,
                          LR_start=LR_start,
                          LR_sat=n_epoch / 2,
                          LR_fin=LR_start / 10,
                          M_start=0.5,
                          M_sat=n_epoch / 4,
                          M_fin=0.7,
                          batch_size=batch_size,
                          gpu_batches=gpu_batches,
                          n_epoch=n_epoch,
                          shuffle_batches=False,
                          shuffle_examples=True,
                          format=format,
                          range_update_frequency=range_update_frequency,
                          range_init_epoch=range_init_epoch)

    elif dataset == "MNIST":

        rng = np.random.RandomState(1234)
        LR_start = 0.02
Esempio n. 58
0
    gcn_adj = load_gcn_graph(args.data)
    if args.cuda:
        gcn_adj = gcn_adj.cuda()
    biased_adj = utils.adj_bias_normalize(adj.to_dense())
    model = SLGAT(num_feature=num_feature,
                  hidden_dim=args.hidden,
                  num_class=num_class,
                  class_hidden=args.class_hidden,
                  adj=biased_adj,
                  gcn_adj=gcn_adj.to_dense(),
                  input_dropout=args.input_dropout,
                  dropout=args.dropout,
                  weight_dropout=args.weight_dropout)
    if args.cuda:
        model.cuda()
    trainer = Trainer(args, model)
elif args.model == 'SpSLGAT':
    model = SpSLGAT(num_feature=num_feature,
                    hidden_dim=args.hidden,
                    num_class=num_class,
                    class_hidden=args.class_hidden,
                    adj=adj.to_dense(),
                    gcn_adj=adj,
                    input_dropout=args.input_dropout,
                    dropout=args.dropout,
                    weight_dropout=args.weight_dropout)
    if args.cuda:
        model.cuda()
    trainer = Trainer(args, model)
else:
    raise ValueError('invalid model name: {}'.format(args.model))
Esempio n. 59
0
        self.writer.add_scalars(
            'Gen. Loss',
            {phase + ' Loss': torch.mean(torch.stack(result_outputs.gen_loss))},
            self.current_epoch)
        self.writer.add_scalars(
            'Disc. Loss',
            {phase + ' Loss': torch.mean(torch.stack(result_outputs.disc_loss))},
            self.current_epoch)

        collated_imgs = torch.cat([*torch.cat(result_outputs.img[0], dim=3)], dim=1)
        self.writer.add_image(phase + ' Images', collated_imgs, self.current_epoch)

    def training_epoch_end(self, training_outputs):
        self._shared_end(training_outputs, is_train=True)

    def validation_epoch_end(self, validation_outputs):
        self._shared_end(validation_outputs, is_train=False)
        self.sel_ind = random.randint(0, len(self.validation_loader) - 1)

        return torch.mean(torch.stack(validation_outputs.recon_error))


parser = Trainer.add_trainer_args(parser)
args = parser.parse_args()
group_dicts = get_group_dicts(args, parser)

train_dataset = CtDataset('train', **group_dicts['data arguments'])
valid_dataset = CtDataset('val', **group_dicts['data arguments'])

GAN_experiment = SimpleGAN(args, group_dicts)
GAN_experiment.fit(train_dataset, valid_dataset)
Esempio n. 60
0
    args = parser.parse_args()
    
    torch.manual_seed(args.seed)

    vae = VAE(args.z_dim)
    optimizer = Adam(vae.parameters(), lr=args.lr)
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
    vae.to(device)
    # download mnist & setup loaders
    if args.mode == "train":
        train_set = MNISTDataset('./data', train=True, download=True, transform=transforms.ToTensor())
        val_set = MNISTDataset('./data', train=False, transform=transforms.ToTensor())
        train_loader = DataLoader(train_set, batch_size=128, shuffle=True)
        val_loader = DataLoader(val_set, batch_size=128, shuffle=True)

        trainer = Trainer(vae, args.epochs, train_loader, val_loader, device, loss_fn, optimizer, args.print_freq)
        trainer.train_model()
        torch.save(vae.state_dict(), args.weights)

    val_loader = DataLoader(MNISTDataset('./data', train=False, transform=transforms.ToTensor()),
                            batch_size=128, shuffle=True)
    
    if args.mode != "train":
        vae.load_state_dict(torch.load(args.weights))
        vae.eval()
    
    if args.visualise:

        latent_mnist = []
        target = []
        for data, targets in val_loader: