# Example #1 (score: 0)
 def __init__(self,
              data,
              n_units,
              save=False,
              learning_rate=1e-4,
              name='',
              optimizer='rmsprop',
              num_gpus=1,
              testdata=None,
              **kwargs):
     """Initialize the training/test data and the underlying conv net.

     Args:
         data: training dataset object.
         n_units: layer sizes forwarded to n.CNN.
         save (bool): whether model checkpoints should be written.
         learning_rate (float): optimizer step size.
         name (str): optional model name; the CNN may decorate it.
         optimizer (str): one of 'descent', 'adam', 'adagrad', 'rmsprop'.
         num_gpus (int): number of GPUs the CNN should use.
         testdata: optional held-out dataset (or None).
         **kwargs: forwarded verbatim to n.CNN.

     Raises:
         ValueError: if optimizer is not a supported name.
     """
     # Validate first so a bad optimizer name fails fast, instead of
     # after the (potentially expensive) model construction below.
     if optimizer not in ['descent', 'adam', 'adagrad', 'rmsprop']:
         raise ValueError('optimizer must be \'descent\', \'adagrad\', '
                          '\'rmsprop\', or \'adam\'')
     self.save = save
     self.model = n.CNN(n_units,
                        save,
                        learning_rate,
                        optimizer,
                        name,
                        num_gpus=num_gpus,
                        **kwargs)
     # use the name assigned by the model itself (it may differ from
     # the `name` argument)
     self.name = self.model.name
     self.num_gpus = num_gpus
     self.data = data
     self.testdata = testdata
     self.learning_rate = learning_rate
     self.optimizer = optimizer
     self.params = pp.NNParams('%s/resources/nupack/parameters/%s' %
                               (s.BASE_DIR, s.paramfile))
     self.cache = {}  # memoized per-input computations
     self.add_all_motifs()
     self.update_params()
# Example #2 (score: 0)
def main():
    """Test an existing model on the given data and write predictions.

    Loads the dataset named on the command line, rebuilds the model
    architecture, restores its trained parameters, and writes per-point
    predictions plus the overall RMSE to a results file.

    Raises:
        ValueError: if the unit count cannot be parsed from the model
            name, or if args.model is neither 'cnn' nor 'rnn'.
    """
    args = parse_args()
    data = d.KdDataset(args.filename.split(','), reporter=args.reporter,
                       lowmem=args.lowmem)

    # get number of units if not provided: recover e.g. '64x32' from a
    # restore path like '..._64x32units_...'
    if args.n_units is None:
        m = re.search(r'([0-9]+x[0-9]+)units', args.restore)
        # a bare except here used to hide unrelated errors; only a failed
        # match (m is None) actually means the name could not be parsed
        if m is None:
            raise ValueError('unable to parse number of units from model name')
        args.n_units = m.group(1)

    # get model
    if args.model == 'cnn':
        print('building cnn...')
        model = n.CNN(args.n_units, optimizer=args.optimizer, lowmem=args.lowmem)
    elif args.model == 'rnn':
        print('building rnn...')
        model = n.RNN(args.n_units, optimizer=args.optimizer, lowmem=args.lowmem)
    else:
        raise ValueError('model must be "cnn" or "rnn"')
    model.restore(args.restore)

    data.get_predictions(model)
    print('rmse: %.4f' % data.get_rmse(sterr=False))
    # output name combines the model checkpoint and all input file stems
    files = '+'.join([os.path.splitext(os.path.basename(f))[0]
                      for f in args.filename.split(',')])
    f = '%s/%s_%s.txt' % (s.RESULTS_DIR, os.path.basename(args.restore), files)
    data.to_file(f)
    print('results written to %s' % f)
# Example #3 (score: 0)
def main():
    """Restore a CNN from file and dump an activation profile for every
    unit in every layer."""
    args = parse_args()
    network = nn.CNN(args.layers, write=True)
    network.restore(args.filename)
    # network.get_activation_profile(1,0, iters=1000, learning_rate=0.1)
    # layers are 1-indexed in get_activation_profile
    for layer_idx, n_units in enumerate(args.layers, start=1):
        for unit_idx in range(n_units):
            network.get_activation_profile(layer_idx, unit_idx)
# Example #4 (score: 0)
def train(data, layers, restore, epochs, save, keepprob=1., **kwargs):
    """Train the CNN and summarize its training-set results.

    Args:
        data: training dataset.
        layers: layer sizes for nn.CNN.
        restore: checkpoint path to restore from, or None to train fresh.
        epochs (int): number of training epochs.
        save (bool): whether to persist the trained model.
        keepprob (float): dropout keep probability (1. = no dropout).
        **kwargs: forwarded to nn.CNN.

    Returns:
        The trained nn.CNN instance.
    """
    cnn = nn.CNN(layers, low_mem=True, save=save, **kwargs)
    if restore is not None:
        cnn.restore(restore)
    print('training model %s' % cnn.name)
    cnn.train(data, epochs=epochs, keepprob=keepprob)
    # only claim the model was saved once it actually has been (the old
    # code printed 'saved model to ...' before saving, even if save=False)
    if save:
        cnn.save_model()
        print('saved model to %s/%s' % (s.MODELS_DIR, cnn.name))
    print('writing results to %s/%s' % (s.RESULTS_DIR, cnn.name))
    results = get_results(data, cnn, test=False)
    summarize_results(results, '%s_train.txt' % cnn.name)
    return cnn
# Example #5 (score: 0)
 def __init__(self,
              data,
              n_units,
              save=False,
              n_cores=1,
              learning_rate=None,
              batch_norm=False,
              name='',
              low_mem=False,
              optimizer='rmsprop',
              num_gpus=1,
              testdata=None,
              **kwargs):
     """Initialize the training/test data and the underlying conv net.

     Args:
         data: training dataset object.
         n_units: layer sizes forwarded to n.CNN.
         save (bool): whether model checkpoints should be written.
         n_cores (int): CPU core count stored for later use.
         learning_rate (float or None): optimizer step size; when None a
             dataset-dependent default is chosen.
         batch_norm (bool): accepted for interface compatibility.
         name (str): optional model name; the CNN may decorate it.
         low_mem (bool): low-memory mode, forwarded to n.CNN.
         optimizer (str): one of 'descent', 'adam', 'adagrad', 'rmsprop'.
         num_gpus (int): number of GPUs (stored on the instance).
         testdata: optional held-out dataset (or None).
         **kwargs: forwarded verbatim to n.CNN.

     Raises:
         ValueError: if optimizer is not a supported name.
     """
     # Validate first so a bad optimizer name fails fast, instead of
     # after the (potentially expensive) model construction below.
     if optimizer not in ['descent', 'adam', 'adagrad', 'rmsprop']:
         raise ValueError('optimizer must be \'descent\', \'adagrad\', '
                          '\'rmsprop\', or \'adam\'')
     # default learning rate depends on the dataset type: melt data
     # trains with a much smaller step
     if learning_rate is None:
         if isinstance(data, e.MeltDataset):
             learning_rate = 1e-6
         else:
             learning_rate = 1e-4
     self.save = save
     # NOTE(review): num_gpus is stored below but not forwarded to n.CNN
     # here, unlike the otherwise-parallel constructor elsewhere in this
     # file — confirm whether the model should receive it.
     self.model = n.CNN(n_units,
                        save,
                        learning_rate,
                        optimizer,
                        name,
                        low_mem=low_mem,
                        **kwargs)
     # use the name assigned by the model itself (it may differ from
     # the `name` argument)
     self.name = self.model.name
     self.num_gpus = num_gpus
     self.data = data
     self.testdata = testdata
     self.learning_rate = learning_rate
     self.optimizer = optimizer
     self.low_mem = low_mem
     self.params = pp.NNParams('%s/resources/nupack/parameters/%s' %
                               (s.BASE_DIR, s.paramfile))
     self.n_cores = n_cores
     self.cache = {}  # memoized per-input computations
     self.update_params()
# Example #6 (score: 0)
    ###### ----- hyperparameters ----- #####
    epoch = 40         # number of training epochs
    imgSize = 160      # input image size in pixels (presumably square -- confirm)
    Neuron = 320       # width for the (commented-out) fully-connected variant below
    hidden1 = 320      # first hidden-layer size for the CNN
    hidden2 = 160      # second hidden-layer size for the CNN
    lr = 0.000005      # Adam learning rate

    batchSize = 1
    ptName = "models/nn1.pt"        # checkpoint output path
    lossPng = "lossImg/loss1.png"   # loss-curve plot output
    accPng = "accImg/acc1.png"      # accuracy-curve plot output

    ###### ----- model selection (change the loss function to match) ----- #####
    # model       = NN.Net_log_softmax(num=6,inputSize=imgSize,Neuron=Neuron)
    model = NN.CNN(num=6, inputSize=imgSize, hidden1=hidden1, hidden2=hidden2)

    optimizer = torch.optim.Adam(params=model.parameters(), lr=lr)

    ###### ----- per-person inference test directories ----- #####
    andoDir = "../ReadOnlyDataSet2/Resources/test/ando"
    higashiDir = "../ReadOnlyDataSet2/Resources/test/higashi"
    kataokaDir = "../ReadOnlyDataSet2/Resources/test/kataoka"
    kodamaDir = "../ReadOnlyDataSet2/Resources/test/kodama"
    masudaDir = "../ReadOnlyDataSet2/Resources/test/masuda"
    suetomoDir = "../ReadOnlyDataSet2/Resources/test/suetomo"

    ###### ----- training directories ----- #####
    dirImgTrainPath = "new-dataset"
    dirImgTestPath = "../ReadOnlyDataSet2/Resources" + "/test/"
# Example #7 (score: 0)
def main():
    """Train a neural network (CNN or RNN) with the given data.

    Parses command-line args, loads train/test datasets, builds the
    requested model, optionally restores parameters, trains, then writes
    the loss curve, predictions, and RMSE summaries to the results dir.

    Raises:
        ValueError: if args.model is neither 'cnn' nor 'rnn'.
    """
    args = parse_args()

    # make runs reproducible when a seed is given
    if args.seed is not None:
        np.random.seed(args.seed)
        tf.set_random_seed(args.seed)

    # read data from file(s)
    print('reading data...')
    data = d.KdDataset(args.filename.split(','), reporter=args.reporter,
                       lowmem=args.lowmem)
    print('\t%s points in training data' % data.n)
    print('\tstandard deviation of train data: %.2f'
          % np.nanstd(data.data.dG_measured))
    if args.testfile is not None:
        testdata = d.KdDataset(args.testfile.split(','), reporter=args.reporter,
                               lowmem=args.lowmem)
        print('\t%s points in testing data' % testdata.n)
        print('\tstandard deviation of test data: %.2f'
              % np.nanstd(testdata.data.dG_measured))
    else:
        testdata = None

    # construct neural network model object; validate the choice before
    # doing any work, and build the (formerly duplicated) name once --
    # it encodes input files, architecture, and learning rate
    if args.model not in ('cnn', 'rnn'):
        raise ValueError('model must be "cnn" or "rnn"')
    files = '+'.join([os.path.splitext(os.path.basename(f))[0]
                      for f in args.filename.split(',')])
    name = '%s_%s_%sunits_%.2e' % \
        (files, args.model, args.n_units, args.learning_rate)
    if args.keepprob != 1.:
        name += '_keepprob%.2f' % args.keepprob
    if args.model == 'cnn':
        print('building cnn...')
        model = n.CNN(args.n_units, args.write, args.learning_rate,
                      args.optimizer, name, batch_norm=args.batch_norm,
                      lowmem=args.lowmem, num_gpus=args.num_gpus)
    else:
        print('building rnn...')
        model = n.RNN(args.n_units, args.write, args.learning_rate,
                      args.optimizer, name, bidirectional=args.bidirectional,
                      lowmem=args.lowmem, num_gpus=args.num_gpus)

    # restore model from file if specified
    if args.restore is not None:
        print('restoring model parameters from file...')
        model.restore(args.restore)

    # train model
    print('training model %s...' % model.name)
    loss = model.train(data, args.epochs, args.batch_size, args.keepprob,
                       testdata, args.sterr)
    print('finished training model')

    # save loss to file
    if not os.path.exists(s.RESULTS_DIR):
        os.makedirs(s.RESULTS_DIR)
    np.savetxt('%s/%s.loss' % (s.RESULTS_DIR, model.name), loss.T,
               delimiter='\t', header='train\ttest', comments='')

    # run train and test predictions for final model
    print('getting final predictions...')
    if testdata:
        testdata.get_predictions(model)
        testdata.to_file('%s/%s_test.txt' % (s.RESULTS_DIR, model.name))
        print('final test: %.4f' % testdata.get_rmse())
        print('final test rmse: %.4f' % testdata.get_rmse(sterr=False))
    data.get_predictions(model)

    # save and print results
    data.to_file('%s/%s_train.txt' % (s.RESULTS_DIR, model.name))
    print('final: %.4f' % data.get_rmse())
    print('final rmse: %.4f' % data.get_rmse(sterr=False))
    if args.save:
        # bug fix: this used to crash with AttributeError when no test
        # file was supplied (testdata is None); fall back to the training
        # rmse for the saved-parameters suffix in that case
        rmse_source = testdata if testdata is not None else data
        print('saving model parameters to file %s...' % model.name)
        model.save('_%f' % rmse_source.get_rmse(sterr=False))