Example #1
def do_withmany(parser, token):
    # Compile the {% withmany %} block tag: collect its keyword arguments
    # and parse the enclosed template nodes up to {% endwithmany %}.
    args, kwargs = utils.split_args(token.contents)
    kwargs = utils.parse_arg(kwargs, parser)
    kwargs = [[k, v] for k, v in kwargs.items()]
    nodelist = parser.parse(('endwithmany',))
    parser.delete_first_token()
    return WithManyNode(kwargs, nodelist)
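A compilation function like this is normally registered on a django.template.Library instance in the same template-tag module. A minimal sketch of that registration (the register object and the tag name are assumptions; only do_withmany comes from the example above):

from django import template

register = template.Library()

# Hypothetical registration: exposes the tag as {% withmany %} ... {% endwithmany %}.
register.tag('withmany', do_withmany)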
Example #2
 def inner(request, *args, **kwargs):
     # return view(request, User.objects.all()[0], *args, **kwargs)
     credential = utils.parse_arg(request.GET, 'credential', None, str,
                                  None)
     user = User.objects.filter(credential=credential)
     if user.count() > 0:
         return view(request, user[0], *args, **kwargs)
     return HttpResponse(Response(code=-2, msg='User not logged in').to_json(),
                         content_type='application/json')
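The extra level of indentation suggests inner is the closure of a view decorator that is not shown in the source. A hypothetical outline of that enclosing wrapper (the name require_user and the wrapped view parameter are assumptions; the body is the closure above, and utils, User, HttpResponse, and Response are assumed importable):

def require_user(view):
    # Hypothetical enclosing decorator: decorated views receive the User
    # resolved from the 'credential' query parameter as their second argument.
    def inner(request, *args, **kwargs):
        credential = utils.parse_arg(request.GET, 'credential', None, str, None)
        user = User.objects.filter(credential=credential)
        if user.count() > 0:
            return view(request, user[0], *args, **kwargs)
        return HttpResponse(Response(code=-2, msg='User not logged in').to_json(),
                            content_type='application/json')
    return inner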
Example #3
def debug_args(args):
    """Parse the list-valued hyperparameter arguments, print them when
    debugging is enabled, and validate their values and ranges."""
    args.layers = parse_arg(args.layers, dtype=int)
    args.hl_neurons = parse_arg(args.hl_neurons, dtype=int)
    args.hl_activations = parse_arg(args.hl_activation, dtype=str)
    args.dropouts = parse_arg(args.dropout, dtype=float)
    args.epochs = parse_arg(args.epochs, dtype=int)
    args.learning_rates = parse_arg(args.learning_rate, dtype=float)

    debug('Batch Size: %s' % str(args.batch_size), opt=args)
    debug('Hidden Layer Neurons: %s' % str(args.hl_neurons), opt=args)
    debug('Hidden Layer Activation Functions: %s' % str(args.hl_activations),
          opt=args)
    debug('Dropout Rates: %s' % str(args.dropouts), opt=args)
    debug('Number of Epochs: %s' % str(args.epochs), opt=args)
    debug('Learning Rates: %s' % str(args.learning_rates), opt=args)
    debug('Layers: %s' % str(args.layers), opt=args)

    #   Default csv locations
    if args.train_path is None:
        args.train_path = os.path.join('data', 'train.20191028.csv')
    if args.test_path is None:
        args.test_path = os.path.join('data', 'test.20191028.csv')
    debug('Training set: %s' % str(args.train_path), opt=args)
    debug('Test set: %s' % str(args.test_path), opt=args)

    assert 0 < args.batch_size < 10000, 'Keep batch size between 1 and 10,000'
    assert all(
        0 < x < 1000 for x in
        args.hl_neurons), 'Keep number of hl_neurons between 1 and 1,000'

    activations = [
        'tanh', 'relu', 'elu', 'selu', 'sigmoid', 'hard_sigmoid',
        'exponential', 'linear'
    ]
    assert all(x in activations for x in args.hl_activations), \
        'Activation functions must be one of %s' % str(activations)
    assert all(0.0 <= x < 1.0
               for x in args.dropouts), 'Dropout must be >= 0.0 and < 1.0'
    assert all(0 < x < 100000
               for x in args.epochs), 'Keep epochs between 1 and 100,000'
    assert all(0.0 < x < 1.0
               for x in args.learning_rates), 'Learning rate must be > 0.0 and < 1.0'
    assert all(
        0 < x < 1000
        for x in args.layers), 'Keep number of layers between 1 and 1,000'
    assert args.problem_type in [
        'classification', 'regression'
    ], 'problem_type must be classification or regression'
    assert os.path.exists(
        args.train_path), "train set %s doesn't exist!" % args.train_path
    assert os.path.exists(
        args.test_path), "test set %s doesn't exist!" % args.test_path
    assert 0.0 <= args.pca <= 1.0, 'Percentage of Principal Components to use must be between 0 and 1'
    assert 0.0 <= args.rf_select <= 1.0, 'Percentage of features for RandomForest selection must be between 0 and 1'

    return args
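The parse_arg helper used in this example is not shown. A minimal sketch consistent with how it is called above (a comma-separated command-line string coerced into a typed list; the delimiter and None handling are assumptions):

def parse_arg(value, dtype=str, delimiter=','):
    # Hypothetical helper: turn a CLI string such as "64,128,256" into a
    # list of values of type `dtype`; an empty or missing value yields [].
    if value is None:
        return []
    return [dtype(v) for v in str(value).split(delimiter) if v != '']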
Example #4
def parse_output(arg, default_opath='', default_otag_oext='', delimiter=':'):
    opath, otag_oext = utils.parse_arg(arg, delimiter, default_opath,
                                       default_otag_oext)
    if opath.endswith('/') or opath.endswith('\\'):
        opath = opath[0:-1]
    logger.info('opath=%s, otag_oext=%s' % (opath, otag_oext))
    ofname_oext = ''
    if '.' in opath:  #format like: movies/conan/1.mp4
        opath, ofname_oext = os.path.split(opath)

    logger.info('opath=%s, ofname_oext=%s, otag_oext=%s' %
                (opath, ofname_oext, otag_oext))
    if len(opath) > 0 and not os.path.isdir(opath):
        #opt = raw_input('Directory "%s" does not exist, do you want to create it?(Y/N)' % opath)
        #if opt.lower() in 'yes':
        #    os.makedirs(opath)
        #    fflog.info('Directory "%s" is created.' % arg)
        #else:
        #    fflog.error('Directory "%s" does not exist, using the current directory' % opath)
        #    opath='.'
        if len(ofname_oext) == 0:
            logger.warning(
                'Directory does not exist, filename will be created')
            tmp = opath.replace('/', '_')
            ofname_oext = tmp.replace('\\', '_')
        opath = ''

    oext = ''
    oext2 = ''
    #otag=''
    #ofname=''
    if '.' in ofname_oext:  # split ofname_oext into filename and extension
        ofname, oext = ofname_oext.rsplit('.', 1)
    else:
        ofname = ofname_oext

    if '.' in otag_oext:  # split the output-file extension off otag_oext
        otag, oext2 = otag_oext.rsplit('.', 1)
    else:
        otag = otag_oext

    if len(oext) == 0 and len(oext2) > 0:
        oext = oext2

    if oext.startswith('.'):
        oext = oext[1:]

    return opath, ofname, otag, oext
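The utils.parse_arg(arg, delimiter, *defaults) variant used here and in several later examples is not shown. A minimal sketch consistent with those call sites (split on the delimiter and fall back to the positional defaults for missing or empty fields; the exact behaviour is an assumption):

def parse_arg(arg, delimiter, *defaults):
    # Hypothetical sketch: return one value per supplied default, taken from
    # `arg` split on `delimiter`, or the default itself when a field is absent.
    parts = (arg or '').split(delimiter)
    return tuple(parts[i] if i < len(parts) and parts[i] != '' else default
                 for i, default in enumerate(defaults))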
Example #6
def main():
    # parse command line input
    opt = utils.parse_arg()

    # Set GPU
    opt.cuda = opt.gpuid >= 0
    if opt.cuda:
        torch.cuda.set_device(opt.gpuid)
    else:
        utils.time_str("GPU acceleration is disabled.")

    # prepare data
    db = data_prepare.prepare_db(opt)
    imagenet = data_prepare.ImageNetSmallData(opt, type='centres')
    #    imagenet = None
    #
    #    # add imagenet dataset
    db.update({'imagenet': imagenet})

    # initialize the model
    pre_trained_model = model.prepare_model(opt)

    # prepare M_0(x) model, which is a fixed pre-trained model
    opt.num_output = 1000
    fixed_model = model.prepare_model(opt)
    # prepare centres
    if not os.path.exists('../datasets/imagenet/train_centres.txt'):
        imagenet = data_prepare.ImageNetSmallData(opt, type='all')
        trainer.prepare_centres(fixed_model, imagenet, opt)

    # configure the optimizer
    optim, sche = optimizer.prepare_optim(pre_trained_model, opt)

    # train the model
    trainer.train(pre_trained_model, optim, sche, db, opt, model_0=fixed_model)
    #    trainer.train(pre_trained_model, optim, sche, db, opt, model_0 = None)
    # save the trained model
    if opt.save:
        utils.save_model(pre_trained_model, opt)
Example #7
def main():
    # parse command line input
    opt = utils.parse_arg()

    # Set GPU
    opt.cuda = opt.gpuid >= 0
    if opt.cuda:
        torch.cuda.set_device(opt.gpuid)
    else:
        utils.time_str("GPU acceleration is disabled.")

    # prepare data
    db = data_prepare.prepare_db(opt)
    # initialize the model
    pre_trained_model = model.prepare_model(opt)

    # configure the optimizer
    optim, sche = optimizer.prepare_optim(pre_trained_model, opt)

    # train the model
    trainer.train(pre_trained_model, optim, sche, db, opt)

    # save the trained model
    utils.save_model(pre_trained_model, opt)
Example #8
def main():
    # logging configuration
    logging.basicConfig(level=logging.INFO,
                        format="[%(asctime)s]: %(message)s")

    # parse command line input
    opt = utils.parse_arg()

    # Set GPU
    opt.cuda = opt.gpuid >= 0
    if opt.cuda:
        torch.cuda.set_device(opt.gpuid)
    else:
        # please use GPU for training, CPU version is not supported for now.
        raise NotImplementedError
        #logging.info("GPU acceleration is disabled.")

    # prepare training and validation dataset
    db = data_prepare.prepare_db(opt)

    # sanity check for FG-NET dataset, not used for now
    # assertion: the total images in the eval set lists should be 1002
    total_eval_imgs = sum([len(db['eval'][i]) for i in range(len(db['eval']))])
    print(total_eval_imgs)
    if db['train'][0].name == 'FGNET':
        assert total_eval_imgs == 1002, 'The preparation of the evalset is incorrect.'

    # training
    if opt.train:
        best_MAEs = []
        last_MAEs = []
        # record the current time
        opt.save_dir += time.asctime(time.localtime(time.time()))
        # for FG-NET, do training multiple times for leave-one-out validation
        # for CACD, do training just once
        for exp_id in range(len(db['train'])):
            # initialize the model
            model_train = model.prepare_model(opt)
            #print("model shape:")
            #  print(db['train'].head)

            #print( np.array(db['eval']).shape)
            # configure the optimizer and learning rate scheduler
            optim, sche = optimizer.prepare_optim(model_train, opt)

            # train the model and record mean average error (MAE)
            model_train, MAE, last_MAE = trainer.train(model_train, optim,
                                                       sche, db, opt, exp_id)
            best_MAEs += MAE
            last_MAEs.append(last_MAE.data.item())

            # remove the trained model for leave-one-out validation
            if exp_id != len(db['train']) - 1:
                del model_train

        #np.save('./MAE.npy', np.array(best_MAEs))
        #np.save('./Last_MAE.npy', np.array(last_MAEs))
        # save the final trained model
        #utils.save_model(model_train, opt)

    # testing a pre-trained model
    elif opt.evaluate:
        # path to the pre-trained model
        save_dir = opt.test_model_path
        #example: save_dir = '../model/CACD_MAE_4.59.pth'
        model_loaded = torch.load(save_dir)
        # test the model on the evaluation set
        # the last subject is the test set (compatible with FG-NET)
        trainer.evaluate(model_loaded, db['eval'][-1], opt)
    return
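Examples #6 through #8 (and #14 below) all call a zero-argument parse_arg() that returns the option namespace opt/args. A hypothetical sketch of such a wrapper around argparse, listing only a few of the flags these snippets read (all flag names and defaults here are assumptions):

import argparse

def parse_arg():
    # Hypothetical sketch: wrap argparse and return the parsed namespace.
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpuid', type=int, default=0)
    parser.add_argument('--train', action='store_true')
    parser.add_argument('--evaluate', action='store_true')
    parser.add_argument('--save', action='store_true')
    parser.add_argument('--save_dir', type=str, default='../model/')
    parser.add_argument('--test_model_path', type=str, default='')
    parser.add_argument('--load_ckpt', type=int, default=0)
    return parser.parse_args()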
Example #9
def parse_reso_fps(arg, width=-1, height=-1, fps=-1, delimiter='x'):
    x, y, z = utils.parse_arg(arg, delimiter, str(width), str(height), str(fps))
    return int(x), int(y), int(z)
Example #10
def do_let(parser, token):
    args, kwargs = utils.split_args(token.contents)
    kwargs = utils.parse_arg(kwargs, parser)
    kwargs = [[k, v] for k, v in kwargs.items()]
    return LetNode(kwargs)
Example #11
def main(args):
    """
    This function is the main body of the program.
    
    First, parse all the command-line arguments, which comprise the hyperparameters.
    Then, debug (print and verify the correct types and values) the hyperparameters.
    Next, load the data.
    Finally, loop through each of the hyperparameter combinations and store the results of each run.
    
    Params:
        args            the argparse.Namespace returned by parse_args(), holding the command-line hyperparameter values.
    """
    debug(
        '\n############################################\nSTARTING MAIN FUNCTION IN DEMENTIA DNN\n',
        opt=args)
    num_classes = args.num_classes
    batch_size = args.batch_size
    layers = parse_arg(args.layers, dtype=int)
    hl_neurons = parse_arg(args.hl_neurons, dtype=int)
    hl_activations = parse_arg(args.hl_activation, dtype=str)
    dropouts = parse_arg(args.dropout, dtype=float)
    output_activation = args.output_activation
    epochs = parse_arg(args.epochs, dtype=int)
    learning_rates = parse_arg(args.learning_rate, dtype=float)

    #   Print out arguments if debugging is on, and ensure that correct values are being used
    debug_args(args,
               num_classes=num_classes,
               batch_size=batch_size,
               hl_neurons=hl_neurons,
               hl_activations=hl_activations,
               dropouts=dropouts,
               output_activation=output_activation,
               epochs=epochs,
               learning_rates=learning_rates,
               layers=layers)

    #
    #   Gather data
    #   Original Headers Removed:
    #   M01	M02	M03	M04	M05	M06	M07	M08	M09	O02	O03	O05	O06	O07	J01	J02	J03	J04	J05	J06	C01	C02	C03	C04	C05	C06	C07	C08	H01	P01	P03	P04	P05	P06	P07	P09	P10	Group
    #   Original Classifications:
    #   0 = Normal, 1 = MCI, 2 = VMD, 3 = Dementia
    #
    trainset = numpy.loadtxt(os.path.join('data', 'train.csv'), delimiter=',')
    testset = numpy.loadtxt(os.path.join('data', 'test.csv'), delimiter=',')

    debug('\ntrainset shape: %s, testset shape: %s' %
          (str(trainset.shape), str(testset.shape)),
          opt=args)

    attributes = trainset.shape[1] - 1
    debug('Number of attributes: %d' % attributes, opt=args)

    # after comma is column slicing
    x_train = trainset[:, 0:attributes]
    y_train = trainset[:, attributes]
    x_test = testset[:, 0:attributes]
    y_test = testset[:, attributes]

    debug('x_train shape: %s, y_train shape: %s' %
          (str(x_train.shape), str(y_train.shape)),
          opt=args)
    debug('x_test shape: %s, y_test shape: %s' %
          (str(x_test.shape), str(y_test.shape)),
          opt=args)

    # convert class vectors to binary representation
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    # Let's loop!
    # best_score: [accuracy, num_epochs, hl_activation, output_activation, dropout, hl_neurons, learning_rate] (num_layers is added once a better run is recorded)
    best_score = [
        0.0, epochs[0], hl_activations[0], output_activation, dropouts[0],
        hl_neurons[0], learning_rates[0]
    ]
    scores = []
    counter = 0
    cumulative_score = 0

    for dropout in dropouts:
        for num_hl_neurons in hl_neurons:
            for learning_rate in learning_rates:
                for hl_activation in hl_activations:
                    for num_layers in layers:
                        for num_epochs in epochs:
                            counter += 1
                            print(
                                '\n################################\nEXPERIMENT %d'
                                % counter)

                            ###Build, compile and fit the model
                            model = compile_and_fit_model(
                                x_train,
                                y_train,
                                x_test,
                                y_test,
                                attributes=attributes,
                                batch_size=batch_size,
                                epochs=num_epochs,
                                hl_neurons=num_hl_neurons,
                                dropout=dropout,
                                hl_activation=hl_activation,
                                output_activation=output_activation,
                                learning_rate=learning_rate,
                                num_classes=num_classes,
                                layers=num_layers)

                            #   Test metrics
                            test_loss, test_score = model.evaluate(x_test,
                                                                   y_test,
                                                                   verbose=0)

                            #   best_score is a list of [accuracy, <<<hyperparameters>>>]
                            if (test_score > best_score[0]):
                                best_score = [
                                    test_score, num_epochs, hl_activation,
                                    output_activation, dropout, num_hl_neurons,
                                    learning_rate, num_layers
                                ]

                            print('num_epochs ' + str(num_epochs) + ', hl_activation ' + str(hl_activation) +  \
                                ', output_activation ' + str(output_activation) + ', dropout ' + str(dropout) + \
                                ', neurons ' + str(num_hl_neurons) + ', Learning Rate: ' + str(learning_rate))
                            print('Test loss: ', test_loss, 'Test accuracy: ',
                                  test_score)
                            cumulative_score += test_score

                            #   Adding
                            scores.append([
                                test_score, num_epochs, hl_activation,
                                output_activation, dropout, num_hl_neurons,
                                learning_rate, num_layers
                            ])
                        #   END for num_epochs in epochs
                    # END for num_layers in layers
                #   END for hl_activation in hl_activations
            #   END for learning_rate in learning_rates
        #   END for num_hl_neurons in hl_neurons
    #   for dropout in dropouts

    print('\nBest accuracy: \n%s\n%s' % (
        '[test_score, num_epochs, hl_activation, output_activation, dropout, num_hl_neurons, learning_rate, num_layers]',
        str(best_score)))
    sorted_average = sorted(scores, key=lambda x: (x[1]))
    for score in sorted_average:
        print(str(score))
    print('Average accuracy: %f' % (cumulative_score / counter))
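The six nested loops above enumerate the full Cartesian product of the hyperparameter lists. An equivalent, flatter formulation using itertools.product (a sketch with toy stand-in lists, not taken from the source):

from itertools import product

# Toy hyperparameter lists standing in for the parsed command-line values above.
dropouts, hl_neurons, learning_rates = [0.2, 0.5], [32, 64], [1e-3]
hl_activations, layers, epochs = ['relu'], [2, 3], [50]

for (dropout, num_hl_neurons, learning_rate,
     hl_activation, num_layers, num_epochs) in product(
         dropouts, hl_neurons, learning_rates, hl_activations, layers, epochs):
    # One experiment per combination; build, fit, and score the model here.
    print(dropout, num_hl_neurons, learning_rate, hl_activation, num_layers, num_epochs)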
Example #12
def parse_time(arg, startp='', dura='', delimiter='+'):
    x, y = utils.parse_arg(arg, delimiter, startp, dura)
    return x, y
Example #13
def parse_reso(arg, width=-2, height=-2, delimiter='x'):
    x, y = utils.parse_arg(arg, delimiter, str(width), str(height))
    return int(x), int(y)
Example #14
import torch
from torch.utils.tensorboard import SummaryWriter
import os
import time

from dataloader import AudioDataset, get_dataloader
from loss import CycleGANModel
from utils import parse_arg
import config as cfg
import pdb

if __name__ == "__main__":

    args = parse_arg()

    load_ckpt = args.load_ckpt

    if torch.cuda.is_available():
        device = torch.device(torch.cuda.current_device())
    else:
        device = torch.device('cpu')

    # Create directory for checkpoints
    cwd = os.getcwd()
    ckpt_path = os.path.join(cwd, cfg.ckpt)
    if not os.path.exists(ckpt_path):
        os.mkdir(ckpt_path)

    # Initialize model
    model = CycleGANModel(last_epoch=load_ckpt).to(device)