Example 1
def main():
    # logging configuration
    logging.basicConfig(level=logging.INFO,
                        format="[%(asctime)s]: %(levelname)s: %(message)s")

    # command line parser
    opt = parse.parse_arg()

    # GPU
    opt.cuda = opt.gpuid >= 0
    if opt.cuda:
        torch.cuda.set_device(opt.gpuid)
    else:
        logging.warning("Running without GPU.")

    # prepare dataset
    opt.dataset = 'mnist'
    db = dataset.prepare_db(opt)

    # add wavelets
    opt.wavelets = True
    opt.cutoff = 20

    # initialize the neural decision forest
    NDF = model.prepare_model(opt)

    # prepare optimizer
    optim, sche = optimizer.prepare_optim(NDF, opt)

    # train the neural decision forest
    best_acc = trainer.train(NDF, optim, sche, db, opt)
    logging.info('The best evaluation accuracy is %f' % best_acc)
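The script above relies on a parse_arg helper (imported here as parse.parse_arg) that must expose at least a gpuid option. As a rough, hypothetical sketch of what such a helper could look like with argparse (flag names and defaults are assumptions, not taken from the original repository):

import argparse

def parse_arg():
    # hypothetical sketch of the command-line parser the example relies on;
    # the real project may define different flags and defaults
    parser = argparse.ArgumentParser(description='Train a neural decision forest')
    parser.add_argument('--gpuid', type=int, default=0,
                        help='GPU id to use; a negative value runs on the CPU')
    return parser.parse_args()
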
Example 2
def main():
    (train_data_fft, train_data_raw, train_data_mfcc, train_label,
     valid_data_fft, valid_data_raw, valid_data_mfcc, valid_label) = load_data()
    model = prepare_model(window_size, mfcc_size)
    model.load_weights("model5.hdf5")
    valmodel(model, valid_data_fft, valid_data_raw, valid_data_mfcc,
             valid_label)
Example 3
def main():
    (train_data_fft, train_data_raw, train_data_mfcc, train_label,
     valid_data_fft, valid_data_raw, valid_data_mfcc, valid_label) = load_data()
    model = prepare_model(window_size, mfcc_size)
    train_model(model, train_data_fft, train_data_raw, train_data_mfcc,
                train_label, valid_data_fft, valid_data_raw, valid_data_mfcc,
                valid_label)
    test_model(model, valid_data_fft, valid_data_raw, valid_data_mfcc,
               valid_label)
Example 4
    def __init__(self, classes, model_path, rf_path, knn_path):
        self.model = prepare_model(config)
        self.model.load_weights(model_path)
        self.rf_outclass = pickle.load(open(rf_path, 'rb'))
        self.knn = pickle.load(open(knn_path, 'rb'))

        self.classes_name_to_id = classes
        self.classes_id_to_name = {}
        for key, val in self.classes_name_to_id.items():
            self.classes_id_to_name[val] = key
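The reverse lookup built by the loop above can also be written as a dict comprehension. A self-contained sketch with made-up class names (the real mapping comes from the classes argument):

# equivalent inversion of a name-to-id mapping using a dict comprehension
classes_name_to_id = {'cat': 0, 'dog': 1}
classes_id_to_name = {val: key for key, val in classes_name_to_id.items()}
assert classes_id_to_name == {0: 'cat', 1: 'dog'}
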
Example 5
def train_model(train_filelist, config, classes, rare_classes):
    batch_size = config['train_params']['batch_size']
    different_classes_per_batch = config['train_params']['different_classes_per_batch']
    nb_epoch = config['train_params']['num_epochs']
    steps_per_epoch = config['train_params']['steps_per_epoch']
    img_size = config['img']['img_size']

    model = prepare_model(config)
    callbacks = get_callbacks(config)
    model.fit_generator(data_generator(train_filelist, classes, rare_classes, img_size, batch_size, different_classes_per_batch),
                        steps_per_epoch=steps_per_epoch, epochs=nb_epoch, callbacks=callbacks)
    return model
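Note that fit_generator is deprecated in recent tf.keras releases. Assuming a TensorFlow 2.x environment, and keeping the helper names from the excerpt (data_generator, get_callbacks, and so on, which are taken from the example rather than verified), the same training step can be expressed with model.fit, which accepts Python generators directly:

    # equivalent call on TF 2.x: model.fit accepts the generator directly
    model.fit(data_generator(train_filelist, classes, rare_classes, img_size,
                             batch_size, different_classes_per_batch),
              steps_per_epoch=steps_per_epoch, epochs=nb_epoch, callbacks=callbacks)
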
Example 6
def main():
    # parse command line input
    opt = utils.parse_arg()

    # Set GPU
    opt.cuda = opt.gpuid >= 0
    if opt.cuda:
        torch.cuda.set_device(opt.gpuid)
    else:
        utils.time_str("GPU acceleration is disabled.")

    # prepare data
    db = data_prepare.prepare_db(opt)
    imagenet = data_prepare.ImageNetSmallData(opt, type='centres')
    #    imagenet = None
    #
    #    # add imagenet dataset
    db.update({'imagenet': imagenet})

    # initialize the model
    pre_trained_model = model.prepare_model(opt)

    # prepare M_0(x) model, which is a fixed pre-trained model
    opt.num_output = 1000
    fixed_model = model.prepare_model(opt)
    # prepare centres
    if not os.path.exists('../datasets/imagenet/train_centres.txt'):
        imagenet = data_prepare.ImageNetSmallData(opt, type='all')
        trainer.prepare_centres(fixed_model, imagenet, opt)

    # configure the optimizer
    optim, sche = optimizer.prepare_optim(pre_trained_model, opt)

    # train the model
    trainer.train(pre_trained_model, optim, sche, db, opt, model_0=fixed_model)
    #    trainer.train(pre_trained_model, optim, sche, db, opt, model_0 = None)
    # save the trained model
    if opt.save:
        utils.save_model(pre_trained_model, opt)
Example 7
def main():
    # parse command line input
    opt = utils.parse_arg()

    # Set GPU
    opt.cuda = opt.gpuid >= 0
    if opt.cuda:
        torch.cuda.set_device(opt.gpuid)
    else:
        utils.time_str("GPU acceleration is disabled.")

    # prepare data
    db = data_prepare.prepare_db(opt)
    # initialize the model
    pre_trained_model = model.prepare_model(opt)

    # configure the optimizer
    optim, sche = optimizer.prepare_optim(pre_trained_model, opt)

    # train the model
    trainer.train(pre_trained_model, optim, sche, db, opt)

    # save the trained model
    utils.save_model(pre_trained_model, opt)
Example 8
#  Copyright (c) 2019. All rights reserved.
#  Author: Ruoqi Yang
#  @Imperial College London, HKU alumni
#  mailto: [email protected]
#  This file is part of the quantitative research of Nuode Fund, contact
#  [email protected] for commercial use.
from chip import CDS
from model import prepare_model
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from utils import fp, Parameters

clf, X_test, y_test = prepare_model(CDS.from_ticker('IF'), '2016-06-13',
                                    **Parameters.standard)

# '实际' = actual labels, '预测' = model predictions
for case in ['实际', '预测']:
    prediction = clf.predict(X_test)

    if case == '实际':
        ups = X_test[y_test == 1]
        downs = X_test[y_test == 0]

    if case == '预测':
        ups = X_test[prediction == 1]
        downs = X_test[prediction == 0]

    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')

    ax.scatter(ups.values[:, 0],
               ups.values[:, 1],
Example 9
def main():
    # logging configuration
    logging.basicConfig(level=logging.INFO,
                        format="[%(asctime)s]: %(message)s")

    # parse command line input
    opt = utils.parse_arg()

    # Set GPU
    opt.cuda = opt.gpuid >= 0
    if opt.cuda:
        torch.cuda.set_device(opt.gpuid)
    else:
        # please use GPU for training, CPU version is not supported for now.
        raise NotImplementedError
        #logging.info("GPU acceleration is disabled.")

    # prepare training and validation dataset
    db = data_prepare.prepare_db(opt)

    # sanity check for FG-NET dataset, not used for now
    # assertion: the total images in the eval set lists should be 1002
    total_eval_imgs = sum([len(db['eval'][i]) for i in range(len(db['eval']))])
    logging.info('Total number of evaluation images: %d', total_eval_imgs)
    if db['train'][0].name == 'FGNET':
        assert total_eval_imgs == 1002, 'The preparation of the evalset is incorrect.'

    # training
    if opt.train:
        best_MAEs = []
        last_MAEs = []
        # record the current time
        opt.save_dir += time.asctime(time.localtime(time.time()))
        # for FG-NET, do training multiple times for leave-one-out validation
        # for CACD, do training just once
        for exp_id in range(len(db['train'])):
            # initialize the model
            model_train = model.prepare_model(opt)
            #print("model shape:")
            #  print(db['train'].head)

            #print( np.array(db['eval']).shape)
            # configure the optimizer and learning rate scheduler
            optim, sche = optimizer.prepare_optim(model_train, opt)

            # train the model and record mean average error (MAE)
            model_train, MAE, last_MAE = trainer.train(model_train, optim,
                                                       sche, db, opt, exp_id)
            best_MAEs += MAE
            last_MAEs.append(last_MAE.data.item())

            # remove the trained model for leave-one-out validation
            if exp_id != len(db['train']) - 1:
                del model_train

        #np.save('./MAE.npy', np.array(best_MAEs))
        #np.save('./Last_MAE.npy', np.array(last_MAEs))
        # save the final trained model
        #utils.save_model(model_train, opt)

    # testing a pre-trained model
    elif opt.evaluate:
        # path to the pre-trained model
        save_dir = opt.test_model_path
        #example: save_dir = '../model/CACD_MAE_4.59.pth'
        model_loaded = torch.load(save_dir)
        # test the model on the evaluation set
        # the last subject is the test set (compatible with FG-NET)
        trainer.evaluate(model_loaded, db['eval'][-1], opt)
    return
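A practical note on the evaluation branch: a checkpoint saved from a GPU run will fail to load with a plain torch.load on a CPU-only machine. A hedged variant of the loading line that maps the tensors to the selected device (reusing the opt fields from the excerpt):

        # load the checkpoint onto the chosen device (or the CPU when CUDA is disabled)
        device = torch.device('cuda', opt.gpuid) if opt.cuda else torch.device('cpu')
        model_loaded = torch.load(save_dir, map_location=device)
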
Example 10
from model import prepare_model
from main import mfcc_size, window_size, MAX_MINDWAVE_VALUE, MAX_RAW
import socket
import struct
import librosa
import numpy as np

model = prepare_model(window_size, mfcc_size)
model.load_weights("model6.hdf5")

message_size = 520


def run_server(port=4242):
    host = ''
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        print("waiting")
        s.bind((host, port))
        s.listen(1)
        conn, addr = s.accept()
        fft = [0.] * window_size * 8
        raw = [0.] * window_size * 512

        while True:
            msg = conn.recv(4 * message_size)
            received = struct.unpack('<%si' % message_size, msg)
            normalized_fft = [x / MAX_MINDWAVE_VALUE for x in received[512:]]
            assert len(normalized_fft) == 8
            normalized_raw = [x / MAX_RAW for x in received[:512]]
            fft = fft[8:] + normalized_fft
            raw = raw[512:] + normalized_raw
Example 11
def run_scenario(scenario_id, args=None):
    
    logd = create_logger(appname='{} - {} - details'.format(args.app_name, scenario_id),
                         logfile=join(args.scenario_log_dir,'scenario_{}_details.txt'.format(scenario_id)),
                         msg_format='%(asctime)s - %(message)s')
    logp = create_logger(appname='{} - {} - progress'.format(args.app_name, scenario_id),
                         logfile=join(args.scenario_log_dir, 'scenario_{}_progress.txt'.format(scenario_id)),
                         msg_format='%(asctime)s - %(message)s')
    
    logd.info('starting scenario {}'.format(scenario_id))
    
    # get connection, along with useful tools attached
    conn = connection(args, scenario_id, args.template_id, logd)
    
    # time steps
    ti = datetime.strptime(args.initial_timestep, args.timestep_format)
    tf = datetime.strptime(args.final_timestep, args.timestep_format)
    dates = [date for date in rrule.rrule(rrule.MONTHLY, dtstart=ti, until=tf)]
    
    timestep_dict = OrderedDict()
    conn.OAtHPt = {}
    for date in dates:
        oat = date.strftime(args.timestep_format)
        hpt = date.strftime(args.hydra_timestep_format)
        timestep_dict[date] = [hpt, oat]
        conn.OAtHPt[oat] = hpt
        
    template_attributes = conn.call('get_template_attributes', {'template_id': conn.template.id})
    attr_names = {}
    for ta in template_attributes:
        attr_names[ta.id] = ta.name
        
    # create the model
    instance = prepare_model(model_name='OpenAgua',
                             network=conn.network,
                             template=conn.template,
                             attr_names=attr_names,
                             timestep_format=args.timestep_format,
                             timestep_dict=timestep_dict)
    
    logd.info('model created')
    opt = SolverFactory(args.solver)
    results = opt.solve(instance, tee=False)
    #logd.info('model solved')

    old_stdout = sys.stdout
    sys.stdout = summary = StringIO()
    results.write()
    sys.stdout = old_stdout
    
    logd.info('model solved\n' + summary.getvalue())
    
    if (results.solver.status == SolverStatus.ok) and (results.solver.termination_condition == TerminationCondition.optimal):
        # this is feasible and optimal
        logd.info('Optimal feasible solution found.')
        outputnames = {'S': 'storage', 'I': 'inflow', 'O': 'outflow'}
        #outputnames = {'I': 'inflow', 'O': 'outflow'}
        result = conn.save_results(instance, outputnames)
        logd.info('Results saved.')
    elif results.solver.termination_condition == TerminationCondition.infeasible:
        logd.info('WARNING! Problem is infeasible. Check detailed results.')
        # do something about it? or exit?
    else:
        # something else is wrong
        logd.info('WARNING! Something went wrong. Likely the model was not built correctly.')    
    
    # Report that the model run is complete regardless of the solver outcome
    if args.foresight == 'perfect':
        msg = 'completed timesteps {} - {} | 1/1'.format(ti, tf)
        logd.info(msg)
        logp.info(msg)
    
    # ===========================
    # start the per timestep loop
    # ===========================
   
    #T = len(dates)
    #for t, date in enumerate(dates):
        
        # ===========================
        # prepare the time steps to use in the optimization run
        # ===========================        

        # ===========================
        # prepare the inflow forecast model
        # ===========================

        # For now, forecast based on mean monthly inflow at each catchment node
        # However, this can be changed in the future

        # ===========================
        # run the model
        # ===========================
        
        #if new_model:
            #model = create_model(data)
            #instance = model.create_instance()            
        #else:
            #instance = update_instance(instance, S0, inflow)
            #instance.preprocess()
            
        # solve the model
        #results = solver.solve(instance)
        
        # load the results
        #instance.solutions.load_from(results)
        
        # set initial conditions for the next time step
        #S0 = instance.S[isIDs[0]].value
        #if S0 is None:
            #S0 = 0.0
            
        # ===========================
        # save results to memory
        # ===========================
        
        
        #logd.info('completed timestep {} | {}/{}'.format(dt.date.strftime(date, args.timestep_format), t+1, T))
    
    # ===========================
    # save results to Hydra Server
    # ===========================
    
    
    return
Example 12
def train(dataroot_path=None, weights_directory=None, num_samples=None):
    # print(tf.__version__)
    # print("Num GPUs Available: ", len(tf.config.experimental.list_physical_devices('GPU')))
    # tf.debugging.set_log_device_placement(True)
    text = _dutils.open_file('item_names_lower.txt')

    idx2char, char2idx = _dutils.create_dictionaries(text, True)
    dictionary_size = len(idx2char.values())
    '''
        Load necessary hyperparameters from JSON files.
    '''
    general_hyperparameters = _dutils.load_dictionary(
        'hyperparameters/hyperparameters.json')
    # If the stored vocab size no longer matches the data, update it
    if general_hyperparameters.get('vocab_size') != dictionary_size:
        general_hyperparameters['vocab_size'] = dictionary_size
    model_hyperparameters = _dutils.load_dictionary(
        'hyperparameters/model_hyperparameters.json')
    '''
        Set the necessary logging directories
    '''
    checkpoint_dir = './training_checkpoints'
    checkpoint_prefix = os.path.join(checkpoint_dir,
                                     "weights-improvement-{epoch:02d}")
    '''
        Set callback functions here.
        Tensorboard callback doesn't seem to
        work well, so I take it out.
    '''
    reduce_lr = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss',
                                                     factor=0.8,
                                                     patience=2,
                                                     min_lr=0.0001)
    # Name of the checkpoint files
    checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
        filepath=checkpoint_prefix,
        save_best_only=True,
        # note: with the default monitor ('val_loss'), mode='max' keeps the
        # highest-loss checkpoint; monitor a metric to maximize or use mode='min'
        mode='max',
        save_weights_only=True)
    '''
        Prepare the model for training.
    '''
    model = _model.prepare_model(model_hyperparameters,
                                 load_weights=False,
                                 weight_directory=checkpoint_dir,
                                 verbose=False)
    '''
        Prepare the dataset for training.
    '''
    padded_encoded_text = _dutils.preprocess_dataset(text)
    text_train, text_test = _dutils.create_train_test_data(
        padded_encoded_text,
        general_hyperparameters['seq_len'],
        num_samples=num_samples,
        rand_state=420,
        verbose=True)
    train_dataset = _dutils.create_dataset(text_train, dictionary_size,
                                           idx2char, char2idx, True, None)
    test_dataset = _dutils.create_dataset(text_test, dictionary_size, idx2char,
                                          char2idx, True, None)
    dset = train_dataset.shuffle(general_hyperparameters['buffer_size']).batch(
        general_hyperparameters['batch_size'])
    t_dset = test_dataset.shuffle(
        general_hyperparameters['buffer_size']).batch(
            general_hyperparameters['batch_size'])
    # Begin training the model.
    history = model.fit(dset,
                        epochs=general_hyperparameters['epochs'],
                        validation_data=t_dset,
                        validation_freq=1,
                        verbose=1,
                        callbacks=[reduce_lr, checkpoint_callback])
    history_json_file = "history_{}.json".format(
        datetime.datetime.now().strftime("%Y%m%d-%H%M%S"))
    # model.fit returns a History object; its .history attribute is the metrics dict
    _dutils.save_dictionary(history.history, history_json_file)
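To resume training from the checkpoints written above, the usual pattern is to locate the most recent checkpoint and load its weights before calling fit again. A sketch under the assumption that the model object is built the same way (project-specific helpers, so treat this as illustrative):

    # hypothetical resume step: pick up the latest checkpoint written by the callback
    latest = tf.train.latest_checkpoint(checkpoint_dir)
    if latest is not None:
        model.load_weights(latest)
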
Example 13
    parser.add_argument('--heads', type=str, default='[["icon", 1, 1], ["test", 1, 1], ["synt", 1, 1], ["synt", 5, 3], ["synt", 10, 5]]',
                        help='Necessary heads with random forest and kNN in format \'[(index_type, index_pictures_per_class, knn_neighbors)]\'')
    args = parser.parse_args()
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_ids

    classes, classes_rare, classes_often = get_classes(args.real_path)

    data_folders = []
    if args.real_path != '':
        data_folders.append(args.real_path)
    if args.synt_path != '':
        data_folders.append(args.synt_path)
    train_filelist = get_list_of_files(args.exp_name, data_folders, classes_often + classes_rare if args.train_type == 'all_classes' else classes_often)

    if args.train_only_heads:
        model = prepare_model(config)
        model.load_weights(args.exp_name + "_" + args.nn_model)
    else:
        model = train_model(train_filelist, config, classes, classes_rare)
        model.save(args.exp_name + "_" + args.nn_model)

    img_shape = (config['img']['img_size'], config['img']['img_size'], 3)

    for head in json.loads(args.heads):
        print("Training head: ", head)
        if head[0] == 'icon':
            index_x, index_y = read_index_icon(args.icons_path, img_shape, classes, args.replication_factor)
        if head[0] == 'test':
            index_file, noindex_file = create_test_index(args.exp_name, args.test_path, args.use_existing_index)
            index_x, index_y = read_index(index_file, img_shape, classes, args.replication_factor)
        if head[0] == 'synt':
Example 14
def main(args):
    '''
    init 
    '''
    # read images and preprocess
    if args.content_img:
        content_img = read_image(args.content_img, args.hard_width)
    style_img = read_image(args.style_img, args.hard_width)

    # get stacked 0./1. masks
    if args.mask_n_colors > 1:  # colorful
        target_masks_origin, style_masks_origin = read_colorful_mask(
            args.target_mask, args.style_mask, args.hard_width,
            args.mask_n_colors)

    else:  # single mask
        if args.target_mask is None:
            if args.content_img:
                target_masks_origin = np.ones(content_img.shape[0:3]).astype(
                    np.float32)
            else:
                target_masks_origin = np.ones(style_img.shape[0:3]).astype(
                    np.float32)
        else:
            target_masks_origin = read_single_mask(args.target_mask,
                                                   args.hard_width)

        if args.style_mask is None:
            style_masks_origin = np.ones(style_img.shape[0:3]).astype(
                np.float32)
        else:
            style_masks_origin = read_single_mask(args.style_mask,
                                                  args.hard_width)

    # init img & target shape
    if args.content_img:
        target_shape = content_img.shape
        init_img = get_init_image(content_img, args.init_noise_ratio)
    else:
        target_shape = [1] + list(target_masks_origin.shape[1:3]) + [3]
        init_img = np.random.uniform(-20., 20.,
                                     target_shape).astype(np.float32)

    # check shape & number of masks
    if args.content_img and content_img.shape[
            1:3] != target_masks_origin.shape[1:3]:
        print('content and mask have different shapes')
        sys.exit(1)
    if style_img.shape[1:3] != style_masks_origin.shape[1:3]:
        print('style and mask have different shapes')
        sys.exit(1)
    if target_masks_origin.shape[0] != style_masks_origin.shape[0]:
        print('content and style have different numbers of masks')
        sys.exit(1)
    '''
    compute features & build net
    '''
    # prepare model weights
    vgg_weights = model.prepare_model(args.model_path)

    # feature maps of specific layers
    if args.content_img:
        content_features = compute_features(vgg_weights,
                                            args.feature_pooling_type,
                                            content_img, args.content_layers)
    style_features = compute_features(vgg_weights, args.feature_pooling_type,
                                      style_img, args.style_layers)

    # masks of specific layers
    target_masks = compute_layer_masks(target_masks_origin, args.style_layers,
                                       args.mask_downsample_type)
    style_masks = compute_layer_masks(style_masks_origin, args.style_layers,
                                      args.mask_downsample_type)

    # build net
    target_net = build_target_net(vgg_weights, args.feature_pooling_type,
                                  target_shape)
    '''
    loss 
    '''
    if args.content_img:
        content_loss = sum_content_loss(target_net, content_features,
                                        args.content_layers,
                                        args.content_layers_weights,
                                        args.content_loss_normalization)
    else:
        content_loss = 0.

    style_masked_loss = sum_masked_style_loss(target_net, style_features,
                                              target_masks, style_masks,
                                              args.style_layers,
                                              args.style_layers_weights,
                                              args.mask_normalization_type)

    if args.tv_weight != 0.:
        tv_loss = sum_total_variation_loss(target_net['input'], target_shape)
    else:
        tv_loss = 0.

    total_loss = args.content_weight * content_loss + \
                 args.style_weight * style_masked_loss + \
                 args.tv_weight * tv_loss
    '''
    train 
    '''
    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)

    if args.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(args.learning_rate)
        train_op = optimizer.minimize(total_loss)
        # initialize variables (Adam introduces its own variables that must be initialized)
        init_op = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init_op)
        sess.run(target_net['input'].assign(init_img))
        #train
        for i in range(args.iteration):
            sess.run(train_op)
            if i % args.log_iteration == 0:
                print('Iteration %d: loss = %f' %
                      (i + 1, sess.run(total_loss)))
                result = sess.run(target_net['input'])
                output_path = os.path.join(args.output_dir,
                                           'result_%s.png' % (str(i).zfill(4)))
                write_image(output_path, result)

    elif args.optimizer == 'lbfgs':
        optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            total_loss,
            method='L-BFGS-B',
            options={
                'maxiter': args.iteration,
                'disp': args.log_iteration
            })
        # init
        init_op = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init_op)
        sess.run(target_net['input'].assign(init_img))
        # train
        optimizer.minimize(sess)
    '''
    out
    '''
    print('Iteration %d: loss = %f' % (args.iteration, sess.run(total_loss)))
    result = sess.run(target_net['input'])
    output_path = os.path.join(args.output_dir, 'result_final.png')
    write_image(output_path, result)
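
This last example targets the TensorFlow 1.x API (tf.Session, tf.train.AdamOptimizer, tf.contrib.opt). Under TensorFlow 2.x the graph-mode parts can usually be run through the compatibility module, although tf.contrib no longer exists, so the L-BFGS branch would need a replacement such as tfp.optimizer.lbfgs_minimize. A sketch of the import shim only (an assumption about the target environment, not part of the original script):

# run the TF 1.x-style graph code on TensorFlow 2.x (does not cover tf.contrib)
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()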