Example #1
 def to_graph(self, dtype="float32", ctx=mx.cpu()):
     """ Convenient helper function to create model runtime,
             returns gluon.nn.SymbolBlock.
     """
     graph = gluon.nn.SymbolBlock(self.symbol,
         [mx.sym.var(n) for n in self.input_names()])
     utils.load_parameters(graph,
                           convert_params_dtype(self.params,
                                                dest_dtype=dtype),
                           ctx=ctx)
     return graph
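A minimal usage sketch, assuming a wrapper instance (obtained from a hypothetical loader) that exposes the symbol, params and input_names() used above:

import mxnet as mx

model = MRTModel.load("/path/to/model")  # hypothetical wrapper and loader
graph = model.to_graph(dtype="float32", ctx=mx.cpu())
# SymbolBlock behaves like any Gluon block; input shape is illustrative
out = graph(mx.nd.random.uniform(shape=(1, 3, 224, 224)))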
Example #2
def load_model_and_continue_training(model_save_path, json_parameters_path,
                                     save_new_result_sheet):
    # append function name to the call sequence
    calling_sequence.append("[load_model_and_continue_training]==>>")
    print(" ==============================================")
    print(
        " [INFO] Entering function[load_model_and_continue_training]  in core.py"
    )
    # -----------------------------------------------------------------------------------------------------
    # load saved parameters
    parameters = load_parameters(json_parameters_path)
    print("loaded parameters", parameters)
    # load saved model
    model = load_pretrained_model(model_path=model_save_path)
    # -----------------------------------------------------------------------------------------------------
    # get train_generator, validation_generator, test_generator and updated
    # parameters from prepare_train_valid_data, after loading the train,
    # validation and test data (class names, and data for each class)
    train_generator, validation_generator, test_generator, parameters = prepare_train_valid_data(
        parameters)
    # ------------------------------------------------------------------------------------------------------
    # check if train on parallel gpus
    if parameters['device'] == 'gpu_parallel':
        print(" [INFO] target multi gpus...")
        _parallel = True
    else:
        print(" [INFO] target single gpu...")
        _parallel = False
    #-----------------------------------------------------------------------------------------------------------------
    # start training
    history, parameters = train(model,
                                parameters,
                                train_generator,
                                validation_generator,
                                test_generator,
                                parallel=_parallel)
    #-----------------------------------------------------------------------------------------------------------------
    # apply testset
    # TODO: change this to work with generators
    max_prediction_time, parameters = calculate_accuaracy_data_set(
        DataPath=parameters['test_data_path'],
        parameters=parameters,
        model=model)
    print("max prediction time = ", max_prediction_time)
    #-----------------------------------------------------------------------------------------------------------------
    # save train result
    save_train_result(history,
                      parameters,
                      initiate_new_result_sheet=save_new_result_sheet)
    #-----------------------------------------------------------------------------------------------------------------
    update_accuracy_in_paramters_on_end_then_save_to_json(parameters, history)
    #-----------------------------------------------------------------------------------------------------------------
    # clear session
    del model, train_generator, validation_generator, parameters, history
    K.clear_session()

    print(" [INFO] calling sequence -> ", calling_sequence)
    calling_sequence.clear()

    print(" [INFO] Leaving function[load_model_and_continue_training]")
    print(" ==============================================")
Example #3
    def load(cls, dirname, session, training=False):
        """
        Load a previously saved file.
        :param dirname: directory with model files
        :param session: tensorflow session
        :param training: whether to create training tensors
        :return: an instance of MultiFeedForward
        :rtype: MultiFeedForwardClassifier
        """
        params = utils.load_parameters(dirname)
        model = cls._init_from_load(params, training)

        tensorflow_file = os.path.join(dirname, 'model')
        saver = tf.train.Saver(tf.trainable_variables())
        saver.restore(session, tensorflow_file)

        # if training, optimizer values still have to be initialized
        if training:
            train_vars = [
                v for v in tf.global_variables()
                if v.name.startswith('training')
            ]
            init_op = tf.variables_initializer(train_vars)
            session.run(init_op)

        return model
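A usage sketch for this TF1-style loader, with a hypothetical model directory:

import tensorflow as tf

with tf.Session() as session:
    model = MultiFeedForwardClassifier.load('saved-model/', session,
                                            training=False)
    # the session now holds the restored weights; run inference with it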
Example #4
def describe_model(model_ix, op_dict):
    modeldir = os.path.join(os.getcwd(), 'training')
    fname = os.path.join(modeldir, 'model' + str(model_ix).zfill(3),
                         'parameters')
    opts = utils.load_parameters(fname)
    m_dict = vars(opts)
    print(f"\nix: {model_ix}, Model attributes:")
    for k in op_dict.keys():
        print(f'{k}: {m_dict[k]}')
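Only the keys of op_dict are read, so a call sketch looks like this (attribute names hypothetical):

describe_model(3, {'lr': None, 'batch_size': None})  # prints lr and batch_size of ./training/model003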
Example #5
def main():
    args = parser.parse_args()
    
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    hyperparams = load_parameters(args.hyperparameter_path)
    
    orion_hp_string, hyperparams = prep_orion(args, hyperparams)

    save_loc, hyperparams = generate_save_loc(args, hyperparams, orion_hp_string)
    
    # make sure the save directory exists before writing the parameter file
    if not os.path.exists(save_loc):
        os.makedirs(save_loc)

    save_parameters(save_loc, hyperparams)
        
    data_dict   = read_data(args.data_path)
    
    train_dl, valid_dl, plotter, model, objective = prep_model(model_name  = args.model,
                                                               data_dict   = data_dict,
                                                               data_suffix = args.data_suffix,
                                                               batch_size  = args.batch_size,
                                                               device = device,
                                                               hyperparams = hyperparams)
        
    print_model_description(model)
        
    transforms = trf.Compose([])
    
    optimizer, scheduler = prep_optimizer(model, hyperparams)
        
    if args.use_tensorboard:
        writer, rm_plotter = prep_tensorboard(save_loc, plotter, args.restart)
    else:
        writer = None
        rm_plotter = None
        
    run_manager = RunManager(model      = model,
                             objective  = objective,
                             optimizer  = optimizer,
                             scheduler  = scheduler,
                             train_dl   = train_dl,
                             valid_dl   = valid_dl,
                             transforms = transforms,
                             writer     = writer,
                             plotter    = rm_plotter,
                             max_epochs = args.max_epochs,
                             save_loc   = save_loc,
                             do_health_check = args.do_health_check,
                             detect_local_minima = args.detect_local_minima,
                             load_checkpoint=(not args.restart))

    run_manager.run()
        
    save_figs(save_loc, run_manager.model, run_manager.valid_dl, plotter)
    with open(os.path.join(save_loc, 'loss.pkl'), 'wb') as f:
        pickle.dump(run_manager.loss_dict, f)
Example #6
def test_yxnet_mnist():
    mnist_sym = make_mnist_graph()

    inputs_ext = {
        'data': {
            'shape': (1, 1, 28, 28),
            'precision': 8,
        }
    }
    in_shape = (1, 1, 28, 28)
    arg_shapes, _, aux_shapes = mnist_sym.infer_shape(data=in_shape)
    args, auxs = mnist_sym.list_arguments(), mnist_sym.list_auxiliary_states()
    infer_shapes = {args[i]: arg_shapes[i] for i in range(len(args))}
    infer_shapes.update({auxs[i]: aux_shapes[i] for i in range(len(auxs))})

    root = "/home/serving/warehouse"
    _, bd = load_parameters(
        mnist_sym, infer_shapes,
        root + "/ca3d0286d5758697cdef653c1375960a868ac08a/data/params")
    mnist_sym, bd = spass.mx_set_precs(mnist_sym, bd, inputs_ext)

    dump_sym, dump_par = '/tmp/mnist_yxnet.symbol', '/tmp/mnist_yxnet.params'
    with open(dump_sym, 'w') as fout:
        fout.write(mnist_sym.tojson())
    nd.save(dump_par, bd)

    inputs = [mx.sym.var('data')]
    data = np.load(root + '/ba9fedfc87ccb6064fcd437fd2287f5edef1bd84/data')
    data = nd.array([data.astype(np.int8)])

    if False:  # toggle: set to True to run the Gluon path (and the final print)
        graph = nn.SymbolBlock(mnist_sym, inputs)
        utils.load_parameters(graph, bd)
        res = graph.forward(data).astype('int32')
    else:
        prefix = "/tmp/yxnet/mnist"
        dump_sym, dump_params = prefix + ".json", prefix + ".params"
        print(sutils.sym_collect_attr(mnist_sym))
        spass.mxnet_to_nnvm(mnist_sym, bd, {'data': {
            'shape': (1, 1, 28, 28)
        }}, dump_sym, dump_params)
        exit()
    print(res.asnumpy().flatten()[:100])
Example #7
    def load(cls, dirname, session=None, graph=None, training=False, re_init=False, batch_size=32):
        """
        Load a previously saved file.

        :param dirname: directory with model files
        :param session: tensorflow session
        :param training: whether to create training tensors
        :return: an instance of MultiFeedForward
        :rtype: MultiFeedForwardClassifier
        """
        params = utils.load_parameters(dirname)
        if graph is not None:
            with graph.as_default():
                model = cls._init_from_load(params, training=training, batch_size=batch_size)
                model_vars = tf.trainable_variables()
        else:
            model = cls._init_from_load(params, training)
            model_vars = tf.trainable_variables()
        if session is None:
            session = tf.Session(graph=graph)

        tensorflow_file = os.path.join(dirname, 'model')
        if not params['train_embed'] and FLAGS.train_ranker_only:
            saver = tf.train.Saver(model_vars)
            try:
                saver.restore(session, tensorflow_file)
            except Exception:
                # fall back to restoring only the variables that were saved,
                # then rebuild a saver over the full variable set
                model_saved_vars = [v for v in model_vars
                                    if 'matching/Variable' not in v.name]
                saver = tf.train.Saver(model_saved_vars)
                saver.restore(session, tensorflow_file)
                saver = tf.train.Saver(model_vars)
        else:
            saver = tf.train.Saver(model_vars)
            saver.restore(session, tensorflow_file)

        if training:
            if graph is not None:
                with graph.as_default():
                    train_vars = [v for v in tf.global_variables() if 'Adagrad' in v.name]
                    init_op = tf.variables_initializer(train_vars)
                session.run(init_op)
            else:
                train_vars = [v for v in tf.global_variables()
                            if v.name.startswith('training')]
                init_op = tf.variables_initializer(train_vars)
                session.run(init_op)
            return model, session, saver

        return model, session, saver
Example #8
def main():

    utils.set_up_data_directories()

    snapshots = {}
    parameters = {}
    for dataset in config.datasets:
        # shape: N_h x N
        # i.e. #DOFs x #snapshots
        snapshots[dataset] = utils.load_snapshots(dataset)
        parameters[dataset] = utils.load_parameters(dataset)

    for component in config.components:
        assert config.datasets[0] == 'train', 'The first dataset must be train'
        print(f'\nComputing targets for component {component}')

        for dataset in config.datasets:
            # Snapshot matrix, non-centered
            S_n = utils.reduce(snapshots[dataset], component)

            if dataset == 'train':
                # Compute and store ..
                # .. mean and POD
                S_mean = np.mean(S_n, axis=1)
                S = np.array([col - S_mean for col in S_n.T]).T
                V, D = do_POD(S)
                utils.save_POD(V, D, S_mean, component)
                # .. scaler
                scaler = StandardScaler()
                scaler.fit(parameters[dataset])
                utils.save_scaler(scaler)
            else:
                # Compute centered snapshot matrix
                S = np.array([col - S_mean for col in S_n.T]).T

            # Now V, D, S_mean and scaler are available

            # Compute and store ..
            # .. features
            features = compute_features(scaler, parameters[dataset])
            utils.save_features(dataset, features)
            # .. targets
            targets = compute_targets(S, V, D)
            utils.save_targets(dataset, component, targets)
            # .. projection error
            err_POD_sq = compute_error_POD_sq(S, V, D)
            utils.save_error_POD_sq(dataset, component, err_POD_sq)
Example #9
    def load(cls, dirname, session):
        """
        Load a previously saved file.
        :param dirname: directory with model files
        :param session: tensorflow session
        :return: an instance of MultiFeedForward
        """
        params = utils.load_parameters(dirname)

        model = cls(params['num_units'], params['num_classes'],
                    params['vocab_size'], params['embedding_size'], 
                    params['max_len'], params['mode'], training=False)

        tensorflow_file = os.path.join(dirname, 'model')
        saver = tf.train.Saver(get_weights_and_biases())
        saver.restore(session, tensorflow_file)

        return model
Example #10
def analyze_dist():
    root = './training/'
    ix = np.arange(3, 23)
    dirs = [os.path.join(root, 'model' + str(n).zfill(3)) for n in ix]
    dfs = []
    for d in dirs:
        fname = os.path.join(d, 'parameters')
        opts = utils.load_parameters(fname)
        df = analysis.analyze_selectivity(opts, eval=False)
        if df is None:
            continue
        dfs.append(df)

    df = pd.concat(dfs, axis=1)
    # print('concatenated df2\n', pd.concat(dfs, axis=1))
    dfname = os.path.join(root, 'selectivity_counts.h5')
    # df.to_hdf(dfname, key='df', mode='w')
    df.to_pickle(dfname)
    analysis.table()
Example #11
File: train_ae.py  Project: blazm/baxter
def prepare_optimizer_object(optimizer_string, lr=0.001):
    # TODO: create Adam, AdamW with custom parameters if needed
    from AdamW import AdamW
    from keras.optimizers import Adam
    if "adamw" in optimizer_string:
        parameters_filepath = "config.ini"
        parameters = load_parameters(parameters_filepath)
        num_epochs = int(parameters["hyperparam"]["num_epochs"])
        batch_size = int(parameters["hyperparam"]["batch_size"])
        opt = AdamW(lr=lr,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=None,
                    decay=0.,
                    weight_decay=0.025,
                    batch_size=batch_size,
                    samples_per_epoch=1000,
                    epochs=num_epochs)
        return opt
    elif 'adam' in optimizer_string:
        opt = Adam(lr=lr)
        return opt
    return optimizer_string
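A usage sketch; the 'adamw' branch assumes config.ini provides the [hyperparam] entries read above, and model stands for any compile-ready Keras model (hypothetical):

opt = prepare_optimizer_object('adamw', lr=0.0005)
model.compile(optimizer=opt, loss='mse')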
Example #12
def find_model(op_dict):
    # load parameter files, search for appropriate model
    modeldir = os.path.join(os.getcwd(), 'training')
    exp = re.compile('model([0-9]+)')
    found = []
    with os.scandir(modeldir) as models:
        for mod in models:
            dirpath = os.path.join(modeldir, mod.name)
            if not os.listdir(dirpath):
                continue

            match = exp.match(mod.name)
            if match:
                fname = os.path.join(modeldir, mod.name, 'parameters')
                opts = utils.load_parameters(fname)
                m_dict = vars(opts)
                m_property_match = np.array(
                    [m_dict[k] == v for k, v in op_dict.items()])
                if m_property_match.all():  # all requested properties match
                    print(f'Model {match.group(1)} located in ' +
                          opts.save_path)
                    found.append(opts)
    return found
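For example, locating every model whose stored options match a given setting (the option keys here are hypothetical):

matches = find_model({'lr': 0.001, 'stationary': 0})
for opts in matches:
    print(opts.save_path)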
Example #13
                               dilation=(1, 1, 1)))

        self.add_module('relu1', nn.ReLU())


if __name__ == "__main__":

    import pdb

    from utils import load_parameters

    x = torch.rand((10, 1, 20, 128, 128)).to('cuda')
    print('input size', x.shape)
    batch_size, num_ch, seq_len, w, h = x.shape

    hyperparams = load_parameters('./hyperparameters/lorenz/conv3d_lfads.yaml')

    model = Conv3d_LFADS_Net(
        input_dims=(100, 128, 128),
        conv_type='2d',
        channel_dims=hyperparams['model']['channel_dims'],
        obs_encoder_size=hyperparams['model']['obs_encoder_size'],
        obs_latent_size=hyperparams['model']['obs_latent_size'],
        obs_controller_size=hyperparams['model']['obs_controller_size'],
        conv_dense_size=hyperparams['model']['conv_dense_size'],
        factor_size=hyperparams['model']['factor_size'],
        g_encoder_size=hyperparams['model']['g_encoder_size'],
        c_encoder_size=hyperparams['model']['c_encoder_size'],
        g_latent_size=hyperparams['model']['g_latent_size'],
        u_latent_size=hyperparams['model']['u_latent_size'],
        controller_size=hyperparams['model']['controller_size'],
Example #14
def main():
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('Using device: %s' % device, flush=True)
    args = parser.parse_args()
    _, system_name, model_name = args.parameter_path.split('/')[-1].split(
        '.')[0].split('_')

    # Load hyperparameters
    hyperparams = load_parameters(args.parameter_path)

    # Alter run name to describe parameter settings and date of model run
    hyperparams['run_name'] += '_f%i_g1%i_eg1%i_u%i' % (
        hyperparams['factors_dim'], hyperparams['g_dim'],
        hyperparams['g0_encoder_dim'], hyperparams['u_dim'])

    if hyperparams['u_dim'] > 0:
        hyperparams['run_name'] += '_c1%i_ec1%i' % (
            hyperparams['c_controller_dim'], hyperparams['c_encoder_dim'])

    if model_name == 'ladder':
        hyperparams['run_name'] += '_g2%i_c2%i_eg2%i_ec2%i' % (
            hyperparams['h_dim'], hyperparams['a_controller_dim'],
            hyperparams['h0_encoder_dim'], hyperparams['a_encoder_dim'])
    elif model_name in ['gaussian', 'edgeworth']:
        hyperparams['run_name'] += '_k%i' % hyperparams['kernel_dim']

    hyperparams['run_name'] += '_%s' % time.strftime('%y%m%d')

    # Load data
    data_dict = read_data(args.data_path)
    datatype = model_name if model_name in ['spikes', 'oasis'] else 'fluor'

    train_data = torch.Tensor(data_dict['train_spikes']).to(device)
    valid_data = torch.Tensor(data_dict['valid_spikes']).to(device)

    train_truth = {'rates': data_dict['train_rates']}

    valid_truth = {'rates': data_dict['valid_rates']}

    if model_name == 'ladder':
        train_truth['spikes'] = data_dict['train_spikes']
        valid_truth['spikes'] = data_dict['valid_spikes']

    if 'train_latent' in data_dict.keys():
        train_truth['latent'] = data_dict['train_latent']

    if 'valid_latent' in data_dict.keys():
        valid_truth['latent'] = data_dict['valid_latent']

    train_ds = torch.utils.data.TensorDataset(train_data)
    valid_ds = torch.utils.data.TensorDataset(valid_data)

    num_trials, num_steps, num_cells = train_data.shape
    print('Data dimensions: N=%i, T=%i, C=%i' % train_data.shape, flush=True)
    print('Number of datapoints = %s' % train_data.numel(), flush=True)

    # Initialize Network
    if model_name in ['spikes', 'oasis']:
        Net = LFADS
    elif model_name in ['gaussian', 'edgeworth']:
        Net = MomentLFADS
    else:
        Net = LadderLFADS
    model = Net(inputs_dim=num_cells,
                T=num_steps,
                dt=float(data_dict['dt']),
                device=device,
                model_hyperparams=hyperparams).to(device)

    # Train Network
    if args.batch_size:
        batch_size = args.batch_size
    else:
        batch_size = int(num_trials / 16)

    model.fit(train_dataset=train_ds,
              valid_dataset=valid_ds,
              train_truth=train_truth,
              valid_truth=valid_truth,
              max_epochs=args.max_epochs,
              batch_size=batch_size,
              use_tensorboard=False,
              health_check=False,
              home_dir=args.output)
Example #15
    ax[0].set_title('Full activation', fontsize=6)
    ax[1].set_title('Full current', fontsize=6)
    ax[2].set_title('EL current', fontsize=6)
    ax[3].set_title('ER current', fontsize=6)
    ax[4].set_title('IL current', fontsize=6)
    ax[5].set_title('IR current', fontsize=6)
    ax[6].set_title('L current', fontsize=6)
    ax[7].set_title('R current', fontsize=6)
    [hide_ticks(a) for a in ax]
    if title:
        plt.suptitle(title)

    # plt.tight_layout()
    f.savefig(os.path.join(plot_path, 'excitatory ring current.pdf'),
              format='pdf',
              bbox_inches='tight')
    plt.close()


if __name__ == '__main__':
    root = './training/'
    ix = [3]
    dirs = [os.path.join(root, 'model' + str(n).zfill(3)) for n in ix]
    for d in dirs:
        fname = os.path.join(d, 'parameters')
        opts = utils.load_parameters(fname)
        # run_silencing(opts)
        analyze_experiment(opts)

        # run_posn_blast(opts)
Example #16
 def reload(self, path):
     self.params = load_parameters(path + "/parameters.pkl")
     self.m = load_parameters(path + "/m.pkl")
     self.v = load_parameters(path + "/v.pkl")
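A sketch of the matching save/load helpers, assuming load_parameters is a thin pickle wrapper (hypothetical, shown for symmetry):

import pickle

def save_parameters(obj, path):
    # counterpart to load_parameters: one pickled object per file
    with open(path, 'wb') as f:
        pickle.dump(obj, f)

def load_parameters(path):
    with open(path, 'rb') as f:
        return pickle.load(f)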
Example #17
def main():
    args = parser.parse_args()
    
    os.environ["CUDA_VISIBLE_DEVICES"]=str(args.gpu)
    if torch.cuda.is_available():
        device = torch.device('cuda')
    else:
        device = 'cpu'
#     device = 'cuda' if torch.cuda.is_available() else 'cpu'

    hyperparams = load_parameters(args.hyperparameter_path)
    
    orion_hp_string = ''
    if args.lr:
        lr = args.lr
        hyperparams['optimizer']['lr_init'] = lr
        hyperparams['scheduler']['lr_min']  = lr * 1e-3
        orion_hp_string += 'lr= %.4f\n'%lr
    
    data_name = args.data_path.split('/')[-1]

    model_name = hyperparams['model_name']
    mhp_list = [key.replace('size', '').replace('deep', 'd').replace('obs', 'o').replace('_', '')[:4] + str(val)
                for key, val in hyperparams['model'].items() if 'size' in key]

    mhp_list.sort()
    hyperparams['run_name'] = '_'.join(mhp_list)
    orion_hp_string = orion_hp_string.replace('\n', '-').replace(' ', '').replace('=', '')
    orion_hp_string = '_orion-'+orion_hp_string
    hyperparams['run_name'] += orion_hp_string
    save_loc = '%s/%s/%s/%s/'%(args.output_dir, data_name, model_name, hyperparams['run_name'])

    if not os.path.exists(save_loc):
        os.makedirs(save_loc)
        
    # Setup DataLoader goes here
    data_dict   = read_data(args.data_path)
    train_dl = torch.utils.data.DataLoader(
        SyntheticCalciumVideoDataset(traces=data_dict['train_fluor'],
                                     cells=data_dict['cells'], device=device),
        batch_size=args.batch_size, drop_last=True)
    valid_dl = torch.utils.data.DataLoader(
        SyntheticCalciumVideoDataset(traces=data_dict['valid_fluor'],
                                     cells=data_dict['cells'], device=device),
        batch_size=args.batch_size, drop_last=True)

    num_trials, num_steps, num_cells = data_dict['train_fluor'].shape
    num_cells, width, height = data_dict['cells'].shape

    model = Conv3d_LFADS_Net(input_dims      = (num_steps, width, height),
                             conv_dense_size = hyperparams['model']['conv_dense_size'],
                             channel_dims    = hyperparams['model']['channel_dims'],
                             factor_size     = hyperparams['model']['factor_size'],
                             g_encoder_size  = hyperparams['model']['g_encoder_size'],
                             c_encoder_size  = hyperparams['model']['c_encoder_size'],
                             g_latent_size   = hyperparams['model']['g_latent_size'],
                             u_latent_size   = hyperparams['model']['u_latent_size'],
                             controller_size = hyperparams['model']['controller_size'],
                             generator_size  = hyperparams['model']['generator_size'],
                             prior           = hyperparams['model']['prior'],
                             clip_val        = hyperparams['model']['clip_val'],
                             conv_dropout    = hyperparams['model']['conv_dropout'],
                             lfads_dropout   = hyperparams['model']['lfads_dropout'],
                             do_normalize_factors = hyperparams['model']['normalize_factors'],
                             max_norm        = hyperparams['model']['max_norm'],
                             device          = device)
    
    model = _CustomDataParallel(model).to(device)
    model.to(dtype=train_dl.dataset.dtype)
    torch.set_default_dtype(train_dl.dataset.dtype)
    
    transforms = trf.Compose([])
    
    loglikelihood = LogLikelihoodGaussian()
    objective = Conv_LFADS_Loss(loglikelihood=loglikelihood,
                                loss_weight_dict={'kl': hyperparams['objective']['kl'],
                                                  'l2': hyperparams['objective']['l2']},
                                l2_con_scale=hyperparams['objective']['l2_con_scale'],
                                l2_gen_scale=hyperparams['objective']['l2_gen_scale']).to(device)
    total_params = 0
    for ix, (name, param) in enumerate(model.named_parameters()):
        print(ix, name, list(param.shape), param.numel(), param.requires_grad)
        total_params += param.numel()
    
    print('Total parameters: %i'%total_params)
    
    optimizer = opt.Adam([p for p in model.parameters() if p.requires_grad],
                         lr=hyperparams['optimizer']['lr_init'],
                         betas=hyperparams['optimizer']['betas'],
                         eps=hyperparams['optimizer']['eps'])
    
    scheduler = LFADS_Scheduler(optimizer      = optimizer,
                                mode           = 'min',
                                factor         = hyperparams['scheduler']['scheduler_factor'],
                                patience       = hyperparams['scheduler']['scheduler_patience'],
                                verbose        = True,
                                threshold      = 1e-4,
                                threshold_mode = 'abs',
                                cooldown       = hyperparams['scheduler']['scheduler_cooldown'],
                                min_lr         = hyperparams['scheduler']['lr_min'])

    TIME = torch._np.arange(0, num_steps*data_dict['dt'], data_dict['dt'])
    
    train_truth = {}
    if 'train_latent' in data_dict.keys():
        train_truth['latent'] = data_dict['train_latent']
        
    valid_truth = {}
    if 'valid_latent' in data_dict.keys():
        valid_truth['latent'] = data_dict['valid_latent']

    plotter = {'train' : Plotter(time=TIME, truth=train_truth),
               'valid' : Plotter(time=TIME, truth=valid_truth)}
    
    if args.use_tensorboard:
        import importlib
        #if importlib.util.find_spec('torch.utils.tensorboard'):
        if importlib.util.find_spec('tensorboardX'):
            tb_folder = save_loc + 'tensorboard/'
            if not os.path.exists(tb_folder):
                os.mkdir(tb_folder)
            elif os.path.exists(tb_folder) and args.restart:
                os.system('rm -rf %s'%tb_folder)
                os.mkdir(tb_folder)

            #from torch.utils.tensorboard import SummaryWriter
            from tensorboardX import SummaryWriter
            writer = SummaryWriter(tb_folder)
            rm_plotter = plotter
        else:
            writer = None
            rm_plotter = None
    else:
        writer = None
        rm_plotter = None
        
    run_manager = RunManager(model      = model,
                             objective  = objective,
                             optimizer  = optimizer,
                             scheduler  = scheduler,
                             train_dl   = train_dl,
                             valid_dl   = valid_dl,
                             transforms = transforms,
                             writer     = writer,
                             plotter    = rm_plotter,
                             max_epochs = args.max_epochs,
                             save_loc   = save_loc,
                             do_health_check = args.do_health_check)

    run_manager.run()
    
#     if importlib.find_loader('orion'):
#          report_results([dict(name= 'valid_loss',
#                              type= 'objective',
#                              value= run_manager.best)])

    fig_folder = save_loc + 'figs/'
    
    if os.path.exists(fig_folder):
        os.system('rm -rf %s'%fig_folder)
    os.mkdir(fig_folder)
    
    
    model_to_plot = Conv3d_LFADS_Net(input_dims      = (num_steps, width, height),
                    conv_dense_size = hyperparams['model']['conv_dense_size'],
                    channel_dims    = hyperparams['model']['channel_dims'],
                    factor_size     = hyperparams['model']['factor_size'],
                    g_encoder_size  = hyperparams['model']['g_encoder_size'],
                    c_encoder_size  = hyperparams['model']['c_encoder_size'],
                    g_latent_size   = hyperparams['model']['g_latent_size'],
                    u_latent_size   = hyperparams['model']['u_latent_size'],
                    controller_size = hyperparams['model']['controller_size'],
                    generator_size  = hyperparams['model']['generator_size'],
                    prior           = hyperparams['model']['prior'],
                    clip_val        = hyperparams['model']['clip_val'],
                    conv_dropout    = hyperparams['model']['conv_dropout'],
                    lfads_dropout   = hyperparams['model']['lfads_dropout'],
                    do_normalize_factors = hyperparams['model']['normalize_factors'],
                    max_norm        = hyperparams['model']['max_norm'],
                    device          = 'cuda:0')
    state_dict = torch.load(save_loc + 'checkpoints/'+'best.pth')
    model_to_plot.load_state_dict(state_dict['net'])
    model_to_plot = model_to_plot.to('cuda:0')
    import matplotlib
    matplotlib.use('Agg')
    
    fig_dict = plotter['valid'].plot_summary(model=model_to_plot,
                                             dl=run_manager.valid_dl,
                                             mode='video', num_average=4,
                                             save_dir=fig_folder)
    for k, v in fig_dict.items():
        if isinstance(v, matplotlib.figure.Figure):
            v.savefig(fig_folder + k + '.svg')
Example #18
def filter_students(students, demographics_file_path, gender):
    gender_students = []
    with open(demographics_file_path, "r") as demographics_file:
        for line in demographics_file.readlines():
            splits = line.split(";")
            if splits[1] == gender:
                gender_students.append(splits[0])
    return students.intersection(set(gender_students))
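A usage sketch, assuming a semicolon-separated demographics file whose lines look like student_id;gender;... (file name hypothetical):

students = {'s01', 's02', 's03'}
female = filter_students(students, 'demographics.csv', 'F')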


if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG,
                        filename="dcnn_regression_gender_losocv.log",
                        format="%(asctime)s %(levelname)s %(message)s")
    yaml_file_path = sys.argv[1]
    parameters = load_parameters(yaml_file_path)
    labels_file_path = parameters["labels_file_path"][0]
    input_folder_path = parameters["input_folder_path"][0]
    psychological_construct = parameters["psychological_construct"][0]
    epochs = parameters["epochs"][0]
    likert_score_file_path = parameters["likert_scores_file_path"][0]
    loss_function = parameters["loss_function"][0]
    demographics_file_path = parameters["demographics_file"][0]
    gender = parameters["gender"][0]
    print(gender)
    dataset_loader = DatasetLoader(
        labels_file_path=labels_file_path,
        input_folder_path=input_folder_path,
        likert_scores_file_path=likert_score_file_path)
    index = dataset_loader.get_index(psychological_construct)
    labels = dataset_loader.labels
Example #19
def random_data_generator(dir_with_src_images,
                          base_image_filename,
                          object_image_list,
                          img_shape=(28, 28, 1),
                          batch_size=32,
                          resized_objects=None):

    h, w, ch = img_shape

    # define inputs
    batch_inputs = np.zeros((batch_size, h, w, ch), dtype=np.float32)
    # define outputs
    batch_outputs = np.zeros((batch_size, h, w, ch), dtype=np.float32)
    # define attention masks (by default ones as everything has the same importance)
    batch_masks = np.ones((batch_size, h, w, ch), dtype=np.float32)

    def preprocess_size_helper(new_dim=(h, w)):
        return lambda image: preprocess_size(image, new_dim)

    preprocessors = [
        preprocess_size_helper(new_dim=(h, w)), preprocess_enhance_edges
    ]

    # load images
    base_image = loadAndResizeImages2(dir_with_src_images,
                                      [base_image_filename])[0]
    objects = loadAndResizeImages2(dir_with_src_images,
                                   object_image_list,
                                   load_alpha=True)

    # load params, since some of them are needed to generate data:
    parameters_filepath = "config.ini"
    parameters = load_parameters(parameters_filepath)

    size_factor = float(parameters["synthetic"]["size_factor"])
    save_batch = eval(parameters["synthetic"]["save_batch"])
    calc_masks = eval(parameters["synthetic"]["calc_masks"])
    dilate_masks = int(parameters["synthetic"]["dilate_masks"])
    blur_masks = eval(parameters["synthetic"]["blur_masks"])
    blur_kernel = eval(parameters["synthetic"]["blur_kernel"])

    obj_attention = float(parameters["synthetic"]["obj_attention"])
    back_attention = float(parameters["synthetic"]["back_attention"])

    subtract_median = eval(parameters["synthetic"]["subtract_median"])

    add_noise = eval(parameters["synthetic"]["add_noise"])
    noise_amnt = float(parameters["synthetic"]["noise_amnt"])

    loss = (parameters["hyperparam"]["loss"])

    # median threshold
    threshold = .5

    # resize to desired size
    orig_h, orig_w, _ = base_image.shape
    ratio_h = orig_h / h
    ratio_w = orig_w / w

    base_image = preprocess_size(base_image, (h, w))
    if resized_objects is None:
        resized_objects = []
    for o in objects:
        ho, wo, cho = o.shape
        if ho == wo:
            hn = int((ho / ratio_w) * size_factor)
            wn = int((wo / ratio_w) * size_factor)
        else:
            hn = int((ho / ratio_h) * size_factor)
            wn = int((wo / ratio_w) * size_factor)
        resized_o = preprocess_size(o, (hn, wn))
        resized_objects.append(resized_o)

    # serve randomly generated images
    while True:

        # go through the entire dataset, using batch_sized chunks each time
        for i in range(0, batch_size):

            np.copyto(batch_inputs[i], base_image)

            # TODO: randomly place the objects:
            for o in resized_objects:
                o_rot = random_rotation(o)

                ho, wo, cho = o_rot.shape

                x = np.random.randint(low=0, high=w - wo)  # +wo
                #print((100 / ratio_h))
                # 30 is the magic number to limit the random placement of objects inside image
                y = np.random.randint(low=(60 / ratio_h),
                                      high=h - ho - (30 / ratio_h))

                #imsave("tmp/{}.png".format("obj_generated_" + str(i)),  o_rot)

                mask = o_rot[:, :, 3]  # / 255.0
                #print(mask.max(), mask.min())
                batch_inputs[i][y:y + ho, x:x + wo,
                                0] = batch_inputs[i][y:y + ho, x:x + wo, 0] * (
                                    1 - mask) + mask * o_rot[:, :, 0]  #*255.0
                batch_inputs[i][y:y + ho, x:x + wo,
                                1] = batch_inputs[i][y:y + ho, x:x + wo, 1] * (
                                    1 - mask) + mask * o_rot[:, :, 1]  #*255.0
                batch_inputs[i][y:y + ho, x:x + wo,
                                2] = batch_inputs[i][y:y + ho, x:x + wo, 2] * (
                                    1 - mask) + mask * o_rot[:, :, 2]  #*255.0

            #imsave("tmp/{}.png".format("in_generated_" + str(i)),  batch_inputs[i])
            np.copyto(batch_outputs[i], batch_inputs[i])
            #imsave("tmp/{}.png".format("out_generated_" + str(i)),  batch_outputs[i])
            #print(batch_outputs[i].max(), batch_outputs[i].min())
        batch_median = np.median(batch_outputs, axis=0, keepdims=True)

        #print("Batch median shape: ", batch_median.shape)
        #print("Batch median shape: ", batch_outputs.shape)
        if calc_masks:
            median_min = batch_median[0].min()
            median_max = batch_median[0].max()
            for i in range(0, batch_size):

                tmp = cdist(batch_median[0], batch_inputs[i],
                            keepdims=True)  # color distance between images
                mask = (tmp > threshold * max_cdist).astype(float)
                batch_masks[i] = mask * obj_attention
                #back_mask = ( tmp <= 0 ).astype(float) + back_attention

                #batch_masks[i][batch_masks[i] > 0.5] += 0.1
                # uncomment to blur the images (soft attention)
                if dilate_masks > 0:
                    #print("dilating masks...")
                    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                       (3, 3))
                    batch_masks[i] = cv2.dilate(batch_masks[i],
                                                kernel,
                                                iterations=dilate_masks)
                if back_attention > 0.0:
                    #print("Setting background weights...")
                    #    back_mask = ( tmp <= 0 ).astype(float) + back_attention
                    batch_masks[i] += ((1 - (mask).astype(int)).astype(float) *
                                       back_attention)

                if blur_masks:
                    #print("Blurring masks....")
                    batch_masks[i] = cv2.blur(
                        batch_masks[i], blur_kernel)  # add blur if needed

                if save_batch:  # save generated images to tmp folder
                    me_min = batch_masks[i].min()
                    me_max = batch_masks[i].max()
                    label = str(np.random.randint(0, 1000))
                    imsave(
                        "tmp/{}.png".format("mask_{}_{}_{}".format(
                            label, me_min, me_max)), batch_masks[i])
                    imsave(
                        "tmp/{}.png".format("input_{}_{}_{}".format(
                            label, me_min, me_max)), batch_inputs[i])

            if save_batch:  # save only first batch
                save_batch = False

        #batch_percentile = np.percentile(batch_outputs, 99.9, axis=0, keepdims=True)
        #label = str(np.random.randint(0, 1000))
        #imsave("tmp/{}.png".format("percentile_99.9_" + str(label)), batch_percentile[0])
        if subtract_median:
            #batch_mean = batch_outputs.mean(axis=0, keepdims=True)
            # careful - batch_size must be greater than 1!!!
            #batch_median = np.median(batch_outputs, axis=0, keepdims=True)

            #imsave("tmp/{}.png".format("median_" + str(i)), batch_median[0])
            batch_outputs = batch_median - batch_outputs
            #imsave("tmp/{}.png".format("out1_" + str(i)), batch_outputs[0])

        if add_noise:
            batch_inputs += noise_amnt * np.random.normal(
                loc=0.0, scale=1.0, size=batch_inputs.shape)

        #label = str(np.random.randint(0, 1000))
        #imsave("tmp/{}.png".format(label + "_in_generated_" + str(i)),  batch_inputs[0])
        #imsave("tmp/{}.png".format(label + "_out_generated_" + str(i)),  batch_median[0] - batch_outputs[0])
        #print(batch_median.shape)
        #if 'wmse' in loss and 'out-median' in mode:
        #    yield [batch_inputs, np.repeat(np.array([batch_median]), batch_size, axis=0).reshape((batch_size, h, w, 3))], batch_outputs
        if 'wmse' in loss:
            yield [batch_inputs, batch_masks], batch_outputs
        else:
            yield batch_inputs, batch_outputs
Example #20
File: models.py  Project: blazm/baxter
def build_conv_only_ae(img_shape=(32, 32, 3),
                       latent_size=16,
                       opt='adam',
                       loss='mse',
                       conv_layers=4,
                       initial_filters=4):

    _, _, ch = img_shape
    input_img = Input(
        shape=img_shape
    )  # adapt this if using `channels_first` image data format
    input_mask = Input(
        shape=img_shape
    )  # input layer for mask (it is only used in the calculation of the loss)
    filters = initial_filters
    kernel_size = (3, 3)
    s = 1  # stride parameter

    x = input_img
    #x = Conv2D(1, (1,1), activation='relu', padding='same', kernel_initializer='glorot_uniform', bias_initializer='zeros')(x) # turn to grayscale
    for i in range(conv_layers):
        filters = initial_filters if i < conv_layers - 1 else 1  #*= 2
        #x = Dropout(rate=0.1)(x)
        conv_lyr = Conv2D(filters=initial_filters,
                          kernel_size=kernel_size,
                          activation='elu',
                          strides=s,
                          padding='same',
                          kernel_initializer='glorot_normal',
                          bias_initializer='zeros')
        x = conv_lyr(x)
        conv_lyr = Conv2D(
            filters=filters,
            kernel_size=kernel_size,
            activation='elu' if i < conv_layers - 1 else
            'sigmoid',  # to generate latent space in between 0 and 1
            strides=2,
            padding='same',
            kernel_initializer='glorot_normal',
            bias_initializer='zeros')
        x = conv_lyr(x)
        '''
        conv_lyr = Conv2D(filters=filters,
                kernel_size=kernel_size,
                activation='relu',
                strides=s,
                padding='same')
        x = conv_lyr(x)
        '''
        mp = conv_lyr
        #x = BatchNormalization()(x)
        #mp = AveragePooling2D((2,2), padding='same')
        #mp = MaxPooling2D((2,2), padding='same')
        #x = mp(x)
    '''
    x = Conv2D(32, kernel_size, activation='relu', 
                                padding='same', 
                                strides=(s,s)
                                )(input_img) # 
    x = Conv2D(64, kernel_size, activation='relu', 
                                padding='same', 
                                strides=(s,s)
                                )(x) # 

    conv_lyr = Conv2D(128, kernel_size, activation='relu', padding='same', strides=(s,s)) 
    x = conv_lyr(x)
    '''
    conv_shape = mp.output_shape[1:]
    #conv_shape = conv_lyr.output_shape[1:] #
    print(conv_shape)
    latent_size = conv_shape[0] * conv_shape[1] * conv_shape[2]
    #conv_shape = mp.output_shape[1:] # without the batch_size

    encoded_layer = mp  # Dense(latent_size, activation='relu', name='latent', activity_regularizer=l1(10e-5))
    encoded = x  # encoded_layer(x)

    for i in range(conv_layers):
        filters = initial_filters if i < conv_layers - 1 else 3
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            activation='elu',
                            strides=2,
                            padding='same',
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros')(x)
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            activation='elu',
                            strides=s,
                            padding='same',
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros')(x)
        '''
        x = Conv2DTranspose(filters=filters,
                        kernel_size=kernel_size,
                        activation='relu',
                        strides=s,
                        padding='same')(x)
        '''
        #x = BatchNormalization()(x)
        #x = UpSampling2D((2,2))(x)
        #filters //= 2

    decoded_layer = Conv2D(
        ch,
        kernel_size,
        activation='sigmoid',  # 'linear' if not 'bin-xent' in loss else 'sigmoid'
        padding='same',
        kernel_initializer='glorot_normal',
        bias_initializer='zeros')
    decoded = decoded_layer(x)

    if loss == 'wmse' or loss == 'wbin-xent':
        autoencoder = Model([input_img, input_mask], decoded)
    else:
        autoencoder = Model(input_img, decoded)
    print(autoencoder.summary())
    # TODO: specify learning rate?
    #if opt == 'adam':

    if opt == 'adam':
        opt = Adam(lr=0.001)  # try bigger learning rate
    if opt == 'adamw':
        parameters_filepath = "config.ini"
        parameters = load_parameters(parameters_filepath)
        num_epochs = int(parameters["hyperparam"]["num_epochs"])
        batch_size = int(parameters["hyperparam"]["batch_size"])

        opt = AdamW(lr=0.001,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=None,
                    decay=0.,
                    weight_decay=0.025,
                    batch_size=batch_size,
                    samples_per_epoch=1000,
                    epochs=num_epochs)

    from inspect import isfunction
    if isfunction(loss):
        loss = loss(input_mask)
    elif loss == 'wbin-xent':
        loss = masked_binary_crossentropy(input_mask)
    elif loss == 'bin-xent':
        loss = 'binary_crossentropy'
    elif loss == 'dssim':
        loss = DSSIMObjective()
    elif loss == 'wmse':
        loss = masked_mse_wrapper(input_mask)

    autoencoder.compile(optimizer=opt, loss=loss, metrics=[mse, rmse, psnr])

    #print(autoencoder.summary())
    #input("Press any key...")
    #print("# AE layers: ", len(autoencoder.layers))

    # create encoder model, which will be able to encode the image into latent representation
    encoder = Model(input_img, encoded)

    #encoded_shape = encoded.get_shape().as_list()
    #_, enc_h, enc_w, enc_ch = encoded_shape
    #enc_h, enc_w, enc_ch = 4, 4, 8
    #print("latent shape: ", latent_size)
    #print("decoded shape: ", autoencoder.layers[-8].input.shape)

    # re-create decoder model, which will be able to decode encoded input
    encoded_input = Input(shape=conv_shape)  # skip batch size which is None
    #print(autoencoder.layers[-6](encoded_input))
    #deco = autoencoder.layers[-8](encoded_input)
    #deco = autoencoder.layers[-7](encoded_input)

    deco = encoded_input
    assemble = False
    for layer in autoencoder.layers:
        if assemble:
            deco = layer(deco)
        if layer == encoded_layer:
            assemble = True

    decoded_output = deco
    '''
    deco = autoencoder.layers[-11](encoded_input)
    for i in range(10, 1):
        deco = autoencoder.layers[-i](deco)
    decoded_output = autoencoder.layers[-1](deco)
    '''
    '''
    deco = autoencoder.layers[-6](encoded_input)
    deco = autoencoder.layers[-5](deco)
    deco = autoencoder.layers[-4](deco)
    deco = autoencoder.layers[-3](deco)
    deco = autoencoder.layers[-2](deco)
    decoded_output = autoencoder.layers[-1](deco)
    '''
    decoder = Model(encoded_input, decoded_output)

    return autoencoder, encoder, decoder, latent_size
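A usage sketch (shapes illustrative; x_train is hypothetical training data of shape (N, 32, 32, 3)):

ae, enc, dec, latent = build_conv_only_ae(img_shape=(32, 32, 3),
                                          opt='adam', loss='mse',
                                          conv_layers=4, initial_filters=4)
ae.fit(x_train, x_train, epochs=10, batch_size=32)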
Example #21
sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
if True:
    mrt = _mrt.MRT(sym, params, inputs_ext)
    mrt.set_data('data', data)
    mrt.calibrate(ctx=ctx)
    mrt.set_input_prec('data', 16)
    mrt.set_fixed('data')
    mrt.set_output_prec(8)
    qsym, qparams, inputs_ext = mrt.quantize()
else:
    inputs_ext['data']['data'] = data
    th_dict = calib.sym_calibrate(sym, params, inputs_ext, ctx=ctx)
    qsym, qparams, _ = calib.pure_int8_quantize(sym, params, inputs_ext,
                                                th_dict)
net2 = gluon.nn.SymbolBlock(qsym, inputs)
utils.load_parameters(net2, qparams, ctx=ctx)


def quantize(data):
    data = sim.load_real_data(data, 'data', inputs_ext)
    res = net2(data.as_in_context(ctx))
    return res


quant_sym, quant_params, quant_ext = load_fname("sym.quantize", with_ext=True)
open(quant_sym, "w").write(qsym.tojson())

if False:
    inputs_ext['data']['shape'] = (38, 1)
    data = data[:, 0].reshape(38, 1)
    _mrt.std_dump(qsym,
Example #22
    if stationary == 0:
        W_i_b = weight_dict['model/input/W_i_b:0']
        W_i_b_sorted = W_i_b[:, sort_ix]
        data.append(W_i_b)
        data.append(W_i_b_sorted)
        titles.append('W_i_b')
        titles.append('sorted')

    plot_name = os.path.join(save_path, image_folder, 'weights__.png')
    utils.subimage_easy(zip(titles, data), col=2, row=4, save_name=plot_name)


if __name__ == '__main__':
    root = '../experiments/vary_randomize_ab_ba/files/01/'
    d0 = root + 'stationary/'
    d1 = root + 'moving/'

    for d in [d0, d1]:
        if os.path.exists(d):
            opts = utils.load_parameters(d + 'parameters')
            opts.save_path = d
            # plot_activity(opts)
            plot_weights(opts)
            plot_sorted_weights(opts)

            # if opts.stationary == 0:
            #     points, activity, labels = receptive_field.get_activity(opts)
            #     receptive_field.plot_receptive_field(opts, points, activity,
            #                                          plot_stationary=opts.stationary)
            #     receptive_field.plot_receptive_field(opts, points, activity, plot_stationary=True)
Example #23
batch_size = 16
embed_word_dim = 300
filter_size = 3
num_filters = 100

embed_id_dim = 32
attention_size = 32
n_latent = 32

# Load files
TPS_DIR = './preprocess/music'
print('Loading files...')

user_num, item_num, review_num_u, review_num_i, review_len_u, review_len_i,\
    vocabulary_user, vocabulary_item, train_length, test_length, u_text, i_text,\
    user_vocab_size, item_vocab_size = load_parameters(TPS_DIR, 'music.para')

# print(vocabulary_item)

uid_tr, iid_tr, reuid_tr, reiid_tr, yrating_tr, texts_u_tr, texts_i_tr = load_train_test_data(
    TPS_DIR, 'music.train', u_text, i_text)
uid_va, iid_va, reuid_va, reiid_va, yrating_va, texts_u_va, texts_i_va = load_train_test_data(
    TPS_DIR, 'music.test', u_text, i_text)
print(
    'Training set: {} samples and validation set: {} samples prepared'.format(
        len(uid_tr), len(uid_va)))

initW_u = np.random.uniform(-1.0, 1.0, (user_vocab_size, 300))
initW_i = np.random.uniform(-1.0, 1.0, (item_vocab_size, 300))
# initW_u, initW_i = load_word_embedding_weights(TPS_DIR, 'initW_u', 'initW_i')
print('word2vec weights initialization done')
Example #24
def test_mrt_quant(batch_size=1, iter_num=10):
    logger = logging.getLogger("log.test.mrt.quantize")

    ctx = mx.gpu(1)
    qctx = mx.gpu(3)
    input_size = 512
    h, w = input_size, input_size
    inputs_ext = {
        'data': {
            'shape': (batch_size, 3, h, w),
        }
    }

    val_data = dataset.load_voc(batch_size, input_size)
    val_data_iter = iter(val_data)

    def data_iter_func():
        data, label = next(val_data_iter)
        return data, label

    sym_file, param_file = load_fname()
    sym, params = mx.sym.load(sym_file), nd.load(param_file)
    sym, params = spass.sym_quant_prepare(sym, params, inputs_ext)
    keys = [
        "ssd0_multiperclassdecoder0_concat0",
        "ssd0_multiperclassdecoder0__mulscalar0",
        "ssd0_multiperclassdecoder0_slice_axis0",
        "ssd0_multiperclassdecoder0_zeros_like1",
        "ssd0_normalizedboxcenterdecoder0_concat0",
    ]
    base, base_params, base_inputs_ext, top, top_params, top_inputs_ext \
            = _mrt.split_model(sym, params, inputs_ext, keys)
    dump_sym, dump_params = load_fname("mrt.base")
    open(dump_sym, "w").write(base.tojson())
    nd.save(dump_params, base_params)
    dump_sym, dump_params, dump_ext = load_fname("mrt.top", True)
    open(dump_sym, "w").write(top.tojson())
    nd.save(dump_params, top_params)
    sim.save_ext(dump_ext, top_inputs_ext)

    dump_sym, dump_params = load_fname("mrt.base")
    base, base_params = mx.sym.load(dump_sym), nd.load(dump_params)
    dump_sym, dump_params, dump_ext = load_fname("mrt.top", True)
    top, top_params = mx.sym.load(dump_sym), nd.load(dump_params)
    (top_inputs_ext, ) = sim.load_ext(dump_ext)

    base_inputs = [mx.sym.var(n) for n in inputs_ext]
    base_graph = mx.gluon.nn.SymbolBlock(base, base_inputs)
    utils.load_parameters(base_graph, base_params, ctx=ctx)

    top_inputs = [mx.sym.var(n) for n in top_inputs_ext]
    top_graph = mx.gluon.nn.SymbolBlock(top, top_inputs)
    utils.load_parameters(top_graph, top_params, ctx=ctx)

    metric = dataset.load_voc_metric()
    metric.reset()

    def yolov3(data, label):
        def net(data):
            tmp = base_graph(data.as_in_context(ctx))
            outs = top_graph(*tmp)
            return outs

        acc = validate_data(net, data, label, metric)
        return "{:6.2%}".format(acc)

    # utils.multi_validate(yolov3, data_iter_func,
    # iter_num=iter_num, logger=logger)
    # exit()

    if True:
        mrt = _mrt.MRT(base, base_params, inputs_ext)
        for i in range(16):
            data, _ = data_iter_func()
            mrt.set_data('data', data)
            th_dict = mrt.calibrate(ctx=ctx)
        _, _, dump_ext = load_fname("mrt.dict", True)
        sim.save_ext(dump_ext, th_dict)

    _, _, dump_ext = load_fname("mrt.dict", True)
    (th_dict, ) = sim.load_ext(dump_ext)
    if True:
        mrt = _mrt.MRT(base, base_params, base_inputs_ext)
        mrt.set_th_dict(th_dict)
        mrt.set_threshold('data', 2.64)
        mrt.set_fixed("ssd0_multiperclassdecoder0_concat0")
        mrt.set_fixed("ssd0_multiperclassdecoder0__mulscalar0")
        mrt.set_fixed("ssd0_multiperclassdecoder0_zeros_like1")
        mrt.set_threshold("ssd0_multiperclassdecoder0_slice_axis0", 1)
        #  mrt.set_threshold("ssd0_normalizedboxcenterdecoder0_concat0", 512)
        mrt.set_output_prec(30)
        qbase, qbase_params, qbase_inputs_ext = mrt.quantize()
        oscales = mrt.get_output_scales()
        maps = mrt.get_maps()
        dump_sym, dump_params, dump_ext = load_fname("mrt.quantize", True)
        open(dump_sym, "w").write(qbase.tojson())
        nd.save(dump_params, qbase_params)
        sim.save_ext(dump_ext, qbase_inputs_ext, oscales, maps)

    # merge quantize model
    if True:
        qb_sym, qb_params, qb_ext = load_fname("mrt.quantize", True)
        qbase, qbase_params = mx.sym.load(qb_sym), nd.load(qb_params)
        qbase_inputs_ext, oscales, maps = sim.load_ext(qb_ext)

        name_maps = {
            "ssd0_slice_axis41": "ssd0_multiperclassdecoder0_concat0",
            "ssd0_slice_axis42": "ssd0_multiperclassdecoder0_slice_axis0",
            "ssd0_slice_axis43": "ssd0_normalizedboxcenterdecoder0_concat0",
        }
        oscales_dict = dict(zip([c.attr('name') for c in base], oscales))
        oscales = [oscales_dict[name_maps[c.attr('name')]] for c in top]

        def box_nms(node, params, graph):
            name, op_name = node.attr('name'), node.attr('op_name')
            childs, attr = sutils.sym_iter(
                node.get_children()), node.list_attr()
            if op_name == '_greater_scalar':
                valid_thresh = sutils.get_attr(attr, 'scalar', 0)
                attr['scalar'] = int(valid_thresh * oscales[1])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            elif op_name == '_contrib_box_nms':
                valid_thresh = sutils.get_attr(attr, 'valid_thresh', 0)
                attr['valid_thresh'] = int(valid_thresh * oscales[1])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            return node

        qsym, qparams = _mrt.merge_model(qbase, qbase_params, top, top_params,
                                         maps, box_nms)
        sym_file, param_file, ext_file = load_fname("mrt.all.quantize", True)
        open(sym_file, "w").write(qsym.tojson())
        nd.save(param_file, qparams)
        sim.save_ext(ext_file, qbase_inputs_ext, oscales)

    if True:
        dump_sym, dump_params, dump_ext = load_fname("mrt.all.quantize", True)
        net2_inputs_ext, oscales = sim.load_ext(dump_ext)
        inputs = [mx.sym.var(n) for n in net2_inputs_ext]
        net2 = utils.load_model(dump_sym, dump_params, inputs, ctx=qctx)
        net2_metric = dataset.load_voc_metric()
        net2_metric.reset()

        def mrt_quantize(data, label):
            def net(data):
                data = sim.load_real_data(data, 'data', net2_inputs_ext)
                outs = net2(data.as_in_context(qctx))
                outs = [
                    o.as_in_context(ctx) / oscales[i]
                    for i, o in enumerate(outs)
                ]
                return outs

            acc = validate_data(net, data, label, net2_metric)
            return "{:6.2%}".format(acc)

    utils.multi_validate(yolov3,
                         data_iter_func,
                         mrt_quantize,
                         iter_num=iter_num,
                         logger=logger)
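The division by oscales[i] inside mrt_quantize above is the dequantization step: the quantized graph's integer outputs are roughly the float outputs multiplied by their output scales (the same scales box_nms uses to convert thresholds into the integer domain), so dividing restores float-range values before the VOC metric sees them. A minimal illustration with a hypothetical scale value, not taken from the model above:

import numpy as np

oscale = 64.0                      # hypothetical output scale, as returned by get_output_scales()
o_int = np.array([167.0, -42.0])   # pretend outputs of the integer graph
o_float = o_int / oscale           # float-range values the VOC metric consumes
print(o_float)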
Example #25
0
def main():
    args = parser.parse_args()

    device = 'cuda' if torch.cuda.is_available() else 'cpu'

    hyperparams = load_parameters(args.hyperparameter_path)

    if args.lr:
        hyperparams['optimizer']['lr_init'] = args.lr
        hyperparams['scheduler']['lr_min'] = args.lr * 1e-3

    if args.patience:
        hyperparams['scheduler']['scheduler_patience'] = args.patience

    if args.weight_schedule_dur:
        hyperparams['objective']['kl'][
            'weight_schedule_dur'] = args.weight_schedule_dur
        hyperparams['objective']['l2'][
            'weight_schedule_dur'] = args.weight_schedule_dur

    if args.kl_max:
        hyperparams['objective']['kl']['max'] = args.kl_max

    data_name = args.data_path.split('/')[-1]
    model_name = hyperparams['model_name']
    mhp_list = [
        key.replace('size', '').replace('_', '')[:4] + str(val)
        for key, val in hyperparams['model'].items() if 'size' in key
    ]
    mhp_list.sort()
    hyperparams['run_name'] = '_'.join(mhp_list) + '_retest'
    save_loc = '%s/%s/%s/%s/' % (args.output_dir, data_name, model_name,
                                 hyperparams['run_name'])

    if not os.path.exists(save_loc):
        os.makedirs(save_loc)

    data_dict = read_data(args.data_path)
    train_data = torch.Tensor(data_dict['train_%s' %
                                        args.data_suffix]).to(device)
    valid_data = torch.Tensor(data_dict['valid_%s' %
                                        args.data_suffix]).to(device)

    num_trials, num_steps, input_size = train_data.shape

    train_ds = torch.utils.data.TensorDataset(train_data)
    valid_ds = torch.utils.data.TensorDataset(valid_data)
    train_dl = torch.utils.data.DataLoader(train_ds,
                                           batch_size=args.batch_size,
                                           shuffle=True)
    valid_dl = torch.utils.data.DataLoader(valid_ds,
                                           batch_size=valid_data.shape[0])

    transforms = trf.Compose([])

    loglikelihood = LogLikelihoodPoisson(dt=float(data_dict['dt']))

    objective = LFADS_Loss(
        loglikelihood=loglikelihood,
        loss_weight_dict={
            'kl': hyperparams['objective']['kl'],
            'l2': hyperparams['objective']['l2']
        },
        l2_con_scale=hyperparams['objective']['l2_con_scale'],
        l2_gen_scale=hyperparams['objective']['l2_gen_scale']).to(device)

    model = LFADS_SingleSession_Net(
        input_size=input_size,
        factor_size=hyperparams['model']['factor_size'],
        g_encoder_size=hyperparams['model']['g_encoder_size'],
        c_encoder_size=hyperparams['model']['c_encoder_size'],
        g_latent_size=hyperparams['model']['g_latent_size'],
        u_latent_size=hyperparams['model']['u_latent_size'],
        controller_size=hyperparams['model']['controller_size'],
        generator_size=hyperparams['model']['generator_size'],
        prior=hyperparams['model']['prior'],
        clip_val=hyperparams['model']['clip_val'],
        dropout=hyperparams['model']['dropout'],
        do_normalize_factors=hyperparams['model']['normalize_factors'],
        max_norm=hyperparams['model']['max_norm'],
        device=device).to(device)

    total_params = 0
    for ix, (name, param) in enumerate(model.named_parameters()):
        print(ix, name, list(param.shape), param.numel(), param.requires_grad)
        total_params += param.numel()

    print('Total parameters: %i' % total_params)

    optimizer = opt.Adam(model.parameters(),
                         lr=hyperparams['optimizer']['lr_init'],
                         betas=hyperparams['optimizer']['betas'],
                         eps=hyperparams['optimizer']['eps'])

    scheduler = LFADS_Scheduler(
        optimizer=optimizer,
        mode='min',
        factor=hyperparams['scheduler']['scheduler_factor'],
        patience=hyperparams['scheduler']['scheduler_patience'],
        verbose=True,
        threshold=1e-4,
        threshold_mode='abs',
        cooldown=hyperparams['scheduler']['scheduler_cooldown'],
        min_lr=hyperparams['scheduler']['lr_min'])

    TIME = np.arange(0, num_steps * data_dict['dt'], data_dict['dt'])  # assumes numpy imported as np; torch._np is a private alias

    train_truth = {}
    if 'train_rates' in data_dict.keys():
        train_truth['rates'] = data_dict['train_rates']
    if 'train_latent' in data_dict.keys():
        train_truth['latent'] = data_dict['train_latent']

    valid_truth = {}
    if 'valid_rates' in data_dict.keys():
        valid_truth['rates'] = data_dict['valid_rates']
    if 'valid_latent' in data_dict.keys():
        valid_truth['latent'] = data_dict['valid_latent']

    plotter = {
        'train': Plotter(time=TIME, truth=train_truth),
        'valid': Plotter(time=TIME, truth=valid_truth)
    }

    if args.use_tensorboard:
        import importlib
        if importlib.util.find_spec('torch.utils.tensorboard'):
            tb_folder = save_loc + 'tensorboard/'
            if not os.path.exists(tb_folder):
                os.mkdir(tb_folder)
            elif os.path.exists(tb_folder) and args.restart:
                os.system('rm -rf %s' % tb_folder)
                os.mkdir(tb_folder)

            from torch.utils.tensorboard import SummaryWriter
            writer = SummaryWriter(tb_folder)
            rm_plotter = plotter
        else:
            writer = None
            rm_plotter = None
    else:
        writer = None
        rm_plotter = None

    run_manager = RunManager(model=model,
                             objective=objective,
                             optimizer=optimizer,
                             scheduler=scheduler,
                             train_dl=train_dl,
                             valid_dl=valid_dl,
                             transforms=transforms,
                             writer=writer,
                             plotter=rm_plotter,
                             max_epochs=args.max_epochs,
                             save_loc=save_loc,
                             do_health_check=args.do_health_check)

    run_manager.run()

    report_results(
        [dict(name='valid_loss', type='objective', value=run_manager.best)])

    fig_folder = save_loc + 'figs/'

    if os.path.exists(fig_folder):
        os.system('rm -rf %s' % fig_folder)
    os.mkdir(fig_folder)

    import matplotlib
    matplotlib.use('Agg')  # select the non-interactive backend before creating figures
    from matplotlib.figure import Figure
    fig_dict = plotter['valid'].plot_summary(model=run_manager.model,
                                             dl=run_manager.valid_dl)
    for k, v in fig_dict.items():
        if type(v) == Figure:
            v.savefig(fig_folder + k + '.svg')
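Example #25 reads its nested hyperparameter dictionary (hyperparams['optimizer']['lr_init'], hyperparams['objective']['kl'], and so on) from a JSON file. A minimal sketch of what such a load_parameters helper could look like, assuming plain JSON and no validation; the project's real helper may merge defaults or check keys:

import json

def load_parameters(json_path):
    # returns a nested dict, e.g. {"optimizer": {"lr_init": 0.001, ...}, ...}
    with open(json_path, 'r') as f:
        return json.load(f)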
Example #26
0
File: Model.py Project: y0ast/VIMCO
 def reload(self, path):
     self.params = load_parameters(path + "/parameters.pkl")
     self.m = load_parameters(path + "/m.pkl")
     self.v = load_parameters(path + "/v.pkl")
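reload() restores the model parameters together with both Adam moment estimates (m and v) from .pkl files, so load_parameters here is presumably a thin pickle wrapper. A minimal sketch under that assumption; save_parameters is a hypothetical counterpart, not shown in the project:

import pickle

def load_parameters(path):
    # restore whatever parameter object was pickled at save time
    with open(path, 'rb') as f:
        return pickle.load(f)

def save_parameters(obj, path):  # hypothetical counterpart
    with open(path, 'wb') as f:
        pickle.dump(obj, f)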
Example #27
0
File: models.py Project: blazm/baxter
def build_conv_dense_ae(img_shape=(32, 32, 3),
                        latent_size=16,
                        opt='adam',
                        loss='mse',
                        conv_layers=4,
                        initial_filters=4):

    _, _, ch = img_shape
    input_img = Input(
        shape=img_shape
    )  # adapt this if using `channels_first` image data format
    input_mask = Input(shape=img_shape)
    filters = initial_filters
    kernel_size = (3, 3)
    s = 1  # stride parameter

    x = input_img
    for i in range(conv_layers):
        #filters = initial_filters if i < conv_layers-1 else 4 #*= 2
        conv_lyr = Conv2D(filters=initial_filters,
                          kernel_size=kernel_size,
                          activation='elu',
                          strides=s,
                          padding='same',
                          kernel_initializer='glorot_normal',
                          bias_initializer='zeros')
        x = conv_lyr(x)
        conv_lyr = Conv2D(filters=initial_filters,
                          kernel_size=kernel_size,
                          activation='elu',
                          strides=s,
                          padding='same',
                          kernel_initializer='glorot_normal',
                          bias_initializer='zeros')
        x = conv_lyr(x)
        conv_lyr = Conv2D(filters=filters,
                          kernel_size=kernel_size,
                          activation='elu',
                          strides=2,
                          padding='same',
                          kernel_initializer='glorot_normal',
                          bias_initializer='zeros')
        x = conv_lyr(x)
        mp = conv_lyr

    conv_shape = mp.output_shape[1:]  # encoder output shape without the batch dimension
    print("conv_shape:", conv_shape)
    flat_lyr = Flatten(name='flat_in')
    x = flat_lyr(x)  # call the layer first so its output shape is defined
    flatten_dim = flat_lyr.output_shape[-1]  # last entry is the flattened dimension
    print("flatten_dim: ", flatten_dim)
    encoded_layer = Dense(latent_size,
                          activation='elu',
                          name='latent',
                          kernel_initializer='glorot_normal',
                          bias_initializer='zeros')
    encoded = encoded_layer(x)
    end_flat_layer = Dense(flatten_dim,
                           activation='elu',
                           name='flat_out',
                           kernel_initializer='glorot_normal',
                           bias_initializer='zeros')
    x = end_flat_layer(encoded)  # expand back to the pre-latent flat dimension so the Reshape below matches the encoder
    x = Reshape(target_shape=conv_shape)(x)

    for i in range(conv_layers):
        filters = initial_filters if i < conv_layers - 1 else 3
        x = Conv2DTranspose(filters=initial_filters,
                            kernel_size=kernel_size,
                            activation='elu',
                            strides=s,
                            padding='same',
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros')(x)
        x = Conv2DTranspose(filters=initial_filters,
                            kernel_size=kernel_size,
                            activation='elu',
                            strides=s,
                            padding='same',
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros')(x)
        x = Conv2DTranspose(filters=filters,
                            kernel_size=kernel_size,
                            activation='elu',
                            strides=2,
                            padding='same',
                            kernel_initializer='glorot_normal',
                            bias_initializer='zeros')(x)
        #x = BatchNormalization()(x)
        #x = UpSampling2D((2,2))(x)
        #filters //= 2

    decoded_layer = Conv2D(
        ch,
        kernel_size,
        activation='sigmoid' if 'bin-xent' in loss else 'linear',
        padding='same',
        kernel_initializer='glorot_normal',
        bias_initializer='zeros')
    decoded = decoded_layer(x)

    if loss == 'wmse':
        autoencoder = Model([input_img, input_mask], decoded)
    else:
        autoencoder = Model(input_img, decoded)
    print(autoencoder.summary())
    # TODO: make the learning rate configurable

    if opt == 'adam':
        opt = Adam(lr=0.001)  # try bigger learning rate
    if opt == 'adamw':
        parameters_filepath = "config.ini"
        parameters = load_parameters(parameters_filepath)
        num_epochs = int(parameters["hyperparam"]["num_epochs"])
        batch_size = int(parameters["hyperparam"]["batch_size"])
        opt = AdamW(lr=0.001,
                    beta_1=0.9,
                    beta_2=0.999,
                    epsilon=None,
                    decay=0.,
                    weight_decay=0.025,
                    batch_size=batch_size,
                    samples_per_epoch=1000,
                    epochs=num_epochs)

    if loss == 'wbin-xent':
        loss = masked_binary_crossentropy(input_mask)
    if loss == 'bin-xent':
        loss = 'binary_crossentropy'
    if loss == 'dssim':
        loss = DSSIMObjective()
    if loss == 'wmse':
        loss = masked_mse_wrapper(input_mask)
    autoencoder.compile(optimizer=opt, loss=loss, metrics=[mse, rmse, psnr])

    # create encoder model, which will be able to encode the image into latent representation
    encoder = Model(input_img, encoded)

    # re-create decoder model, which will be able to decode encoded input
    encoded_input = Input(
        shape=(latent_size, ))  # skip batch size which is None

    # rebuild a standalone decoder by replaying the autoencoder's layers
    # from the first post-latent Dense layer onwards
    deco = encoded_input
    assemble = False
    for layer in autoencoder.layers:
        if layer == end_flat_layer:
            assemble = True
        if assemble:
            deco = layer(deco)

    decoded_output = deco
    decoder = Model(encoded_input, decoded_output)

    return autoencoder, encoder, decoder, latent_size
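A hypothetical call site for the builder above; the shapes, options, and the images array are illustrative and not taken from the original project:

import numpy as np

autoencoder, encoder, decoder, latent_size = build_conv_dense_ae(
    img_shape=(32, 32, 3), latent_size=16, opt='adam', loss='mse')

images = np.random.rand(8, 32, 32, 3).astype('float32')  # dummy batch
codes = encoder.predict(images)            # (8, 32, 32, 3) -> (8, 16)
reconstructions = decoder.predict(codes)   # (8, 16) -> (8, 32, 32, 3)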
Example #28
0
embed_word_dim = 300
filter_size = 3
num_filters = 100

embed_id_dim = 32
attention_size = 32
n_latent = 32


# Load files
TPS_DIR = 'data/toyandgame'
print('Loading files...')

user_num, item_num, review_num_u, review_num_i, review_len_u, review_len_i,\
    vocabulary_user, vocabulary_item, train_length, test_length, u_text, i_text,\
    user_vocab_size, item_vocab_size = load_parameters(TPS_DIR, 'toyandgame.para')

initW_u = np.random.uniform(-1.0, 1.0, (user_vocab_size, embed_word_dim))
initW_i = np.random.uniform(-1.0, 1.0, (item_vocab_size, embed_word_dim))  # item embeddings use the item vocabulary size


# Build the model
model = DeepRecSys(l2_reg_lambda, random_seed, dropout_keep_prob, embed_word_dim, embed_id_dim,
                   filter_size, num_filters, attention_size, n_latent,
                   user_num, item_num, user_vocab_size, item_vocab_size,
                   review_num_u, review_len_u, review_num_i, review_len_i,
                   initW_u, initW_i, is_output_weights=True)

print('Model created with {} layers'.format(len(model.layers)))
print(model.summary())
Example #29
0
def brownian_data_generator(dir_with_src_images,
                            base_image_filename,
                            object_image_list,
                            img_shape=(28, 28, 1),
                            batch_size=32,
                            resized_objects=None):

    h, w, ch = img_shape

    # define inputs
    batch_inputs = np.zeros((batch_size, h, w, ch), dtype=np.float32)
    # define outputs
    batch_outputs = np.zeros((batch_size, h, w, ch), dtype=np.float32)
    # define attention masks (by default ones as everything has the same importance)
    batch_masks = np.ones((batch_size, h, w, ch), dtype=np.float32)

    def preprocess_size_helper(new_dim=(h, w)):
        return lambda image: preprocess_size(image, new_dim)

    preprocessors = [
        preprocess_size_helper(new_dim=(h, w)), preprocess_enhance_edges
    ]

    # load images
    base_image = loadAndResizeImages2(dir_with_src_images,
                                      [base_image_filename])[0]
    objects = loadAndResizeImages2(dir_with_src_images,
                                   object_image_list,
                                   load_alpha=True)

    # load params, since some of them are needed to generate data:
    parameters_filepath = "config.ini"
    parameters = load_parameters(parameters_filepath)

    size_factor = float(parameters["synthetic"]["size_factor"])
    save_batch = eval(parameters["synthetic"]["save_batch"])
    calc_masks = eval(parameters["synthetic"]["calc_masks"])
    dilate_masks = int(parameters["synthetic"]["dilate_masks"])
    blur_masks = eval(parameters["synthetic"]["blur_masks"])
    blur_kernel = eval(parameters["synthetic"]["blur_kernel"])

    obj_attention = float(parameters["synthetic"]["obj_attention"])
    back_attention = float(parameters["synthetic"]["back_attention"])

    subtract_median = eval(parameters["synthetic"]["subtract_median"])

    add_noise = eval(parameters["synthetic"]["add_noise"])
    noise_amnt = float(parameters["synthetic"]["noise_amnt"])

    loss = (parameters["hyperparam"]["loss"])

    # median threshold
    threshold = .5

    # resize to desired size
    orig_h, orig_w, _ = base_image.shape
    ratio_h = orig_h / h
    ratio_w = orig_w / w

    base_image = preprocess_size(base_image, (h, w))
    if resized_objects is None:
        resized_objects = []
    objects_pos = []
    rotated_objs = [None] * len(objects)
    for o in objects:
        ho, wo, cho = o.shape
        if ho == wo:
            hn = int((ho / ratio_w) * size_factor)
            wn = int((wo / ratio_w) * size_factor)
        else:
            hn = int((ho / ratio_h) * size_factor)
            wn = int((wo / ratio_w) * size_factor)
        resized_o = preprocess_size(o, (hn, wn))
        resized_objects.append(resized_o)

        #
        #print(w, " - ", wo)
        x = np.random.randint(low=0, high=w - wn)  # +wo
        y = np.random.randint(low=(60 / ratio_h), high=h - hn - (30 / ratio_h))
        objects_pos.append((x, y))

    L = wn // 2
    #print("L: ", L)
    a = 0
    replace_robots = True
    P_greed = 1. / 2.  #1 / 20. #1 / 33.

    iteration = -1

    # serve randomly generated images
    while True:
        iteration += 1
        # go through the entire dataset, using batch_sized chunks each time

        batch_actions = np.zeros((batch_size, 1), dtype=np.float32)
        batch_rewards = np.zeros((batch_size, 1), dtype=np.float32)

        for i in range(0, batch_size):

            np.copyto(batch_inputs[i], base_image)

            # TODO: randomly place the objects:
            if replace_robots:
                for ix, o in enumerate(resized_objects):
                    a = np.random.uniform(-180, 180)
                    o_rot = random_rotation(o, angle=a - 90)  # -90 because the robot sprite is drawn sideways
                    #if ix == 1:
                    #    batch_actions[i] = a / 180
                    ho, wo, cho = o_rot.shape

                    x = np.random.randint(low=0, high=w - wo)  # +wo
                    #print((100 / ratio_h))
                    # 30 is the magic number to limit the random placement of objects inside image
                    y = np.random.randint(low=(60 / ratio_h),
                                          high=h - ho - (30 / ratio_h))

                    xg = x - (wo // 2)
                    yg = y - (ho // 2)

                    if xg + wo > w:
                        xg = w - wo
                    if yg + ho > h:
                        yg = h - ho
                    if xg < 0:
                        xg = 0
                    if yg < 0:
                        yg = 0

                    x = xg + (wo // 2)
                    y = yg + (ho // 2)

                    objects_pos[ix] = (x, y)
                    rotated_objs[ix] = o_rot

                replace_robots = False
            else:
                # move the robot into random orientation with fixed direction
                #imsave("tmp/{}.png".format("obj_generated_" + str(i)),  o_rot)
                robot_object_distance = h + w
                for ix, o in enumerate(resized_objects):
                    if ix == 1:  # only do this for the robot and not the object

                        x, y = objects_pos[ix]
                        x_t, y_t = objects_pos[ix - 1]
                        # with probability P_greed, aim towards the target object
                        if np.random.random() <= P_greed:
                            a = np.arctan2(x_t - x, y_t - y)
                            a = np.degrees(a)  # -180 <> 180
                        else:
                            # otherwise pick a random heading
                            a = np.random.uniform(-180, 180)

                        action = a  # action in degrees

                        o_rot = random_rotation(
                            o, a - 90)  # -90 because the robot sprite is drawn sideways
                        ho, wo, cho = o_rot.shape

                        # this is new position of the robot
                        a = np.radians(a)  # convert to radians before sin/cos
                        batch_actions[i] = action / 180  # store the heading normalized to [-1, 1]

                        x = x + L * np.sin(a)
                        y = y + L * np.cos(a)

                        x = int(np.round(x))
                        y = int(np.round(y))

                        xg = x - (wo // 2)
                        yg = y - (ho // 2)

                        if xg + wo > w:
                            xg = w - wo
                        if yg + ho > h:
                            yg = h - ho
                        if xg < 0:
                            xg = 0
                        if yg < 0:
                            yg = 0

                        x = xg + (wo // 2)
                        y = yg + (ho // 2)

                        objects_pos[ix] = (x, y)
                        rotated_objs[ix] = o_rot

                        robot_object_distance = np.sqrt((x - x_t)**2 +
                                                        (y - y_t)**2)
                        #print("robot dist / L: {} / {}".format(robot_object_distance, L))

                if robot_object_distance < L:
                    reward = 1
                    batch_rewards[i] = reward
                    replace_robots = True

            for ix, o in enumerate(resized_objects):
                x, y = objects_pos[ix]
                o_rot = rotated_objs[ix]
                ho, wo, cho = o_rot.shape
                mask = o_rot[:, :, 3]  # alpha channel serves as the blending mask
                xg = x - (wo // 2)
                yg = y - (ho // 2)

                # alpha-blend the rotated object onto the base image, channel by channel
                for c in range(3):
                    batch_inputs[i][yg:yg + ho, xg:xg + wo, c] = (
                        batch_inputs[i][yg:yg + ho, xg:xg + wo, c] * (1 - mask)
                        + mask * o_rot[:, :, c])

            # the target for sample (i - 1) is the current frame, wrapped around the batch
            np.copyto(batch_outputs[(i - 1) % batch_size], batch_inputs[i])
        # per-pixel median over the batch, used for masks and background subtraction
        batch_median = np.median(batch_outputs, axis=0, keepdims=True)

        if calc_masks:
            median_min = batch_median[0].min()
            median_max = batch_median[0].max()
            for i in range(0, batch_size):

                tmp = cdist(batch_median[0], batch_inputs[i],
                            keepdims=True)  # color distance between images
                mask = (tmp > threshold * max_cdist).astype(float)
                batch_masks[i] = mask * obj_attention
                #back_mask = ( tmp <= 0 ).astype(float) + back_attention

                #batch_masks[i][batch_masks[i] > 0.5] += 0.1
                # uncomment to blur the images (soft attention)
                if dilate_masks > 0:
                    #print("dilating masks...")
                    kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                       (3, 3))
                    batch_masks[i] = cv2.dilate(batch_masks[i],
                                                kernel,
                                                iterations=dilate_masks)
                if back_attention > 0.0:
                    #print("Setting background weights...")
                    #    back_mask = ( tmp <= 0 ).astype(float) + back_attention
                    batch_masks[i] += ((1 - (mask).astype(int)).astype(float) *
                                       back_attention)

                if blur_masks:
                    #print("Blurring masks....")
                    batch_masks[i] = cv2.blur(
                        batch_masks[i], blur_kernel)  # add blur if needed

                if save_batch:  # save generated images to tmp folder
                    me_min = batch_actions[i][0]  # batch_masks[i].min()
                    me_max = batch_rewards[i][0]  #batch_masks[i].max()
                    label = str(i)  #str(np.random.randint(0, 1000))
                    imsave(
                        "tmp/{}.png".format("m_{}_{}_{:01.2f}_{}".format(
                            iteration, label, me_min, me_max)), batch_masks[i])
                    imsave(
                        "tmp/{}.png".format("A_{}_{}_{:01.2f}_{}".format(
                            iteration, label, me_min, me_max)),
                        batch_inputs[i])
                    imsave(
                        "tmp/{}.png".format("B_{}_{}_{:01.2f}_{}".format(
                            iteration, label, me_min, me_max)),
                        batch_outputs[i])

            if save_batch:  # save only first batch
                save_batch = False

        if subtract_median:
            # careful: batch_size must be greater than 1 for a meaningful median
            batch_outputs = batch_median - batch_outputs

        if add_noise:
            batch_inputs += noise_amnt * np.random.normal(
                loc=0.0, scale=1.0, size=batch_inputs.shape)

        if 'wmse' in loss:
            yield [batch_inputs, batch_masks, batch_actions], batch_outputs
        else:
            yield [batch_inputs, batch_actions], batch_outputs
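The config.ini access pattern above, with every value wrapped in int(), float(), or eval(), matches configparser's behavior of returning all entries as strings. A minimal sketch of a loader consistent with that usage, assuming flat [section] key = value files; the project's actual helper may differ:

import configparser

def load_parameters(parameters_filepath):
    config = configparser.ConfigParser()
    config.read(parameters_filepath)
    # values stay strings; callers convert with int() / float() / eval()
    return {section: dict(config.items(section))
            for section in config.sections()}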
Example #30
0
def test_mrt_quant(batch_size=1, iter_num=10, from_scratch=0):
    logger = logging.getLogger("log.test.mrt.quantize")
    # stages: 0 split, 1 calibrate, 2 quantize, 3 merge; stages before index
    # `from_scratch` load previously dumped files, the rest are recomputed
    flag = [False]*from_scratch + [True]*(4-from_scratch)

    ctx = mx.gpu(4)
    qctx = mx.gpu(3)
    input_size = 416
    input_shape = (batch_size, 3, input_size, input_size)

    # define data iter function, get:
    # data_iter_func
    val_data = dataset.load_voc(batch_size, input_size)
    val_data_iter = iter(val_data)
    def data_iter_func():
        data, label = next(val_data_iter)
        return data, label

    # split model, get:
    # base, base_params, top, top_params, top_inputs_ext 
    base, base_params, top, top_params, top_inputs_ext = \
            None, None, None, None, None
    if flag[0]:
        sym_file, param_file = load_fname("_darknet53_voc")
        sym, params = mx.sym.load(sym_file), nd.load(param_file)
        mrt = MRT(sym, params, input_shape)
        keys = [
          'yolov30_yolooutputv30_expand_dims0',
          'yolov30_yolooutputv31_expand_dims0',
          'yolov30_yolooutputv32_expand_dims0',
          'yolov30_yolooutputv30_tile0',
          'yolov30_yolooutputv31_tile0',
          'yolov30_yolooutputv32_tile0',
          'yolov30_yolooutputv30_broadcast_add1',
          'yolov30_yolooutputv31_broadcast_add1',
          'yolov30_yolooutputv32_broadcast_add1',
        ]
        base, base_params, top, top_params, top_inputs_ext \
                = split_model(mrt.csym, mrt.cprm, {'data': input_shape}, keys)
        dump_sym, dump_params = load_fname("_darknet53_voc", "mrt.base")
        open(dump_sym, "w").write(base.tojson())
        nd.save(dump_params, base_params)
        dump_sym, dump_params, dump_ext = \
                load_fname("_darknet53_voc", "mrt.top", True)
        open(dump_sym, "w").write(top.tojson())
        nd.save(dump_params, top_params)
        sim.save_ext(dump_ext, top_inputs_ext)
    else:
        dump_sym, dump_params = load_fname("_darknet53_voc", "mrt.base")
        base, base_params = mx.sym.load(dump_sym), nd.load(dump_params)
        dump_sym, dump_params, dump_ext = \
                load_fname("_darknet53_voc", "mrt.top", True)
        top, top_params = mx.sym.load(dump_sym), nd.load(dump_params)
        (top_inputs_ext,) = sim.load_ext(dump_ext)

    base_graph = mx.gluon.nn.SymbolBlock(base, [mx.sym.var('data')])
    nbase_params = convert_params_dtype(base_params, src_dtypes="float64",
            dest_dtype="float32")
    utils.load_parameters(base_graph, nbase_params, ctx=ctx)

    top_graph = mx.gluon.nn.SymbolBlock(top,
            [mx.sym.var(n) for n in top_inputs_ext])
    ntop_params = convert_params_dtype(top_params, src_dtypes="float64",
            dest_dtype="float32")
    utils.load_parameters(top_graph, ntop_params, ctx=ctx)

    # calibrate split model, get:
    # th_dict
    th_dict = None
    if flag[1]:
        mrt = MRT(base, base_params, input_shape)
        for i in range(1):
            data, _ = data_iter_func()
            mrt.set_data(data)
            mrt.calibrate(ctx=ctx)
        _, _, dump_ext = load_fname("_darknet53_voc", "mrt.dict", True)
        th_dict = mrt.th_dict
        sim.save_ext(dump_ext, th_dict)
    else:
        _, _, dump_ext = load_fname("_darknet53_voc", "mrt.dict", True)
        (th_dict,) = sim.load_ext(dump_ext)

    # quantize split model, get:
    # qbase, qbase_params, qbase_inputs_ext, oscales, maps
    qbase, qbase_params, qbase_inputs_ext, oscales, maps = \
            None, None, None, None, None
    if flag[2]:
        mrt = MRT(base, base_params, input_shape)
        mrt.set_th_dict(th_dict)
        mrt.set_threshold('data', 2.64)
        mrt.set_threshold('yolov30_yolooutputv30_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv31_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv32_expand_dims0', 1)
        mrt.set_threshold('yolov30_yolooutputv30_tile0', 416)
        mrt.set_threshold('yolov30_yolooutputv31_tile0', 416)
        mrt.set_threshold('yolov30_yolooutputv32_tile0', 416)
        mrt.set_output_prec(30)

        qbase, qbase_params, qbase_inputs_ext = mrt.quantize()

        oscales = mrt.get_output_scales()
        maps = mrt.get_maps()
        dump_sym, dump_params, dump_ext = load_fname("_darknet53_voc", "mrt.quantize", True)
        open(dump_sym, "w").write(qbase.tojson())
        nd.save(dump_params, qbase_params)
        sim.save_ext(dump_ext, qbase_inputs_ext, oscales, maps)
    else:
        qb_sym, qb_params, qb_ext = load_fname("_darknet53_voc", "mrt.quantize", True)
        qbase, qbase_params = mx.sym.load(qb_sym), nd.load(qb_params)
        qbase_inputs_ext, oscales, maps = sim.load_ext(qb_ext)

    # merge quantized split model, get:
    # qsym, qparams, oscales2
    qsym, qparams = None, None
    if flag[3]:
        def box_nms(node, params, graph):
            name, op_name = node.attr('name'), node.attr('op_name')
            childs, attr = sutils.sym_iter(node.get_children()), node.list_attr()
            if op_name == '_contrib_box_nms':
                valid_thresh = sutils.get_attr(attr, 'valid_thresh', 0)
                attr['valid_thresh'] = int(valid_thresh * oscales[3])
                node = sutils.get_mxnet_op(op_name)(*childs, **attr, name=name)
            return node
        qsym, qparams = merge_model(qbase, qbase_params,
                top, top_params, maps, box_nms)
        oscales2 = [oscales[1], oscales[0], oscales[2]]
        sym_file, param_file, ext_file = \
                load_fname("_darknet53_voc", "mrt.all.quantize", True)
        open(sym_file, "w").write(qsym.tojson())
        nd.save(param_file, qparams)
        sim.save_ext(ext_file, qbase_inputs_ext, oscales2)
    else:
        dump_sym, dump_params, dump_ext = \
                load_fname("_darknet53_voc", "mrt.all.quantize", True)
        qsym, qparams = mx.sym.load(dump_sym), nd.load(dump_params)
        _, oscales2 = sim.load_ext(dump_ext)

    if False:
        compile_to_cvm(qsym, qparams, "yolo_tfm",
                datadir="/data/ryt", input_shape=(1, 3, 416, 416))
        exit()

    metric = dataset.load_voc_metric()
    metric.reset()
    def yolov3(data, label):
        def net(data):
            tmp = base_graph(data.as_in_context(ctx))
            outs = top_graph(*tmp)
            return outs
        acc = validate_data(net, data, label, metric)
        return "{:6.2%}".format(acc)

    net2 = mx.gluon.nn.SymbolBlock(qsym,
            [mx.sym.var(n) for n in qbase_inputs_ext])
    utils.load_parameters(net2, qparams, ctx=qctx)
    net2_metric = dataset.load_voc_metric()
    net2_metric.reset()
    def mrt_quantize(data, label):
        def net(data):
            data = sim.load_real_data(data, 'data', qbase_inputs_ext)
            outs = net2(data.astype("float64").as_in_context(qctx))
            outs = [o.as_in_context(ctx) / oscales2[i] \
                    for i, o in enumerate(outs)]
            return outs
        acc = validate_data(net, data, label, net2_metric)
        return "{:6.2%}".format(acc)

    utils.multi_validate(yolov3, data_iter_func,
            mrt_quantize,
            iter_num=iter_num, logger=logger)
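The flag list at the top of test_mrt_quant maps onto the four stages above: 0 split, 1 calibrate, 2 quantize, 3 merge. Raising from_scratch reuses more of the previously dumped files; a hypothetical call:

# reuse the cached split model and calibration thresholds,
# then redo quantization and merging
test_mrt_quant(batch_size=1, iter_num=10, from_scratch=2)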
Example #31
0
    latent_size = 16  # 25, 36, 49, 64
    conv_layers = 3
    loss = 'mse'  # alternatives: 'bin-xent'
    opt = 'adadelta'  # alternatives: 'adam'
    model_label = 'ae_v2'
    do_train = False
    do_test = True
    interactive = True
    num_filters = 3
    kernel_size = 3
    kernel_mult = 1

    # TODO: load config from config.ini
    parameters_filepath = "config.ini"
    # TODO: copy parameters to separate versioned files (to archive parameters for each run)
    parameters = load_parameters(parameters_filepath)

    do_train = eval(parameters["general"]["do_train"])
    do_test = eval(parameters["general"]["do_test"])
    selected_gpu = eval(parameters["general"]["selected_gpu"])
    interactive = eval(parameters["general"]["interactive"])
    include_forward_model = eval(
        parameters["general"]["include_forward_model"])
    train_only_forward = eval(parameters["general"]["train_only_forward"])

    train_dir = eval(parameters["dataset"]["train_dir"])
    valid_dir = eval(parameters["dataset"]["valid_dir"])
    test_dir = eval(parameters["dataset"]["test_dir"])
    img_shape = eval(parameters["dataset"]["img_shape"])

    total_images = int(parameters["synthetic"]["total_images"])
Example #32
0
# Check parameters validity
assert os.path.isfile(opts.train)
assert os.path.isfile(opts.dev)
if opts.pre_emb:
    assert opts.embedding_dim in [50, 100, 200, 300]

# load datasets
if not opts.load:
    dictionaries = prepare_dictionaries(Parse_parameters)
else:
    # load dictionaries
    with open(opts.load+'/dictionaries.dic', 'rb') as f:
        dictionaries = cPickle.load(f)
    # load parameters
    opts = load_parameters(opts.load, opts)


tagset_size = len(dictionaries['tag_to_id'])

train_data = load_dataset(Parse_parameters, opts.train, dictionaries)
dev_data = load_dataset(Parse_parameters, opts.dev, dictionaries)


# Model parameters
Model_parameters = OrderedDict()
Model_parameters['vocab_size'] = opts.vocab_size
Model_parameters['embedding_dim'] = opts.embedding_dim
Model_parameters['hidden_dim'] = opts.hidden_dim
Model_parameters['tagset_size'] = tagset_size
Model_parameters['decode_method'] = opts.decode_method