Example #1
0
    def __init__(self, problem, run, evaluator, **kwargs):
        """Initialize the search agent across MPI ranks.

        Rank 0 runs the base-class initialization first; every other rank
        waits at the barrier and initializes afterwards (presumably so that
        shared setup performed by rank 0 — e.g. a cache keyed by
        ``cache_key`` — exists before the workers start; TODO confirm).

        :param problem: problem definition; must expose a ``space`` attribute.
        :param run: function executed for each evaluation (stored by the
            base class as ``self.run_func``).
        :param evaluator: evaluation backend handed to the base class.
        :param kwargs: optional keys used here: ``num_episodes`` (defaults
            to ``math.inf`` when absent) and required ``reward_rule`` (name
            of an attribute under ``nas4candle.nasapi.search.nas.agent.utils``).
        """
        # NOTE(review): `key` is not defined in this method nor visible in
        # this chunk — verify it resolves at module level before it is
        # passed as cache_key on both branches below.
        self.rank = MPI.COMM_WORLD.Get_rank()
        if self.rank == 0:
            super().__init__(problem, run, evaluator, cache_key=key, **kwargs)
        MPI.COMM_WORLD.Barrier()
        if self.rank != 0:
            super().__init__(problem, run, evaluator, cache_key=key, **kwargs)
        # set in super : self.problem
        # set in super : self.run_func
        # set in super : self.evaluator

        # Number of RL episodes to run; unlimited when not provided.
        self.num_episodes = kwargs.get('num_episodes')
        if self.num_episodes is None:
            self.num_episodes = math.inf

        # Resolve the reward-shaping rule by dotted name from the agent
        # utils module; KeyError here means 'reward_rule' was not supplied.
        self.reward_rule = util.load_attr_from(
            'nas4candle.nasapi.search.nas.agent.utils.' +
            kwargs['reward_rule'])

        self.space = self.problem.space

        logger.debug(f'evaluator: {type(self.evaluator)}')

        # One MPI rank acts as the parameter server; the rest are agents.
        self.num_agents = MPI.COMM_WORLD.Get_size(
        ) - 1  # one is  the parameter server

        logger.debug(f'num_agents: {self.num_agents}')
        logger.debug(f'rank: {self.rank}')
Example #2
0
    def __init__(self, problem, run, evaluator, **kwargs):
        """Initialize the search agent (PPO-style variant) across MPI ranks.

        Rank 0 runs the base-class initialization first; every other rank
        waits at the barrier and initializes afterwards (presumably so that
        shared setup performed by rank 0 exists before the workers start;
        TODO confirm).

        :param problem: problem definition; must expose a ``space`` attribute.
        :param run: function executed for each evaluation (stored by the
            base class as ``self.run_func``).
        :param evaluator: evaluation backend handed to the base class.
        :param kwargs: optional keys read here: ``num_episodes``,
            ``clip_param``, ``entcoeff``, ``optim_epochs``,
            ``optim_stepsize``, ``optim_batchsize``, ``gamma``, ``lam``;
            required: ``reward_rule``.
        """
        # NOTE(review): `key` is not defined in this method nor visible in
        # this chunk — verify it resolves before it is used as cache_key.
        self.rank = MPI.COMM_WORLD.Get_rank()
        if self.rank == 0:
            super().__init__(problem, run, evaluator, cache_key=key, **kwargs)
        MPI.COMM_WORLD.Barrier()
        if self.rank != 0:
            super().__init__(problem, run, evaluator, cache_key=key, **kwargs)
        # set in super : self.problem
        # set in super : self.run_func
        # set in super : self.evaluator
        # self.evaluator = Evaluator.create(self.run_func,
        #                                   cache_key=key,
        #                                   method=evaluator)

        # Number of RL episodes to run; unlimited when not provided.
        self.num_episodes = kwargs.get('num_episodes')
        if self.num_episodes is None:
            self.num_episodes = math.inf
        # Optimization hyperparameters (names suggest PPO); each is None
        # when absent from kwargs.
        self.clip_param = kwargs.get('clip_param')
        self.entcoeff = kwargs.get('entcoeff')
        self.optim_epochs = kwargs.get('optim_epochs')
        self.optim_stepsize = kwargs.get('optim_stepsize')
        self.optim_batch_size = kwargs.get('optim_batchsize')
        self.gamma = kwargs.get('gamma')
        self.lam = kwargs.get('lam')

        # Resolve the reward-shaping rule by dotted name from the agent
        # utils module; KeyError here means 'reward_rule' was not supplied.
        self.reward_rule = util.load_attr_from('nas4candle.nasapi.search.nas.agent.utils.'+kwargs['reward_rule'])

        self.space = self.problem.space

        logger.debug(f'evaluator: {type(self.evaluator)}')

        # One MPI rank acts as the parameter server; the rest are agents.
        self.num_agents = MPI.COMM_WORLD.Get_size() - 1 # one is  the parameter server

        logger.debug(f'num_agents: {self.num_agents}')
        logger.debug(f'rank: {self.rank}')
Example #3
0
def run_model_posttraining(config):
    """Rebuild a discovered architecture, train it, and return its metrics.

    Loads the four-input dataset, builds the structure described by
    ``config['create_structure']``, applies the action sequence
    ``config['arch_seq']``, trains with Adam / MSE, and returns the Keras
    history dict augmented with the parameter count and wall-clock
    training time. Also writes ``nas_model_uno.dot`` and
    ``keras_model_uno.png`` as side effects.

    :param config: dict holding ``hyperparameters.num_epochs``,
        ``create_structure`` (``func`` dotted name, optional ``kwargs``)
        and ``arch_seq``; ``create_structure['func']`` is replaced in
        place by the loaded callable.
    :return: ``history.history`` plus ``n_parameters`` and
        ``training_time`` entries.
    """
    raw = load_data_source()

    train_inputs = [raw[f'x_train_{i}'] for i in range(4)]
    train_target = raw['y_train']
    valid_inputs = [raw[f'x_val_{i}'] for i in range(4)]
    valid_target = raw['y_val']

    epochs = config['hyperparameters']['num_epochs']
    batch = 32  # config['hyperparameters']['batch_size']

    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])
    build = config['create_structure']['func']

    # One input shape per branch (drop the batch dimension).
    in_shapes = [np.shape(arr)[1:] for arr in train_inputs]
    out_shape = (1, )

    build_kwargs = config['create_structure'].get('kwargs')
    if build_kwargs is None:
        structure = build(in_shapes, out_shape)
    else:
        structure = build(in_shapes, out_shape, **build_kwargs)

    structure.set_ops(config['arch_seq'])
    structure.draw_graphviz('nas_model_uno.dot')

    model = structure.create_model()

    from keras.utils import plot_model
    plot_model(model, 'keras_model_uno.png', show_shapes=True)

    n_params = model.count_params()

    opt = optimizers.deserialize({'class_name': 'adam', 'config': {}})
    model.compile(loss='mse', optimizer=opt, metrics=[mae, r2])

    started = time.time()
    history = model.fit(train_inputs,
                        train_target,
                        batch_size=batch,
                        epochs=epochs,
                        validation_data=(valid_inputs, valid_target))
    elapsed = time.time() - started

    report = history.history
    report['n_parameters'] = n_params
    report['training_time'] = elapsed

    print(report)

    return report
Example #4
0
def run_model(config):
    """Build and train a NAS candidate model and return its first-epoch
    validation R^2.

    Loads the four-input dataset via ``load_data2()``, builds the structure
    described by ``config['create_structure']``, applies the action
    sequence ``config['arch_seq']``, then trains with Adam / MSE under a
    ``StopIfUnfeasible`` callback with a 900 s time limit.

    :param config: dict with ``hyperparameters.num_epochs``,
        ``create_structure`` (``func`` dotted name, optional ``kwargs``)
        and ``arch_seq``; ``create_structure['func']`` is replaced in
        place by the loaded callable.
    :return: ``history.history['val_r2'][0]``, or ``-1.0`` when that
        metric is missing or empty (e.g. training was stopped before one
        full epoch completed).
    """
    params = initialize_parameters()

    args = Struct(**params)

    data = load_data2()

    x_train_list = [data[f'x_train_{i}'] for i in range(4)]
    y_train = data['y_train']
    x_val_list = [data[f'x_val_{i}'] for i in range(4)]
    y_val = data['y_val']

    num_epochs = config['hyperparameters']['num_epochs']
    batch_size = args.batch_size  # config['hyperparameters']['batch_size']

    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])

    # One input shape per branch (drop the batch dimension).
    input_shape = [np.shape(a)[1:] for a in x_train_list]
    print('input_shape: ', input_shape)
    output_shape = (1, )
    print('output_shape: ', output_shape)

    cs_kwargs = config['create_structure'].get('kwargs')
    if cs_kwargs is None:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape)
    else:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape,
                                                       **cs_kwargs)

    arch_seq = config['arch_seq']

    print(f'actions list: {arch_seq}')

    structure.set_ops(arch_seq)
    #structure.draw_graphviz('model_global_uno.dot')

    model = structure.create_model()

    #from keras.utils import plot_model
    #plot_model(model, 'model_global_combo.png', show_shapes=True)

    model.summary()

    optimizer = optimizers.deserialize({'class_name': 'adam', 'config': {}})

    model.compile(loss='mse', optimizer=optimizer, metrics=[mae, r2])

    stop_if_unfeasible = StopIfUnfeasible(time_limit=900)

    history = model.fit(x_train_list,
                        y_train,
                        batch_size=batch_size,
                        epochs=num_epochs,
                        callbacks=[stop_if_unfeasible],
                        validation_data=(x_val_list, y_val))

    print('avr_batch_timing :', stop_if_unfeasible.avr_batch_time)
    print('avr_timing: ', stop_if_unfeasible.estimate_training_time)
    print('stopped: ', stop_if_unfeasible.stopped)

    print(history.history)

    # Only a missing metric (KeyError) or an empty history (IndexError)
    # maps to the failure score; the previous bare `except:` would also
    # have swallowed KeyboardInterrupt/SystemExit.
    try:
        return history.history['val_r2'][0]
    except (KeyError, IndexError):
        return -1.0
Example #5
0
def run(config):
    """Evaluate one architecture: load data, build the model, train, and
    return a scalar reward.

    :param config: dict describing the evaluation; mutated in place
        (``load_data.func``, ``create_structure.func`` and
        ``preprocessing.func`` are replaced by loaded callables, and a
        ``data`` entry is added).
    :return: for regression, ``-log(res) + log(float32 max)`` of the
        trainer result (clamped below at the float32 minimum); for
        classification, the trainer result; ``-1`` when the model could
        not be created (penalizes infeasible action sequences).
    """
    # load functions
    load_data = util.load_attr_from(config['load_data']['func'])
    config['load_data']['func'] = load_data
    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])

    # Loading data
    kwargs = config['load_data'].get('kwargs')
    data = load_data() if kwargs is None else load_data(**kwargs)
    logger.info(f'Data loaded with kwargs: {kwargs}')

    # Set data shape. A tuple must be ((t_X, t_y), (v_X, v_y)); a dict is
    # stored as-is with shapes read from its 'shapes' entry.
    if type(data) is tuple:
        if len(data) != 2:
            raise RuntimeError(
                f'Loaded data are tuple, should ((training_input, training_output), (validation_input, validation_output)) but length=={len(data)}'
            )
        (t_X, t_y), (v_X, v_y) = data
        if type(t_X) is np.ndarray and  type(t_y) is np.ndarray and \
            type(v_X) is np.ndarray and type(v_y) is np.ndarray:
            input_shape = np.shape(t_X)[1:]
        elif type(t_X) is list and type(t_y) is np.ndarray and \
            type(v_X) is list and type(v_y) is np.ndarray:
            input_shape = [np.shape(itX)[1:] for itX in t_X
                           ]  # interested in shape of data not in length
        else:
            raise RuntimeError(
                f'Data returned by load_data function are of a wrong type: type(t_X)=={type(t_X)},  type(t_y)=={type(t_y)}, type(v_X)=={type(v_X)}, type(v_y)=={type(v_y)}'
            )
        output_shape = np.shape(t_y)[1:]
        config['data'] = {
            'train_X': t_X,
            'train_Y': t_y,
            'valid_X': v_X,
            'valid_Y': v_y
        }
    elif type(data) is dict:
        config['data'] = data
        input_shape = [
            data['shapes'][0][f'input_{i}']
            for i in range(len(data['shapes'][0]))
        ]
        output_shape = data['shapes'][1]
    else:
        raise RuntimeError(
            f'Data returned by load_data function are of an unsupported type: {type(data)}'
        )

    logger.info(f'input_shape: {input_shape}')
    logger.info(f'output_shape: {output_shape}')

    cs_kwargs = config['create_structure'].get('kwargs')
    if cs_kwargs is None:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape)
    else:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape,
                                                       **cs_kwargs)

    arch_seq = config['arch_seq']

    logger.info(f'actions list: {arch_seq}')

    structure.set_ops(arch_seq)

    if config.get('preprocessing') is not None:
        preprocessing = util.load_attr_from(config['preprocessing']['func'])
        config['preprocessing']['func'] = preprocessing
    else:
        config['preprocessing'] = None

    # Model creation is deliberately best-effort: an infeasible action
    # sequence raises here and is penalized below instead of crashing the
    # search. Catch Exception, not everything — a bare `except:` would
    # also swallow KeyboardInterrupt/SystemExit.
    model_created = False
    if config['regression']:
        try:
            model = structure.create_model()
            model_created = True
        except Exception:
            model_created = False
            logger.info('Error: Model creation failed...')
            logger.info(traceback.format_exc())
        if model_created:
            trainer = TrainerRegressorTrainValid(config=config, model=model)
    else:
        try:
            model = structure.create_model(activation='softmax')
            model_created = True
        except Exception:
            model_created = False
            logger.info('Error: Model creation failed...')
            logger.info(traceback.format_exc())
        if model_created:
            trainer = TrainerClassifierTrainValid(config=config, model=model)

    if model_created:
        # 0 < reward regression < 105
        # 0 < reward classification < 100
        res = trainer.train()
        if config['regression']:
            # Clamp to the float32 range before the log transform so the
            # reward stays finite.
            if res < np.finfo('float32').min:
                res = np.finfo('float32').min
            res = -np.log(res) + np.log(np.finfo('float32').max)
        result = res
    else:
        # penalising actions if model cannot be created
        result = -1
    return result
Example #6
0
def run_model(config):
    """Build and train a three-input (combo) NAS model and return its
    first-epoch validation R^2.

    Timings for model creation, data loading, and training are printed.
    Input shapes are fixed at (942,), (3820,), (3820,); output is (1,).
    Training runs under a ``StopIfUnfeasible`` callback with a 1200 s
    time limit.

    :param config: dict with ``hyperparameters.num_epochs``,
        ``create_structure`` (``func`` dotted name, optional ``kwargs``),
        ``arch_seq``, and optionally ``load_data.prop``;
        ``create_structure['func']`` is replaced in place by the loaded
        callable.
    :return: ``history.history['val_r2'][0]``, or ``-1.0`` when that
        metric is missing or empty.
    """
    t1 = time.time()
    num_epochs = config['hyperparameters']['num_epochs']

    config['create_structure']['func'] = util.load_attr_from(
        config['create_structure']['func'])

    input_shape = [(942, ), (3820, ), (3820, )]
    output_shape = (1, )

    cs_kwargs = config['create_structure'].get('kwargs')
    if cs_kwargs is None:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape)
    else:
        structure = config['create_structure']['func'](input_shape,
                                                       output_shape,
                                                       **cs_kwargs)

    arch_seq = config['arch_seq']

    print(f'actions list: {arch_seq}')

    structure.set_ops(arch_seq)
    # structure.draw_graphviz('model_global_combo.dot')

    model = structure.create_model()

    # from keras.utils import plot_model
    # plot_model(model, 'model_global_combo.png', show_shapes=True)

    model.summary()
    t2 = time.time()
    t_model_create = t2 - t1
    print('Time model creation: ', t_model_create)
    t1 = time.time()
    params = initialize_parameters()
    args = Struct(**params)
    set_seed(args.rng_seed)

    optimizer = optimizers.deserialize({
        'class_name': args.optimizer,
        'config': {}
    })
    # NOTE: a previous version also computed an unused `base_lr` from
    # args/optimizer here; it has been dropped.
    if args.learning_rate:
        K.set_value(optimizer.lr, args.learning_rate)

    model.compile(loss=args.loss, optimizer=optimizer, metrics=[mae, r2])

    if config.get('load_data') is None:
        data = combo_ld_numpy(args)
    else:
        if not (config['load_data'].get('prop') is None):
            print('Data prop: ', config['load_data']['prop'])
            data = combo_ld_numpy(args, prop=config['load_data']['prop'])
        else:
            data = combo_ld_numpy(args)

    x_train_list = [data['x_train_0'], data['x_train_1'], data['x_train_2']]
    y_train = data['y_train']
    x_val_list = [data['x_val_0'], data['x_val_1'], data['x_val_2']]
    y_val = data['y_val']
    print('y_val shape:  ', np.shape(y_val))
    t2 = time.time()
    t_data_loading = t2 - t1
    print('Time data loading: ', t_data_loading)

    stop_if_unfeasible = StopIfUnfeasible(time_limit=1200)
    t1 = time.time()
    history = model.fit(x_train_list,
                        y_train,
                        batch_size=args.batch_size,
                        shuffle=args.shuffle,
                        epochs=num_epochs,
                        callbacks=[stop_if_unfeasible],
                        validation_data=(x_val_list, y_val))
    t2 = time.time()
    t_training = t2 - t1
    print('Time training: ', t_training)

    print('avr_batch_timing :', stop_if_unfeasible.avr_batch_time)
    print('avr_timing: ', stop_if_unfeasible.estimate_training_time)
    print('stopped: ', stop_if_unfeasible.stopped)

    print(history.history)

    # Only a missing metric (KeyError) or an empty history (IndexError)
    # maps to the failure score; the previous bare `except:` would also
    # have swallowed KeyboardInterrupt/SystemExit. The stray unused
    # `import sys` has been removed.
    try:
        return history.history['val_r2'][0]
    except (KeyError, IndexError):
        return -1.0