Example #1
def set_params_by_dataset(params_config_path):
    config_dict = config_to_dict(params_config_path)
    if global_vars.get('dataset') in config_dict.keys():
        key = global_vars.get('dataset')
    elif global_vars.get('problem') == 'regression':
        key = 'default_regression'
    else:
        key = 'default'
    for param_name in config_dict[key]:
        global_vars.set_if_not_exists(param_name, config_dict[key][param_name])
    if global_vars.get('ensemble_iterations'):
        global_vars.set(
            'evaluation_metrics',
            global_vars.get('evaluation_metrics') + ['raw', 'target'])
        if not global_vars.get('ensemble_size'):
            global_vars.set('ensemble_size',
                            int(global_vars.get('pop_size') / 100))
    if global_vars.get('dataset') == 'netflow_asflow' and global_vars.get(
            'all_netflow_files'):
        netflow_dir = (f'{os.path.dirname(os.path.abspath(__file__))}'
                       f'/../data/netflow/all_AS')
        # keep only files above the configured size; >> 20 converts bytes to MiB
        file_paths = [
            f for f in os.listdir(netflow_dir)
            if (os.path.getsize(f'{netflow_dir}/{f}') >> 20) >
            global_vars.get('min_netflow_file_size')
        ]
        global_vars.set('netflow_file_names', file_paths)
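
All of these snippets read and write configuration through a shared global_vars module. A minimal sketch of the interface they assume (hypothetical; the real module also exposes a parsed config, as _stack_input_by_time further below shows):

# Hypothetical minimal global_vars module, inferred from the calls above.
_store = {}
config = {}  # e.g. parsed from an INI file

def get(key):
    return _store.get(key)

def set(key, value):
    _store[key] = value

def set_if_not_exists(key, value):
    # leave already-configured parameters untouched
    if key not in _store:
        _store[key] = value
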
Example #2
def get_sleep_classifier():
    model = nn.Sequential()

    model.add_module('permute_1',
                     Expression(MyModel._transpose_shift_and_swap))
    model.add_module(
        'conv_1',
        nn.Conv2d(1,
                  global_vars.get('eeg_chans'),
                  kernel_size=(global_vars.get('eeg_chans'), 1)))
    model.add_module('permute_2',
                     Expression(MyModel._transpose_channels_with_length))
    model.add_module('conv_2', nn.Conv2d(1, 8, kernel_size=(1, 64), stride=1))
    model.add_module('pool_1', nn.MaxPool2d(kernel_size=(1, 16),
                                            stride=(1, 1)))
    model.add_module('conv_3', nn.Conv2d(8, 8, kernel_size=(1, 64), stride=1))
    model.add_module('pool_2', nn.MaxPool2d(kernel_size=(1, 16),
                                            stride=(1, 1)))
    model.add_module('flatten', Flatten())
    model.add_module('dropout', nn.Dropout(p=0.5))

    # run a dummy batch through the network to discover the flattened
    # feature count that the dense layer must accept
    input_shape = (2, global_vars.get('eeg_chans'),
                   global_vars.get('input_time_len'), 1)
    out = model(np_to_var(np.ones(input_shape, dtype=np.float32)))
    dim = 1
    for muldim in out.shape[1:]:
        dim *= muldim
    model.add_module(
        'dense',
        nn.Linear(in_features=dim, out_features=global_vars.get('n_classes')))
    model.add_module('softmax', nn.Softmax(dim=1))

    return model
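
The dummy forward pass above is a generic way to size the final nn.Linear when the flattened feature count is awkward to derive by hand. A self-contained sketch of the same trick (illustrative shapes, not the EEG configuration):

import torch
from torch import nn

model = nn.Sequential(
    nn.Conv2d(1, 8, kernel_size=(1, 64)),
    nn.MaxPool2d(kernel_size=(1, 16), stride=(1, 1)),
    nn.Flatten())
# one dummy batch reveals the flattened feature count
dummy = torch.ones((2, 1, 22, 1125), dtype=torch.float32)
n_features = model(dummy).shape[1]
model.add_module('dense', nn.Linear(n_features, 4))
print(model(dummy).shape)  # torch.Size([2, 4])
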
Example #3
def get_netflow_threshold(file, stds, handover='sum'):
    df = get_whole_netflow_data(file)
    values = df[handover].values
    values = values.reshape(-1, 1)
    scaler = None
    if global_vars.get('normalize_netflow_data'):
        scaler = MinMaxScaler()
        scaler.fit(values)
        values = scaler.transform(values)
    if stds == 'auto':
        # the evaluation window is the same for every candidate threshold,
        # so build it once outside the search loop
        if global_vars.get('k_fold'):
            relevant_df = df.iloc[2234:]
        else:
            relevant_df = df.iloc[20457:]
        # keep only the evening peak hours (17:00-21:00)
        relevant_df = relevant_df[np.logical_and(
            17 <= relevant_df.index.hour, relevant_df.index.hour <= 21)]
        # take the maximum of every 5 consecutive samples
        relevant_values = relevant_df[handover].values.reshape(-1, 5).max(axis=1)
        relevant_values = relevant_values.reshape(-1, 1)
        # guard: the scaler only exists when normalization is enabled
        if scaler is not None:
            relevant_values = scaler.transform(relevant_values)
        for std in reversed(np.arange(-3, 3.1, 0.1)):
            thresh_to_test = values.mean() + values.std() * std
            num_over = np.count_nonzero(relevant_values > thresh_to_test)
            num_under = np.count_nonzero(relevant_values <= thresh_to_test)
            overflow_ratio = num_over / (num_over + num_under)
            if 0.07 <= overflow_ratio <= 0.2:
                return thresh_to_test, std
        for r in reversed(sorted(relevant_values)):
            num_over = np.count_nonzero(relevant_values > r)
            num_under = np.count_nonzero(relevant_values <= r)
            overflow_ratio = num_over / (num_over + num_under)
            if 0.07 <= overflow_ratio:
                return r[0], 'custom'
    else:
        return values.mean() + values.std() * stds, stds
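
In the 'auto' branch above, the function scans multipliers k from 3.0 down to -3.0 and keeps the first threshold mean + k * std whose overflow ratio lands in [0.07, 0.2]. A self-contained toy version of that acceptance test (synthetic data; the bounds are taken from the code above):

import numpy as np

rng = np.random.default_rng(0)
values = rng.normal(size=1000)

for k in reversed(np.arange(-3, 3.1, 0.1)):
    thresh = values.mean() + values.std() * k
    overflow_ratio = np.count_nonzero(values > thresh) / len(values)
    if 0.07 <= overflow_ratio <= 0.2:
        print(f'k={k:.1f}, threshold={thresh:.3f}, overflow={overflow_ratio:.2%}')
        break
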
Example #4
def target_model(model_name):
    input_time_len = global_vars.get('input_time_len')
    n_classes = global_vars.get('n_classes')
    eeg_chans = global_vars.get('eeg_chans')
    models = {
        'deep':
        deep4.Deep4Net(eeg_chans,
                       n_classes,
                       input_time_len,
                       final_conv_length='auto'),
        'shallow':
        shallow_fbcsp.ShallowFBCSPNet(eeg_chans,
                                      n_classes,
                                      input_time_len,
                                      final_conv_length='auto'),
        'eegnet':
        eegnet.EEGNet(eeg_chans,
                      n_classes,
                      input_time_length=input_time_len,
                      final_conv_length='auto'),
        'sleep_classifier':
        get_sleep_classifier(),
        'oh_parkinson':
        OhParkinson
    }
    final_conv_sizes = {'deep': 2, 'shallow': 30, 'eegnet': 2}
    final_conv_sizes = defaultdict(int, final_conv_sizes)
    global_vars.set('final_conv_size', final_conv_sizes[model_name])
    if model_name == 'sleep_classifier':
        return models[model_name]
    else:
        model = models[model_name].create_network()
        return model
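
Note that the models dict above builds every candidate network eagerly (including the get_sleep_classifier() call) even though only one is returned. A generic lazy-dispatch sketch, with stand-in builders rather than the real constructors:

from typing import Callable, Dict

def build_lazily(name: str, builders: Dict[str, Callable[[], object]]) -> object:
    # only the requested builder runs; the others stay un-instantiated
    return builders[name]()

# usage with placeholder constructors
builders = {
    'deep': lambda: 'deep network placeholder',
    'shallow': lambda: 'shallow network placeholder',
}
model = build_lazily('deep', builders)
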
Example #5
def get_data_from_npy(data_folder):
    base_dir = (f'{os.path.dirname(os.path.abspath(__file__))}/'
                f'{data_folder}{global_vars.get("dataset")}')
    X_train = np.load(f'{base_dir}/X_train.npy')
    X_test = np.load(f'{base_dir}/X_test.npy')
    y_train = np.load(f'{base_dir}/y_train.npy')
    y_test = np.load(f'{base_dir}/y_test.npy')
    global_vars.set('eeg_chans', X_train.shape[1])
    global_vars.set('input_height', X_train.shape[2])
    if X_train.ndim > 3:
        global_vars.set('input_width', X_train.shape[3])
    if global_vars.get('problem') == 'regression':
        global_vars.set('n_classes', y_train.shape[1])
    else:
        global_vars.set('n_classes', len(np.unique(y_train)))
    X_train, X_val, y_train, y_val = train_test_split(
        X_train, y_train, test_size=global_vars.get('valid_set_fraction'))
    train_set, valid_set, test_set = makeDummySignalTargets(
        X_train, y_train, X_val, y_val, X_test, y_test)
    return train_set, valid_set, test_set
Example #6
def avg_class_tf_report(model, dataset, folder_name):
    report_file_name = f'{folder_name}/{global_vars.get("report")}.pdf'
    if os.path.isfile(report_file_name):
        return
    eeg_chans = list(range(global_vars.get('eeg_chans')))
    dataset = unify_dataset(dataset)
    class_examples = []
    for class_idx in range(global_vars.get('n_classes')):
        class_examples.append(dataset.X[np.where(dataset.y == class_idx)])
        if global_vars.get('to_eeglab'):
            tensor_to_eeglab(class_examples[-1], f'{folder_name}/avg_class_tf/{label_by_idx(class_idx)}.mat')
    chan_data = np.zeros((global_vars.get('n_classes'), len(eeg_chans),
                          global_vars.get('num_frex'),
                          global_vars.get('input_height')))
    for class_idx in range(global_vars.get('n_classes')):
        for eeg_chan in eeg_chans:
            chan_data[class_idx, eeg_chan] = get_tf_data_efficient(
                class_examples[class_idx], eeg_chan,
                global_vars.get('frequency'), global_vars.get('num_frex'),
                dB=global_vars.get('db_normalization'))
    max_value = np.max(chan_data)
    tf_plots = []
    for class_idx in range(global_vars.get('n_classes')):
        tf_plots.append(tf_plot(chan_data[class_idx], f'average TF for {label_by_idx(class_idx)}', max_value))
    story = [get_image(tf) for tf in tf_plots]
    create_pdf_from_story(report_file_name, story)
    for tf in tf_plots:
        os.remove(tf)
Example #7
def generate_dummy_data(batch_size):
    # np.random.randint's upper bound is exclusive, so pass n_classes
    # directly to draw labels from the full range [0, n_classes)
    dummy_data = DummySignalTarget(
        np.random.rand(batch_size, global_vars.get('eeg_chans'),
                       global_vars.get('input_time_len')),
        np.random.randint(0, global_vars.get('n_classes'), batch_size))
    return dummy_data, dummy_data, dummy_data
Example #8
def run_one_epoch(self, datasets, model):
    model.train()
    batch_generator = self.iterator.get_batches(datasets['train'],
                                                shuffle=True)
    for inputs, targets in batch_generator:
        input_vars = np_to_var(inputs,
                               pin_memory=global_vars.get('pin_memory'))
        target_vars = np_to_var(targets,
                                pin_memory=global_vars.get('pin_memory'))
        if self.cuda:
            with torch.cuda.device(0):
                input_vars = input_vars.cuda()
                target_vars = target_vars.cuda()
        self.optimizer.zero_grad()
        outputs = model(input_vars)
        if self.loss_function == F.mse_loss:
            # regression targets must be float for MSE
            target_vars = target_vars.float()
        loss = self.loss_function(outputs.squeeze(), target_vars)
        loss.backward()
        self.optimizer.step()
    self.monitor_epoch(datasets, model)
    if global_vars.get('log_epochs'):
        self.log_epoch()
    if global_vars.get('remember_best'):
        self.rememberer.remember_epoch(self.epochs_df, model,
                                       self.optimizer)
Example #9
def __init__(self,
             kernel_height=None,
             kernel_width=None,
             filter_num=None,
             stride=None,
             dilation_height=None,
             name=None):
    Layer.__init__(self, name)
    # any hyperparameter left as None is sampled uniformly at random,
    # bounded by the corresponding configured maximum
    if kernel_height is None:
        kernel_height = random.randint(
            1, global_vars.get('kernel_height_max'))
    if kernel_width is None:
        kernel_width = random.randint(1,
                                      global_vars.get('kernel_width_max'))
    if filter_num is None:
        filter_num = random.randint(1, global_vars.get('filter_num_max'))
    if stride is None:
        stride = random.randint(1, global_vars.get('conv_stride_max'))
    if dilation_height is None:
        dilation_height = random.randint(
            1, global_vars.get('max_dilation_height'))
    self.kernel_height = kernel_height
    self.kernel_width = kernel_width
    self.filter_num = filter_num
    self.stride = stride
    self.dilation_height = dilation_height
Example #10
def check_grid_shapes(layer_grid):
    input_chans = global_vars.get('eeg_chans')
    input_time = global_vars.get('input_time_len')
    input_shape = {'time': input_time, 'chans': input_chans}
    layers = layer_grid.copy()
    layers.nodes['input']['shape'] = input_shape
    descendants = nx.descendants(layers, 'input')
    descendants.add('input')
    to_remove = []
    for node in list(layers.nodes):
        if node not in descendants:
            to_remove.append(node)
    for node in to_remove:
        layers.remove_node(node)
    nodes_to_check = list(nx.topological_sort(layers))
    for node in nodes_to_check[1:]:
        predecessors = list(layers.predecessors(node))
        try:
            pred_shapes = [
                layers.nodes[pred]['shape']['time'] for pred in predecessors
            ]
            min_time = int(min(pred_shapes))
            sum_chans = int(
                sum([
                    layers.nodes[pred]['shape']['chans']
                    for pred in predecessors
                ]))
            layers.nodes[node]['shape'] = calc_shape_channels(
                {
                    'time': min_time,
                    'chans': sum_chans
                }, layers.nodes[node]['layer'])
        except ValueError:
            return False
    return True
Example #11
    def test_draw_grid_model(self):
        layer_grid = create_empty_copy(nx.to_directed(nx.grid_2d_graph(5, 5)))
        for node in layer_grid.nodes.values():
            node['layer'] = models_generation.random_layer()
        layer_grid.add_node('input')
        layer_grid.add_node('output_conv')
        layer_grid.nodes['output_conv']['layer'] = models_generation.IdentityLayer()
        layer_grid.nodes[(0, 0)]['layer'] = ConvLayer(filter_num=50)
        layer_grid.nodes[(0, 1)]['layer'] = ConvLayer(filter_num=50)
        layer_grid.nodes[(0, 2)]['layer'] = DropoutLayer()
        layer_grid.add_edge('input', (0, 0))
        layer_grid.add_edge((0, 5 - 1), 'output_conv')
        for i in range(5 - 1):
            layer_grid.add_edge((0, i), (0, i + 1))
        layer_grid.graph['height'] = 5
        layer_grid.graph['width'] = 5
        if models_generation.check_legal_grid_model(layer_grid):
            print('legal model')
        else:
            print('illegal model')

        # model = models_generation.random_grid_model(10)
        layer_grid.add_edge((0, 0), (0, 2))
        real_model = models_generation.ModelFromGrid(layer_grid)

        # model = models_generation.random_model(10)
        # real_model = finalize_model(model)

        # for i in range(100):
        #     add_random_connection(model)
        input_shape = (60, global_vars.get('eeg_chans'), global_vars.get('input_time_len'), 1)
        out = real_model(np_to_var(np.ones(input_shape, dtype=np.float32)))
        s = Source(make_dot(out), filename="test.gv", format="png")
        s.view()
Example #12
def activate_model_evaluation(self,
                              model,
                              state=None,
                              subject=None,
                              final_evaluation=False,
                              ensemble=False):
    print(
        f'free params in network:{NAS_utils.pytorch_count_params(model)}')
    if subject is None:
        subject = self.subject_id
    if self.cuda:
        torch.cuda.empty_cache()
    if final_evaluation:
        # for the final run, train longer and stop only when validation
        # accuracy plateaus
        self.stop_criterion = Or([
            MaxEpochs(global_vars.get('final_max_epochs')),
            NoIncreaseDecrease(
                'valid_accuracy',
                global_vars.get('final_max_increase_epochs'))
        ])
    if global_vars.get('cropping'):
        self.set_cropping_for_model(model)
    dataset = self.get_single_subj_dataset(subject, final_evaluation)
    nn_trainer = NN_Trainer(self.iterator, self.loss_function,
                            self.stop_criterion, self.monitors)
    return nn_trainer.train_and_evaluate_model(model, dataset, state=state)
Example #13
def find_optimal_samples_report(pretrained_model, dataset, folder_name):
    report_file_name = f'{folder_name}/{global_vars.get("report")}.pdf'
    if os.path.isfile(report_file_name):
        return
    eeg_chans = list(range(global_vars.get('eeg_chans')))
    plot_dict = OrderedDict()
    dataset = unify_dataset(dataset)
    for layer_idx, layer in list(enumerate(pretrained_model.children()))[global_vars.get('layer_idx_cutoff'):]:
        max_examples = get_max_examples_per_channel(dataset.X, layer_idx, pretrained_model)
        for chan_idx, example_idx in enumerate(max_examples):
            tf_data = []
            for eeg_chan in eeg_chans:
                tf_data.append(get_tf_data_efficient(dataset.X[example_idx][None, :, :], eeg_chan, 250))
            max_value = np.max(np.array(tf_data))
            class_str = ''
            if layer_idx >= len(list(pretrained_model.children())) - 3:
                class_str = f', class:{label_by_idx(chan_idx)}'
            plot_dict[(layer_idx, chan_idx)] = tf_plot(
                tf_data,
                f'TF plot of example {example_idx} for layer '
                f'{layer_idx}, channel {chan_idx}{class_str}', max_value)
            print(f'plot most activating TF for layer {layer_idx}, channel {chan_idx}')

    img_paths = list(plot_dict.values())
    story = []
    story.append(Paragraph('<br />\n'.join([f'{x}:{y}' for x,y in pretrained_model._modules.items()]), style=styles["Normal"]))
    for im in img_paths:
        story.append(get_image(im))
    create_pdf_from_story(report_file_name, story)
    for im in img_paths:
        os.remove(im)
Example #14
def get_data_by_balanced_folds(ASs, fold_idxs, required_num_samples=None):
    prev_autonomous_systems = global_vars.get('autonomous_systems')
    folds = {i: {'X_train': [], 'X_test': [], 'y_train': [], 'y_test': []} for i in range(global_vars.get('n_folds'))}
    for AS in ASs:
        global_vars.set('autonomous_systems', [AS])
        dataset = get_dataset('all')
        concat_train_val_sets(dataset)
        dataset = unify_dataset(dataset)
        if np.count_nonzero(dataset.X) == 0:
            print(f'dropped AS {AS} - no common handovers')
            continue
        try:
            if required_num_samples is not None:
                assert len(dataset.X) == required_num_samples
            for fold_idx in range(global_vars.get('n_folds')):
                folds[fold_idx]['X_train'].extend(dataset.X[fold_idxs[fold_idx]['train_idxs']])
                folds[fold_idx]['X_test'].extend(dataset.X[fold_idxs[fold_idx]['test_idxs']])
                folds[fold_idx]['y_train'].extend(dataset.y[fold_idxs[fold_idx]['train_idxs']])
                folds[fold_idx]['y_test'].extend(dataset.y[fold_idxs[fold_idx]['test_idxs']])
        except (IndexError, AssertionError):
            print(f'dropped AS {AS}')
    for key in folds.keys():
        for inner_key in folds[key].keys():
            folds[key][inner_key] = np.stack(folds[key][inner_key], axis=0)
    global_vars.set('autonomous_systems', prev_autonomous_systems)
    return folds
Example #15
def maintain_bb_population(bb_population, weighted_population):
    for bb_idx, bb in enumerate(bb_population):
        found = False
        for pop in weighted_population:
            if is_sublist(bb['bb'], pop['model'])[0]:
                found = True
                break
        if not found:
            rand_pop = random.choice(weighted_population)['model']
            rand_idx = random.randint(0, len(rand_pop) - 2)
            bb = rand_pop[rand_idx:rand_idx + 2]
            bb_population[bb_idx] = {'bb': bb, 'fitness': 0}
        else:
            if random.random() < global_vars.get('puzzle_expansion_rate'):
                for pop in weighted_population:
                    sblst, sblst_idx = is_sublist(bb['bb'], pop['model'])
                    if sblst:
                        if random.random() < 0.5:
                            if sblst_idx < len(pop['model']) - len(bb['bb']):
                                bb['bb'].append(pop['model'][sblst_idx +
                                                             len(bb['bb'])])
                        else:
                            if sblst_idx > 0:
                                bb['bb'].insert(0, pop['model'][sblst_idx - 1])
                        break
            if random.random() < global_vars.get('puzzle_replacement_rate'):
                rand_pop = random.choice(weighted_population)['model']
                rand_idx = random.randint(0, len(rand_pop) - 2)
                bb = rand_pop[rand_idx:rand_idx + 2]
                bb_population[bb_idx] = {'bb': bb, 'fitness': 0}
    for elem in [[str(bbi) for bbi in bb['bb']] for bb in bb_population]:
        print(elem)
Example #16
def __init__(self, iterator, exp_folder, exp_name, loss_function,
             train_set, val_set, test_set, stop_criterion, monitors,
             subject_id, fieldnames, strategy, evolution_file, csv_file):
    global model_train_times
    model_train_times = []
    self.iterator = iterator
    self.exp_folder = exp_folder
    self.exp_name = exp_name
    self.monitors = monitors
    self.loss_function = loss_function
    self.stop_criterion = stop_criterion
    self.subject_id = subject_id
    self.datasets = OrderedDict(
        (('train', train_set), ('valid', val_set), ('test', test_set)))
    self.cuda = global_vars.get('cuda')
    self.loggers = [Printer()]
    self.fieldnames = fieldnames
    self.models_set = []
    self.genome_set = []
    self.evo_strategy = {
        'per_subject': self.one_strategy,
        'cross_subject': self.all_strategy
    }[strategy]
    self.csv_file = csv_file
    self.evolution_file = evolution_file
    self.current_model_index = -1
    if isinstance(self.subject_id, int):
        self.current_chosen_population_sample = [self.subject_id]
    else:
        self.current_chosen_population_sample = []
    self.mutation_rate = global_vars.get('mutation_rate')
Example #17
def add_model_to_stats(pop, model_index, model_stats):
    if global_vars.get('grid'):
        if global_vars.get('grid_as_ensemble'):
            for key, value in pop['weighted_avg_params'].items():
                model_stats[key] = value
    else:
        for i, layer in enumerate(pop['model']):
            model_stats[f'layer_{i}'] = type(layer).__name__
            for key, value in vars(layer).items():
                model_stats[f'layer_{i}_{key}'] = value
    if global_vars.get('perm_ensembles'):
        model_stats['ensemble_role'] = (model_index %
                                        global_vars.get('ensemble_size'))
        assert pop['perm_ensemble_role'] == model_stats['ensemble_role']
        model_stats['perm_ensemble_id'] = pop['perm_ensemble_id']
    if global_vars.get('delete_finalized_models'):
        finalized_model = finalize_model(pop['model'])
    else:
        finalized_model = pop['finalized_model']
    model_stats['trainable_params'] = pytorch_count_params(finalized_model)
    layer_stats = {
        'average_conv_width': (ConvLayer, 'kernel_width'),
        'average_conv_height': (ConvLayer, 'kernel_height'),
        'average_conv_filters': (ConvLayer, 'filter_num'),
        'average_pool_width': (PoolingLayer, 'pool_height'),
        'average_pool_stride': (PoolingLayer, 'stride_height')
    }
    for stat, (layer_type, attr) in layer_stats.items():
        model_stats[stat] = get_average_param([pop['model']], layer_type, attr)
Example #18
def one_strategy(self, weighted_population):
    self.current_chosen_population_sample = [self.subject_id]
    for i, pop in enumerate(weighted_population):
        start_time = time.time()
        if NAS_utils.check_age(pop):
            weighted_population[i] = weighted_population[i - 1]
            weighted_population[i]['train_time'] = 0
            weighted_population[i]['num_epochs'] = 0
            continue
        finalized_model = finalize_model(pop['model'])
        self.current_model_index = i
        final_time, evaluations, model, model_state, num_epochs = \
            self.activate_model_evaluation(finalized_model, pop['model_state'],
                                           subject=self.subject_id)
        if global_vars.get('grid_as_ensemble') and global_vars.get(
                'delete_finalized_models'):
            pop['weighted_avg_params'] = model
        self.current_model_index = -1
        NAS_utils.add_evaluations_to_weighted_population(
            weighted_population[i], evaluations)
        weighted_population[i]['model_state'] = model_state
        weighted_population[i]['train_time'] = final_time
        weighted_population[i]['finalized_model'] = model
        weighted_population[i]['num_epochs'] = num_epochs
        end_time = time.time()
        show_progress(end_time - start_time, self.exp_name)
        print(f'trained model {i + 1} in generation {self.current_generation}')
Example #19
def random_grid_model(dim):
    layer_grid = create_empty_copy(
        nx.to_directed(nx.grid_2d_graph(dim[0], dim[1])))
    for node in layer_grid.nodes.values():
        if global_vars.get('simple_start'):
            node['layer'] = IdentityLayer()
        else:
            node['layer'] = random_layer()
    layer_grid.add_node('input')
    layer_grid.nodes['input']['layer'] = IdentityLayer()
    if global_vars.get('grid_as_ensemble'):
        for row in range(dim[0]):
            layer_grid.add_node(f'output_conv_{row}')
            layer_grid.nodes[f'output_conv_{row}']['layer'] = IdentityLayer()
            layer_grid.add_edge((row, dim[1] - 1), f'output_conv_{row}')
    else:
        layer_grid.add_node('output_conv')
        layer_grid.nodes['output_conv']['layer'] = IdentityLayer()
        layer_grid.add_edge((0, dim[1] - 1), 'output_conv')
    layer_grid.add_edge('input', (0, 0))
    for i in range(dim[1] - 1):
        layer_grid.add_edge((0, i), (0, i + 1))
    layer_grid.graph['height'] = dim[0]
    layer_grid.graph['width'] = dim[1]
    if global_vars.get('parallel_paths_experiment'):
        set_parallel_paths(layer_grid)
    if check_legal_grid_model(layer_grid):
        return layer_grid
    else:
        return random_grid_model(dim)
Example #20
def train_model_for_netflow(model, dataset, trainer):
    print(f'start training model: {type(model).__name__}')
    if type(model).__name__ == 'MultiOutputRegressor':
        train_sklearn_for_netflow(model, dataset)
    elif type(model).__name__ == 'ImageRegressor':
        train_autokeras_for_netflow(model, dataset)
    elif 'Ensemble' in type(model).__name__:
        for mod in model.models:
            mod.cuda()
            mod.train()
            train_model_for_netflow(mod, dataset, trainer)
        if type(model) == BasicEnsemble:
            model.freeze_child_models(False)
            trainer.train_model(model, dataset, final_evaluation=True)
            model.freeze_child_models(True)
    else:
        if (global_vars.get('dataset') in ['solar', 'electricity', 'exchange_rate']
                and global_vars.get('training_method') == 'LSTNet'):
            data = Data_utility(
                f'../EEGNAS/EEGNAS/data/MTS_benchmarks/'
                f'{global_vars.get("dataset")}.txt',
                0.6, 0.2, device='cuda', window=24 * 7, horizon=12)
            optim = Optim(model.parameters(), 'adam', 0.001, 10.)
            # size_average=False is deprecated; reduction='sum' is equivalent
            criterion = torch.nn.MSELoss(reduction='sum')
            MTS_train(model, data, criterion, optim, 100, 128)
        else:
            if type(model).__name__ == 'Sequential' and global_vars.get('skip_cnn_training'):
                print('skipping CNN training')
                return
            trainer.train_model(model, dataset, final_evaluation=True)
Example #21
def breed_layers_modules(first_model,
                         second_model,
                         first_model_state=None,
                         second_model_state=None,
                         cut_point=None):
    second_model = copy.deepcopy(second_model)
    if cut_point is None:
        cut_point = random.randint(0, len(first_model) - 1)
    for i in range(cut_point):
        second_model[i] = first_model[i]
    save_weights = global_vars.get(
        'inherit_weights_crossover') and global_vars.get(
            'inherit_weights_normal')
    if check_legal_model(second_model):
        if save_weights:
            finalized_new_model = finalize_model(second_model)
            finalized_new_model_state = finalized_new_model.state_dict()
            if None not in [first_model_state, second_model_state]:
                for i in range(cut_point):
                    add_layer_to_state(finalized_new_model_state,
                                       second_model[i], i, first_model_state)
                for i in range(cut_point + 1, global_vars.get('num_layers')):
                    add_layer_to_state(finalized_new_model_state,
                                       second_model[i - cut_point], i,
                                       second_model_state)
        else:
            finalized_new_model_state = None
        return second_model, finalized_new_model_state, cut_point
    else:
        global_vars.set('failed_breedings',
                        global_vars.get('failed_breedings') + 1)
        return None, None, None
Example #22
def get_settings():
    if global_vars.get('cropping'):
        global_vars.set('original_input_time_len', global_vars.get('input_time_len'))
        global_vars.set('input_time_len', global_vars.get('input_time_cropping'))
        stop_criterion, iterator, loss_function, monitors = get_cropped_settings()
    else:
        stop_criterion, iterator, loss_function, monitors = get_normal_settings()
    return stop_criterion, iterator, loss_function, monitors
Example #23
def __init__(self, X, y, y_type=np.longlong):
    self.X = np.array(X, dtype=np.float32)
    if global_vars.get('autoencoder'):
        y_type = np.float32
    if global_vars.get('problem') == 'regression':
        self.y = np.array(y)
    else:
        self.y = np.array(y, dtype=y_type)
Example #24
def calculate_ensemble_fitness(weighted_population, ensemble):
    if global_vars.get('cross_subject'):
        ensemble_fit = 0
        for subject in range(1, global_vars.get('num_subjects') + 1):
            ensemble_fit += one_ensemble_fitness(weighted_population, ensemble)
        return ensemble_fit / global_vars.get('num_subjects')
    else:
        return one_ensemble_fitness(weighted_population, ensemble)
Example #25
def __init__(self, models):
    super(AveragingEnsemble, self).__init__()
    self.avg_layer = LinearWeightedAvg(global_vars.get('n_classes'),
                                       global_vars.get('ensemble_size'),
                                       true_avg=True)
    self.models = models
    self.softmax = nn.Softmax(dim=1)  # softmax over the class dimension
    self.flatten = _squeeze_final_output()
Example #26
def _stack_input_by_time(x):
    if global_vars.config['DEFAULT']['channel_dim'] == 'one':
        return x.view(x.shape[0], -1,
                      int(x.shape[2] / global_vars.get('time_factor')),
                      x.shape[3])
    else:
        return x.view(x.shape[0], x.shape[1],
                      int(x.shape[2] / global_vars.get('time_factor')), -1)
Example #27
def train_time_penalty(weighted_population):
    # rank models by training time (fastest first); slower models lose a
    # larger fraction of their fitness
    train_time_indices = [
        i[0] for i in sorted(enumerate(weighted_population),
                             key=lambda x: x[1]['train_time'])
    ]
    for rank, idx in enumerate(train_time_indices):
        weighted_population[idx]['fitness'] -= (
            (rank / global_vars.get('pop_size')) *
            weighted_population[idx]['fitness'] *
            global_vars.get('penalty_factor'))
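
A quick check of the penalty arithmetic on a toy population (made-up numbers; pop_size and penalty_factor stand in for the configured values):

# Toy demonstration of the rank-based train-time penalty.
pop = [{'fitness': 0.9, 'train_time': 30},
       {'fitness': 0.8, 'train_time': 10},
       {'fitness': 0.7, 'train_time': 20}]
pop_size, penalty_factor = 3, 0.1
order = [i for i, p in sorted(enumerate(pop), key=lambda x: x[1]['train_time'])]
for rank, idx in enumerate(order):
    pop[idx]['fitness'] -= (rank / pop_size) * pop[idx]['fitness'] * penalty_factor
print([round(p['fitness'], 4) for p in pop])
# the fastest model (rank 0) keeps its fitness; the slowest loses the most
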
Example #28
    def ea_deap(self,
                population,
                toolbox,
                ngen,
                stats=None,
                verbose=__debug__):
        history = History()
        toolbox.decorate("mate", history.decorator)
        history.update(population)
        logbook = tools.Logbook()
        logbook.header = ['gen', 'nevals'] + (stats.fields if stats else [])

        # Evaluate all individuals
        fitnesses = toolbox.map(toolbox.evaluate, population)
        for ind, fit in zip(population, fitnesses):
            ind.fitness.values = fit

        record = stats.compile(population) if stats else {}
        logbook.record(gen=0, nevals=len(population), **record)
        if verbose:
            print(logbook.stream)

        # Begin the generational process
        for gen in range(1, ngen + 1):
            self.current_generation += 1
            # Select the next generation individuals
            old_offspring = deepcopy(population)
            offspring = toolbox.select(population, len(population))
            # Vary the pool of individuals
            new_offspring = toolbox.mate(deepcopy(offspring), toolbox)
            hash_models_deap(old_offspring, new_offspring, self.genome_set,
                             self.models_set)
            self.update_mutation_rate()
            # Evaluate the individuals with an invalid fitness
            fitnesses = toolbox.map(toolbox.evaluate, new_offspring)
            for ind, fit in zip(new_offspring, fitnesses):
                ind.fitness.values = fit
                ind['age'] += 1

            # Replace the current population by the offspring
            population[:] = new_offspring

            # Append the current generation statistics to the logbook
            record = stats.compile(population) if stats else {}
            logbook.record(gen=gen, nevals=len(population), **record)
            if verbose:
                print(logbook.stream)
            pop_stats = self.calculate_stats(self.population)
            for stat, val in pop_stats.items():
                global_vars.get('sacred_ex').log_scalar(
                    f'avg_{stat}', val, self.current_generation)
            self.write_to_csv({k: str(v)
                               for k, v in pop_stats.items()},
                              self.current_generation)
            self.print_to_evolution_file(self.population,
                                         self.current_generation)
        return population, logbook
Example #29
def show_progress(train_time, exp_name):
    global model_train_times
    total_trainings = global_vars.get('num_generations') * global_vars.get(
        'pop_size') * len(global_vars.get('subjects_to_check'))
    model_train_times.append(train_time)
    avg_model_train_time = sum(model_train_times) / len(model_train_times)
    time_left = (total_trainings -
                 len(model_train_times)) * avg_model_train_time
    print(f"Experiment: {exp_name}, time left: {time_f(time_left)}")
Example #30
def one_ensemble_fitness(weighted_population, ensemble):
    if global_vars.get('cross_subject'):
        ensemble_fit = 0
        for subject in global_vars.get('subjects_to_check'):
            ensemble_fit += one_subject_one_ensemble_fitness(
                weighted_population, ensemble, str_prefix=f'{subject}_')
        return ensemble_fit / len(global_vars.get('subjects_to_check'))
    else:
        return one_subject_one_ensemble_fitness(weighted_population, ensemble)