Example #1
    def fit(self, x_train, y_train, x_test, y_test, y_true):
        if not tf.test.is_gpu_available():  # must be called: the bare function object is always truthy
            print('error: no GPU available')
            exit()
        best_df_metrics = None
        best_valid_loss = np.inf

        output_directory_root = self.output_directory 
        # grid search 
        for pool_factor in self.pool_factors:
            for filter_size in self.filter_sizes:
                self.output_directory = output_directory_root + '/hyper_param_search/' + \
                    'pool_factor_' + str(pool_factor) + '/filter_size_' + str(filter_size) + '/'
                create_directory(self.output_directory)
                df_metrics, model, valid_loss = self.train(x_train, y_train, x_test,
                                                           y_test, y_true, pool_factor, filter_size)

                if (valid_loss < best_valid_loss): 
                    best_valid_loss = valid_loss
                    best_df_metrics = df_metrics
                    best_df_metrics.to_csv(output_directory_root+'df_metrics.csv', index=False)
                    model.save(output_directory_root+'best_model.hdf5')

                model = None
                # clear memory
                keras.backend.clear_session()
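All of these examples revolve around a create_directory helper whose implementation is never shown. A minimal sketch consistent with how the examples use it (Examples #23 and #27 treat a None return as "directory already exists, skip this run"); the project's actual helper may differ:

import os

def create_directory(directory_path):
    # return None if the directory already exists, so callers can skip finished runs
    if os.path.exists(directory_path):
        return None
    try:
        os.makedirs(directory_path)
    except OSError:
        # already created by a concurrent run
        return None
    return directory_path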
Example #2
def init_train(root_dir='./', results_dir='./'):
    # this is the code used to launch an experiment on a dataset
    archive_name = 'UCRArchive_2018'
    dataset_name = 'MITECG'
    classifier_names = ['fcn', 'resnet', 'mlp', 'tlenet']
    itr = '_itr_1'

    for classifier_name in classifier_names:
        print(
            f'Starting training with the {classifier_name} algorithm'
        )
        output_directory = results_dir + '/' + classifier_name + '/' + archive_name + itr + '/' + \
                           dataset_name + '/'
        test_dir_df_metrics = output_directory + 'df_metrics.csv'
        print('Method: ', archive_name, dataset_name, classifier_name, itr)
        if os.path.exists(test_dir_df_metrics):
            print('Already done')
        else:
            create_directory(output_directory)
            datasets_dict = read_dataset(root_dir, archive_name, dataset_name)
            fit_classifier(datasets_dict, dataset_name, classifier_name,
                           output_directory)
            print('DONE')
            # the creation of this directory means the experiment completed successfully
            create_directory(output_directory + '/DONE')
Example #3
    def __init__(self, output_directory, para, input_shape):
        self.output_directory = output_directory
        print(self.output_directory)
        create_directory(self.output_directory)

        print(self.output_directory)

        self.model = self.build_model(input_shape, para)
Example #4
def create_test_data(consumption_data_with_property, property):
    create_directory(PREPARED_DATA_ROOT_DIRECTORY + property)
    with open(
            PREPARED_DATA_ROOT_DIRECTORY + property + '/' + property + '_test',
            'w+') as myfile:
        wr = csv.writer(myfile)
        # as originally written, the counter started at len(...) so the while
        # loop never wrote a single row; write every row of the test slice instead
        for row in consumption_data_with_property:
            wr.writerow(row)
Example #5
def create_training_data(consumption_data_with_property, property):
    create_directory(PREPARED_DATA_ROOT_DIRECTORY + property)
    with open(
            PREPARED_DATA_ROOT_DIRECTORY + property + '/' + property +
            '_train', 'w+') as myfile:
        wr = csv.writer(myfile)
        # the original counter-based guard was always true; a plain loop is equivalent
        for row in consumption_data_with_property:
            wr.writerow(row)
Example #6
    def create_explanations(self):
        train_data = self.get_training_data()
        test_data = self.get_explainees()
        feature_names = self.get_feature_names(train_data[0])
        background = train_data[np.random.choice(
            train_data.shape[0], 100, replace=False
        )]  # 100 randomly chosen training examples form the background dataset to integrate over
        # pass the background sample (not the full training set) to the explainer;
        # as originally written, `background` was computed but never used
        explainer = shap.KernelExplainer(self.model.predict,
                                         background,
                                         link="logit")
        shap_values = explainer.shap_values(test_data, nsamples=100)
        counter = 0
        create_directory(ROOT_DIRECTORY + 'explanations')
Example #7
def write_predictions(w_path, predictions):
    create_directory(w_path)
    count = 0
    for doc, res in predictions.items():
        f_path = join_path(w_path, '{}.ann'.format(doc))
        with codecs.open(f_path, 'w') as f:
            ent_count = 0
            for ent in res:
                ent_count += 1
                f.write('T{}\t{} {} {} {}\n'.format(ent_count, ent['class'],
                                                    ent['st_idx'],
                                                    ent['end_idx'],
                                                    ent['text']))
        count += 1
    print('{} files created: {}'.format(count, w_path))
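For reference, each line written by write_predictions follows a BRAT-style standoff layout. A hypothetical entity {'class': 'Disease', 'st_idx': 10, 'end_idx': 18, 'text': 'diabetes'} would produce the line:

T1	Disease 10 18 diabetes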
Example #8
def write_data_for_splits(train_split, dev_split, write_path):
    create_directory(write_path)
    write_pickle(train_split, join_path(write_path, 'train-token_span.pkl'))
    write_pickle(dev_split, join_path(write_path, 'dev-token_span.pkl'))
    parse_from_list(txt_files=list(train_split.keys()),
                    w_path=join_path(write_path, 'train.txt'),
                    doc_token_span_w_path=join_path(write_path,
                                                    'train-token_span.pkl'),
                    train_data_path=join_path(data_path, 'train/'),
                    dev_data_path=join_path(data_path, 'dev/'))
    parse_from_list(txt_files=list(dev_split.keys()),
                    w_path=join_path(write_path, 'dev.txt'),
                    doc_token_span_w_path=join_path(write_path,
                                                    'dev-token_span.pkl'),
                    train_data_path=join_path(data_path, 'train/'),
                    dev_data_path=join_path(data_path, 'dev/'))
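The helpers join_path and write_pickle are not defined in this snippet. Plausible minimal versions, inferred only from the argument order used above (assumptions, not the project's actual code):

import os
import pickle

def join_path(*parts):
    # assumed to be a thin alias for os.path.join
    return os.path.join(*parts)

def write_pickle(obj, path):
    # assumed: serialize obj to path using pickle
    with open(path, 'wb') as f:
        pickle.dump(obj, f)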
Example #9
    def __init__(self,
                 output_directory,
                 input_shape,
                 nb_classes,
                 verbose=False):

        self.archive_name = ARCHIVE_NAMES[0]

        self.dataset_name = output_directory.split('/')[-2]

        # this should contain the transferred datasets
        self.transfer_directory = '/b/home/uha/hfawaz-datas/dl-tsc/transfer-learning-results/fcn_2000_train_all/'

        self.verbose = verbose

        self.output_directory = output_directory

        create_directory(self.output_directory)
Example #10
    def fit(self, x_train, y_train, x_test, y_test, y_true):
        best_train_acc = -1
        best_df_metrics = None

        self.num_dim = x_train.shape[2]
        self.T = x_train.shape[1]
        self.x_test = x_test
        self.y_true = y_true
        self.y_test = y_test

        # split train to validation set to choose best hyper parameters
        self.x_train, self.x_val, self.y_train, self.y_val = \
            train_test_split(x_train, y_train, test_size=0.2)
        self.N = self.x_train.shape[0]

        # limit the hyperparameter search if dataset is too big
        if self.x_train.shape[0] > 1000 or self.x_test.shape[0] > 1000:
            for config in self.configs:
                config['N_x'] = 100
            self.configs = [self.configs[0], self.configs[1], self.configs[2]]

        output_directory_root = self.output_directory
        # grid search
        for idx_config in range(len(self.configs)):
            for rho in self.rho_s:
                self.rho = rho
                self.N_x = self.configs[idx_config]['N_x']
                self.connect = self.configs[idx_config]['connect']
                self.scaleW_in = self.configs[idx_config]['scaleW_in']
                self.lamda = self.configs[idx_config]['lamda']
                self.output_directory = output_directory_root + '/hyper_param_search/' + \
                    'config_' + str(idx_config) + '/rho_' + str(rho) + '/'
                create_directory(self.output_directory)
                df_metrics, train_acc = self.train()

                if best_train_acc < train_acc:
                    best_train_acc = train_acc
                    best_df_metrics = df_metrics
                    best_df_metrics.to_csv(output_directory_root + 'df_metrics.csv',
                                           index=False)
                    np.savetxt(output_directory_root + 'W_in.txt', self.W_in)
                    np.savetxt(output_directory_root + 'W.txt', self.W)
                    np.savetxt(output_directory_root + 'W_out.txt', self.W_out)

                gc.collect()
        # return the metrics of the best configuration rather than the last one trained
        return best_df_metrics['accuracy']
Example #11
    def __init__(self,
                 output_directory,
                 input_shape,
                 nb_classes,
                 verbose=False):
        # full candidate list, immediately overridden by the subset below
        self.classifiers = ['mlp', 'fcn', 'resnet', 'encoder', 'mcdcnn', 'cnn']
        self.classifiers = ['fcn', 'resnet',
                            'encoder']  # this represents NNE in the paper
        out_add = ''
        for cc in self.classifiers:
            out_add = out_add + cc + '-'
        self.archive_name = ARCHIVE_NAMES[0]
        self.output_directory = output_directory.replace(
            'nne', 'nne' + '/' + out_add)
        create_directory(self.output_directory)
        self.dataset_name = output_directory.split('/')[-2]
        self.verbose = verbose
        self.models_dir = output_directory.replace('nne', 'classifier')
        self.iterations = 10
Example #12
def run_cv(args, augmentator, augmentator_name, tmp_output_directory,
           datasets_dict, classifier_name, epochs, start, cv):
    print('\t\taugmentator_name: ', augmentator_name)

    for dataset_name in dataset_names_for_archive[ARCHIVE_NAMES[0]]:

        print('\t\t\tdataset_name: ', dataset_name)

        output_directory = tmp_output_directory + augmentator_name + '/' + dataset_name

        done = check_dir(output_directory)

        if not done:

            create_directory(output_directory)

            cv_fit_classifier_aug(args, augmentator, datasets_dict,
                                  dataset_name, classifier_name, epochs,
                                  output_directory, cv)

            create_directory(output_directory + '/DONE')
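check_dir is not shown in these examples; given the DONE-marker convention used throughout, a plausible sketch (an assumption, not the project's actual code) is:

import os

def check_dir(output_directory):
    # an experiment counts as done once its DONE marker directory exists
    return os.path.exists(output_directory + '/DONE')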
Example #13
    def __init__(self,
                 output_directory,
                 input_shape,
                 nb_classes,
                 verbose=False,
                 nb_iterations=5,
                 clf_name='inception'):
        self.classifiers = [clf_name]
        out_add = ''
        for cc in self.classifiers:
            out_add = out_add + cc + '-'
        self.archive_name = ARCHIVE_NAMES[0]
        self.iterations_to_take = list(range(nb_iterations))
        for cc in self.iterations_to_take:
            out_add = out_add + str(cc) + '-'
        self.output_directory = output_directory.replace(
            'nne', 'nne' + '/' + out_add)
        create_directory(self.output_directory)
        self.dataset_name = output_directory.split('/')[-2]
        self.verbose = verbose
        self.models_dir = output_directory.replace('nne', 'classifier')
Example #14
def main(args):

    print(tf.test.is_gpu_available())
    classifier_name = args.classifier.replace('_', '-')
    # rate = args.dataset.split('-')[-1].split('.')[0]
    lit = os.path.basename(args.dataset).split('_')[1].split('.')[0]

    root_dir = args.root + '/' + classifier_name + '/' + lit + '/'
    if args.itr == '':
        itr = 0
        for prev in os.listdir(root_dir):
            if lit in prev:
                prev_itr = int(prev.split('_')[2])
                itr = np.max((itr, prev_itr + 1))

        sitr = '_itr_' + str(itr)
    else:
        sitr = args.itr
    if sitr == '_itr_0':
        sitr = ''

    print(sitr)

    output_directory = root_dir + lit + sitr + '/'

    print(output_directory)

    test_dir_df_metrics = output_directory + 'df_metrics.csv'

    print('Method: ', args.dataset, classifier_name, sitr)

    if os.path.exists(test_dir_df_metrics):
        print('Already done')
    else:
        create_directory(output_directory)
        fit_classifier(args.dataset, args.train_idx, args.test_idx,
                       classifier_name, output_directory, args.gen_idx)
        print('DONE')

        create_directory(output_directory + '/DONE')
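To make the auto-increment of the iteration suffix concrete, suppose lit == 'ecg' and root_dir already contains these (hypothetical) entries:

# ecg_itr_0/  ->  prev.split('_')[2] == '0'
# ecg_itr_1/  ->  prev.split('_')[2] == '1'
# itr ends up as max(0, 0 + 1, 1 + 1) == 2, so sitr == '_itr_2'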
Example #15
def create_graph_data(db_file_name, output_dir, colum_list, error_list):
    dataframe = pd.read_csv(db_file_name)
    df_size = len(dataframe)
    print(dataframe.shape)
    print(df_size)

    if os.path.exists(output_dir):
        print('Already done')
    else:
        create_directory(output_dir)

    i = 1
    f = open(output_dir + 'info.txt', mode='wt')
    for colum in colum_list:
        print('colum : ' + colum)
        data = dataframe[colum].values

        # count good/bad values before replacing the sentinel values,
        # otherwise bad_data is always empty
        good_data = np.array(data[np.where(data > -999)])
        bad_data = np.array(data[np.where(data < -999)])

        data = np.where(data < -999, 0, data)

        size = len(data)
        g2 = len(good_data)
        b2 = len(bad_data)

        remain = g2 / size * 100

        sns.distplot(good_data)
        info = '[' + colum + ']' + ' size : ' + str(size) + ' , good : ' + str(
            g2) + ' , bad : ' + str(b2) + ' , per : ' + str(int(remain)) + '% '

        plt.title(info)
        f.write(info + '\n')
        file_name = output_dir + str(i) + '_' + colum + '.png'
        plt.savefig(file_name)
        plt.clf()  # clear the figure so distributions do not pile up across columns
        # plt.show()
        i += 1
    f.close()
    return
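Note that seaborn.distplot is deprecated in recent seaborn releases; a near-equivalent call with the newer API would be:

sns.histplot(good_data, kde=True, stat='density')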
Example #16
def run_iterations(args, augmentator, augmentator_name, tmp_output_directory,
                   iterations, datasets_dict, classifier_name, epochs, start):
    print('\t\twithout augmentation: ', augmentator_name)

    for dataset_name in dataset_names_for_archive[ARCHIVE_NAMES[0]]:

        print('\t\t\tdataset_name: ', dataset_name)

        upper_dir = tmp_output_directory + augmentator_name + '/' + dataset_name

        done = check_dir(upper_dir)

        if not done:
            #save all the predictions and the corresponding true class
            predicted_y = []
            expected_y = []

            for iter in range(iterations):
                print('\t\t\t\titer', iter)
                trr = '_itr_' + str(iter)

                output_directory = upper_dir + '/' + trr + '/'
                #print(output_directory)

                create_directory(output_directory)

                y_pred, y_true = fit_classifier_aug(args, augmentator,
                                                    datasets_dict,
                                                    dataset_name,
                                                    classifier_name, epochs,
                                                    output_directory)

                print('\t\t\t\tDONE')

                # the creation of this directory means this iteration completed successfully
                create_directory(output_directory + '/DONE')

                if y_pred.shape == y_true.shape:
                    predicted_y.extend(y_pred)
                    expected_y.extend(y_true)
                else:
                    raise Exception('shape mismatch: y_pred.shape != y_true.shape')

            totalduration = time.time() - start
            df_metrics = calculate_metrics(expected_y, predicted_y,
                                           totalduration)
            df_metrics.to_csv(upper_dir + '/avg_metrics.csv', index=False)
            create_directory(upper_dir + '/DONE')

            print('iterations DONE!')
            print(df_metrics)
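calculate_metrics is used by several examples but never shown. Judging from its call signature and the 'accuracy' column accessed in Example #10, a minimal sketch might look like this (the project's real helper likely reports more metrics):

import pandas as pd
from sklearn.metrics import accuracy_score, precision_score, recall_score

def calculate_metrics(y_true, y_pred, duration):
    # one-row DataFrame, matching how the examples index df_metrics['accuracy']
    return pd.DataFrame({
        'precision': [precision_score(y_true, y_pred, average='macro')],
        'accuracy': [accuracy_score(y_true, y_pred)],
        'recall': [recall_score(y_true, y_pred, average='macro')],
        'duration': [duration],
    })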
Example #17
def signal_handler(sig, frame, video_writer):
    print('You pressed Ctrl+C!')
    video_writer.release()
    sys.exit(0)

if __name__ == "__main__":
    args = get_args()
    model = load_model(args, nclass=11, temporal=args.demo_temporal)

    if args.demo_img_folder is not None:
        # rgb_demo_dataset = DeepSightDemoRGB(args.demo_img_folder)
        rgb_demo_dataset = DeepSightDemoDepth(args.demo_img_folder)
        data_loader = DataLoader(rgb_demo_dataset, batch_size=32, shuffle=True)
        pred_dir = os.path.join(args.demo_img_folder, "pred")
        transform_dir = os.path.join(args.demo_img_folder, "transform")
        create_directory(pred_dir)
        create_directory(transform_dir)
        for i, sample in enumerate(tqdm(data_loader)):
            image, target, names = sample['image'], sample['label'], sample['id']
            imgs, imgs_np, masks, flow = inference(image, model)
            save_image(flow, os.path.join(pred_dir, "flow.png"))
            # use a distinct loop variable: reusing `i` would clobber the batch index
            for j in range(len(imgs)):
                masks[j].save(os.path.join(pred_dir, names[j]))
                imgs[j].save(os.path.join(transform_dir, names[j]))

        # create the video
        # images = sorted(get_files(transform_dir))
        images = sorted(get_files(transform_dir), key=lambda x: int(x.split(".")[0]))
        # images = sorted(get_files(transform_dir), key=lambda x: int(x.split(".")[0][14:]))
        print(images)
        fig = None
Example #18
            for iter in range(ITERATIONS):
                print('\t\titer', iter)

                trr = ''
                if iter != 0:
                    trr = '_itr_' + str(iter)

                tmp_output_directory = root_dir + '/results/' + classifier_name + '/' + archive_name + trr + '/'

                for dataset_name in utils.constants.dataset_names_for_archive[archive_name]:
                    print('\t\t\tdataset_name: ', dataset_name)

                    output_directory = tmp_output_directory + dataset_name + '/'

                    create_directory(output_directory)

                    fit_classifier()

                    print('\t\t\t\tDONE')

                    # the creation of this directory means the experiment completed successfully
                    create_directory(output_directory + '/DONE')

elif sys.argv[1] == 'transform_mts_to_ucr_format':
    transform_mts_to_ucr_format()
elif sys.argv[1] == 'visualize_filter':
    visualize_filter(root_dir)
elif sys.argv[1] == 'viz_for_survey_paper':
    viz_for_survey_paper(root_dir)
elif sys.argv[1] == 'viz_cam':
Example #19
    argv = 'visualize_transfer_learning'

    if argv == 'visualize_transfer_learning':
        plot_utils.visualize_transfer_learning(root_dir)

    elif argv == 'training_from_scratch':
        for dataset_name in ALL_DATASET_NAMES:
            # get the directory of the model for this current dataset_name
            scratch_output_dir = os.path.join(scratch_dir_root, dataset_name,
                                              '')
            write_output_dir = scratch_output_dir
            # set model output path
            model_save_path = os.path.join(scratch_output_dir,
                                           'best_model.hdf5')
            # create directory
            create_directory(scratch_output_dir)

            x_train, x_test, y_train, y_test = read_dataset(
                root_dir, dataset_name)
            callbacks = callback_maker(model_save_path, scratch_output_dir)
            train(x_train, y_train, x_test, y_test)

    elif argv == 'transfer_learning':
        # loop through all datasets
        for dataset_name in ALL_DATASET_NAMES:
            # get the directory of the model for this current dataset_name
            scratch_output_dir = os.path.join(scratch_dir_root, dataset_name,
                                              '')
            # loop through all the datasets to transfer to the learning
            for dataset_name_tranfer in ALL_DATASET_NAMES:
            # check if it's the same dataset
Example #20
set_session(tf.Session(config=config))

from utils.utils import transform_labels
from utils.utils import create_synthetic_dataset
from utils.utils import create_directory
from utils.utils import check_if_file_exits
from utils.utils import generate_array_of_colors

import matplotlib

import matplotlib.pyplot as plt

root_dir = '/b/home/uha/hfawaz-datas/temp-dl-tsc/'
root_output_directory = root_dir + 'receptive-field/exp/'
root_output_directory_df = root_dir + 'receptive-field/'
create_directory(root_output_directory_df)

BATCH_SIZE = 128
NB_EPOCHS = 500

pattern_lens = [[0.1]]

pattern_poss = [[0.1, 0.65]]
# uncomment the following to experiment with the number of classes in a dataset
# pattern_poss = [
#     [0.1, 0.65],
#     [0.1, 0.3, 0.65],
#     [0.1, 0.3, 0.5, 0.65],
#     [0.1, 0.3, 0.5, 0.7, 0.9],
#     [0.1, 0.2, 0.3, 0.4, 0.5, 0.65],
#     [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7],
Example #21
    def fit(self, x_train, y_train, x_val, y_val, y_true):

        self.y_true = y_true

        self.x_test = x_val
        self.y_test = y_val

        self.x_train, self.x_val, self.y_train, self.y_val = \
            train_test_split(x_train, y_train, test_size=0.2)

        # 1. Tune ESN and num_filter
        self.x_train, self.y_train, self.x_val, self.x_test, model_init, \
            hist_init, duration_init, acc_init, num_filter = self.tune_esn()

        current_acc = acc_init
        hist_final = hist_init
        model_final = model_init
        duration_final = duration_init
        ratio_final = [0.1, 0.2]

        for ratio in self.ratio[1:]:

            # 2. Build model
            input_shape = (self.len_series, self.units, 1)
            model = self.build_model(
                input_shape, self.nb_classes, self.len_series, ratio, num_filter)
            # if self.verbose:
            #     model.summary()

            # 3. Train Model
            batch = self.batch
            epoch = self.epoch

            start_time = time.time()

            hist = model.fit(self.x_train, self.y_train, batch_size=batch, epochs=epoch,
                             verbose=False, validation_data=(self.x_val, self.y_val), callbacks=self.callbacks)

            duration = time.time() - start_time

            model_loss, model_acc = model.evaluate(
                self.x_val, self.y_val, verbose=False)

            print('val_loss: {0}, val_acc: {1}'.format(
                model_loss, model_acc))

            y_pred = model.predict(self.x_test)
            # convert the predicted from binary to integer
            y_pred = np.argmax(y_pred, axis=1)
            df_metrics = calculate_metrics(self.y_true, y_pred, duration)

            temp_output_dir = self.output_dir + str(self.it) + '/'
            create_directory(temp_output_dir)

            df_metrics.to_csv(temp_output_dir +
                              'df_metrics.csv', index=False)
            model.save(temp_output_dir + 'model.hdf5')

            params = [self.final_params_selected[0],
                      self.final_params_selected[1], self.final_params_selected[2], ratio]
            param_print = pd.DataFrame(np.array([params], dtype=object), columns=[
                'input_scaling', 'connectivity', 'num_filter', 'ratio'])
            param_print.to_csv(temp_output_dir + 'df_params.csv', index=False)

            if (model_acc > current_acc):
                print('New winner')
                hist_final = hist
                model_final = model
                duration_final = duration
                ratio_final = ratio
                current_acc = model_acc

            keras.backend.clear_session()
            self.it += 1

        print('Final ratio: {0}'.format(ratio_final))
        self.final_params_selected.append(ratio_final)
        self.model = model_final
        self.hist = hist_final

        y_pred = self.model.predict(self.x_test)

        # convert the predicted from binary to integer
        y_pred = np.argmax(y_pred, axis=1)

        param_print = pd.DataFrame(np.array([self.final_params_selected], dtype=object), columns=[
                                   'input_scaling', 'connectivity', 'num_filter', 'ratio'])

        param_print.to_csv(self.output_dir +
                           'df_final_params.csv', index=False)

        save_logs(self.output_dir, self.hist, y_pred, self.y_true,
                  duration_final, self.verbose, lr=False)

        keras.backend.clear_session()
Example #22
                trr = ''
                if _iter != 0:
                    trr = '_itr_{}'.format(_iter)

                tmp_output_directory = os.path.join(ROOT_DIR, 'results',
                                                    classifier_name,
                                                    archive_name + trr)

                for dataset_name in constants.dataset_names_for_archive[
                        archive_name]:
                    print('\t\t\tdataset_name: ', dataset_name)

                    output_directory = os.path.join(tmp_output_directory,
                                                    dataset_name)
                    utils.create_directory(output_directory)

                    dataset = datasets_dict[dataset_name]
                    fit_classifier(classifier_name, dataset, output_directory)

                    print('\t\t\t\tDONE')

                    # the creation of this directory means the experiment completed successfully
                    utils.create_directory(
                        os.path.join(output_directory, 'DONE'))

elif sys.argv[1] == 'transform_mts_to_ucr_format':
    utils.transform_mts_to_ucr_format()
elif sys.argv[1] == 'visualize_filter':
    utils.visualize_filter(ROOT_DIR)
elif sys.argv[1] == 'viz_for_survey_paper':
Example #23
        print('dataset_name: ', dataset_name)
        # read dataset
        x_train, y_train, x_test, y_test, nb_classes, classes, max_prototypes, \
        init_clusters = read_data_from_dataset(use_init_clusters=False)

        # specify the output directory for this experiment
        output_dir = root_dir_output + archive_name + '/' + dataset_name + '/'

        _, classes_counts = np.unique(y_train, return_counts=True)
        # this means that all classes will have a number of time series equal to
        # nb_prototypes
        nb_prototypes = classes_counts.max()

        temp = output_dir
        # create the directory if not exists
        output_dir = create_directory(output_dir)
        # check if directory already exists
        if output_dir is None:
            print('Already_done')
            print(temp)
            continue

        if not do_ensemble:
            # create the resnet classifier
            classifier = Classifier_RESNET(
                output_dir,
                x_train.shape[1:],
                nb_classes,
                nb_prototypes,
                classes,
                verbose=True,
Example #24
    def fit(self, x_train, y_train, x_test, y_test, y_true):
        # no training since models are pre-trained
        start_time = time.time()

        y_pred = np.zeros(shape=y_test.shape)

        nb_models = 0  # count of individual model predictions accumulated

        # loop through all classifiers
        for model_name in self.classifiers:
            # loop through different initialization of classifiers
            for itr in range(self.iterations):
                if itr == 0:
                    itr_str = ''
                else:
                    itr_str = '_itr_' + str(itr)

                curr_archive_name = self.archive_name + itr_str

                curr_dir = self.models_dir.replace('classifier',
                                                   model_name).replace(
                                                       self.archive_name,
                                                       curr_archive_name)

                model = self.create_classifier(model_name,
                                               None,
                                               None,
                                               curr_dir,
                                               build=False)

                predictions_file_name = curr_dir + 'y_pred.npy'
                # check if predictions already made
                if check_if_file_exits(predictions_file_name):
                    # then load only the predictions from the file
                    curr_y_pred = np.load(predictions_file_name)
                else:
                    # then compute the predictions
                    curr_y_pred = model.predict(x_test,
                                                y_true,
                                                x_train,
                                                y_train,
                                                y_test,
                                                return_df_metrics=False)
                    keras.backend.clear_session()

                    np.save(predictions_file_name, curr_y_pred)

                y_pred = y_pred + curr_y_pred

                nb_models += 1

        # average the accumulated predictions
        y_pred = y_pred / nb_models

        # save predictions
        np.save(self.output_directory + 'y_pred.npy', y_pred)

        # convert the predicted from binary to integer
        y_pred = np.argmax(y_pred, axis=1)

        duration = time.time() - start_time

        df_metrics = calculate_metrics(y_true, y_pred, duration)

        df_metrics.to_csv(self.output_directory + 'df_metrics.csv',
                          index=False)

        # the creation of this directory means the experiment completed successfully
        create_directory(self.output_directory + '/DONE')

        gc.collect()
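check_if_file_exits (the spelling comes from the import in Example #20) is presumably a thin wrapper over the standard library; a one-line sketch consistent with its use here (an assumption, not the project's actual code):

import os

def check_if_file_exits(file_name):
    # assumed equivalent to os.path.exists
    return os.path.exists(file_name)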
Example #25
	if Regressor_name == 'cnn':  # Time-CNN
		from Regressors import cnn
		return cnn.Regressor_CNN(output_directory, input_shape)


############################################### main
n_bins = [200]
noise_scale = [0.00055]
l2 = [0]
dropout = [0]
kernel = [5]
min_filter = [16]
batch_size = [128]


Regressor_name = sys.argv[1]

# `product` here is presumably itertools.product
for x in product(kernel, min_filter, batch_size, l2, dropout, n_bins, noise_scale):

	output_directory = f"{root_dir}results/{str(Regressor_name)}/para{x}"
	create_directory(output_directory)

	print('created directory:', output_directory)

	print('Method: ', Regressor_name)

	fit_Regressor(para=x, root_dir=root_dir, output_directory=output_directory)

	print('DONE')

Example #26
    def fit(self, x_train, y_train, x_val, y_val, y_true):
        ##################################
        ##Train n individual classifiers##
        ##################################
        for it in range(self.num_ensemble_it):
            if it == 0:
                itr_str = 'network'
                verbosity = self.verbose
            else:
                itr_str = 'network'+str(it)
                verbosity = False

            tmp_output_dir = self.output_dir + itr_str + '/'
            create_directory(tmp_output_dir)

            inception = Classifier_INCEPTION(
                tmp_output_dir, self.input_shape, self.nb_classes, verbose=verbosity)
            
            print('Fitting network {0} out of {1}'.format(
                it+1, self.num_ensemble_it))
            
            inception.fit(x_train, y_train, x_val, y_val, y_true)

        #######################################
        ##Ensemble the individual classifiers##
        #######################################
        start_time = time.time()

        y_pred = np.zeros(shape=y_val.shape)

        ll = 0

        for it in range(self.num_ensemble_it):
            if it == 0:
                itr_str = 'network'
            else:
                itr_str = 'network'+str(it)

            classifier_dir = self.output_dir + itr_str + '/'

            predictions_file_name = classifier_dir + 'y_pred.npy'

            curr_y_pred = np.load(predictions_file_name)

            y_pred = y_pred + curr_y_pred

            ll += 1

        # average predictions
        y_pred = y_pred / ll

        # save predictions
        np.save(self.output_dir + 'y_pred.npy', y_pred)

        # convert the predicted from binary to integer
        y_pred = np.argmax(y_pred, axis=1)

        duration = time.time() - start_time

        df_metrics = calculate_metrics(y_true, y_pred, duration)
        print(df_metrics)

        df_metrics.to_csv(self.output_dir + 'df_metrics.csv', index=False)

        gc.collect()
Example #27
for archive_name in ARCHIVE_NAMES:
    # read all datasets
    datasets_dict = read_all_datasets(root_dir, archive_name)
    # loop through all datasets
    for dataset_name in ALL_DATASET_NAMES:
        # get the directory of the model for this current dataset_name
        output_dir = results_dir + archive_name + '/' + dataset_name + '/'
        # loop through all the datasets to transfer to the learning
        for dataset_name_tranfer in ALL_DATASET_NAMES:
            # check if it's the same dataset
            if dataset_name == dataset_name_tranfer:
                continue
            # set the output directory to write new transfer learning results
            write_output_dir = write_dir_root + archive_name + '/' + dataset_name + \
                '/' + dataset_name_tranfer + '/'
            write_output_dir = create_directory(write_output_dir)
            if write_output_dir is None:
                continue
            print('Transferring from ' + dataset_name + ' to ' +
                  dataset_name_tranfer)
            # load the model to transfer to other datasets
            pre_model = keras.models.load_model(output_dir + 'best_model.hdf5')
            # output file path for the new transferred re-trained model
            file_path = write_output_dir + 'best_model.hdf5'
            # callbacks
            # reduce learning rate
            reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='loss',
                                                          factor=0.5,
                                                          patience=50,
                                                          min_lr=0.0001)
Example #28
            for iter in range(ITERATIONS):
                print('\t\titer', iter)

                trr = ''
                if iter != 0:
                    trr = '_itr_' + str(iter)

                tmp_output_directory = root_dir + '/results/' + classifier_name + '/' + archive_name + trr + '/'

                for dataset_name in dataset_names_for_archive[archive_name]:
                    print('\t\t\tdataset_name: ', dataset_name)

                    output_directory = tmp_output_directory + dataset_name + '/'

                    create_directory(output_directory)
                    create_directory(output_directory + '/dp/')
                    create_directory(output_directory + '/normal/')

                    fit_classifier()

                    print('\t\t\t\tDONE')

                    # the creation of this directory means the experiment completed successfully
                    create_directory(output_directory + '/DONE')

elif sys.argv[1] == 'transform_mts_to_ucr_format':
    transform_mts_to_ucr_format()
elif sys.argv[1] == 'visualize_filter':
    visualize_filter(root_dir)
elif sys.argv[1] == 'viz_for_survey_paper':
Example #29
        trr = ''
        if iter != 0:
            trr = '_itr_' + str(iter)

        tmp_output_directory = root_dir + '/results/' + classifier_name + '/' + archive_name + trr + '/'

        for dataset_name in config['ARCHIVE']['data_name_list']:
            print('\t\t\tdataset_name: ', dataset_name)

            x_train, y_train, x_test, y_test, y_true, nb_classes, y_true_train, enc = prepare_data(
            )

            output_directory = tmp_output_directory + dataset_name + '/'

            temp_output_directory = create_directory(output_directory)

            if temp_output_directory is None:
                print('Already_done', tmp_output_directory, dataset_name)
                continue

            fit_classifier()

            print('\t\t\t\tTRAINING DONE')

            # the creation of this directory means the experiment completed successfully
            create_directory(output_directory + '/DONE')

    # run the ensembling of these iterations of Inception
    classifier_name = 'nne'  # test the model
Example #30
else:
    if len(sys.argv) > 5:
        if sys.argv[5] == 'normalize':
            NORMALIZE = True
    # this is the code used to launch an experiment on a dataset
    for archive_name in sys.argv[1].split(','):
        for dataset_name in sys.argv[2].split(','):
            for classifier_name in sys.argv[3].split(','):
                for itr in sys.argv[4].split(','):
                    try:
                        if itr == '_itr_0':
                            itr = ''

                        output_directory_name = root_dir + '/results/' + classifier_name + '/' + archive_name + itr + '/' + \
                                                dataset_name + '/'
                        create_directory(output_directory_name)
                        print('Method: ', archive_name, dataset_name,
                              classifier_name, itr)
                        if os.path.exists(f'{output_directory_name}/DONE'):
                            print('Already done')

                        else:
                            datasets_dict = read_dataset(
                                root_dir, archive_name, dataset_name)
                            res, total_time = fit_classifier()
                            print('DONE')
                            # the creation of this directory means the experiment completed successfully
                            create_directory(output_directory_name + '/DONE')
                            exp_line = add_exp(classifier_name, dataset_name,
                                               itr, total_time, res[0])
                            upload_exp_results_to_gdrive(