Example #1
import torch

# Assumed setup (not shown in the original snippet): device selection.
# 'args', 'prepare_loader', 'prepare_model', 'dropout_rate' and 'train_test'
# come from the surrounding project.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def main():
    print(device)
    trainloader, testloader = prepare_loader(args)
    model = prepare_model(args).to(device)
    # ptname = './MNIST_trained/MNIST_naive_SGD_epoch45.pt'
    ptname = './MNIST_trained/MNIST_naive_Adam_dropout_epoch10.pt'
    model.load_state_dict(torch.load(ptname))
    # Evaluate the loaded model on the test set across a sweep of dropout rates.
    for r in [0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 0.99, 1.0]:
        print('drop rate: {}'.format(r))
        dropout_rate(model, r)
        train_test(model, testloader, 10, train_flag=False)
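The `dropout_rate` helper used above is not shown in the snippet. A minimal sketch of what it could look like, assuming it simply rewrites the probability of every `nn.Dropout` module (the body is an assumption, not the project's actual code):

import torch.nn as nn

def dropout_rate(model: nn.Module, p: float) -> None:
    # Hypothetical helper: set the dropout probability of every
    # nn.Dropout submodule so the model can be evaluated at rate p.
    for module in model.modules():
        if isinstance(module, nn.Dropout):
            module.p = p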
Example #2
File: a.py Project: slimanej4c/amira
    def tf_idf_1(self):
        # Wrap each statement in its own single-element list.
        li2 = [[i] for i in self.statement]

        # Layout geometry derived from the window size.
        y1 = self.y // 20
        y2 = self.y // 14
        y3 = self.y - y1 - y2
        x3 = self.x // 5
        h = y3 - y2
        w = 3 * x3 - 30

        self.go = tf_idf1.tf_idf(li2[0:1000], self.centre_frame1, h, w)
        self.bafficheidf_train = Button(self.haut_frame3, text='afficher idf_tf train',
                                        command=self.go.afficher_idf,
                                        width=self.x // 40, bg='#0000FF',
                                        fg='white', relief=GROOVE)
        self.bafficheidf_train.grid(row=0, column=1)

        self.go_train = train_test.train_test(self.data['statement'], self.data['sentiment'],
                                              self.vmethod.get(), None)
        vscore = StringVar()
        vscore.set('')

        def tester():
            # Reveal the test-data button once testing starts.
            self.btest_data.grid(row=2, column=0)

        self.butiliser_model = Button(self.haut_frame3, text='tester', command=tester,
                                      width=self.x // 40, bg='#1B2631', fg='white', relief=GROOVE)

        self.bscore = Button(self.gauche_frame, text='afficher le score',
                             command=lambda: self.go_train.tester(vscore, self.butiliser_model),
                             width=self.x // 40, bg='#0000FF', fg='white', relief=GROOVE)
        self.lscore = Label(self.gauche_frame, textvariable=vscore)
        self.lscore.grid(row=9, column=0)

        v = 'entrainer avec ' + self.vmethod.get()
        self.bentrainer = Button(self.gauche_frame, text=v,
                                 command=lambda: self.go_train.entrainer(self.bscore, self.root),
                                 width=self.x // 40, bg='#0000FF', fg='white', relief=GROOVE)
        self.bentrainer.grid(row=4, column=0)
        self.z = '1'
        self.hide_show('train', '0')
Example #3
    def fit_model(self, train, test):
        model = Sequential()
        # Convolution
        model.add(Conv2D(32, (3, 3), input_shape=(520, 520, 3), activation='relu'))
        # Pooling
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Second convolution
        model.add(Conv2D(32, (3, 3), activation='relu'))
        model.add(MaxPooling2D(pool_size=(2, 2)))
        # Flattening
        model.add(Flatten())
        # Fully connected
        model.add(Dense(units=128, activation='relu'))
        model.add(Dense(units=1, activation='sigmoid'))
        # Compile the CNN. A single sigmoid unit calls for binary_crossentropy;
        # the original 'categorical_crossentropy' would always report zero loss here.
        model.compile(optimizer='rmsprop',
                      loss='binary_crossentropy',
                      metrics=['accuracy'])

        # Normalize data
        training_data = train_test()
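The snippet ends right after the normalization comment, so the body of `train_test()` is not shown. A plausible sketch, assuming it returns a rescaled Keras generator (the directory path and batch size are placeholders):

from keras.preprocessing.image import ImageDataGenerator

def train_test():
    # Hypothetical normalization: scale pixel values into [0, 1].
    datagen = ImageDataGenerator(rescale=1.0 / 255)
    # target_size matches the (520, 520, 3) input_shape above;
    # class_mode='binary' matches the single sigmoid output unit.
    return datagen.flow_from_directory('data/train',
                                       target_size=(520, 520),
                                       batch_size=32,
                                       class_mode='binary')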
Example #4
    try:
        checkpoint = torch.load(folder_name + '/' + check +
                                'checkpoint.pth.tar',
                                map_location=lambda storage, loc: storage,
                                pickle_module=pickle,
                                encoding='latin1')
        res.load_state_dict(checkpoint['state_dict'], strict=True)
        epoch = checkpoint['epoch']
        mean_best = checkpoint['mean_best']
        print(
            "=> loaded checkpoint with best_prediction {} at epoch {}".format(
                mean_best, epoch))
    except Exception as e:
        print('Failed to load checkpoint: {}'.format(e))
        sys.exit()

if hyp == 't':
    try:
        print('Loading previous hyperparameters')
        optim1.load_state_dict(checkpoint['optimizer'])
        scheduler.load_state_dict(checkpoint['scheduler'])
    except Exception:
        print('Failed to load previous hyperparameters')

train_test(res, optim1, scheduler, dataloader, number_of_epochs,
           breaking_point, saving_epoch, folder_name, batch_size, infr, epoch,
           mean_best, visualize)
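For reference, a checkpoint consumed by this example would have been written with matching keys ('state_dict', 'optimizer', 'scheduler', 'epoch', 'mean_best'). A minimal saving sketch under that assumption:

import torch

def save_checkpoint(model, optimizer, scheduler, epoch, mean_best, path):
    # Mirror of the keys read by the loading code above.
    torch.save({'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict(),
                'epoch': epoch,
                'mean_best': mean_best}, path)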
Example #5
from typing import Optional, Union

def predict(data: list,
            forecast_horizon: int = 1,
            feature_sets: dict = {'covariate': 'mRMR'},
            forced_covariates: list = [],
            test_type: str = 'whole-as-one',
            models: list = ['knn'],
            mixed_models: list = [],
            model_type: str = 'regression',
            splitting_type: str = 'training-validation',
            instance_testing_size: Union[int, float] = 0.2,
            instance_validation_size: Union[int, float] = 0.3,
            instance_random_partitioning: bool = False,
            fold_total_number: int = 5,
            feature_scaler: Optional[str] = None,
            target_scaler: Optional[str] = None,
            performance_benchmark: str = 'MAPE',
            performance_measures: list = ['MAPE'],
            performance_mode: str = 'normal',
            scenario: Optional[str] = 'current',
            validation_performance_report: bool = True,
            testing_performance_report: bool = True,
            save_predictions: bool = True,
            plot_predictions: bool = False,
            verbose: int = 0):
    
    """
    Args:
        data:
        forecast_horizon:
        feature_sets:
        forced_covariates:
        test_type:
        models:
        mixed_models:
        model_type:
        splitting_type:
        instance_testing_size:
        instance_validation_size:
        instance_random_partitioning:
        fold_total_number:
        feature_scaler:
        target_scaler:
        performance_benchmark:
        performance_measures:
        performance_mode:
        scenario:
        validation_performance_report:
        testing_performance_report:
        save_predictions:
        plot_predictions:
        verbose:
    Returns:
    """
    # input checking
    # data
    if not isinstance(data, list):
        sys.exit("The input 'data' must be a list of DataFrames or a list of data addresses.")
    str_check = [isinstance(d, str) for d in data]
    df_check = [isinstance(d, pd.DataFrame) for d in data]
    if not (all(str_check) or all(df_check)):
        sys.exit("The input 'data' must be a list of DataFrames or a list data addresses.")
    # forecast_horizon
    if not (isinstance(forecast_horizon, int) and forecast_horizon >= 1):
        sys.exit("The input 'forecast_horizon' must be integer and greater than or equal to one.")
    # feature_scaler
    if feature_scaler not in configurations.FEATURE_SCALERS:
        sys.exit(f"The input 'feature_scaler' must be string and one of the following options:\n"
                 f"{configurations.FEATURE_SCALERS}")
    # target_scaler
    if target_scaler not in configurations.TARGET_SCALERS:
        sys.exit(f"The input 'target_scaler' must be string and one of the following options:\n"
                 f"{configurations.TARGET_SCALERS}")
    # test_type
    if test_type not in configurations.TEST_TYPES:
        sys.exit(f"The input 'test_type' must be string and one of the following options:\n"
                 f"{configurations.TEST_TYPES}")
    # feature_sets input checking
    if not (isinstance(feature_sets, dict) and len(feature_sets.keys()) == 1):
        sys.exit("feature_sets input format is not valid.")
    if not (list(feature_sets.keys())[0] in configurations.FEATURE_SELECTION_TYPES
            and list(feature_sets.values())[0] in configurations.RANKING_METHODS):
        sys.exit("feature_sets input is not valid.")
    # forced_covariates checking
    if not isinstance(forced_covariates, list):
        sys.exit("Error: The input 'forced_covariates' must be a list of covariates or an empty list.")
    # model_type input checking
    if model_type not in configurations.MODEL_TYPES:
        sys.exit("model_type input is not valid.")
    models_list = []
    # models input checking
    if not isinstance(models, list):
        sys.exit("models input format is not valid.")
    for model in list(models):  # iterate over a copy; duplicates are removed below
        if isinstance(model, str):
            if model not in configurations.PRE_DEFINED_MODELS:
                sys.exit("models input is not valid.")
            elif model not in models_list:
                models_list.append(model)
            else:
                models.remove(model)
        elif isinstance(model, dict):
            if len(list(model.keys())) == 1:
                if list(model.keys())[0] not in configurations.PRE_DEFINED_MODELS:
                    sys.exit("models input is not valid.")
                elif list(model.keys())[0] not in models_list:
                    models_list.append(list(model.keys())[0])
                else:
                    models.remove(model)
            else:
                sys.exit("models input is not valid.")
        elif callable(model):
            if model.__name__ not in models_list:
                models_list.append(model.__name__)
            else:
                models.remove(model)
        else:
            sys.exit("models input is not valid.")
    # mixed_models input checking
    if not isinstance(mixed_models, list):
        sys.exit("Mixed_models input format is not valid.")
    for model in list(mixed_models):  # iterate over a copy; duplicates are removed below
        if isinstance(model, str):
            if model not in configurations.PRE_DEFINED_MODELS:
                sys.exit("mixed_models input is not valid.")
            elif 'mixed_' + model not in models_list:
                models_list.append('mixed_' + model)
            else:
                mixed_models.remove(model)
        elif isinstance(model, dict):
            if len(list(model.keys())) == 1:
                if list(model.keys())[0] not in configurations.PRE_DEFINED_MODELS:
                    sys.exit("mixed_models input is not valid.")
                elif 'mixed_' + list(model.keys())[0] not in models_list:
                    models_list.append('mixed_' + list(model.keys())[0])
                else:
                    mixed_models.remove(model)
            else:
                sys.exit("mixed_models input is not valid.")
        elif callable(model):
            if model.__name__ not in models_list:
                models_list.append(model.__name__)
            else:
                mixed_models.remove(model)
        else:
            sys.exit("mixed_models input is not valid.")
    # instance_testing_size input checking
    if not ((isinstance(instance_testing_size, float) and 0 < instance_testing_size < 1) or (
            isinstance(instance_testing_size, int) and instance_testing_size > 0)):
        sys.exit("instance_testing_size input is not valid.")
    # splitting_type input checking
    if splitting_type not in configurations.SPLITTING_TYPES:
        sys.exit("splitting_type input is not valid.")
    # instance_validation_size input checking
    if not ((isinstance(instance_validation_size, float) and 0 < instance_validation_size < 1) or (
            isinstance(instance_validation_size, int) and instance_validation_size > 0)):
        sys.exit("instance_validation_size input is not valid.")
    # instance_random_partitioning input checking
    if not isinstance(instance_random_partitioning, bool):
        sys.exit("instance_random_partitioning input is not valid.")
    # fold_total_number input checking
    if not (isinstance(fold_total_number, int) and fold_total_number > 1):
        sys.exit("fold_total_number input is not valid.")
    # performance_benchmark input checking
    if performance_benchmark not in configurations.PERFORMANCE_BENCHMARKS:
        sys.exit("performance_benchmark input is not valid.")
    # performance_mode input checking
    if not isinstance(performance_mode, str):
        sys.exit("performance_mode input format is not valid.")
    if not any(performance_mode.startswith(performance_mode_starts_with)
               for performance_mode_starts_with in configurations.PERFORMANCE_MODES_STARTS_WITH):
        sys.exit("performance_mode input is not valid.")
    # performance_measures input checking
    if not (isinstance(performance_measures, list) and len(performance_measures) > 0):
        sys.exit("performance_measures input format is not valid.")
    for performance_measure in performance_measures:
        if performance_measure not in configurations.PERFORMANCE_MEASURES:
            sys.exit("performance_measures input is not valid.")
    # scenario
    if not ((isinstance(scenario, str) and scenario in configurations.SCENARIOS) or scenario is None):
        sys.exit("scenario input is not valid.")
    # validation_performance_report input checking
    if not isinstance(validation_performance_report, bool):
        sys.exit("validation_performance_report input is not valid.")
    # testing_performance_report input checking
    if not isinstance(testing_performance_report, bool):
        sys.exit("testing_performance_report input is not valid.")
    # save_predictions input checking
    if not isinstance(save_predictions, bool):
        sys.exit("save_predictions input is not valid.")
    # plot_predictions input checking
    if not isinstance(plot_predictions, bool):
        sys.exit("plot_predictions input is not valid.")
    elif plot_predictions and not save_predictions:
        sys.exit("For plotting the predictions, both the plot_predictions and save_predictions inputs must be set to True.")
    elif plot_predictions and model_type == 'classification':
        sys.exit("The plot_predictions input can be set to True only for the regression model_type.")
        
    # verbose input checking
    if verbose not in configurations.VERBOSE_OPTIONS:
        sys.exit("verbose input is not valid.")

    # removing prediction and performance directories and test_process_backup csv file
    if os.path.exists('prediction'):
        shutil.rmtree('prediction')
    if os.path.exists('performance'):
        shutil.rmtree('performance')
    if os.path.isfile('test_process_backup.csv'):
        os.remove('test_process_backup.csv')

    # data preparing
    if isinstance(data[0], str):
        try:
            data = [pd.read_csv(d).sort_values(by=['temporal id', 'spatial id']) for d in data]
        except Exception as e:
            sys.exit(str(e))

    # forced_covariates manipulation
    forced_covariates = list(set(forced_covariates))
    forced_covariates = [forced_covariate
                         for forced_covariate in forced_covariates
                         if forced_covariate is not None and forced_covariate != '']

    # classification checking
    labels = None
    if model_type == 'classification':
        if not set(performance_measures) <= set(configurations.CLASSIFICATION_PERFORMANCE_MEASURES):
            sys.exit("Error: The input 'performance_measures' is not valid according to 'model_type=classification'.")
        if performance_benchmark not in configurations.CLASSIFICATION_PERFORMANCE_BENCHMARKS:
            sys.exit("Error: The input 'performance_benchmark' is not valid according to 'model_type=classification'.")
        if performance_mode != 'normal':
            performance_mode = 'normal'
            print("Warning: The input 'performance_mode' is set to 'normal' according to 'model_type=classification'.")
        if target_scaler is not None:
            target_scaler = None
            print("Warning: The input 'target_scaler' is set to None according to 'model_type=classification'.")
        target_column_name = list(filter(lambda x: x.startswith('Target'), data[0].columns.values))[0]
        labels = data[0].loc[:, target_column_name].unique().tolist()
        labels = [label for label in labels if not (label is None or str(label) == 'nan')]
        if len(labels) < 2:
            sys.exit("Error: The labels length must be at least two.")

    # one_by_one checking
    if test_type == 'one-by-one':
        splitting_type = 'training-validation'
        instance_validation_size = 1
        instance_random_partitioning = False
        if data[0]['spatial id'].nunique() == 1:
            if 'AUC' in performance_measures:
                performance_measures.remove('AUC')
            if 'R2_score' in performance_measures:
                performance_measures.remove('R2_score')
            if 'AUPR' in performance_measures:
                performance_measures.remove('AUPR')
            if len(performance_measures) == 0:
                sys.exit("Error: The input 'performance_measures' is not valid according to 'test_type=one-by-one'.")
            if 'AUC' in performance_benchmark:
                sys.exit("Error: The input 'performance_benchmark' is not valid according to 'test_type=one-by-one'.")
            if 'R2_score' in performance_benchmark:
                sys.exit("Error: The input 'performance_benchmark' is not valid according to 'test_type=one-by-one'.")
            if 'AUPR' in performance_benchmark:
                sys.exit("Error: The input 'performance_benchmark' is not valid according to 'test_type=one-by-one'.")
                
    # get target quantities
    granularity = [1]*len(data)
    for index in range(len(data)):
        target_mode, target_granularity, granularity[index], _ = get_target_quantities(data=data[index].copy())
        data[index], _ = get_target_temporal_ids(temporal_data = data[index].copy(), forecast_horizon = forecast_horizon,
                                              granularity = granularity[index])
        if model_type == 'classification':
            if not target_mode == 'normal':
                sys.exit(
                    "Error: The parameter 'target_mode' must be 'normal' according to 'model_type=classification'.")
            if not target_granularity == 1:
                sys.exit(
                    "Error: The parameter 'target_granularity' must be 1 according to 'model_type=classification'.")
            if not granularity[index] == 1:
                sys.exit(
                    "Error: The temporal scale of input data must not be transformed according to 'model_type=classification'.")
    
    data, future_data = get_future_data(data=[d.copy() for d in data],
                                        forecast_horizon=forecast_horizon)
    
    # change the name of temporal id to be identified as shifted to target time point
    for index in range(len(data)):
        data[index] = data[index].rename(columns = {'temporal id':'target temporal id'})
        future_data[index] = future_data[index].rename(columns = {'temporal id':'target temporal id'})
    
    # # ranking
    # print('Ranking Process')
    # feature_selection_type = list(feature_sets.keys())[0]
    # ranking_method = list(feature_sets.values())[0]
    # ordered_covariates_or_features = []
    # if feature_selection_type == 'covariate':
    #     ordered_covariates_or_features = rank_covariates(data=data[0].copy(),
    #                                                      ranking_method=ranking_method,
    #                                                      forced_covariates=forced_covariates)
    # else:
    #     for d in data:
    #         ordered_covariates_or_features.append(rank_features(data=d.copy(),
    #                                                             ranking_method=ranking_method,
    #                                                             forced_covariates=forced_covariates))

    # main process
    if test_type == 'whole-as-one':
        print('Whole As One')
        data_temporal_ids = [d['target temporal id'].unique() for d in data]
        # train_validate
        print(100 * '-')
        print('Train Validate Process')
        best_model, best_model_parameters, best_history_length, best_feature_or_covariate_set, base_models, _ = \
            train_validate(data=[d.copy() for d in data],
                           forecast_horizon=forecast_horizon,
                           feature_scaler=feature_scaler,
                           target_scaler=target_scaler,
                           feature_sets=feature_sets,
                           forced_covariates=forced_covariates,
                           model_type=model_type,
                           labels=labels,
                           models=models,
                           mixed_models=mixed_models,
                           instance_testing_size=instance_testing_size,
                           splitting_type=splitting_type,
                           instance_validation_size=instance_validation_size,
                           instance_random_partitioning=instance_random_partitioning,
                           fold_total_number=fold_total_number,
                           performance_benchmark=performance_benchmark,
                           performance_measures=performance_measures,
                           performance_report=validation_performance_report,
                           save_predictions=save_predictions,
                           verbose=verbose)

        # train_test
        print(100 * '-')
        print('Train Test Process')
        test_trained_model = train_test(
            data=data[best_history_length - 1].copy(),
            forecast_horizon=forecast_horizon,
            history_length=best_history_length,
            feature_scaler=feature_scaler,
            target_scaler=target_scaler,
            feature_or_covariate_set=best_feature_or_covariate_set,
            model_type=model_type,
            labels=labels,
            model=best_model,
            base_models=base_models,
            model_parameters=best_model_parameters,
            instance_testing_size=instance_testing_size,
            performance_measures=performance_measures,
            performance_mode=performance_mode,
            performance_report=testing_performance_report,
            save_predictions=save_predictions,
            verbose=verbose)
        # predict_future
        best_model, best_model_parameters, best_history_length, best_feature_or_covariate_set, base_models, _ = \
            train_validate(data=[d[d['target temporal id'].isin((
                                data_temporal_ids[index][:] if (forecast_horizon*granularity[index])-1 == 0 else data_temporal_ids[index][:-((forecast_horizon*granularity[index])-1)]))].copy()
                                for index, d in enumerate(data)],
                           forecast_horizon=forecast_horizon,
                           feature_scaler=feature_scaler,
                           target_scaler=target_scaler,
                           feature_sets=feature_sets,
                           forced_covariates=forced_covariates,
                           model_type=model_type,
                           labels=labels,
                           models=models,
                           mixed_models=mixed_models,
                           instance_testing_size=0,
                           splitting_type=splitting_type,
                           instance_validation_size=instance_validation_size,
                           instance_random_partitioning=instance_random_partitioning,
                           fold_total_number=fold_total_number,
                           performance_benchmark=performance_benchmark,
                           performance_measures=performance_measures,
                           performance_report=False,  # validation_performance_report
                           save_predictions=False,  # save_predictions
                           verbose=0)

        best_data = data[best_history_length - 1].copy()
        best_future_data = future_data[best_history_length - 1].copy()
        best_data_temporal_ids = best_data['target temporal id'].unique()
        temp = forecast_horizon*granularity[best_history_length - 1] - 1
        trained_model = predict_future(data=best_data[best_data['target temporal id'].isin((best_data_temporal_ids
                                                                                     if temp == 0
                                                                                     else best_data_temporal_ids[:-temp]
                                                                                     ))].copy(),
                                       future_data=best_future_data.copy(),
                                       forecast_horizon=forecast_horizon,
                                       feature_scaler=feature_scaler,
                                       target_scaler=target_scaler,
                                       feature_or_covariate_set=best_feature_or_covariate_set,
                                       model_type=model_type,
                                       labels=labels,
                                       model=best_model,
                                       base_models = base_models,
                                       model_parameters=best_model_parameters,
                                       scenario=scenario,
                                       save_predictions=save_predictions,
                                       verbose=verbose)

    elif test_type == 'one-by-one':
        print('One By One')
        # loop over test points
        data_temporal_ids = [d['target temporal id'].unique() for d in data]
        if isinstance(instance_testing_size, float):
            instance_testing_size = int(round(instance_testing_size * len(data_temporal_ids[0])))
        for i in range(instance_testing_size):
            print(100 * '#')
            print('test_point =', i + 1)
            # train_validate
            print(100 * '-')
            print('Train Validate Process')
            best_model, best_model_parameters, best_history_length, best_feature_or_covariate_set, base_models, _ = \
                train_validate(data=
                               [d[d['target temporal id'].isin((
                                   data_temporal_ids[index][:] if i == 0 else data_temporal_ids[index][:-i]))].copy()
                                for index, d in enumerate(data)],
                               forecast_horizon=forecast_horizon,
                               feature_scaler=feature_scaler,
                               forced_covariates=forced_covariates,
                               target_scaler=target_scaler,
                               feature_sets=feature_sets,
                               model_type=model_type,
                               labels=labels,
                               models=models,
                               mixed_models=mixed_models,
                               instance_testing_size=1,
                               splitting_type=splitting_type,
                               instance_validation_size=instance_validation_size,
                               instance_random_partitioning=instance_random_partitioning,
                               fold_total_number=fold_total_number,
                               performance_benchmark=performance_benchmark,
                               performance_measures=performance_measures,
                               performance_report=validation_performance_report,
                               save_predictions=save_predictions,
                               verbose=verbose)
            

            # train_test
            print(100 * '-')
            print('Train Test Process')
            d = data[best_history_length - 1].copy()
            test_trained_model = train_test(
                data=d[d['target temporal id'].isin(
                    (data_temporal_ids[best_history_length - 1][:]
                     if i == 0
                     else data_temporal_ids[best_history_length - 1][:-i]))].copy(),
                forecast_horizon=forecast_horizon,
                history_length=best_history_length,
                feature_scaler=feature_scaler,
                target_scaler=target_scaler,
                feature_or_covariate_set=best_feature_or_covariate_set,
                model_type=model_type,
                labels=labels,
                model=best_model,
                base_models=base_models,
                model_parameters=best_model_parameters,
                instance_testing_size=1,
                performance_measures=performance_measures,
                performance_mode=performance_mode,
                performance_report=testing_performance_report,
                save_predictions=save_predictions,
                verbose=verbose)
        # predict_future
        print(100 * '-')
        print('Train Validate Process')
        best_model, best_model_parameters, best_history_length, best_feature_or_covariate_set, base_models, _ = \
            train_validate(data=[d[d['target temporal id'].isin((
                                   data_temporal_ids[index][:] if (forecast_horizon*granularity[index])-1 == 0 else data_temporal_ids[index][:-((forecast_horizon*granularity[index])-1)]))].copy()
                                for index, d in enumerate(data)],
                           forecast_horizon=forecast_horizon,
                           feature_scaler=feature_scaler,
                           target_scaler=target_scaler,
                           feature_sets=feature_sets,
                           forced_covariates=forced_covariates,
                           model_type=model_type,
                           labels=labels,
                           models=models,
                           mixed_models=mixed_models,
                           instance_testing_size=0,
                           splitting_type=splitting_type,
                           instance_validation_size=instance_validation_size,
                           instance_random_partitioning=instance_random_partitioning,
                           fold_total_number=fold_total_number,
                           performance_benchmark=performance_benchmark,
                           performance_measures=performance_measures,
                           performance_report=False,  # validation_performance_report
                           save_predictions=False,  # save_predictions
                           verbose=0)

        best_data = data[best_history_length - 1].copy()
        best_future_data = future_data[best_history_length - 1].copy()
        best_data_temporal_ids = best_data['target temporal id'].unique()
        best_future_data_temporal_ids = best_future_data['target temporal id'].unique()
        for i in range(forecast_horizon*granularity[best_history_length - 1]):
            print(150 * '*')
            print('i =', i + 1)
            temp = forecast_horizon*granularity[best_history_length - 1] - i - 1
            print(100 * '-')
            print('Predict Future Process')
            trained_model = predict_future(data=best_data[best_data['target temporal id'].isin(
                (best_data_temporal_ids if temp == 0
                 else best_data_temporal_ids[:-temp]))].copy(),
                                           future_data=best_future_data[best_future_data['target temporal id'] ==
                                                                        best_future_data_temporal_ids[i]].copy(),
                                           forecast_horizon=forecast_horizon,
                                           feature_scaler=feature_scaler,
                                           target_scaler=target_scaler,
                                           feature_or_covariate_set=best_feature_or_covariate_set,
                                           model_type=model_type,
                                           labels=labels,
                                           model=best_model,
                                           base_models = base_models,
                                           model_parameters=best_model_parameters,
                                           scenario=scenario,
                                           save_predictions=save_predictions,
                                           verbose=verbose)
            
    if validation_performance_report and testing_performance_report:
        performance_bar_plot(forecast_horizon, test_type, performance_benchmark)
        performance_summary(forecast_horizon, test_type, performance_benchmark)

    if plot_predictions:
        if len(data[0]['spatial id'].unique()) < 3:
            spatial_ids = data[0]['spatial id'].unique()
        else:
            spatial_ids = list(random.sample(list(data[0]['spatial id'].unique()), 3))
        plot_prediction(data=data[0].copy(), test_type=test_type, forecast_horizon=forecast_horizon,
                        plot_type='test', granularity=granularity[0], spatial_ids=spatial_ids)
        plot_prediction(data=data[0].copy(), test_type=test_type, forecast_horizon=forecast_horizon,
                        plot_type='future', granularity=granularity[0], spatial_ids=spatial_ids)


    return None
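A hedged usage sketch for this entry point ('data.csv' is a placeholder; the file must carry the 'temporal id', 'spatial id', and target columns the function expects):

predict(data=['data.csv'],
        forecast_horizon=2,
        models=['knn'],
        test_type='whole-as-one',
        model_type='regression',
        performance_measures=['MAPE'])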
Example #6
def result():
    if request.method == 'POST':
        uploaded_files = request.files.getlist("file[]")
        for file in uploaded_files:
            if file and allowed_file(file.filename):
                filename = secure_filename(file.filename)
                file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))
                # Run the full train/test pipeline once a file has been saved.
                label, mean, std, img_name, global_features_shape, global_labels_shape, trainDataGlobal_shape, testDataGlobal_shape, trainLabelsGlobal_shape, testLabelsGlobal_shape = train_test()

        return render_template('result.html',
                               label=label,
                               acc=mean,
                               std=std,
                               img_name=img_name,
                               global_features_shape=global_features_shape,
                               global_labels_shape=global_labels_shape,
                               trainDataGlobal_shape=trainDataGlobal_shape,
                               testDataGlobal_shape=testDataGlobal_shape,
                               trainLabelsGlobal_shape=trainLabelsGlobal_shape,
                               testLabelsGlobal_shape=testLabelsGlobal_shape)
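The snippet omits the Flask scaffolding around the view. A minimal sketch of the assumed surroundings (URL rule, upload folder, and `allowed_file` are assumptions, not the project's actual code):

from flask import Flask, request, render_template
from werkzeug.utils import secure_filename
import os

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = 'uploads'  # assumed location

ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}  # assumed set

def allowed_file(filename):
    # Hypothetical extension check matching the call in the view above.
    return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS

@app.route('/result', methods=['POST'])  # assumed URL rule
def result():
    ...  # body as in the example above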
Example #7
def run_model_run(dataset=None, *, session="s7"):
    try:
        train_transforms, test_transforms = train_test_dataloader.define_train_test_transformers(
            session=session)
        train_data, test_data = train_test_dataloader.download_data(
            dataset_name=utility.get_dataset_name(session=session),
            train_transforms=train_transforms,
            test_transforms=test_transforms)

        train_loader, test_loader = train_test_dataloader.get_train_test_dataloaders(
            train_data=train_data,
            test_data=test_data,
            data_loader_args=utility.get_dataloader_args())

        all_regularizations_list, tracker = utility.get_combos_and_trackers()
        device = utility.get_device()
        # utility.get_all_models_summary()
        loss_fn = nn.functional.nll_loss
        model = None

        for combo in all_regularizations_list:
            print("\nRunning for: ", combo)

            if dataset and dataset.lower() == "mnist":
                if CONSTANTS.GBN in combo.lower():
                    model = basic_mnist.GBNNet().to(device)
                else:
                    model = basic_mnist.S6_MNIST().to(device)
            elif "s7" in session.lower() or dataset.lower() == "cifar10":
                model = cifar10_groups_dws_s7_model.S7_CIFAR10()
                model = model.to(device)
                loss_fn = nn.CrossEntropyLoss()

            optimizer = utility.get_optimizer(model=model)
            scheduler = utility.get_scheduler(optimizer=optimizer)
            utility.show_model_summary(
                title=model.__doc__,
                model=model,
                input_size=utility.get_input_size(
                    dataset=utility.get_dataset_name(session=session)))

            train_test.train_test(
                model=model,
                device=device,
                train_loader=train_loader,
                optimizer=optimizer,
                epochs=int(utility.get_config_details()[CONSTANTS.MODEL_CONFIG]
                           [CONSTANTS.EPOCHS]),
                scheduler=scheduler,
                test=True,
                test_loader=test_loader,
                type_=combo,
                tracker=tracker,
                loss_fn=loss_fn)

        for plot_type in utility.get_config_details()[CONSTANTS.PLOTS][
                CONSTANTS.TO_PLOT].strip().split(','):
            utility.plot(title="Plot is for:" + plot_type,
                         x_label='Epochs',
                         y_label=plot_type.lower(),
                         tracker=tracker,
                         category=plot_type)
    except Exception:
        # format_exc() takes no exception argument; it formats the active one.
        print(traceback.format_exc())
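A hedged invocation example for this runner; the argument values mirror the CIFAR10 branch in the code:

if __name__ == '__main__':
    run_model_run(dataset='cifar10', session='s7')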
Example #8
import sys
sys.path.append('.')

import models
import train_test

models = models.get_models()

model_name = 'vgg'
model = models[model_name]
train_test.train_test(model,
                      model_name,
                      batch_size=100,
                      augment=True,
                      epochs=50)

model_name = 'conv_simple'
model = models[model_name]
train_test.train_test(model,
                      model_name,
                      batch_size=100,
                      augment=True,
                      epochs=50)

model_name = 'one_hidden'
model = models[model_name]
train_test.train_test(model,
                      model_name,
                      batch_size=100,
                      augment=True,
                      epochs=50)
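Since the three calls in this example differ only in the model name, the same run can be written as a loop (a stylistic sketch, not from the source script):

for model_name in ['vgg', 'conv_simple', 'one_hidden']:
    train_test.train_test(models[model_name],
                          model_name,
                          batch_size=100,
                          augment=True,
                          epochs=50)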
Example #9
File: amina2.py Project: slimanej4c/amira
    def tf_idf_2(self):

        # Wrap each statement in its own single-element list.
        li2 = [[i] for i in self.statement2]

        # Layout geometry derived from the window size.
        y1 = self.y // 20
        y2 = self.y // 14
        y3 = self.y - y1 - y2
        x3 = self.x // 5
        h = y3 - y2
        w = 3 * x3 - 30
        self.go = tf_idf2.tf_idf(li2[0:1000], self.centre_frame1, h, w)
        self.bafficheidf_test = Button(
            self.haut_frame3,
            text='afficher idf_tf test',
            command=self.go.afficher_idf,
            width=self.x // 60,
            bg='#1B2631',
            fg='white',
            relief=GROOVE,
        )

        self.bafficheidf_test.grid(row=0, column=3)
        import train_test
        go = train_test.train_test(self.data['statement'],
                                   self.data['sentiment'],
                                   self.data2['statement'],
                                   self.data2['sentiment'])
        vscore = StringVar()
        vscore.set('')
        butiliser_model = Button(
            self.haut_frame3,
            text='utiliser le model',
            command=lambda: go.utiliser_svm(self.centre_frame1),
            width=self.x // 60,
            bg='#1B2631',
            fg='white',
            relief=GROOVE,
        )

        self.tester = Button(
            self.gauche_frame,
            text='tester',
            command=lambda: go.tester(vscore, butiliser_model),
            width=self.x // 60,
            bg='#1B2631',
            fg='white',
            relief=GROOVE,
        )

        self.lscore = Label(self.gauche_frame, textvariable=vscore)
        self.lscore.grid(row=6, column=0)
        self.entrainer = Button(
            self.gauche_frame,
            text='entrainer',
            command=lambda: go.entrainer(self.tester, self.root),
            width=self.x // 60,
            bg='#1B2631',
            fg='white',
            relief=GROOVE,
        )
        self.entrainer.grid(row=4, column=0)