Example #1
def para_train(model, modelname_prefix, dataconf,x_train, y_train, x_valid, y_valid, x_test, y_test):

    modelname = "%s#%s"% (model,modelname_prefix)
    modelconf = ModelConf.ModelConf(dataconf=dataconf, batch_size=600, learning_rate=0.0001, epochs=50)
    modelbuild = ModelBuilder.ModelBuilder(modelconf, modelname, allconfig['target'])
    if model == 'cnn':
        os.environ['CUDA_VISIBLE_DEVICES'] = '8'
        modelbuild.train_cnn(x_train, y_train, x_valid, y_valid, figplot=True)
        modelbuild.test(x_test, y_test, ROC=False)

    elif model == 'vgg':
        os.environ['CUDA_VISIBLE_DEVICES'] = '7'
        modelbuild.train_vgg(x_train, y_train, x_valid, y_valid, figplot=True)
        modelbuild.test(x_test, y_test, ROC=False)

    elif model == 'vgglstm':
        os.environ['CUDA_VISIBLE_DEVICES'] = '6'
        modelbuild.train_vgg_lstm(x_train, y_train, x_valid, y_valid, figplot=True)
        modelbuild.test(x_test, y_test, ROC=False)
    # elif model == 'lstm':
    #
    #     modelbuild.train_lstm(x_train, y_train, x_valid, y_valid, figplot=True)
    #     modelbuild.test(x_test, y_test, ROC=False)
    elif model == 'bilstm':
        os.environ['CUDA_VISIBLE_DEVICES'] = '5'
        modelbuild.train_bilstm(x_train, y_train, x_valid, y_valid, figplot=True)
        modelbuild.test(x_test, y_test, ROC=False)
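
A minimal usage sketch (not from the original project): each branch of para_train pins its own GPU via CUDA_VISIBLE_DEVICES, so the architectures lend themselves to being trained concurrently, one process each. The 'run1' prefix and the pre-made data splits are hypothetical.

from multiprocessing import Process

def train_all(dataconf, x_train, y_train, x_valid, y_valid, x_test, y_test):
    procs = [Process(target=para_train,
                     args=(m, 'run1', dataconf,
                           x_train, y_train, x_valid, y_valid, x_test, y_test))
             for m in ('cnn', 'vgg', 'vgglstm', 'bilstm')]
    for p in procs:
        p.start()   # each child process trains one architecture on its own GPU
    for p in procs:
        p.join()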
Example #2
def TrainRNN(setupClient):
    (X_train, y_train), (X_test, y_test), (w_train,w_test), (ix_train, ix_test) = LoadDataRNN(setupClient)

    modelpath = setupClient.ModelSavePath

    model=ModelBuilder.BuildTestRNN(setupClient,X_train)
    model.compile('adam', 'binary_crossentropy')
    print (Fore.BLUE+"--------------------------")
    print (Back.BLUE+"       TRAINING RNN       ")
    print (Fore.BLUE+"--------------------------")
    modelMetricsHistory = None  # stays None if training is interrupted below
    try:
        modelMetricsHistory = model.fit(
            [X_train], y_train,
            batch_size=16,
            # class_weight={
            #     0: 0.20 * (float(len(y)) / (y == 0).sum()),
            #     1: 0.40 * (float(len(y)) / (y == 1).sum()),
            #     2: 0.40 * (float(len(y)) / (y == 2).sum())
            # },
            callbacks=[
                EarlyStopping(verbose=True, patience=10, monitor='val_loss'),
                ModelCheckpoint(modelpath + '/model.h5', monitor='val_loss',
                                verbose=True, save_best_only=True)
            ],
            epochs=30,
            validation_split=0.2)

    except KeyboardInterrupt:
        print ('Training ended early.')

    #store the configuration of the training to disk
    outfile = open(modelpath+'/RNN_Setup','wb')
    pickle.dump(setupClient,outfile)
    outfile.close()

    return modelMetricsHistory
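
A small companion sketch (an assumption, not part of the original code) showing how the artifacts written above could be restored later: the pickled setup object and the best-val-loss checkpoint written by ModelCheckpoint, assuming the same Keras stack.

import pickle
from keras.models import load_model

def load_rnn_run(modelpath):
    # Restore the training setup pickled at the end of TrainRNN
    with open(modelpath + '/RNN_Setup', 'rb') as infile:
        setup = pickle.load(infile)
    # Restore the full model saved by ModelCheckpoint (save_best_only=True)
    model = load_model(modelpath + '/model.h5')
    return setup, model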
Example #3
def recur(in_state):
    '''After recur returns, the following should hold:
    1. Any actions in in_state have completed.
    2. The outgoing transitions of in_state have been checked.
    3. For each transition that fires, the destination state is activated
       (except the final state ['*'], which activates nothing).
    4. If any transition fired (i.e. a destination was activated), in_state is deactivated.
    The recursion can therefore be driven with: in_state = each currently active state.
    '''

    StateDiagram().add_state(state_name=in_state)  # Add in_state

    Action = State(name=in_state).attrs  # Get State Attribute

    evaluate_action = State_Attr(state_name=in_state).evaluate(
    )  # Add Action in state and check if action complete
    dest_state = GetDict().source_destination()[
        in_state]  # Get List of Destination from in_state

    if evaluate_action:
        connection = []
        for item in range(0, len(dest_state)):
            connection.append((in_state, dest_state[item]))
            '''If in_state is a SuperState, run the child states inside it,
            starting from the child start point.'''

            #TODO: SuperState List in GetList module has to be developed.
            if State(name=in_state).num_substates > 0:
                #TODO: SuperState:ChildStart Dictionary has to be developed in GetDict
                start_superstate = ModelBuilder.StateModelBuilder(
                    state_id=in_state).start_superstate()
                recur(start_superstate)

            if connection[item] in GetDict().states_transition():
                Transition = GetDict().states_transition()[
                    connection[item]]  # Get List of Transitions (if any)
                for n in range(0, len(Transition)):  # Add Transitions
                    pass
                    #StateAlgo.StateAlgorithm().add_trasition(transition = Transition[n],
                    #                                       current_state = in_state, next_state = dest_state[item])

                if StateAlgo.StateAlgorithm().evaluate_transitions():
                    '''Deactivate in_state; activate dest_state (except ['*'])'''
                    if dest_state[item] != '[*]':
                        StateAlgo.StateAlgorithm().change_state(
                            done_state=in_state,
                            new_state=dest_state[item],
                            check_permissive=True)
                    else:
                        StateAlgo.State(state_id=in_state).deactivate()
                    '''If in_state is a superstate and transition of in_state complete,
                    deactivate all childstates inside the in_state
                    '''
                    #TODO: SuperState:ChildState Dictionary has to be developed in GetDict.
                    if in_state in GetList().SuperState():  # was in_state[item], which indexed the state-name string
                        for child in GetDict().superstate_child():  # renamed from `item` to avoid shadowing the outer loop variable
                            StateAlgo.State(
                                state_id=GetDict().SuperState()[child]).deactivate()
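
The docstring above describes a generic driver: run a state's actions, evaluate its outgoing transitions, activate the destinations that fire, deactivate the source, then repeat over the active states. A self-contained sketch of that pattern with plain dictionaries (independent of the StateAlgo/GetDict machinery used here):

def step(active, actions, transitions):
    """One pass of the recursion described above.
    active      -- set of currently active state names
    actions     -- dict: state -> callable returning True once its action completed
    transitions -- dict: state -> list of (guard_callable, dest_state) pairs
    """
    next_active = set()
    for state in active:
        if not actions.get(state, lambda: True)():
            next_active.add(state)        # action not finished yet; stay here
            continue
        fired = False
        for guard, dest in transitions.get(state, []):
            if guard():
                fired = True
                if dest != '[*]':         # '[*]' is the final state: nothing to activate
                    next_active.add(dest)
        if not fired:
            next_active.add(state)        # no transition fired; state stays active
    return next_active

Driving the machine is then just: active = step(active, actions, transitions), repeated until active is empty.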
Example #4
def para_train(model, modelname_prefix, dataconf, target, x_train, y_train, x_valid, y_valid, x_test, y_test):

    modelname = "%s#%s"% (model,modelname_prefix)
    modelconf = ModelConf.ModelConf(dataconf=dataconf, batch_size=600, learning_rate=0.0001, epochs=50)
    modelbuild = ModelBuilder.ModelBuilder(modelconf, modelname, target)
    if model == 'cnn':

        modelbuild.train_cnn(x_train, y_train, x_valid, y_valid, figplot=True)
        modelbuild.test(x_test, y_test, ROC=False)
    elif model == 'vgglstm':

        modelbuild.train_vgg_lstm(x_train, y_train, x_valid, y_valid, figplot=False)
        modelbuild.test(x_test, y_test, ROC=True)
    elif model == 'vgg':

        modelbuild.train_vgg(x_train, y_train, x_valid, y_valid, figplot=True)
        modelbuild.test(x_test, y_test, ROC=False)
    elif model == 'lstm':
        modelbuild.train_lstm(x_train, y_train, x_valid, y_valid, figplot=True)
        modelbuild.test(x_test, y_test, ROC=False)
Example #5
    def StartDemo(self, ref, model_path):
        self.captureWidth = 640
        self.captureHeight = 480
        self.cap = cv2.VideoCapture(0)
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, self.captureWidth)    # property id 3
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, self.captureHeight)  # property id 4

        # self.graph = tf.get_default_graph()

        bp = 'trained_model/'

        a = read_model("models/model99.txt")
        modelObject = ModelBuilder.ModelBuilder(a, (80, 80, 2))
        self.model = modelObject.model
        self.model.load_weights(bp+model_path)

        #self.model = load_model(bp+model_path)
        self.ref_img = FaceExtractionPipeline.SingletonPipeline().FaceExtractionPipelineImage(skimage.io.imread(ref))

        demo.Window(ref)
Example #6
    def build(self):
        self.label_error.config(text="Begin building")
        self.modelBuilder = ModelBuilder.ModelBuilder(self.isStructure)
        self.modelTrainer = ModelTrainer.ModelTrainer(self.isTrain,
                                                      self.modelBuilder,
                                                      self.entry_bins.get())
        self.modelTrainer.readFile()
        maxbins = self.modelTrainer.getMaxbins()
        if maxbins < int(self.entry_bins.get()):
            tkMessageBox.showinfo("Alert Message", "Invalid number of bins")
        elif os.stat(self.isTest).st_size == 0 or os.stat(
                self.isTrain).st_size == 0 or os.stat(
                    self.isStructure).st_size == 0:
            tkMessageBox.showinfo("Alert Message", "One of the files is empty")
        else:
            self.modelTrainer.fillMissingValues()
            self.modelTrainer.discretization()
            tkMessageBox.showinfo(
                "Alert Message",
                "Building classifier using train-set is done!")
            self.button_classify['state'] = 'normal'
            self.label_error.config(text='')
Example #7
TREPONEMAL ANTIBODY SCREEN
T_PALLIDUM IGG
TREPONEMAL B CMIA'''.split('\n')

rl = '''NON-REACTIVE
NEGATIVE
NONREACTIVE
EQUIVOCAL
LOW_POSITIVE
NEAT'''.split('\n')

#tl = ['Test1','Sample Test','The other test']
#rl = ['POSITVE','Non Negative','NA','Mostky ok']

tests = Tags.TagSet('Tests')
tests.labels = [Tags.Tag(ModelBuilder.splitText(x.strip())) for x in tl]

results = Tags.TagSet('Results')
results.labels = [Tags.Tag(ModelBuilder.splitText(x.strip())) for x in rl]


def tagsFromFullRules(data, rules, nodes, tags, rowIndex=0):
    text = data.mainData
    words = splitText(text)
    for index, x in enumerate(rules):
        s = 0
        l = len(x[1])
        while s < l and x[1][s].getData()[1] != TAG:
            s += 1
        if s != 0 and s != l:
            i = boyerMoore([y.getData() for y in x[1][:s]], words)
Example #8
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j,
                 i,
                 format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")

    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')
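
The loop above is the standard cell-annotation tail of a confusion-matrix plot. For reference, a self-contained version of the whole idiom (the sklearn call and the imshow setup are the usual pattern, not taken from this snippet):

import itertools
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

def plot_cm(y_true, y_pred, classes, normalize=False):
    cm = confusion_matrix(y_true, y_pred)
    if normalize:
        cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]
    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)
    plt.colorbar()
    ticks = np.arange(len(classes))
    plt.xticks(ticks, classes, rotation=45)
    plt.yticks(ticks, classes)
    fmt = '.2f' if normalize else 'd'
    thresh = cm.max() / 2.
    for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
        plt.text(j, i, format(cm[i, j], fmt),
                 horizontalalignment="center",
                 color="white" if cm[i, j] > thresh else "black")
    plt.tight_layout()
    plt.ylabel('True label')
    plt.xlabel('Predicted label')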


a = read_model("models/model08.txt")
modelObject = ModelBuilder(a, (80, 80, 2))
model = modelObject.model
model.load_weights(
    'trained_model/2018-07-04 22:29:20/model08.txt_2018-07-05 12:24:21.h5')
#plot_model(model, to_file='model_graph.png', show_shapes=True, show_layer_names=True)

(X_test, y_test, _) = GetData('lfw-whofitinram_p80x80')
Y_test = np_utils.to_categorical(y_test, num_classes)
X_test = X_test.astype('float32')
X_test /= np.max(X_test)  # Normalise data to [0, 1] range
model.compile(
    loss='categorical_crossentropy',  # using the cross-entropy loss function
    optimizer='adam',  # using the Adam optimiser
    metrics=['accuracy'])

model.summary()
Example #9
batch_size = 100
train_dataset, training_count = dlt.load(batch_size)
test_dataset, testing_count = dlt.load(batch_size, pathToData='./DATA/images/TEST/')

# Create general iterator 
iterator = tf.data.Iterator.from_structure(train_dataset.output_types,
                                               train_dataset.output_shapes)
next_element = iterator.get_next()


# Create the model 

should_drop = tf.placeholder(tf.bool)  # The actual value can be set up in 'Trainer.py' directly
dropout_rate1_placeholder = tf.placeholder(tf.float32, shape=(), name='dropout1_rate')

mb = ModelBuilder.ModelBuilder(120, 160, 4, dropout_rate1_placeholder, should_drop)

with tf.variable_scope('model') as scope: 
    logits = mb.networkBuilder(next_element[0])

trainer = TrainerOpt.TrainerOpt(logits, should_drop, dropout_rate1_placeholder, iterator, next_element, train_dataset, test_dataset)

# Define hyper parameters and others 
learning_rate = 0.0005 #Best found : 0.0005
epochs = 600
iterations = epochs*round(float(training_count)/batch_size)
drop1 = False
dropout_rate1 = 0.3
backup_folder = "models/"+"reference_model"    # folder in which save the model and other useful information 

start_time = time.time()
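
The reinitializable iterator built above is never initialized in this excerpt (the comment suggests that happens in Trainer.py). With the TF1 API used here, the missing step would look roughly like this sketch, reusing train_dataset/test_dataset from above:

train_init_op = iterator.make_initializer(train_dataset)
test_init_op = iterator.make_initializer(test_dataset)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    sess.run(train_init_op)   # point next_element at the training set
    # ... run training ops for some iterations ...
    sess.run(test_init_op)    # switch the same pipeline to the test set
    # ... run evaluation ops ...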
Example #10
ValMir_Pol_A = ValInputBatch['Mir_Pol_A']
Mir_Pol_A = tf.cond(bValidation, lambda: tf.identity(TrainMir_Pol_A), lambda: tf.identity(ValMir_Pol_A))

All0DSignalsSplits = tf.split(All0DSignals, GpuNum)
StoredEnergySplits = tf.split(StoredEnergy, GpuNum)
Ne_R0Splits = tf.split(Ne_R0, GpuNum)
Mir_Pol_ASplits = tf.split(Mir_Pol_A, GpuNum)
EmergencyValueSplits = tf.split(EmergencyValue, GpuNum)
MeanSquaredErrorTower = []
RNNOutputMeanTower = []
Ne_R0CNNOutputMeanTower = []

with tf.variable_scope(tf.get_variable_scope()):
    for GpuIndex in range(GpuNum):
        with tf.device('/GPU:%s' % GpuIndex):
            StoredEnergyCNNOutput = ModelBuilder.CNNForStoredEnergy(StoredEnergySplits[GpuIndex], DropoutProb, IsTraining=True)
            Ne_R0CNNOutput = ModelBuilder.CNNForNe_R0(Ne_R0Splits[GpuIndex], DropoutProb, IsTraining=True)
            Mir_Pol_ACNNOutput = ModelBuilder.CNNForMir_Pol_A(Mir_Pol_ASplits[GpuIndex], DropoutProb, IsTraining=True)

            AllSignalsSplits = tf.concat([All0DSignalsSplits[GpuIndex],
                                          StoredEnergyCNNOutput,
                                          Ne_R0CNNOutput,
                                          Mir_Pol_ACNNOutput,
                                          ], 2, name='AllSignalsSplits')

            RNNOutput = ModelBuilder.RNNForAllSignals(AllSignalsSplits, DropoutProb)

            MeanSquaredErrorSplit = tf.losses.mean_squared_error(EmergencyValueSplits[GpuIndex], RNNOutput)
            RNNOutputMeanSplit = tf.reduce_mean(RNNOutput)
            Ne_R0CNNOutputMeanSplit = tf.reduce_mean(Ne_R0CNNOutput)

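Each GPU here builds its own copy of the network inside one variable scope. The usual TF1 multi-tower idiom (an assumption about the surrounding code, since the excerpt ends before it) marks variables for reuse after the first tower so all GPUs share one set of weights:

with tf.variable_scope(tf.get_variable_scope()):
    for GpuIndex in range(GpuNum):
        with tf.device('/GPU:%s' % GpuIndex):
            # ... build the tower as above ...
            tf.get_variable_scope().reuse_variables()  # later towers reuse the first tower's weights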
Example #11
if __name__ == "__main__":
    import os
    import config
    import ModelBuilder
    from Attributes import AttributeBuilder

    config.sys_utils.set_pp_on()

    # specify file path to model
    file_path = os.path.join(config.specs_path, 'EM', 'S_EMC_PRESS_CND.puml')

    # create attribute builder instance for solving attributes
    #abuilder = AttributeBuilder.create_attribute_builder(server_ip='127.0.0.1', server_port=5489)
    abuilder = AttributeBuilder.create_attribute_builder(server_ip='10.0.1.200', server_port=5489)
    # build StateDiagram instance
    diagram = ModelBuilder.build_state_diagram(file_path, attribute_builder=abuilder, preprocess=True)

    # generate test cases from model
    test_gen = TestCaseGenerator(diagram)
    test_gen.generate_test_cases()

    # verify paths manually (for now)
    print "Drawing possible diagram paths...",
    # test_gen.draw_test_paths()
    print "complete."

    print "State diagram complexity: " + str(test_gen.calculate_complexity())
    print "Total test cases: ", len(test_gen.test_cases.keys())

    # generate drawing of flattened graph - will work on getting better syntax
    # test_gen.draw_solved_graph()
Example #12
def TrainDNN(setupClient):
    (X_train, y_train), (X_test,
                         y_test), (w_train,
                                   w_test), (ix_train,
                                             ix_test) = LoadData(setupClient)

    n_dim = X_train.shape[1]
    modelpath = setupClient.ModelSavePath

    Nsig = (y_train == 1).sum()
    Nbkg = (y_train != 1).sum()

    # TODO: Check this is working or still needed
    if (Nsig != Nbkg) and setupClient.useEqualSizeSandB:
        print('You asked for equal numbers of signal and background events, '
              'but the counts are not equal:')
        print(Nsig, Nbkg)
        quit()

    print(Fore.BLUE + "--------------------------")
    print(Back.BLUE + "       TRAINING...!       ")
    print(Fore.BLUE + "--------------------------")
    print("Number of input variables : ", X_train.shape[1])

    from collections import Counter
    cls_ytrain_count = Counter(y_train)
    Nclass = len(cls_ytrain_count)

    lossFunc = 'binary_crossentropy'
    if setupClient.runMode == 'multi':
        print(Fore.GREEN + 'Number of events per class in Train Sample:')
        for channel in channelDic:
            print('{:<15}{:<15}'.format(channel,
                                        cls_ytrain_count[channelDic[channel]]))

        lossFunc = 'sparse_categorical_crossentropy'
        model = ModelBuilder.BuildDNNMulti(setupClient, Nclass, n_dim,
                                           setupClient.Params['Width'],
                                           setupClient.Params['Depth'])
    else:
        print(Fore.GREEN + 'Number of events per class in Train Sample:')
        print('{:<15}{:<15}'.format('Background', cls_ytrain_count[0]))
        print('{:<15}{:<15}'.format('Signal', cls_ytrain_count[1]))
        model = ModelBuilder.BuildDNN(setupClient, n_dim,
                                      setupClient.Params['Width'],
                                      setupClient.Params['Depth'])

    model.compile(loss=lossFunc,
                  optimizer=setupClient.Params['Optimizer'],
                  metrics=['accuracy'])
    K.set_value(model.optimizer.lr, setupClient.Params['LearningRate'])
    # model.summary()
    print(model.get_config())
    # print (model.optimizer.__class__.__name__)
    # print (K.get_value(model.optimizer.lr))

    callbacks = [
        # if the validation loss does not improve for 3 consecutive epochs, stop training
        EarlyStopping(verbose=True, patience=3, monitor='val_loss'),
        # Always make sure that we're saving the model weights with the best val loss.
        ModelCheckpoint(modelpath + '/model.h5',
                        monitor='val_loss',
                        verbose=True,
                        save_best_only=True)
    ]

    # TODO: find and handle the number of classes automatically in the flow
    #store the configuration of the training to disk
    outfile = open(modelpath + '/DNN_Setup', 'wb')
    pickle.dump(setupClient, outfile)
    outfile.close()

    if setupClient.runMode == 'multi':
        Nsig = float(cls_ytrain_count[1])
        NZjets = float(cls_ytrain_count[0])
        NDiboson = float(cls_ytrain_count[2])
        NTop = float(cls_ytrain_count[3])

        wZjets = round(Nsig / NZjets, 3)
        wDiboson = round(Nsig / NDiboson, 3)
        wTop = round(Nsig / NTop, 3)
        wsig = round(1.0, 2)

        print(Fore.GREEN + 'Weights to apply:')
        print('{:<15}{:<15}'.format('Zjets', wZjets))
        print('{:<15}{:<15}'.format('Signal', wsig))
        print('{:<15}{:<15}'.format('Diboson', wDiboson))
        print('{:<15}{:<15}'.format('Top', wTop))

        modelMetricsHistory = model.fit(
            X_train,
            y_train,
            class_weight={
                0: wZjets,
                1: wsig,  ## Signal
                2: wDiboson,
                3: wTop
            },
            epochs=setupClient.Params['Epochs'],
            batch_size=setupClient.Params['BatchSize'],
            validation_split=0.2,
            callbacks=callbacks,
            verbose=1)
    else:
        if setupClient.useEqualSizeSandB:
            modelMetricsHistory = model.fit(
                X_train,
                y_train,
                epochs=setupClient.Params['Epochs'],
                batch_size=setupClient.Params['BatchSize'],
                validation_split=0.2,
                callbacks=callbacks,
                verbose=0)

            print(modelMetricsHistory.history['val_loss'])
        else:
            print('{:<25}'.format(
                Fore.BLUE +
                'Training with class_weights because of unbalanced classes!'))
            nsignal = cls_ytrain_count[1]
            nbackground = cls_ytrain_count[0]
            print('Signal=', nsignal, 'Background=', nbackground)

            wbkg = (nsignal / nbackground)
            wsig = 1.0

            if nsignal > nbackground:
                wbkg = 1.0
                wsig = (nbackground / nsignal)
            print(Fore.GREEN + 'Weights to apply:')
            print('{:<15}{:<15}'.format('Background', round(wbkg, 3)))
            print('{:<15}{:<15}'.format('Signal', wsig))

            modelMetricsHistory = model.fit(
                X_train,
                y_train,
                class_weight={
                    0: wbkg,
                    1: wsig
                },
                epochs=setupClient.Params['Epochs'],
                batch_size=setupClient.Params['BatchSize'],
                validation_split=0.2,
                callbacks=callbacks,
                verbose=1)

    return modelMetricsHistory
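
The hand-rolled Nsig/NZjets ratios above are inverse-frequency class weights with the signal count as reference. For comparison, scikit-learn computes the same weighting (up to an overall scale) directly; a sketch, not used by the original code:

import numpy as np
from sklearn.utils.class_weight import compute_class_weight

def balanced_weights(y_train):
    classes = np.unique(y_train)
    weights = compute_class_weight('balanced', classes=classes, y=y_train)
    # Keras expects class_weight as {class index: weight}
    return {int(c): w for c, w in zip(classes, weights)}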
Example #13
    import config
    import ModelBuilder
    from Attributes import AttributeBuilder

    config.sys_utils.set_pp_on()

    # specify file path to model
    file_path = os.path.join(config.specs_path, 'EM', 'S_EMC_PRESS_CND.puml')

    # create attribute builder instance for solving attributes
    #abuilder = AttributeBuilder.create_attribute_builder(server_ip='127.0.0.1', server_port=5489)
    abuilder = AttributeBuilder.create_attribute_builder(
        server_ip='10.0.1.200', server_port=5489)
    # build StateDiagram instance
    diagram = ModelBuilder.build_state_diagram(file_path,
                                               attribute_builder=abuilder,
                                               preprocess=True)

    # generate test cases from model
    test_gen = TestCaseGenerator(diagram)
    test_gen.generate_test_cases()

    # verify paths manually (for now)
    print "Drawing possible diagram paths...",
    # test_gen.draw_test_paths()
    print "complete."

    print "State diagram complexity: " + str(test_gen.calculate_complexity())
    print "Total test cases: ", len(test_gen.test_cases.keys())

    # generate drawing of flattened graph - will work on getting better syntax
Example #14
def doKFold(setupClient):
    print(Fore.BLUE + "--------------------------")
    print(Back.BLUE + " K-Fold Cross Validation  ")
    print(Fore.BLUE + "--------------------------")

    pdtoLoad_Train = setupClient.PDPath + setupClient.MixPD_TrainTestTag + '_Train.pkl'
    pdtoLoad_Test = setupClient.PDPath + setupClient.MixPD_TrainTestTag + '_Test.pkl'

    print('{:<45}{:<25}'.format("Train sample", Fore.GREEN + pdtoLoad_Train))
    print('{:<45}{:<25}'.format("Test sample", Fore.GREEN + pdtoLoad_Test))
    if not os.path.isfile(pdtoLoad_Train):
        print("PD file", pdtoLoad_Train, " not found!")
        quit()
    if not os.path.isfile(pdtoLoad_Test):
        print("PD file", pdtoLoad_Test, " not found!")
        quit()

    df_Train = pd.read_pickle(pdtoLoad_Train)
    df_Test = pd.read_pickle(pdtoLoad_Test)

    ## Add them together:
    df_tot = pd.concat([df_Train, df_Test], ignore_index=True)

    VariablesSet = setupClient.InputDNNVariables[setupClient.VarSet]
    print('{:<45}{:<25}'.format(
        "Variable set",
        Fore.GREEN + str(setupClient.VarSet) + ' ' + str(VariablesSet)))

    X = df_tot[VariablesSet].values  # .as_matrix() was removed in newer pandas
    scaler = StandardScaler()
    le = LabelEncoder()
    Y = le.fit_transform(df_tot['isSignal'])
    kfold = StratifiedKFold(n_splits=5, shuffle=False, random_state=None)
    cvscores = []
    ii = 0
    tprs = []
    aucs = []
    mean_fpr = np.linspace(0, 1, 100)

    for train, test in kfold.split(X, Y):
        print('Doing Fold', ii)
        cls_ytrain_count = Counter(Y[train])
        print(Fore.GREEN + 'Number of events per class in Train Sample:')
        print('{:<15}{:<15}'.format('Background', cls_ytrain_count[0]))
        print('{:<15}{:<15}'.format('Signal', cls_ytrain_count[1]))

        X[train] = scaler.fit_transform(X[train])
        # scale the test fold with the training-fold statistics; calling
        # fit_transform on the test fold would leak information into the evaluation
        X[test] = scaler.transform(X[test])

        n_dim = X[train].shape[1]

        lossFunc = 'binary_crossentropy'
        model = ModelBuilder.BuildDNN(setupClient, n_dim,
                                      setupClient.Params['Width'],
                                      setupClient.Params['Depth'])
        if setupClient.runMode == 'multi':
            lossFunc = 'sparse_categorical_crossentropy'
            Nclass = len(cls_ytrain_count)  # number of classes (was undefined in this scope)
            model = ModelBuilder.BuildDNNMulti(setupClient, Nclass, n_dim,
                                               setupClient.Params['Width'],
                                               setupClient.Params['Depth'])

        model.compile(loss=lossFunc,
                      optimizer=setupClient.Params['Optimizer'],
                      metrics=['accuracy'])
        K.set_value(model.optimizer.lr, setupClient.Params['LearningRate'])

        callbacks = [
            EarlyStopping(verbose=True, patience=5, monitor='val_loss'),
            ModelCheckpoint(setupClient.ModelSavePath + '/model_kfold' +
                            str(ii) + '.h5',
                            monitor='val_loss',
                            verbose=True,
                            save_best_only=True)
        ]

        wbkg = (cls_ytrain_count[1] / cls_ytrain_count[0])
        wsig = 1.0
        print(Fore.GREEN + 'Weights to apply:')
        print('{:<15}{:<15}'.format('Background', round(wbkg, 3)))
        print('{:<15}{:<15}'.format('Signal', wsig))

        kf_history = model.fit(X[train],
                               Y[train],
                               class_weight={
                                   0: wbkg,
                                   1: wsig
                               },
                               epochs=setupClient.Params['Epochs'],
                               batch_size=setupClient.Params['BatchSize'],
                               validation_split=0.2,
                               callbacks=callbacks,
                               verbose=1)

        kf_scores = model.evaluate(X[test], Y[test], verbose=1)
        print("%s: %.3f%%" % (model.metrics_names[1], kf_scores[1] * 100))
        cvscores.append(kf_scores[1] * 100)

        kf_yhat_test = model.predict(X[test])

        # Get 'Receiver operating characteristic' (ROC)
        fpr, tpr, thresholds = roc_curve(Y[test], kf_yhat_test)
        tprs.append(np.interp(mean_fpr, fpr, tpr))
        tprs[-1][0] = 0.0
        roc_auc = auc(fpr, tpr)
        aucs.append(roc_auc)
        plt.plot(fpr,
                 tpr,
                 lw=1,
                 alpha=0.3,
                 label='ROC fold %d (AUC = %0.3f)' % (ii, roc_auc))

        np.save(
            os.path.join(setupClient.ModelSavePath,
                         'cv_metrics_fold' + str(ii) + '.npy'), kf_scores)
        np.save(
            os.path.join(setupClient.ModelSavePath,
                         'cv_thresholds_fold' + str(ii) + '.npy'), thresholds)
        np.save(
            os.path.join(setupClient.ModelSavePath,
                         'cv_tpr_fold' + str(ii) + '.npy'), tpr)
        np.save(
            os.path.join(setupClient.ModelSavePath,
                         'cv_fpr_fold' + str(ii) + '.npy'), fpr)
        ii += 1

    plt.plot([0, 1], [0, 1],
             linestyle='--',
             lw=2,
             color='r',
             label='Luck',
             alpha=.8)
    mean_tpr = np.mean(tprs, axis=0)
    mean_tpr[-1] = 1.0
    mean_auc = auc(mean_fpr, mean_tpr)
    std_auc = np.std(aucs)
    plt.plot(mean_fpr,
             mean_tpr,
             color='b',
             label=r'Mean ROC (AUC = %0.3f $\pm$ %0.3f)' % (mean_auc, std_auc),
             lw=1,
             alpha=.7)

    std_tpr = np.std(tprs, axis=0)
    tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
    tprs_lower = np.maximum(mean_tpr - std_tpr, 0)
    plt.fill_between(mean_fpr,
                     tprs_lower,
                     tprs_upper,
                     color='grey',
                     alpha=.2,
                     label=r'$\pm$ 1 std. dev.')

    plt.xlim([-0.05, 1.05])
    plt.ylim([-0.05, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    # plt.title('Receiver operating characteristic example')
    plt.xticks(np.arange(0.0, 1.1, 0.1))
    plt.yticks(np.arange(0.0, 1.1, 0.1))
    plt.title('ROC curves for Signal vs Background')
    plt.legend(loc="lower right", fontsize='x-small')
    plt.savefig(setupClient.ModelSavePath + "/KFold_ROC.png")
    plt.clf()
Example #15
    def solve(self):
        model_builder = ModelBuilder.ModelBuilder(self.doctors, self.hospitals, self.shifts, self.shift_days,
                                                  self.placements, self.nightshifts, self.total_work_time)
        return model_builder.solve()
Example #16
# defining the folders path train and test
TRAINING_DATASET_FOLDER_NAME = '3_preprocessed_1_dataset train'

X_train, Y_train, _ = GetData(TRAINING_DATASET_FOLDER_NAME, limit_value=1)
Y_train = np_utils.to_categorical(Y_train, 2)
X_train = X_train.astype('float32')
X_train /= np.max(X_train)

width = 80
height = 80
depth = 2
num_classes = 2

# load the model architecture from file
a = read_model("models/model01.txt")
modelObject = ModelBuilder.ModelBuilder(a, (height, width, depth))
model = modelObject.model

model.compile(
    loss='categorical_crossentropy',  # using the cross-entropy loss function
    optimizer='adam',  # using the Adam optimiser
    metrics=['accuracy'])  # reporting the accuracy

# train the model
model.fit(X_train,
          Y_train,
          batch_size=128,
          epochs=1,
          verbose=1,
          validation_split=0.2)
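
The script above trains for one epoch but never persists the result; a likely follow-up (an assumption, not in the original) is a plain Keras save:

model.save('trained_model/model01.h5')  # architecture + weights, reloadable with load_model()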
Example #17
File: Main.py  Project: Juncai/CS6140
def main(config_path):
    '''
    Main script for classifier building and testing
    '''
    config = loader.load_config(config_path)
    training_data = None
    testing_data = None
    # load training and testing data from files, normalize if necessary
    if c.TRAINING_D in config.keys():
        training_data = loader.load_dataset(config[c.TRAINING_D])
    if c.TESTING_D in config.keys():
        testing_data = loader.load_dataset(config[c.TESTING_D])
    if c.NORM_METHOD in config.keys():
        method = None
        if config[c.NORM_METHOD] == c.SHIFT_SCALE:
            method = Preprocess.shift_and_scale
        elif config[c.NORM_METHOD] == c.ZERO_MEAN_UNIT_VAR:
            method = Preprocess.zero_mean_unit_var
        if c.TESTING_D in config.keys():
            Preprocess.normalize_features_all(method, training_data[0], testing_data[0])
        else:
            Preprocess.normalize_features_all(method, training_data[0])

    # generate thresholds file if needed
    if c.THRESHS in config.keys() and not os.path.isfile(config[c.THRESHS]):
        Preprocess.generate_thresholds(training_data[0], config[c.THRESHS])

    # get path to store models and output results
    model_path = config[c.MODEL_PATH]
    output_path = config[c.OUTPUT_PATH]

    # use different validation method base on the config
    match = re.match(c.K_FOLD_RE, config[c.VALID_METHOD])
    if match:
        # perform k-fold validation
        k = int(match.group(c.K_GROUP))
        training_errs = []
        testing_errs = []
        for i in range(k):
            (tr_data, te_data) = Preprocess.prepare_k_fold_data(training_data, k, i + 1)
            model = builder.build_model(tr_data, config)
            training_errs.append(model.test(tr_data[0], tr_data[1], Utilities.get_test_method(config)))
            testing_errs.append(model.test(te_data[0], te_data[1], Utilities.get_test_method(config)))
        mean_training_err = np.mean(training_errs)
        mean_testing_err = np.mean(testing_errs)
        print str(k) + '-fold validation done. Training errors are:'
        print training_errs
        print 'Mean training error is:'
        print mean_training_err
        print 'Testing errors are:'
        print testing_errs
        print 'Mean testing error is:'
        print mean_testing_err
        config['TrainingErrs'] = str(training_errs)
        config['MeanTrainingErr'] = str(mean_training_err)
        config['TestingErrs'] = str(testing_errs)
        config['MeanTestingErr'] = str(mean_testing_err)
    elif config[c.VALID_METHOD] == c.HAS_TESTING_DATA:
        # perform testing with given testing dataset
        model = builder.build_model(training_data, config)
        training_err = model.test(training_data[0], training_data[1], Utilities.get_test_method(config))
        testing_err = model.test(testing_data[0], testing_data[1], Utilities.get_test_method(config))
        print 'Error for training data is:'
        print training_err
        print 'Error for testing data is:'
        print testing_err
        config['TrainingErr'] = str(training_err)
        config['TestingErr'] = str(testing_err)

    # Log the err
    f = open(output_path, 'w+')
    f.write(str(config))
    f.close()
    return
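
For reference, a rough sketch of the split that Preprocess.prepare_k_fold_data presumably performs, inferred from the call site (full training set, k, 1-based fold index); the interleaved assignment is an illustrative choice, assuming numpy arrays:

import numpy as np

def prepare_k_fold_sketch(features, labels, k, fold):
    """Hold out fold `fold` (1-based) of `k` as test data; the rest trains."""
    idx = np.arange(len(labels))
    test_mask = (idx % k) == (fold - 1)
    tr_data = (features[~test_mask], labels[~test_mask])
    te_data = (features[test_mask], labels[test_mask])
    return tr_data, te_data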