Example #1
    def generateHP(self):
        activations = [
            "elu", "relu", "selu", "tanh", "sigmoid", "exponential", "linear"
        ]
        print("Generating Hyperparameters \n")

        hp = Hyperparameter()
        hp.add("fc1_neurons", random.randint(5, 50))
        hp.add("fc2_neurons", random.randint(5, 50))

        hp.add("fc1_activ", activations[random.randint(0,
                                                       len(activations) - 1)])
        hp.add("fc2_activ", activations[random.randint(0,
                                                       len(activations) - 1)])

        hp.add("learning_rate", random.uniform(0.0001, 0.01))

        print("Finished generating hyperparameters \n")
        print(hp)
        return hp
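The snippet above only relies on a small key-value container plus Python's random module. Below is a minimal sketch of what such a Hyperparameter container could look like; the add/__str__ interface is inferred from the calls in the example, while the internal dictionary is an assumption, not the actual implementation.

import random  # needed by generateHP above


class Hyperparameter:
    """Minimal key-value store for hyperparameters (hypothetical sketch)."""

    def __init__(self):
        self._values = {}

    def add(self, name, value):
        # Register a named hyperparameter value.
        self._values[name] = value

    def get(self, name):
        return self._values[name]

    def __str__(self):
        return "\n".join(f"{key}: {value}" for key, value in self._values.items())

With such a container, generateHP works as written. Note that activations[random.randint(0, len(activations) - 1)] picks a uniformly random entry and could be shortened to random.choice(activations).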
Example #2
 def prediction(self, path_test, path_train, path_bestModel):
     self.hyperpara = Hyperparameter()
     eval_test = Eval()
     ner = NER()
     reader = Reader()
     traininsts = reader.readfiles(path_train)
     testinsts = reader.readfiles(path_test)
     ner.create_alphabet(traininsts)
     self.hyperpara.tag_size = ner.hyperpara.tag_size
     self.hyperpara.embedding_num = ner.hyperpara.embedding_num
     self.model = BiLstm(self.hyperpara)  # BiLSTM model
     if self.hyperpara.loadModel == 1 and\
        self.hyperpara.load_pattern == 1:
         try:
             self.model.load_state_dict(torch.load(path_bestModel))
         except Exception:
             print('Model parameters do not match')
     elif self.hyperpara.loadModel == 1 and\
          self.hyperpara.load_pattern == 0:
         try:
             self.model = torch.load(path_bestModel)
         except Exception:
             print('Model parameters do not match')
     testExamples = ner.change(testinsts)
     for idx in range(len(testExamples)):
         test_list = []
         test_list.append(testExamples[idx])
         x, y = ner.variable(test_list)
         lstm_feats = self.model(x)
         predict = ner.getMaxIndex(lstm_feats)
         predictLabels = []
         for idy in range(len(predict)):
             predictLabels.append(ner.label_AlphaBet.list[predict[idy]])
         testinsts[idx].evalPRF(predictLabels, eval_test)
         a, e = testinsts[idx].extractA_and_E()
         self.Attr.append(a)
         self.Eval.append(e)
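Here the Hyperparameter object is used as a plain attribute-style configuration rather than a key-value store. The fields below are the ones read or written by prediction(); the default values are assumptions for illustration only.

class Hyperparameter:
    """Attribute-style configuration assumed by prediction() (sketch only)."""

    def __init__(self):
        self.tag_size = 0        # copied from ner.hyperpara after create_alphabet()
        self.embedding_num = 0   # copied from ner.hyperpara (embedding vocabulary size)
        self.loadModel = 1       # 1 -> restore a saved model before predicting
        self.load_pattern = 1    # 1 -> load a state_dict, 0 -> load a pickled model

The two loading branches correspond to the two common ways of saving a PyTorch model: torch.save(model.state_dict(), path), restored with load_state_dict(), versus torch.save(model, path), restored directly with torch.load().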
Example #3
 def __init__(self):
     self.word_AlphaBet = AlphaBet()
     self.tag_AlphaBet = AlphaBet()
     self.hyperpara = Hyperparameter()
Example #4
def run(model_name, train_file, val_file, num_classes, filename, dropout, input_shape_arg=(224, 224, 3)):
    """
    Fit the datasets and run the training process once per learning-rate /
    batch-size combination for the selected architecture.

    Arguments:
    model_name      -- str, one of "resnet50", "resnet18", "vgg19", "vgg16"
    train_file      -- h5 file, fitted to the model
    val_file        -- h5 file, used for validation
    num_classes     -- int, total number of classes
    filename        -- str, used in the title and file name of the saved accuracy plot
    dropout         -- float, dropout rate in the range 0-1
    input_shape_arg -- tuple, image shape (W, H, C)

    The epoch count, learning rates, batch sizes and optimizer are fixed
    inside the function. It saves a validation-accuracy plot and prints
    summary statistics for each run.
    """
    
    # preprocessing data
    X_train, Y_train, X_val, Y_val = dataset_preprocess(num_classes, train_file, val_file)

    _epoch = 80
    lr_value_array = [1e-3, 1e-4]
    if model_name == "resnet50":
        batch_size_array = [8, 16, 32]
        LABEL = ["e-3(8)", "e-3(16)", "e-3(32)", "e-4(8)", "e-4(16)", "e-4(32)"]
    elif model_name == "resnet18":
        batch_size_array = [16, 32, 64]
        LABEL = ["e-3(16)", "e-3(32)", "e-3(64)", "e-4(16)", "e-4(32)", "e-4(64)"]

    HP = []
    for lr in lr_value_array:
        for bs in batch_size_array:
            hp = Hyperparameter("adam", lr, bs, dropout)
            HP.append(hp)

    HISTORY = []
    ROC = []
    for hp in HP:
        K.clear_session()
        model = None
        # compile model
        if model_name == "resnet50":
            print("resnet50")
            model = ResNet50(input_shape=input_shape_arg, classes=int(num_classes), dropout_value=hp.dropout)
            model.compile(optimizer=hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy', metrics.AUC(), metrics.Precision(), metrics.Recall()])
        elif model_name == "resnet18":
            print("resnet18")
            model = ResNet18(input_shape=input_shape_arg, classes=int(num_classes), dropout_value=hp.dropout)
            model.compile(optimizer=hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy', metrics.AUC(), metrics.Precision(), metrics.Recall()])
        elif model_name == "vgg19":
            print("VGG19")
            # configure model input    
            base_model = applications.vgg19.VGG19(weights= None, include_top=False, input_shape= input_shape_arg)
            # configure model output
            x = base_model.output
            x = GlobalAveragePooling2D()(x)
            x = Dropout(hp.dropout)(x)
            out = Dense(int(num_classes), activation= 'softmax')(x)
            # combine model then compile
            model = Model(inputs = base_model.input, outputs = out)
            model.compile(optimizer= hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy'])
        elif model_name == "vgg16":
            print("VGG16")
            # configure model input
            base_model = applications.vgg16.VGG16(weights= None, include_top=False, input_shape= input_shape_arg)
            # configure model output
            x = base_model.output
            x = GlobalAveragePooling2D()(x)
            x = Dropout(hp.dropout)(x)
            out = Dense(int(num_classes), activation= 'softmax')(x)
            # combine model then compile
            model = Model(inputs = base_model.input, outputs = out)
            model.compile(optimizer= hp.get_optimizer(), loss='categorical_crossentropy', metrics=['accuracy'])

        # optimizer == adam
        # train the model
        history = model.fit(X_train, Y_train, epochs = _epoch, batch_size = hp.batch_size, 
                validation_data=(X_val, Y_val), 
                shuffle=True)
        HISTORY.append(history)
        del model

        print(f"DONE for: {hp.optim}-{hp.lr_value}-{hp.batch_size}-{hp.dropout}")

    
    plt.figure(1)
    mpl.style.use('seaborn')
    i = 0
    for history in HISTORY:
        plt.plot(history.history["val_accuracy"], f"C{i}", label=LABEL[i])
        i = i+1
    plt.ylabel('val_acc')
    plt.xlabel('epoch')
    plt.title(f"Accuracy {filename}")
    plt.legend()
    plt.savefig(f"ACC-{filename}.png")

    print("VAL ACC:")
    i = 0
    for history in HISTORY:
        print(LABEL[i])
        i = i + 1
        print("auc: {}" .format(statistics.mean( history.history["val_auc_1"] )) )
        print("recall: {}" .format(statistics.mean( history.history["val_recall_1"] )) )
        print("Prec: {}" .format(statistics.mean( history.history["val_precision_1"] )) )
        print("MEAN: {}" .format(statistics.mean( history.history["val_accuracy"] )) )
        print("STD: {}" .format(statistics.pstdev( history.history['val_accuracy'] )) )
Example #5
 def __init__(self):
     self.feature_alphabet = feature_alphabet()
     self.hyperparameter_1 = Hyperparameter()
     self.bias = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
     self.average_bias = np.array([0.0, 0.0, 0.0, 0.0, 0.0])
Example #6
 def __init__(self):
     self.feature_alphabet = feature_alphabet()
     self.all_inst = all_inst()
     self.hyperparameter_1 = Hyperparameter()
Example #7
 def __init__(self):
     self.word_state = collections.OrderedDict()
     self.word_AlphaBet = AlphaBet()
     self.label_AlphaBet = AlphaBet()
     self.hyperpara = Hyperparameter()
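Several of these examples (2, 3 and 7) also depend on an AlphaBet class; example 2 indexes into label_AlphaBet.list, so it is at least an index-to-string mapping. A minimal sketch consistent with that usage follows; the add method and the dict attribute are assumptions.

class AlphaBet:
    """Bidirectional string<->index mapping used by the NER examples (sketch)."""

    def __init__(self):
        self.list = []   # index -> string, as used by label_AlphaBet.list[idx]
        self.dict = {}    # string -> index (assumed companion mapping)

    def add(self, item):
        # Assign the next free index the first time an item is seen.
        if item not in self.dict:
            self.dict[item] = len(self.list)
            self.list.append(item)
        return self.dict[item]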