Example #1
import os

import numpy as np
import pandas as pd
from joblib import dump
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.layers import LSTM, Dense
from tensorflow.keras.metrics import Accuracy
from tensorflow.keras.models import Sequential

# `Diseases` and `db` (Flask-SQLAlchemy) are defined elsewhere in the
# application; `split_sequence` is sketched after this example.


def train_LSTM():
    list_diseases = Diseases.query.order_by(Diseases.Disease).all()
    for i in list_diseases:
        # Table names cannot be bound as SQL parameters; this string
        # interpolation is only safe because i.Disease comes from the
        # application's own Diseases table.
        df = pd.read_sql(
            """SELECT EntryTime, sum(NoOfCases) as Count FROM """ + i.Disease +
            """ GROUP BY EntryTime ORDER BY EntryTime;""", db.engine)
        # Scale the case counts to [0, 1] before training.
        raw_seq = np.ravel(df.iloc[:, 1:2].values)
        raw_seq = raw_seq.reshape(len(raw_seq), 1)
        scaler = MinMaxScaler(feature_range=(0, 1))
        raw_seq = scaler.fit_transform(raw_seq)
        # Build supervised (X, y) windows of 7 time steps each.
        n_steps = 7
        X, y = split_sequence(raw_seq, n_steps)
        n_features = 1
        X = X.reshape((X.shape[0], X.shape[1], n_features))
        model = Sequential()
        model.add(
            LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
        model.add(Dense(1))
        # Note: keras.metrics.Accuracy() checks exact equality, which is not
        # meaningful for this regression task; kept as in the original.
        model.compile(optimizer='adam', loss='mse', metrics=[Accuracy()])
        model.fit(X, y, epochs=200, verbose=0)
        # Persist the model and its scaler per disease.
        fd = os.getcwd() + "/MLModels/LSTM_" + i.Disease + ".tf"
        print(fd)
        model.save(fd)
        fd = os.getcwd() + "/MLModels/LSTM_" + i.Disease + "_Scaler.gz"
        print(fd)
        dump(scaler, fd)
        print("Training complete")
Example #2
from sklearn import metrics
from tensorflow.keras.metrics import Accuracy


def print_metrics(y_true, y_predicted):
    # keras.metrics.Accuracy() counts elementwise exact matches, which for
    # binary indicator arrays equals the Hamming score reported below.
    keras_acc = Accuracy()
    keras_acc.update_state(y_true, y_predicted)
    print("Hamming Score ('Accuracy' by Keras):\t%.3f" %
          (100 * keras_acc.result().numpy()))
    print("Hamming Score (= 1 - Hamming Loss):\t%.3f" %
          (100 * (1 - metrics.hamming_loss(y_true, y_predicted))))
    print("Exact match ratio (Subset Accuracy):\t%.3f" %
          (100 * metrics.accuracy_score(y_true, y_predicted)))
    print("F1-Score Micro Averaged:\t\t%.3f" %
          (100 * metrics.f1_score(y_true, y_predicted, average='micro')))
    print("F1-Score Macro Averaged:\t\t%.3f" %
          (100 * metrics.f1_score(y_true, y_predicted, average='macro')))
    print("F1-Score Weighted Average:\t\t%.3f" %
          (100 * metrics.f1_score(y_true, y_predicted, average='weighted')))
    print(
        "Precision Score Micro Averaged:\t\t%.3f" %
        (100 * metrics.precision_score(y_true, y_predicted, average='micro')))
    print("Recall Score Micro Averaged:\t\t%.3f" %
          (100 * metrics.recall_score(y_true, y_predicted, average='micro')))
Example #3
import numpy as np
from imblearn.over_sampling import SMOTE
from sklearn.metrics import (accuracy_score, f1_score, precision_score,
                             recall_score)
from tensorflow.keras.layers import Dense
from tensorflow.keras.metrics import Accuracy, Recall
from tensorflow.keras.models import Sequential


def nn_sm(df, df_t):

    X_train = df.iloc[:, :16]
    y_train = df.iloc[:, 16:]

    # SMOTE oversampling of the minority class, applied after splitting so
    # the test set stays untouched. This is the data we will train on.
    sm = SMOTE(sampling_strategy='minority', random_state=42)
    Xsm_train, ysm_train = sm.fit_resample(X_train, y_train)

    n_inputs = df.iloc[:, :16].shape[1]

    nn = Sequential([
        Dense(n_inputs, input_shape=(n_inputs, ), activation='relu'),
        Dense(32, activation='relu'),
        Dense(1, activation='sigmoid')
    ])

    nn.compile(optimizer='sgd',
               loss='binary_crossentropy',
               metrics=[Accuracy(), Recall()])

    nn.fit(Xsm_train,
           ysm_train,
           validation_split=0.2,
           batch_size=100,
           epochs=50,
           shuffle=True,
           verbose=2)

    # Permute one feature column of the test set (apparently a
    # permutation-importance style check on feature 6).
    df_t.iloc[:, 6] = np.random.permutation(df_t.iloc[:, 6].values)

    # predict_classes/predict_proba were removed from Sequential in TF 2.6;
    # threshold the sigmoid output instead.
    probas = nn.predict(df_t.iloc[:, :16], verbose=0)
    predictions = (probas > 0.5).astype('int32').ravel()

    y_test = df_t.iloc[:, 16:]['y_t+2'].values

    # Metrics
    f1 = f1_score(y_test, predictions)
    acc = accuracy_score(y_test, predictions)
    recall = recall_score(y_test, predictions)
    precision = precision_score(y_test, predictions)
    #roc_auc = roc_auc_score(y_test, predictions)

    return predictions, y_test, f1, acc, recall, precision, probas
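
The roc_auc line is commented out above; a sketch computing it from the predicted probabilities rather than the hard labels (assuming the sigmoid outputs in probas):

from sklearn.metrics import roc_auc_score

roc_auc = roc_auc_score(y_test, probas.ravel())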
Example #4
    # __init__ of a classifier class (the class statement and imports are not
    # shown); IMAGES_SHAPE is defined elsewhere, and the batch_size parameter
    # is unused in this fragment.
    def __init__(self, classes_number: int, batch_size: int = 1):

        self.__model = Sequential([
            Conv2D(10,
                   kernel_size=(4, 4),
                   strides=(1, 1),
                   data_format='channels_last',
                   input_shape=IMAGES_SHAPE),
            Activation('relu'),
            Conv2D(20,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   data_format='channels_last'),
            Activation('relu'),
            Conv2D(40,
                   kernel_size=(2, 2),
                   strides=(2, 2),
                   data_format='channels_last'),
            Activation('relu'),
            Flatten(data_format='channels_last'),
            Dense(100),
            Activation('relu'),
            Dense(classes_number),
            Activation('softmax'),
        ])

        # MSE loss plus keras.metrics.Accuracy() (exact-equality matching) is
        # an unusual pairing for a softmax classifier; kept as in the source.
        self.__model.compile(optimizer=SGD(), loss=MSE, metrics=[
            Accuracy(),
        ])
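
For a softmax classifier, categorical cross-entropy with CategoricalAccuracy is the more common pairing; a one-line alternative as a sketch, not the author's code (model stands in for self.__model, labels assumed one-hot):

from tensorflow.keras.metrics import CategoricalAccuracy

model.compile(optimizer=SGD(),
              loss='categorical_crossentropy',
              metrics=[CategoricalAccuracy()])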
Example #5
def get_unet_128(input_shape=(128, 128, 3), num_classes=1):
    # Force eager execution for debugging; the experimental_* alias that the
    # original also called is deprecated and redundant.
    tf.config.run_functions_eagerly(True)

    input_size = input_shape[0]
    nClasses = 9  # 9 keypoints
    input_height, input_width, sigma = 128, 128, 5  # sigma is unused below

    inputs = Input(shape=input_shape)
    # 128

    down1 = Conv2D(32, (3, 3), padding='same')(inputs)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1 = Conv2D(32, (3, 3), padding='same')(down1)
    down1 = BatchNormalization()(down1)
    down1 = Activation('relu')(down1)
    down1_pool = MaxPooling2D((2, 2), strides=(2, 2))(down1)
    # 64

    down2 = Conv2D(64, (3, 3), padding='same')(down1_pool)
    down2 = BatchNormalization()(down2)
    down2 = Activation('relu')(down2)
    down2 = Conv2D(64, (3, 3), padding='same')(down2)
    down2 = BatchNormalization()(down2)
    down2 = Activation('relu')(down2)
    down2_pool = MaxPooling2D((2, 2), strides=(2, 2))(down2)
    # 32

    down3 = Conv2D(128, (3, 3), padding='same')(down2_pool)
    down3 = BatchNormalization()(down3)
    down3 = Activation('relu')(down3)
    down3 = Conv2D(128, (3, 3), padding='same')(down3)
    down3 = BatchNormalization()(down3)
    down3 = Activation('relu')(down3)
    down3_pool = MaxPooling2D((2, 2), strides=(2, 2))(down3)
    # 16

    down4 = Conv2D(256, (3, 3), padding='same')(down3_pool)
    down4 = BatchNormalization()(down4)
    down4 = Activation('relu')(down4)
    down4 = Conv2D(256, (3, 3), padding='same')(down4)
    down4 = BatchNormalization()(down4)
    down4 = Activation('relu')(down4)
    down4_pool = MaxPooling2D((2, 2), strides=(2, 2))(down4)
    # 8

    center = Conv2D(512, (3, 3), padding='same')(down4_pool)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    center = Conv2D(512, (3, 3), padding='same')(center)
    center = BatchNormalization()(center)
    center = Activation('relu')(center)
    # center

    up4 = UpSampling2D((2, 2))(center)
    up4 = concatenate([down4, up4], axis=3)
    up4 = Conv2D(256, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(256, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    up4 = Conv2D(256, (3, 3), padding='same')(up4)
    up4 = BatchNormalization()(up4)
    up4 = Activation('relu')(up4)
    # 16

    up3 = UpSampling2D((2, 2))(up4)
    up3 = concatenate([down3, up3], axis=3)
    up3 = Conv2D(128, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(128, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    up3 = Conv2D(128, (3, 3), padding='same')(up3)
    up3 = BatchNormalization()(up3)
    up3 = Activation('relu')(up3)
    # 32

    up2 = UpSampling2D((2, 2))(up3)
    up2 = concatenate([down2, up2], axis=3)
    up2 = Conv2D(64, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(64, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    up2 = Conv2D(64, (3, 3), padding='same')(up2)
    up2 = BatchNormalization()(up2)
    up2 = Activation('relu')(up2)
    # 64

    up1 = UpSampling2D((2, 2))(up2)
    up1 = concatenate([down1, up1], axis=3)
    up1 = Conv2D(32, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(32, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    up1 = Conv2D(32, (3, 3), padding='same')(up1)
    up1 = BatchNormalization()(up1)
    up1 = Activation('relu')(up1)
    # 128

    output1 = Conv2D(num_classes, (1, 1), activation='sigmoid',
                     name='output1')(up1)
    lamb = Lambda(overlay)([output1, inputs])

    k1 = Conv2D(64, (3, 3),
                activation='relu',
                padding='same',
                name='block1_conv1',
                data_format="channels_last")(lamb)
    k1 = Conv2D(64, (3, 3),
                activation='relu',
                padding='same',
                name='block1_conv2',
                data_format="channels_last")(k1)
    block1 = MaxPooling2D((2, 2),
                          strides=(2, 2),
                          name='block1_pool',
                          data_format="channels_last")(k1)

    # Encoder Block 2
    k2 = Conv2D(128, (3, 3),
                activation='relu',
                padding='same',
                name='block2_conv1',
                data_format="channels_last")(block1)
    k2 = Conv2D(128, (3, 3),
                activation='relu',
                padding='same',
                name='block2_conv2',
                data_format="channels_last")(k2)
    k2 = MaxPooling2D((2, 2),
                      strides=(2, 2),
                      name='block2_pool',
                      data_format="channels_last")(k2)

    # bottleneck
    k3 = (Conv2D(32 * 5, (int(input_height / 4), int(input_width / 4)),
                 activation='relu',
                 padding='same',
                 name="bottleneck_1",
                 data_format="channels_last"))(k2)
    k3 = (Conv2D(32 * 5, (1, 1),
                 activation='relu',
                 padding='same',
                 name="bottleneck_2",
                 data_format="channels_last"))(k3)

    # upsampling to bring the feature map size to match block1's output
    o_block1 = Conv2DTranspose(64,
                               kernel_size=(2, 2),
                               strides=(2, 2),
                               use_bias=False,
                               name='upsample_1',
                               data_format="channels_last")(k3)
    o = Add()([o_block1, block1])
    output2 = Conv2DTranspose(nClasses,
                              kernel_size=(2, 2),
                              strides=(2, 2),
                              use_bias=False,
                              name='output2',
                              data_format="channels_last")(o)

    model = Model(inputs=inputs, outputs=[output1, output2])
    model.summary()

    # model.compile(optimizer=RMSprop(lr=0.001), loss=bce_dice_loss, metrics=[dice_loss])
    # model.compile(loss='mse', optimizer=RMSprop(lr=0.001), sample_weight_mode="temporal",
    #               metrics=[])

    model.compile(optimizer=RMSprop(learning_rate=0.001),
                  loss={
                      'output1': bce_dice_loss,
                      'output2': 'mse'
                  },
                  metrics={
                      'output1': [dice_loss, MeanIoU(2)],
                      'output2': [Accuracy(), MeanSquaredError()]
                  })

    return input_size, model


# get_unet_128()
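
get_unet_128 references bce_dice_loss, dice_loss, and overlay without defining them. A common formulation of each, given as an assumption about the missing helpers rather than the original code:

import tensorflow as tf
from tensorflow.keras.losses import binary_crossentropy


def dice_loss(y_true, y_pred, smooth=1.0):
    # 1 - Dice coefficient over the flattened masks.
    y_true_f = tf.reshape(tf.cast(y_true, tf.float32), [-1])
    y_pred_f = tf.reshape(y_pred, [-1])
    intersection = tf.reduce_sum(y_true_f * y_pred_f)
    return 1.0 - (2.0 * intersection + smooth) / (
        tf.reduce_sum(y_true_f) + tf.reduce_sum(y_pred_f) + smooth)


def bce_dice_loss(y_true, y_pred):
    return binary_crossentropy(y_true, y_pred) + dice_loss(y_true, y_pred)


def overlay(tensors):
    # Lambda(overlay) receives [output1, inputs]: mask out the input image
    # with the predicted single-channel mask (broadcast over RGB).
    mask, image = tensors
    return image * mask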
Example #6
# Fragment of a DFDC training script; ppf and dppvm are project modules
# defined elsewhere, and the numbered prints are progress markers.
import logging
from pathlib import Path

from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.metrics import Accuracy
from tensorflow.keras.optimizers import Adam

n_frames = 30
ppf.frames_per_video = n_frames
print("4")
saved_model_path = "weights-improvement-{epoch:02d}-{val_accuracy:.2f}.hdf5"
checkpoint = ModelCheckpoint(saved_model_path,
                             monitor="val_accuracy",
                             verbose=1,
                             save_best_only=True)
earlystop = EarlyStopping(monitor="val_accuracy",
                          min_delta=0.01,
                          patience=5,
                          restore_best_weights=True)
callbacks_list = [checkpoint, earlystop]
optimizer = Adam()
binloss = BinaryCrossentropy()
acc = Accuracy()
print("5")
# Run the training job
try:
    zipfiles = dppvm.extract_zips(dppvm.zip_down_dir)
    print("download succeeded")  # original messages were in Spanish
except Exception:
    print("download failed")
DATA = Path("download")
DEST = dppvm.DEST
print("6")
logging.basicConfig(filename='extract.log', level=logging.INFO)
zipfiles = sorted(list(DATA.glob('dfdc_train_part_*.zip')),
                  key=lambda x: x.stem)
# Extract the zip files
print("7")
Example #7
    # The opening of this snippet was truncated; train_generator is
    # reconstructed here from the parallel test_generator call below.
    train_generator = CocoStyleDataGenerator(
        images_dir=DATASETS['coco']['train']['images'],
        labels_dir=DATASETS['coco']['train']['labels'],
        names_path=DATASETS['coco']['names']
    )
    test_generator = CocoStyleDataGenerator(
        images_dir=DATASETS['coco']['test']['images'],
        labels_dir=DATASETS['coco']['test']['labels'],
        names_path=DATASETS['coco']['names']
    )

    model = Sequential()
    model.add(Conv2D(
        filters=1024,
        kernel_size=(3, 3),
        activation='relu'
    ))
    # A Flatten layer is needed here for the Reshape below to be valid;
    # it is missing in the original snippet.
    model.add(Flatten())
    model.add(Dense(4096))
    model.add(Dense(100 * 84))
    model.add(Reshape(target_shape=(100, 84)))
    model.add(Activation('softmax'))

    model.compile(
        optimizer=SGD(),
        loss=MSE,
        metrics=[Accuracy(), ]
    )

    # fit_generator is deprecated; Model.fit accepts generators directly.
    model.fit(
        train_generator,
        validation_data=test_generator
    )
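
CocoStyleDataGenerator is not defined in the snippet; a minimal keras.utils.Sequence skeleton illustrating the interface it appears to implement (names, shapes, and the random data are all assumptions):

import numpy as np
from tensorflow.keras.utils import Sequence


class CocoStyleDataGenerator(Sequence):
    def __init__(self, images_dir, labels_dir, names_path, batch_size=8):
        self.images_dir = images_dir
        self.labels_dir = labels_dir
        self.names_path = names_path
        self.batch_size = batch_size

    def __len__(self):
        # Number of batches per epoch; fixed here for illustration.
        return 10

    def __getitem__(self, idx):
        # Placeholder batch: random images and (100, 84) targets matching
        # the model's Reshape layer above.
        x = np.random.rand(self.batch_size, 128, 128, 3).astype('float32')
        y = np.random.rand(self.batch_size, 100, 84).astype('float32')
        return x, y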
Example #8
    # __init__ of a class deriving from ObjectDetectionNet (the class
    # statement and imports are not shown).
    def __init__(self):
        self._model = Sequential()

        # 1st block of the scheme
        self._model.add(ZeroPadding2D((3, 3)))
        self._model.add(
            Conv2D(filters=64,
                   kernel_size=(7, 7),
                   strides=(2, 2),
                   activation='relu'))
        self._model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

        # 2nd block of the scheme
        self._model.add(ZeroPadding2D((1, 1)))
        self._model.add(
            Conv2D(filters=192, kernel_size=(3, 3), activation='relu'))
        self._model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

        # 3rd block of the scheme
        self._model.add(
            Conv2D(filters=128, kernel_size=(1, 1), activation='relu'))

        self._model.add(ZeroPadding2D((1, 1)))
        self._model.add(
            Conv2D(filters=256, kernel_size=(3, 3), activation='relu'))
        self._model.add(
            Conv2D(filters=256, kernel_size=(1, 1), activation='relu'))

        self._model.add(ZeroPadding2D((1, 1)))
        self._model.add(
            Conv2D(filters=512, kernel_size=(3, 3), activation='relu'))
        self._model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

        # 4th block of the scheme
        for _ in range(4):
            self._model.add(
                Conv2D(filters=256, kernel_size=(1, 1), activation='relu'))
            self._model.add(ZeroPadding2D((1, 1)))
            self._model.add(
                Conv2D(filters=512, kernel_size=(3, 3), activation='relu'))

        self._model.add(
            Conv2D(filters=512, kernel_size=(1, 1), activation='relu'))

        self._model.add(ZeroPadding2D((1, 1)))
        self._model.add(
            Conv2D(filters=1024, kernel_size=(3, 3), activation='relu'))

        self._model.add(MaxPool2D(pool_size=(2, 2), strides=(2, 2)))

        # 5th block of the scheme
        for _ in range(2):
            self._model.add(
                Conv2D(filters=512, kernel_size=(1, 1), activation='relu'))
            self._model.add(ZeroPadding2D((1, 1)))
            self._model.add(
                Conv2D(filters=1024, kernel_size=(3, 3), activation='relu'))
        self._model.add(ZeroPadding2D((1, 1)))
        self._model.add(
            Conv2D(filters=1024, kernel_size=(3, 3), activation='relu'))

        self._model.add(ZeroPadding2D((1, 1)))
        self._model.add(
            Conv2D(filters=1024,
                   kernel_size=(3, 3),
                   strides=(2, 2),
                   activation='relu'))

        # 6th block of the scheme
        self._model.add(ZeroPadding2D((1, 1)))
        self._model.add(
            Conv2D(filters=1024, kernel_size=(3, 3), activation='relu'))
        self._model.add(ZeroPadding2D((1, 1)))
        self._model.add(
            Conv2D(filters=1024, kernel_size=(3, 3), activation='relu'))

        # 7th block of the scheme
        self._model.add(Flatten())
        self._model.add(Dense(4096))

        # 8th block of the scheme: a YOLOv1-style head. 30 * 7 * 7 matches a
        # 7x7 grid with 2 boxes * 5 box values + 20 class scores = 30 values
        # per cell (an inference from the shapes, not stated in the source).
        # Note the softmax normalizes over all 1470 outputs at once.
        self._model.add(Dense(30 * 7 * 7))
        self._model.add(Activation('softmax'))
        self._model.add(Reshape(target_shape=(30, 7, 7)))

        self._model.compile(optimizer=SGD(), loss=MSE, metrics=[
            Accuracy(),
        ])

        # call parent constructor
        ObjectDetectionNet.__init__(self)
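
A hypothetical shape check: with 448x448 RGB inputs (YOLOv1's input size; the fragment itself does not fix one), the convolution/pooling stack above reduces 448 to a 7x7 grid, so the final Reshape to (30, 7, 7) is consistent. DetectorNet and the direct access to _model are assumptions:

import numpy as np

net = DetectorNet()  # hypothetical name for the class defined above
dummy = np.zeros((1, 448, 448, 3), dtype='float32')
print(net._model.predict(dummy).shape)  # expected: (1, 30, 7, 7)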