def train():
    """Train a MobileNet-based 230-class classifier and save it to disk.

    Loads annotations with a 20% validation split, builds batch
    generators, fine-tunes an ImageNet-pretrained MobileNet (alpha=0.75)
    with a rebuilt softmax head, and saves the final model as HDF5.
    """
    # Renamed from `train`/`valid` so the locals don't shadow this function.
    train_ann, valid_ann = load_annotations(0.20)  # 20% held out for validation
    x_train_set, y_train_set = generate_sets(train_ann)
    x_valid_set, y_valid_set = generate_sets(valid_ann)
    train_generator = BatchGenerator(x_train_set, y_train_set, 16)
    valid_generator = BatchGenerator(x_valid_set, y_valid_set, 16)
    print(len(train_generator))
    print(len(valid_generator))

    classes = 230  # number of target categories
    base_model = MobileNet(
        include_top=False,
        weights="imagenet",
        classes=classes,  # was a duplicated literal 230
        alpha=0.75,       # width multiplier: 75% of filters per layer
    )
    # Custom classification head mirroring MobileNet's original top:
    # global pooling -> 1x1 conv -> softmax -> flatten to (classes,).
    x = base_model.output
    x = GlobalAveragePooling2D()(x)
    shape = (1, 1, int(1024 * 0.75))  # last channel count scaled by alpha
    x = Reshape(shape, name='reshape_1')(x)
    x = Dropout(1e-3, name='dropout')(x)
    x = Conv2D(classes, (1, 1), padding='same', name='conv_preds')(x)
    x = Activation('softmax', name='act_softmax')(x)
    x = GlobalAveragePooling2D()(x)
    x = Reshape((classes,), name='reshape_2')(x)
    model = Model(inputs=base_model.input, outputs=x)

    adam = Adam(lr=0.001)
    model.compile(loss='categorical_crossentropy',
                  optimizer=adam,
                  metrics=['accuracy', f_score])

    model.fit_generator(train_generator,
                        steps_per_epoch=500,
                        epochs=100,
                        verbose=1,
                        validation_data=valid_generator,
                        validation_steps=1000,
                        max_queue_size=100,
                        workers=4,
                        use_multiprocessing=True)

    model.save("../models/mobilenet.h5", overwrite=True)
# Esempio n. 2  (scraper artifact: example separator, kept as a comment)
# 0
def test_load_annotations(filename, zero_based, prefix):
    """test VOC/aergia format annotations

    Renders the loaded bounding boxes onto each image and writes the
    result to OUTPUT_DIR so failures can be inspected visually.

    filename   -- annotation file to load
    zero_based -- whether box coordinates in the file are zero-based
    prefix     -- filename prefix for the rendered output images
    """
    data = load_annotations(filename, zero_based)
    # Bug fix: `xrange` is Python 2 only; the rest of this file targets
    # Python 3 (print() calls), so iterate with enumerate instead.
    for i, (img, bboxes, attrs_list) in enumerate(data):
        output_fname = os.path.join(OUTPUT_DIR, "{}_{}.jpg".format(prefix, i))
        # First element of each attrs tuple is the label passed to
        # draw_bbox — presumably; TODO confirm the annotation format.
        draw_bbox(img, bboxes, [attrs[0] for attrs in attrs_list])
        write_successful = cv2.imwrite(output_fname, img)
        assert write_successful, \
            'fail to write image {} to file'.format(output_fname)
def load_dataset():
    """Load the full annotation set, print its targets, and return it shuffled.

    The class mapping is built here only for its construction side and the
    diagnostic print; the function returns the shuffled annotation list.
    """
    annotations, classes = load_annotations()

    # Second column of each annotation row holds the target label.
    labels = np.array(annotations)[:, 1]
    print(labels)
    cls_mapping, cls_list = create_cls_mapping(labels)

    # Fixed seed keeps the shuffle reproducible across runs.
    return shuffle(annotations, random_state=52)
def train():
    """Train a from-scratch VGG16 classifier on 2 GPUs, checkpointing weights.

    Builds VGG16 with no pretrained weights and a fully connected sigmoid
    head, wraps it with multi_gpu_model, and runs 100 single-epoch rounds
    so the template model's weights can be saved after every epoch.
    """
    classes = 230
    # Build the template model on CPU so multi_gpu_model can place
    # replicas on the GPUs.
    with tf.device('/cpu:0'):
        base_model = VGG16(
            include_top=False,
            weights=None,
            input_shape=(192, 192, 3),
            classes=classes,
        )
        x = base_model.output
        x = Flatten()(x)
        x = Dense(4096, activation='relu', name='fc1')(x)
        x = Dense(4096, activation='relu', name='fc2')(x)
        # Bug fix: use the `classes` constant instead of a repeated 230.
        # NOTE(review): sigmoid (not softmax) with categorical_crossentropy
        # looks like a multi-label setup — confirm this is intentional.
        x = Dense(classes, activation='sigmoid')(x)
        model = Model(inputs=base_model.input, outputs=x)

    parallel_model = multi_gpu_model(model, gpus=2)
    adam = Adam(lr=0.0001)
    parallel_model.compile(loss='categorical_crossentropy',
                           optimizer=adam,
                           metrics=['accuracy', f_score])

    # Renamed from `train` so the local doesn't shadow this function.
    train_ann, _ = load_annotations("train", 0.00)  # no validation split
    x_train_set, y_train_set = generate_sets('train', train_ann)
    train_generator = BatchGenerator(x_train_set, y_train_set, 32, (192, 192))
    print(len(train_generator))

    # One fit_generator call per epoch so weights can be saved each round.
    for epoch in range(100):
        parallel_model.fit_generator(
            train_generator,
            steps_per_epoch=31705,
            epochs=1,
            verbose=1,
            max_queue_size=200,
            workers=10,
            use_multiprocessing=True)

        # Save the template model's weights (shared with the parallel copy).
        model.save_weights("../models/vgg16_all_da_epoch_" + str(epoch) + ".h5",
                           overwrite=True)
# Esempio n. 5  (scraper artifact: example separator, kept as a comment)
# 0
def train():
    """Train a from-scratch ResNet50 classifier on 2 GPUs.

    Runs 19 single-epoch rounds, continuing a run that already completed
    6 epochs (hence the "6 + i" in the checkpoint filenames), saving the
    template model's weights after every epoch.
    """
    classes = 230
    # Template model lives on the CPU; multi_gpu_model replicates it.
    with tf.device('/cpu:0'):
        base_model = ResNet50(
            include_top=False,
            weights=None,
            input_shape=(224, 224, 3),
            classes=classes,
        )
        x = base_model.output
        x = Flatten()(x)
        # NOTE(review): sigmoid + categorical_crossentropy suggests a
        # multi-label objective — confirm this is intentional.
        x = Dense(classes, activation='sigmoid', name='predictions')(x)
        model = Model(inputs=base_model.input, outputs=x)

    parallel_model = multi_gpu_model(model, gpus=2)
    adam = Adam(lr=0.0001)
    parallel_model.compile(loss='categorical_crossentropy',
                           optimizer=adam,
                           metrics=['accuracy', f_score])

    # Renamed from `train` so the local doesn't shadow this function; the
    # split is 0.00 so the unused validation half is discarded.
    train_ann, _ = load_annotations("train", 0.00)
    x_train_set, y_train_set = generate_sets('train', train_ann)
    train_generator = BatchGenerator(x_train_set, y_train_set, 32, (224, 224))
    print(len(train_generator))

    for i in range(1, 20):
        parallel_model.fit_generator(train_generator,
                                     steps_per_epoch=31705,
                                     epochs=1,
                                     verbose=1,
                                     max_queue_size=200,
                                     workers=10,
                                     use_multiprocessing=True)

        # Offset by 6 to continue numbering from the previous 6-epoch run.
        model.save_weights("../models/ResNet50_all_DA_epoch_" + str(6 + i) + ".h5",
                           overwrite=True)
def train():
    """Train a ResNet50 classifier with per-epoch validation reports.

    Preprocesses the annotations, trains on 2 GPUs one epoch at a time,
    and after each epoch saves predictions, prints a classification
    report, and checkpoints the template model's weights.
    """
    # Preprocess
    annotations, classes = load_annotations()

    targets = np.array(annotations)[:, 1]
    print(targets)
    cls_mapping, cls_list = create_cls_mapping(targets)

    annotations = shuffle(annotations, random_state=52)
    # Renamed from `train`/`valid` so the locals don't shadow this function.
    train_ann, valid_ann = train_test_split(annotations,
                                            test_size=0.10,
                                            random_state=52)

    # Setup train data
    y_train = np.array(train_ann)[:, 1]
    x_train = np.array(train_ann)[:, 0]
    Y_train = convert_to_categorical(y_train, len(classes), cls_mapping)
    # Bug fix: `shape` was referenced below but never defined (NameError).
    # Mirrors the tuple passed to BatchGenerator — TODO confirm the
    # (width, height) order load_validation_data expects.
    shape = (500, 100)
    train_generator = BatchGenerator(x_train, Y_train, 16, shape)
    print("length of train train_generator", len(train_generator))

    # Setup validation data.
    # NOTE(review): the original passed the *full* annotation set here,
    # not the held-out split — kept as-is, but this likely leaks training
    # samples into validation; consider passing valid_ann instead.
    X_valid, Y_valid = load_validation_data(annotations, shape, cls_mapping)

    # Set up model for training: template on CPU, replicas on 2 GPUs.
    with tf.device('/cpu:0'):
        base_model = ResNet50(
            include_top=False,
            input_shape=(100, 500, 3),
            classes=len(classes),
        )
        x = base_model.output
        x = Flatten()(x)
        x = Dense(len(classes), activation='softmax', name='predictions')(x)
        model = Model(inputs=base_model.input, outputs=x)

    parallel_model = multi_gpu_model(model, gpus=2)

    adam = Adam(lr=0.0001, amsgrad=True)

    parallel_model.compile(loss="categorical_crossentropy",
                           optimizer=adam,
                           metrics=['accuracy', f_macro_score])

    print("start training")  # bug fix: message previously read "tart training"
    for i in range(1, 200):
        print(i)
        # Bug fix: the original referenced an undefined `valid_generator`
        # (NameError); validate on the in-memory arrays instead.
        parallel_model.fit_generator(
            train_generator,
            steps_per_epoch=2236,
            epochs=1,
            verbose=1,
            validation_data=(X_valid, Y_valid),
            max_queue_size=200,
            workers=6,
            use_multiprocessing=True)
        pred = parallel_model.predict(X_valid)
        np.save("pred", pred)
        np.save("y_valid", Y_valid)
        # Hard-threshold the softmax outputs for the report below.
        pred[pred >= 0.5] = 1
        pred[pred < 0.5] = 0
        print(
            classification_report(np.array(Y_valid),
                                  pred,
                                  target_names=cls_list,
                                  digits=3))
        model.save_weights("../weights/" + str(i) + ".h5", overwrite=True)