Example #1
import argparse
import json

import cv2

# PSPNet50, inference, and createResponse are assumed to come from this repository.
def main():
    parser = argparse.ArgumentParser(description="predictor")
    parser.add_argument("--img_path",
                        default="./images/1.jpg",
                        help="target img path")
    parser.add_argument("--architecture",
                        default="PSPNet",
                        choices=["PSPNet", "SegUNet"],
                        help="architecture type")
    parser.add_argument("--weight",
                        default="./pretrained/LIP_PSPNet50_05.hdf5",
                        help="pretrained weight path")
    parser.add_argument("--output_path",
                        default="./response.json",
                        help="output path")
    args = parser.parse_args()

    # load model
    model = PSPNet50()
    model.load_weights(args.weight)

    # inference
    inputs = cv2.imread(args.img_path)
    outputs = inference(model, inputs)

    # create response
    response = createResponse(outputs, inputs)

    # save response
    with open(args.output_path, "w") as f:
        json.dump(response, f, indent=2)
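
Note that the parser accepts --architecture with choices PSPNet and SegUNet, but the snippet always instantiates PSPNet50. A minimal sketch of actually honoring the flag (SegUNet is assumed to be a repo-local class; its no-argument constructor is an assumption):

# hedged sketch: dispatch on the parsed architecture choice
if args.architecture == "PSPNet":
    model = PSPNet50()
else:  # the parser's only other choice is "SegUNet"
    model = SegUNet()  # assumed repo-local class; no-arg constructor is an assumption
model.load_weights(args.weight)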
Example #2
def main(args):
    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # get old session
    old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # class weights
        classes = ['background', 'hat', 'hair', 'glove', 'sunglasses', 'upperclothes',
                'dress', 'coat', 'socks', 'pants', 'jumpsuits', 'scarf', 'skirt',
                'face', 'leftArm', 'rightArm', 'leftLeg', 'rightLeg', 'leftShoe','rightShoe']
        if args.class_weights:
            """
            class_weights = {0:1, 1:40, 2:1, 3:114, 4:151, 5:3, 6:53, 7:7, 8:165, 9:7, 10:106,
                    11:249, 12:150, 13:1, 14:1, 15:1, 16:1, 17:1, 18:114, 19:118}
            """
            class_weights = [1, 40, 1, 114, 151, 3, 53, 7, 165, 7, 106, 249, 150, 1, 1, 1, 1, 1, 114, 118]
        else:
            class_weights = None  # fit_generator below references class_weights unconditionally

        # set callbacks
        fpath = "./pretrained_class_weights/LIP_PSPNet50_class_weights{epoch:02d}.hdf5"
        cp_cb = ModelCheckpoint(filepath=fpath, monitor='val_loss', verbose=1, save_best_only=True, mode='auto', period=2)
        es_cb = EarlyStopping(monitor='val_loss', patience=2, verbose=1, mode='auto')
        tb_cb = TensorBoard(log_dir="./pretrained_class_weights", write_images=True)

        # set generator
        train_gen = data_gen_small(
                trainimg_dir,
                trainmsk_dir,
                train_list,
                args.batch_size,
                [args.input_shape[0], args.input_shape[1]],
                args.n_labels)
        val_gen = data_gen_small(
                valimg_dir,
                valmsk_dir,
                val_list,
                args.batch_size,
                [args.input_shape[0], args.input_shape[1]],
                args.n_labels)

        # set model
        pspnet = PSPNet50(
                input_shape=args.input_shape,
                n_labels=args.n_labels,
                output_mode=args.output_mode,
                upsample_type=args.upsample_type)
        pspnet.summary()  # summary() prints directly; wrapping it in print() would output None

        # compile model
        pspnet.compile(
                loss=args.loss,
                optimizer=args.optimizer,
                metrics=["accuracy"])

        # fit with generator
        pspnet.fit_generator(
                generator=train_gen,
                steps_per_epoch=args.epoch_steps,
                epochs=args.n_epochs,
                validation_data=val_gen,
                validation_steps=args.val_steps,
                class_weight=class_weights,
                callbacks=[cp_cb, es_cb, tb_cb])

    # save model
    with open("./pretrained_class_weights/LIP_PSPNet50.json", "w") as json_file:
        json_file.write(json.dumps(json.loads(pspnet.to_json()), indent=2))
    print("save json model done...")
Example #3
def main(args):
    # set the necessary list
    # train_list = pd.read_csv(args.train_list,header=None)
    # val_list = pd.read_csv(args.val_list,header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # get old session
    # old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # set callbacks
        fpath = './pretrained_mask/' + args.name + '{epoch:02d}.hdf5'
        cp_cb = ModelCheckpoint(filepath=fpath,
                                monitor='val_loss',
                                verbose=1,
                                mode='auto',
                                period=1)
        tb_cb = TensorBoard(log_dir="./pretrained_mask",
                            write_graph=True,
                            write_images=True)

        # NOTE: `sometimes` is assumed to be the usual imgaug helper defined
        # elsewhere in the repo, e.g. sometimes = lambda aug: iaa.Sometimes(0.5, aug)
        seq = iaa.Sequential(
            [
                iaa.Crop(
                    px=(0, 16)
                ),  # crop images from each side by 0 to 16px (randomly chosen)
                iaa.Fliplr(0.5),  # horizontally flip 50% of the images
                sometimes(
                    iaa.Affine(
                        scale={
                            "x": (0.8, 1.2),
                            "y": (0.8, 1.2)
                        },
                        # scale images to 80-120% of their size, individually per axis
                        translate_percent={
                            "x": (-0.2, 0.2),
                            "y": (-0.2, 0.2)
                        },  # translate by -20 to +20 percent (per axis)
                        rotate=(-10, 10),  # rotate by -10 to +10 degrees
                    )),
            ],
            random_order=True)
        if args.dataset == 'coco':
            train_gen = Datahandler_COCO(trainimg_dir,
                                         trainmsk_dir).make_batches(
                                             batchsize=args.batch_size,
                                             inputshape=args.input_shape,
                                             augmentation=seq)
            val_gen = Datahandler_COCO(valimg_dir, valmsk_dir).make_batches(
                batchsize=args.batch_size,
                inputshape=args.input_shape,
                augmentation=None)
        elif args.dataset == 'pascal_khamba':
            train_gen = Pascal_Generator(trainimg_dir,
                                         trainmsk_dir).make_batches(
                                             batchsize=args.batch_size,
                                             inputshape=args.input_shape,
                                             augmentation=seq)
            val_gen = Pascal_Generator(valimg_dir, valmsk_dir).make_batches(
                batchsize=args.batch_size,
                inputshape=args.input_shape,
                augmentation=None)

        else:
            train_gen = Default_Generator(trainimg_dir,
                                          trainmsk_dir).make_batches(
                                              batchsize=args.batch_size,
                                              inputshape=args.input_shape,
                                              augmentation=seq)
            val_gen = Default_Generator(valimg_dir, valmsk_dir).make_batches(
                batchsize=args.batch_size,
                inputshape=args.input_shape,
                augmentation=None)

        # set model
        pspnet = PSPNet50(input_shape=args.input_shape,
                          n_labels=args.n_labels,
                          output_mode=args.output_mode,
                          upsample_type=args.upsample_type)
        pspnet.summary()
        if args.load is not None:
            print("loadinf weights")
            pspnet.load_weights(args.load)

        # compile model
        pspnet.compile(loss=args.loss,
                       optimizer=args.optimizer,
                       metrics=["accuracy"])

        # fit with generator
        pspnet.fit_generator(generator=train_gen,
                             steps_per_epoch=args.steps,
                             epochs=args.epochs,
                             validation_data=val_gen,
                             validation_steps=args.val_steps,
                             callbacks=[cp_cb, tb_cb],
                             verbose=True)

    # save model
    with open("./pretrained_mask/" + args.name + ".json", "w") as json_file:
        json_file.write(json.dumps(json.loads(pspnet.to_json()), indent=2))

    print("save json model done...")
Example #4
def main(args):
    # set the necessary list
    train_list = pd.read_csv(args.train_list, header=None)
    val_list = pd.read_csv(args.val_list, header=None)

    # set the necessary directories
    trainimg_dir = args.trainimg_dir
    trainmsk_dir = args.trainmsk_dir
    valimg_dir = args.valimg_dir
    valmsk_dir = args.valmsk_dir

    # get old session
    old_session = KTF.get_session()

    with tf.Graph().as_default():
        session = tf.Session('')
        KTF.set_session(session)
        KTF.set_learning_phase(1)

        # set callbacks
        fpath = './pretrained_mask/LIP_PSPNet50_mask{epoch:02d}.hdf5'
        cp_cb = ModelCheckpoint(filepath=fpath,
                                monitor='val_loss',
                                verbose=1,
                                save_best_only=True,
                                mode='auto',
                                period=5)
        es_cb = EarlyStopping(monitor='val_loss',
                              patience=2,
                              verbose=1,
                              mode='auto')
        tb_cb = TensorBoard(log_dir="./pretrained_mask", write_images=True)

        # set generator
        train_gen = data_gen_small(trainimg_dir, trainmsk_dir, train_list,
                                   args.batch_size,
                                   [args.input_shape[0], args.input_shape[1]],
                                   args.n_labels)
        val_gen = data_gen_small(valimg_dir, valmsk_dir, val_list,
                                 args.batch_size,
                                 [args.input_shape[0], args.input_shape[1]],
                                 args.n_labels)

        # set model
        pspnet = PSPNet50(input_shape=args.input_shape,
                          n_labels=args.n_labels,
                          output_mode=args.output_mode,
                          upsample_type=args.upsample_type)
        pspnet.summary()

        # compile model
        pspnet.compile(loss=args.loss,
                       optimizer=args.optimizer,
                       metrics=["accuracy"])

        # fit with generator
        pspnet.fit_generator(generator=train_gen,
                             steps_per_epoch=args.epoch_steps,
                             epochs=args.n_epochs,
                             validation_data=val_gen,
                             validation_steps=args.val_steps,
                             callbacks=[cp_cb, es_cb, tb_cb])

    # save model
    with open("./pretrained_mask/LIP_SegUNet_mask.json", "w") as json_file:
        json_file.write(json.dumps(json.loads(pspnet.to_json()), indent=2))
    print("save json model done...")


if __name__ == '__main__':
    trainroot1 = "train"
    trainroot2 = "train2"
    folder_create(trainroot2)
    valroot = "val"
    model_file = "model.hdf5"
    BATCH_SIZE = 5
    NUM_EPOCH = 2
    # size and number of channels
    size = [512, 512]
    input_ch = 3

    color_dic = {1: [255, 255, 255]}
    output_ch = len(color_dic) + 1
    # data augmentation
    # data_augment_copy(trainroot1, trainroot2)

    # compile the model
    input_shape = [size[0], size[1], input_ch]
    model = PSPNet50(input_shape=input_shape, n_labels=output_ch)
    # model.compile(loss=, optimizer="adadelta", metrics=["accuracy"])
    model_compile_unet(model)
    # build and train the model (on the train and validation sets)
    history = train_unet(trainroot2, valroot, model_file, model, size, BATCH_SIZE, NUM_EPOCH, color_dic)
    plot_hist_dice(history)
    # delete trainroot2
    folder_delete(trainroot2)
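
Here color_dic maps label index 1 to white pixels in the RGB masks; a minimal sketch of turning such a mask into an integer label map (the function name is hypothetical; background defaults to label 0):

import numpy as np

def rgb_mask_to_labels(mask_rgb, color_dic):
    # (H, W, 3) RGB mask -> (H, W) integer label map; unmatched pixels stay background (0)
    labels = np.zeros(mask_rgb.shape[:2], dtype=np.uint8)
    for label, color in color_dic.items():
        match = np.all(mask_rgb == np.asarray(color, dtype=mask_rgb.dtype), axis=-1)
        labels[match] = label
    return labels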