Example #1
def build_model():
    '''Reconstruct a trained model from saved data.
    '''

    os.environ["CUDA_VISIBLE_DEVICES"] = "0"


    with open("models/2020-03-06-16-21-20/topology.txt", "r") as topology:
        num_filters = tuple(map(int, topology.readline()[1:-1].split(', ')))

    input_shape = (None, None, 3)
   
    model = Autoencoder(input_shape=input_shape, num_filters=num_filters)
    model = model.build()

    model.load_weights("models/2020-03-06-16-21-20/weights.h5")
    model.compile(optimizer="adam", loss="MSE", metrics=["accuracy"])

    return model
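
A minimal usage sketch for this function, mirroring the prediction loop in Example #2 (the input file name sample.png is hypothetical, and the saved topology/weights files referenced above must exist):

import cv2
import numpy as np

model = build_model()

img = cv2.imread("sample.png") / 255               # scale pixels to [0, 1]
pred = model.predict(np.expand_dims(img, axis=0))  # add a batch dimension
out_img = np.squeeze(pred, axis=0)                 # drop the batch dimension again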
Example #2
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu


    with open(args.topology, "r") as topology:
        num_filters = tuple(map(int, topology.readline()[1:-1].split(', ')))
    
    files = glob.glob(os.path.join(args.file_dir, "*.png"))

    input_shape = (None, None, 3)
   
    # Reconstruct model from saved weights
    model = Autoencoder(input_shape=input_shape, num_filters=num_filters)
    model = model.build()
    model.summary()

    model.load_weights(args.weights)
    model.compile(optimizer="adam", loss="MSE", metrics=["accuracy"])

    # Generate a timestamp as a unique id for the results
    time_stamp = "{date:%Y-%m-%d-%H-%M-%S}".format(date=datetime.datetime.now())

    # Pass each image through the network
    for i, file in enumerate(files):

        inp_img = cv2.imread(file) / 255
        inp_img = np.expand_dims(inp_img, axis=0)

        out_img = model.predict(inp_img)

        inp_img = np.squeeze(inp_img, axis=0)
        out_img = np.squeeze(out_img, axis=0)

    # Build training and validation datasets from the first two input files
    train_ds = dataloader.load_and_patch(files[0], "fit", args.patch_shape, args.n_patches, args.batch_size,
                                         args.prefetch, args.num_parallel_calls, shuffle=None, repeat=True)

    valid_ds = dataloader.load_and_patch(files[1], "fit", args.patch_shape, args.n_patches, args.batch_size,
                                         args.prefetch, args.num_parallel_calls, shuffle=None, repeat=True)

    test_ds, test_gt = dataloader.load_and_patch(test_files, "inf", num_parallel_calls=args.num_parallel_calls, batch_size=8)

    
    input_shape = (None, None, 3)
    
    model = Autoencoder(input_shape=input_shape, num_filters=num_filters)
    model = model.build()

    model.summary()

    if args.train_continue:
        model.load_weights(args.weights_path)


    # Compile and train the model
    model.compile(optimizer=optimizer, loss="MSE", metrics=["accuracy"])
    history = model.fit(train_ds,
                        steps_per_epoch=500,
                        epochs=args.n_epochs,
                        validation_data=valid_ds,
                        validation_steps=250,
                        callbacks=callbacks(model_path, test_ds, test_gt),
                        verbose=1)
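
Example #2 reads its configuration from an args namespace. A sketch of an argument parser matching the attributes used above (flag names follow the args.* accesses in the snippet; types and defaults are illustrative assumptions, not taken from the source):

import argparse

def parse_args():
    # Flag names mirror the args.* attributes used in Example #2;
    # types and defaults here are assumptions for illustration only.
    p = argparse.ArgumentParser()
    p.add_argument("--gpu", default="0")                     # value for CUDA_VISIBLE_DEVICES
    p.add_argument("--topology", default="topology.txt")     # file holding the num_filters tuple
    p.add_argument("--file_dir", default=".")                # directory containing *.png inputs
    p.add_argument("--weights", default="weights.h5")        # saved model weights
    p.add_argument("--patch_shape", type=int, default=64)
    p.add_argument("--n_patches", type=int, default=100)
    p.add_argument("--batch_size", type=int, default=8)
    p.add_argument("--prefetch", type=int, default=1)
    p.add_argument("--num_parallel_calls", type=int, default=4)
    p.add_argument("--train_continue", action="store_true")  # resume training from weights_path
    p.add_argument("--weights_path", default="weights.h5")
    p.add_argument("--n_epochs", type=int, default=100)
    return p.parse_args()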