Code Example #1
def build_model():
    '''Reconstruct a trained model from saved data.
    '''

    os.environ["CUDA_VISIBLE_DEVICES"] = "0"


    with open("models/2020-03-06-16-21-20/topology.txt", "r") as topology:
        num_filters = tuple(map(int, topology.readline()[1:-1].split(', ')))

    input_shape = (None, None, 3)
   
    model = Autoencoder(input_shape=input_shape, num_filters=num_filters)
    model = model.build()

    model.load_weights("models/2020-03-06-16-21-20/weights.h5")
    model.compile(optimizer="adam", loss="MSE", metrics=["accuracy"])

    return model
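A possible usage sketch for build_model(), assuming the saved topology and weights referenced above exist and that the surrounding project imports are in place; the test image path sample.png is hypothetical:

import cv2
import numpy as np

model = build_model()

# Load a test image, scale it to [0, 1], and add a batch dimension
img = cv2.imread("sample.png").astype("float32") / 255.0
img = np.expand_dims(img, axis=0)

# One forward pass through the restored autoencoder, then drop the batch axis
restored = np.squeeze(model.predict(img), axis=0)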
Code Example #2
File: train.py Project: Goganych/OiRS
trainX, testX = train_test_split(data, test_size=0.2)

trainX = np.asarray(trainX).astype("float32") / 255.0
testX = np.asarray(testX).astype("float32") / 255.0

# noise
trainNoise = np.random.normal(loc=0.5, scale=0.5, size=trainX.shape)
testNoise = np.random.normal(loc=0.5, scale=0.5, size=testX.shape)
trainXNoisy = np.clip(trainX + trainNoise, 0, 1)
testXNoisy = np.clip(testX + testNoise, 0, 1)

print("[INFO] building autoencoder...")
opt = Adam(learning_rate=1e-4)

autoencoder = Autoencoder().build(IMAGE_HEIGHT, IMAGE_WIDTH, 3)
autoencoder.compile(loss="mse", optimizer=opt)

H = autoencoder.fit(trainXNoisy,
                    trainX,
                    validation_data=(testXNoisy, testX),
                    epochs=EPOCHS,
                    batch_size=BS)

N = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure()
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.title("Training and Validation Loss")
plt.xlabel("Epoch")
plt.ylabel("Loss")
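This snippet stops before the figure is finished; a minimal continuation sketch, assuming the figure created above is still the active one (the output file name plot.png is an assumption):

plt.legend(loc="lower left")
plt.savefig("plot.png")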
Code Example #3
File: train.py Project: MaximSurovtsev/OIRS
testX = np.asarray(testX).astype("float64") / 255.0

trainX = np.reshape(trainX, (len(trainX), 64, 64, 3))
testX = np.reshape(testX, (len(testX), 64, 64, 3))

# Noise
trainNoise = np.random.normal(loc=0.5, scale=0.5, size=trainX.shape)
testNoise = np.random.normal(loc=0.5, scale=0.5, size=testX.shape)
trainXNoisy = np.clip(trainX + trainNoise, 0, 1)
testXNoisy = np.clip(testX + testNoise, 0, 1)

print("[INFO] building autoencoder...")
opt = 'adadelta'

autoencoder = Autoencoder().build(IMAGE_HEIGHT, IMAGE_WIDTH, 3)
autoencoder.compile(loss="mse", optimizer=opt, metrics=["accuracy"])
autoencoder.summary()

H = autoencoder.fit(trainXNoisy,
                    trainX,
                    validation_data=(testXNoisy, testX),
                    epochs=EPOCHS,
                    batch_size=BS)

N = np.arange(0, EPOCHS)
plt.style.use("ggplot")
plt.figure()
plt.plot(N, H.history["loss"], label="train_loss")
plt.plot(N, H.history["val_loss"], label="val_loss")
plt.title("Training and Validation Loss")
plt.xlabel("Epoch")
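Because this snippet compiles the model with metrics=["accuracy"], a quick held-out check can follow training; a minimal sketch, assuming the variables above are still in scope (accuracy has limited meaning for an MSE reconstruction loss):

loss, acc = autoencoder.evaluate(testXNoisy, testX, batch_size=BS)
print("[INFO] test loss: {:.4f}, accuracy: {:.4f}".format(loss, acc))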
Code Example #4

    with open(args.topology, "r") as topology:
        num_filters = tuple(map(int, topology.readline()[1:-1].split(', ')))
    
    files = glob.glob(os.path.join(args.file_dir, "*.png"))

    input_shape = (None, None, 3)
   
    # Reconstruct model from saved weights
    model = Autoencoder(input_shape=input_shape, num_filters=num_filters)
    model = model.build()
    model.summary()

    model.load_weights(args.weights)
    model.compile(optimizer="adam", loss="MSE", metrics=["accuracy"])

    # Generate time stamp for unique id of the result
    time_stamp = "{date:%Y-%m-%d-%H-%M-%S}".format(date=datetime.datetime.now())

    # Pass images to network
    for i, file in enumerate(files):

        inp_img = cv2.imread(file) / 255
        inp_img = np.expand_dims(inp_img, axis=0)

        out_img = model.predict(inp_img)

        inp_img = np.squeeze(inp_img, axis=0)
        out_img = np.squeeze(out_img, axis=0)
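        # Hedged continuation, not part of the original snippet: one way to save the
        # result is to stack the input and the reconstruction side by side and write
        # them under a results/ directory (the directory and naming scheme are assumptions).
        comparison = np.hstack([inp_img, out_img])
        out_path = os.path.join("results", "{}_{}.png".format(time_stamp, i))
        cv2.imwrite(out_path, (np.clip(comparison, 0, 1) * 255).astype("uint8"))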
Code Example #5
    train_ds = dataloader.load_and_patch(files[0], "fit", args.patch_shape, args.n_patches, args.batch_size,
                         args.prefetch, args.num_parallel_calls, shuffle=None, repeat=True)
        
    valid_ds = dataloader.load_and_patch(files[1], "fit", args.patch_shape, args.n_patches, args.batch_size,
                         args.prefetch, args.num_parallel_calls, shuffle=None, repeat=True)

    test_ds, test_gt = dataloader.load_and_patch(test_files, "inf", num_parallel_calls=args.num_parallel_calls, batch_size=8)

    
    input_shape = (None, None, 3)
    
    model = Autoencoder(input_shape=input_shape, num_filters=num_filters)
    model = model.build()

    model.summary()

    if args.train_continue:
        model.load_weights(args.weights_path)


    # Train the model
    model.compile(optimizer=optimizer, loss="MSE", metrics=['accuracy'])
    history = model.fit(train_ds,
              steps_per_epoch=500,
              epochs=args.n_epochs,
              validation_data=valid_ds,
              validation_steps=250,
              callbacks=callbacks(model_path, test_ds, test_gt),
              verbose=1)
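The fit() call above relies on a project-specific callbacks(model_path, test_ds, test_gt) factory that is not shown; a minimal sketch of what such a factory might return, using only standard tf.keras callbacks (the checkpoint file name, monitored metric, and log directory are assumptions; test_ds and test_gt are presumably consumed by a custom evaluation callback that is not reproduced here):

import os
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard

def callbacks(model_path, test_ds, test_gt):
    # Save the best weights seen so far and log training curves for TensorBoard
    return [
        ModelCheckpoint(os.path.join(model_path, "weights.h5"),
                        monitor="val_loss", save_best_only=True,
                        save_weights_only=True),
        TensorBoard(log_dir=os.path.join(model_path, "logs")),
    ]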