Example #1
def __getitem__(self, i):
    input_image = np.asarray(
        Image.open(self.input_root_path + self.input_files[i]))
    output_image = np.asarray(
        Image.open(self.output_root_path + self.output_files[i]))
    input_tensor = self.transform(image2tensor(input_image)).float()
    output_tensor = self.transform(image2tensor(output_image)).float()
    return input_tensor, output_tensor
Example #2
def __getitem__(self, i):
    input_image = np.asarray(
        Image.open(self.image_folder + self.input_files[i]))
    output_image = np.asarray(
        Image.open(self.image_folder + self.output_files[i]))
    input_tensor = self.transform(image2tensor(input_image))
    output_tensor = self.transform(image2tensor(output_image))

    return input_tensor, output_tensor
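Both snippets rely on an `image2tensor` helper that is not shown here. A minimal sketch of what it might look like, assuming it maps an HWC uint8 NumPy image to a CHW float tensor in [0, 1] (the exact contract in these repositories may differ):

import numpy as np
import torch


def image2tensor(image):
    """Convert an HWC uint8 image array to a CHW float tensor in [0, 1]."""
    if image.ndim == 2:
        # Grayscale image: add a trailing channel axis
        image = image[:, :, None]
    tensor = torch.from_numpy(np.ascontiguousarray(image)).permute(2, 0, 1)
    return tensor.float() / 255.0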
Example #3
import os

import torch
import matplotlib.image as mpimg

import utils
from generator import Generator
# Assumed location of get_config/print_usage; the original file's import
# for these helpers is not shown in the snippet.
from config import get_config, print_usage


if __name__ == '__main__':
    config, unparsed = get_config()
    if len(unparsed) > 0:
        print_usage()
        exit(1)
    # Generate images
    model = Generator(config)
    weights_file = config.weights_dir
    if os.path.exists(weights_file):
        device = "cuda:0" if torch.cuda.is_available() else "cpu"
        load_res = torch.load(weights_file, map_location=device)
        model.load_state_dict(load_res)
    model.eval()
    inp_list = sorted(os.listdir(config.data_dir))
    for i in range(len(inp_list)):
        img = mpimg.imread(config.data_dir + inp_list[i])
        print(img.shape)
        img = utils.image2tensor(img)
        masks, f1, f2, x = model(img)
        mask = utils.transform_to_mpimg(masks[-1])
        mask = mask.squeeze(2)
        utils.show_img(mask)
        exit(0)  # stop after the first image (debugging aid)
        # x = x.detach().numpy()
        # x = x.squeeze(0).transpose(1, 2, 0).astype(np.uint8)
Example #4
                        required=True,
                        help="path of pytorch-pwc .pth file")
    parser.add_argument('-o',
                        '--output',
                        default="output.flo",
                        help='output location for flo file')

    args = parser.parse_args()

    return args


if __name__ == '__main__':
    args = parse_args()

    tensorFirst = image2tensor(PIL.Image.open(args.first))
    tensorSecond = image2tensor(PIL.Image.open(args.second))

    moduleNetwork = Network().cuda().eval()
    moduleNetwork.load_state_dict(torch.load(args.model))

    tensorOutput = moduleNetwork.estimate(tensorFirst, tensorSecond)

    # Write a Middlebury .flo file: 'PIEH' magic bytes (the float32
    # 202021.25), then int32 width/height, then the float32 flow data
    with open(args.output, 'wb') as objectOutput:
        numpy.array([80, 73, 69, 72], numpy.uint8).tofile(objectOutput)
        numpy.array(
            [tensorOutput.size(2), tensorOutput.size(1)],
            numpy.int32).tofile(objectOutput)
        numpy.array(tensorOutput.numpy().transpose(1, 2, 0),
                    numpy.float32).tofile(objectOutput)
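A file written this way can be read back by reversing the same layout. A minimal sketch of a matching reader, assuming the Middlebury .flo convention used above:

import numpy as np


def read_flo(path):
    """Read a Middlebury .flo file back into an (h, w, 2) float32 array."""
    with open(path, 'rb') as f:
        # The 'PIEH' magic bytes decode to the float32 value 202021.25
        magic = np.fromfile(f, np.float32, count=1)[0]
        assert magic == 202021.25, "not a valid .flo file"
        w = int(np.fromfile(f, np.int32, count=1)[0])
        h = int(np.fromfile(f, np.int32, count=1)[0])
        data = np.fromfile(f, np.float32, count=h * w * 2)
    return data.reshape(h, w, 2)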
Example #5
uploaded_file = st.file_uploader(
    label=("Upload an image of any of the available The Simpsons "
           "characters (please see Classes)."),
    type=["png", "jpeg", "jpg"])

# Display the predict button just when an image is being uploaded
if not uploaded_file:
    st.warning("Please upload an image before proceeding!")
    st.stop()
else:
    image_as_bytes = uploaded_file.read()
    st.image(image_as_bytes, use_column_width=True)
    pred_button = st.button("Predict")

if pred_button:
    # Converts the input image into a Tensor
    image_tensor = image2tensor(image_as_bytes=image_as_bytes)

    # Prepare the data that is going to be sent in the POST request
    json_data = {"instances": image_tensor}

    # Send the request to the Prediction API
    response = requests.post(REST_URL, json=json_data)

    # Retrieve the highest probability index of the Tensor (actual prediction)
    prediction = response.json()['predictions'][0]
    label = prediction2label(prediction=prediction)

    # Write the predicted label for the input image
    st.write(f"Predicted The Simpsons character is: {label}")
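The `prediction2label` helper is not shown above. Assuming the Prediction API returns one probability per class, it plausibly reduces to an argmax over a fixed class list; a hypothetical sketch (the class names here are placeholders, not the app's real list):

import numpy as np

# Placeholder class list; the real app would use its actual character names
CLASS_NAMES = ["homer_simpson", "bart_simpson", "lisa_simpson"]


def prediction2label(prediction):
    """Map a list of per-class probabilities to the most likely class name."""
    return CLASS_NAMES[int(np.argmax(prediction))]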
Example #6
def training_mode(model_name):

    batch_size = 25

    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            '../data',
            train=True,
            download=True,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5,), (0.5,))
            ])),
        batch_size=batch_size,
        shuffle=True)

    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST(
            '../data',
            train=False,
            transform=transforms.Compose([
                transforms.ToTensor(),
                transforms.Normalize((0.5,), (0.5,))
            ])),
        batch_size=batch_size,
        shuffle=True)

    # Prepare unpaired image translation dataset

    X = np.array(
        [utils.tensor2image(image_batch) for image_batch, _ in train_loader])
    X = X.reshape(
        (X.shape[0] * X.shape[1], X.shape[2], X.shape[3], X.shape[4]))
    Y = np.array(X) * -1
    np.random.shuffle(Y)

    # utils.display_image(X[0])
    # utils.display_image(Y[0])

    X_train, X_test, Y_train, Y_test = train_test_split(X, Y)
    unsupervised_train_loader = data.DataLoader(
        data.TensorDataset(utils.image2tensor(X_train),
                           utils.image2tensor(Y_train)),
        batch_size=1)
    unsupervised_test_loader = data.DataLoader(
        data.TensorDataset(utils.image2tensor(X_test),
                           utils.image2tensor(Y_test)),
        batch_size=1)

    x, y = next(iter(unsupervised_train_loader))
    c = utils.tensor2image(x.cpu()[0:1])
    utils.display_image(c[0])

    # Initialize network and optimizers

    G = Generator(3)
    F = Generator(3)
    D_X = Discriminator(3)
    D_Y = Discriminator(3)

    G_optimizer = optim.Adam(G.parameters(), lr=1e-3)
    F_optimizer = optim.Adam(F.parameters(), lr=1e-3)
    D_X_optimizer = optim.Adam(D_X.parameters(), lr=1e-3)
    D_Y_optimizer = optim.Adam(D_Y.parameters(), lr=1e-3)
    writer = SummaryWriter(model_name)

    # Train network
    for epoch in range(1):
        train(epoch, G, F, D_X, D_Y, G_optimizer, F_optimizer, D_X_optimizer,
              D_Y_optimizer, unsupervised_train_loader, writer, test_loader)

    save_model(f'../models/{model_name}_G.pt', G, F,
               f'../models/{model_name}_F.pt')
Example #7
            optim_d.load_state_dict(load_res["optim_d"])
        else:
            os.remove(checkpoint_file)

    # Around 100000 epochs are needed to get good results
    for epoch in range(starting_epoch, config.num_epoch):
        prefix = "Training Epoch {:3d}: ".format(epoch)

        inp_list = inp_list[starting_index:]
        label_list = label_list[starting_index:]
        # Batch size is 1 by default; no data loader is needed,
        # since images are loaded one by one.
        for index in tqdm(range(len(inp_list))):
            img = mpimg.imread(config.data_dir + inp_list[index])
            img = utils.image2tensor(img)
            label_img = mpimg.imread(config.label_dir + label_list[index])
            label_img = utils.image2tensor(label_img)

            if torch.cuda.is_available():
                img = img.cuda()
                label_img = label_img.cuda()

            # Train Discriminator
            optim_d.zero_grad()
            mask_r, D_real = model_discriminator.forward(label_img)
            masks, f1, f2, x = model_gen.forward(img)
            mask_f, D_fake = model_discriminator.forward(x)

            # Eq. 9
            # L_map is the loss between the features extracted from
Example #8
    args = parse_args()

    vidcap = cv2.VideoCapture(args.input)
    FPS = int(vidcap.get(cv2.CAP_PROP_FPS))
    VID_WIDTH = int(vidcap.get(cv2.CAP_PROP_FRAME_WIDTH))
    VID_HEIGHT = int(vidcap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    vidout = cv2.VideoWriter(args.output, cv2.VideoWriter_fourcc(*'mp4v'),
                             FPS, (VID_WIDTH, VID_HEIGHT))
    FRAMES = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT))
    success = True

    # Build and load the network once, before the frame loop (re-creating
    # and re-loading it on every frame would be needlessly slow)
    moduleNetwork = Network().cuda().eval()
    moduleNetwork.load_state_dict(torch.load(args.model))

    for _ in tqdm(range(FRAMES)):
        success, image = vidcap.read()

        if success and 'frame2' in locals():  # We want to skip the first frame
            frame1 = frame2
            frame2 = image2tensor(image)
        else:
            frame2 = image2tensor(image)
            continue

        tensorOutput = moduleNetwork.estimate(frame1, frame2)
        flow = np.array(tensorOutput.numpy().transpose(1, 2, 0), np.float32)
        flow_img = flowiz.convert_from_flow(flow)
        vidout.write(flow_img)

    vidout.release()
    print(f"Done!\nFlow visualization written to {args.output}")
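For a single saved file, flowiz can also convert a .flo (such as the one written in Example #4) directly; a short usage sketch:

import flowiz
import matplotlib.pyplot as plt

# Convert a .flo file into an RGB visualization and display it
flow_img = flowiz.convert_from_file("output.flo")
plt.imshow(flow_img)
plt.show()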