Exemplo n.º 1
0
def main():
    """Entry point: train a deep neural network image classifier.

    Loads the data sets, flattens and normalizes the images, trains a
    4-hidden-layer network for 2500 iterations, then prints train and
    test accuracy.  Relies on the project-local ``utils`` module.
    """
    train_X, train_Y, test_X, test_Y, classes = utils.load_data_sets()

    # Data set sizes
    train_num = train_X.shape[0]
    test_num = test_X.shape[0]

    # In this example num_px = 64
    px_num = train_X.shape[1]

    # Flatten each (px, px, 3) image into one column: (data_dim, m)
    data_dim = px_num * px_num * 3
    train_X = train_X.reshape(train_num, data_dim).T
    test_X = test_X.reshape(test_num, data_dim).T

    # Scale pixel values to [0, 1]
    train_X = train_X / 255.
    test_X = test_X / 255.

    # 12288 inputs -> hidden layers (20, 7, 5) -> 1 sigmoid output
    layer = [12288, 20, 7, 5, 1]
    parameters = utils.deep_neural_network(train_X, train_Y, layer, 2500)
    # Python 3 print() calls (original used Python 2 print statements,
    # which are syntax errors in Python 3).
    print('Train Accuracy:', utils.predict_image(parameters, train_X,
                                                 train_Y), '%')
    print('Test Accuracy:', utils.predict_image(parameters, test_X,
                                                test_Y), '%')
Exemplo n.º 2
0
 def on_epoch_end(self, epoch, logs=None):
     """Write one prediction summary per stored input image.

     Called by the training framework at the end of each epoch: runs the
     current model over every cached input, converts each prediction to a
     TensorFlow summary, and flushes them all tagged with this epoch.

     Args:
         epoch: index of the epoch that just finished.
         logs: optional metrics dict supplied by the framework (unused).
             Default changed from the mutable ``{}`` to ``None`` to avoid
             the shared-mutable-default pitfall; behavior is identical
             because ``logs`` is never read.
     """
     summaries = []
     for im, f in zip(self.inputs, self.filenames_in):
         im_pred = predict_image(im, self.model)
         summary = self.to_tensorflow_summary(im_pred, f)
         summaries.append(summary)
     self.write_summaries(summaries, epoch)
Exemplo n.º 3
0
from de_gan.networks import Generator
from utils import predict_image
from parameters import PATH_WEIGHTS_GENERATOR

torch.set_printoptions(precision=10)  # Print all decimals

# NOTE(review): this initial value is never used — `path` is overwritten
# inside the loop below before any file is opened.
path = '../data/dataset/'
num_gpu = 1
# Use the first CUDA device when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if (
    torch.cuda.is_available() and num_gpu > 0) else "cpu")

# Build the generator and restore its trained weights onto `device`.
generator = Generator()
generator = generator.to(device)
generator.load_state_dict(
    torch.load(PATH_WEIGHTS_GENERATOR, map_location=device))

dataset = os.listdir('../test')

# Inference only — disable gradient tracking.
with torch.no_grad():
    for num_image, image in enumerate(dataset):
        print(f"Elaborated: {image}")
        # NOTE(review): the directory listed above is '../test' but files
        # are opened from 'test/' — confirm which relative path is intended.
        path = f'test/{image}'
        image = Image.open(path).convert('L')  # load as grayscale
        # To tensor on the target device, with a leading batch dimension.
        torch_image = transforms.ToTensor()(image).to(device).unsqueeze(0)
        predicted_image = predict_image(torch_image, generator, device)
        # Save the binarized output, numbered by position in the listing.
        v_utils.save_image(predicted_image,
                           os.path.join("../output", f"bin_{num_image}.png"),
                           normalize=True)

print("Test completed successfully!")
# Fetch label metadata for both data sets (the data splits themselves are
# discarded; only the `info` objects are kept for label lookup).
_, breed_info = tfds.load('stanford_dogs', with_info=True, as_supervised=True)

_, cats_dogs_info = tfds.load('cats_vs_dogs',
                              with_info=True,
                              as_supervised=True)

# print(f'cats_vs_dogs: {cats_dogs_info.features["label"].num_classes}')
# print(f'dog breeds: {breed_info.features["label"].num_classes}')

# Load the trained models.
dogs_optimizer = tf.keras.optimizers.Adam(learning_rate=1e-2)

# Binary cat/dog classifier: rebuild the architecture, restore weights only.
cats_dogs_model = build_model(num_classes=2)
cats_dogs_model.load_weights('../tf_models/colab/cats_dogs_best_colab1.h5')

# Breed classifier: load the full saved model, then recompile it.
dogs_model = tf.keras.models.load_model(
    '../tf_models/colab/best_colab1_fine.h5', compile=False)
dogs_model.compile(optimizer=dogs_optimizer,
                   loss="categorical_crossentropy",
                   metrics=["accuracy"])

# First decide whether the image shows a cat or a dog.
res = predict_image(file, cats_dogs_model, cats_dogs_info, show_image=False)

print(f'Animal: {res}')
# If it is a dog, additionally predict the breed.
if res == 'dog':
    breed = predict_image(file, dogs_model, breed_info, show_image=False)
    print(f'Raza: {breed}')
Exemplo n.º 5
0
            # Generator update: push the discriminator's score on the
            # generated batch toward the "valid" target.
            generator.zero_grad()
            output = discriminator(n_generated_batch, n_degraded_batch).view(-1)
            error_generator_one = criterion(output, n_valid_batch)
            error_generator_one.backward()
            d_g_z2 = output.mean().item()  # mean D score for logging below
            optimizerGenerator.step()

            #####################################################################
            # Addition Log Loss function
            #####################################################################
            # generator.zero_grad()
            # output_generator = generator(n_degraded_batch)
            # error_generator_two = criterion(output_generator, n_clear_batch)
            # error_generator_two.backward()
            # d_g_z3 = output.mean().item()
            # optimizerGenerator.step()

        # Every 50 batches: print losses, save one sample prediction and
        # checkpoint both networks.
        if i % 50 == 0:
            epoch_text = f"[{epoch}/{num_epochs}][{i}/{len(train_set)}]"
            # loss_text = "\tLoss_D: %.4f\tLoss_G: %.4f / %.4f" % (
            # error_discriminator.item(), error_generator_one.item(), error_generator_two.item())
            loss_text = "\tLoss_D: %.4f\tLoss_G: %.4f" % (
                error_discriminator.item(), error_generator_one.item())
            error_text = f"\t\tD(I_W, I_GT): %.4f\tD(I_W, G(I_W)) %.4f / %.4f" % (d_x, d_g_z1, d_g_z2)
            print(epoch_text + loss_text + error_text)
            # Inference-only sample image for visual progress monitoring.
            with torch.no_grad():
                bin_batch = predict_image(test_image, generator, device)
                v_utils.save_image(bin_batch, os.path.join("../output", "bin_sample.png"), normalize=True)
            # Persist current weights so training can be resumed.
            torch.save(generator.state_dict(), PATH_WEIGHTS_GENERATOR)
            torch.save(discriminator.state_dict(), PATH_WEIGHTS_DISCRIMINATOR)
Exemplo n.º 6
0
# Sidebar branding.
st.sidebar.image(WEB_APP_LOGO, use_column_width=True)
st.sidebar.markdown('Made (with love) in 2020')

# Trained deep learning model in action:
if uploaded_file is not None:

    # Take in charge uploaded file:
    st.markdown('---')
    image = Image.open(uploaded_file)
    filename = 'image.jpg'
    filepath = os.path.join(INFERENCE_FOLDER, filename)
    image.save(filepath, quality=95)  # re-encode the upload as JPEG
    st.markdown('The picture has been **correctly** uploaded and saved!')
    st.image(image, use_column_width=True)

    # Perform taxonomic classification on uploaded image:
    st.markdown('---')
    inputname = 'input.jpg'
    inputpath = os.path.join(INFERENCE_FOLDER, inputname)
    img_array = process_image(filepath, inputpath)
    classes, probs = predict_image(img_array, gpu=torch.cuda.is_available())
    # Render the class/probability chart to a file, then display it.
    outputname = 'output.jpg'
    outputpath = os.path.join(INFERENCE_FOLDER, outputname)
    visualize_prediction(classes, probs, outputpath)
    st.markdown('Check uploaded picture\'s flower taxonomic classification **results**!')
    output_img = Image.open(outputpath)
    st.image(output_img, use_column_width=True)
    st.markdown('---')
    st.markdown('Uploaded picture\'s **flower specie prediction**:')
    # NOTE(review): assumes the last of 5 returned classes (index 4) is the
    # one to display — confirm the ordering returned by predict_image.
    specie_prediction = classes[4].upper()
    st.markdown(f'**{specie_prediction}**')
Exemplo n.º 7
0
        # Time this iteration so FPS can be computed, then advance the tracker.
        timer = cv2.getTickCount()
        ok, bbox = tracker.update(frame)

        # Calculate FPS
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

        # Draw bounding box
        if ok:
            # Tracking success
            # Classify the tracked region on the first frame.
            if index == 0:
                # bbox is (x, y, w, h): crop rows y..y+h, cols x..x+w.
                roi = frame[int(bbox[1]):int(bbox[1]) + int(bbox[3]),
                            int(bbox[0]):int(bbox[0]) + int(bbox[2])]
                # OpenCV frames are BGR; PIL expects RGB.
                pil_img = Image.fromarray(cv2.cvtColor(roi, cv2.COLOR_BGR2RGB))
                #cv2.imwrite("roi.png", roi)
                predicted = predict_image(pil_img, model, device, classes)

            # Re-classify every 50 frames; `index` is presumably incremented
            # elsewhere in the loop — TODO confirm.
            if index == 50:
                roi = frame[int(bbox[1]):int(bbox[1]) + int(bbox[3]),
                            int(bbox[0]):int(bbox[0]) + int(bbox[2])]
                pil_img = Image.fromarray(cv2.cvtColor(roi, cv2.COLOR_BGR2RGB))
                predicted = predict_image(pil_img, model, device, classes)
                index = 1  # restart the re-classification interval

            # Draw the current bounding box on the frame.
            p1 = (int(bbox[0]), int(bbox[1]))
            p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
            cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        else:
            # Tracking failure
            cv2.putText(frame, "Tracking failed", (100, 80),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)