Example #1
def test(code, start, end):
    test_data = cnn_data_generater.generate_input_data(code, start, end)
    model = cnn.load_model(model_path, code)
    predicted = model.predict(test_data, verbose=1)
    predicted = list(map(lambda n: n[0], predicted))

    # Reload the raw series and window it so targets align with predictions
    data = data_processor.load_data(data_path + str(code) + ".csv", start, end)
    data = data_processor.divide(data, DATA_PERIOD + DATA_DURATION, 1)
    # Closing price on the last day of each input window
    base_price = list(
        map(lambda n: n[DATA_PERIOD - 1],
            list(map(lambda n: n['CLOSE'].tolist(), data))))
    # Closing price on the prediction target day
    actual_price = list(
        map(lambda n: n[DATA_PERIOD + DATA_DURATION - 1],
            list(map(lambda n: n['CLOSE'].tolist(), data))))
    date = list(
        map(lambda n: n[DATA_PERIOD + DATA_DURATION - 1],
            list(map(lambda n: n['DATE'].tolist(), data))))

    df = pd.DataFrame({
        'date': date,
        'predict': predicted,
        'base_price': base_price,
        'actual_price': actual_price
    })
    df.to_csv("result.csv", index=False)
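Examples #1 and #3 both lean on data_processor.divide without showing it. A minimal sketch of the windowing it appears to perform, assuming the second argument is the window length (the role of the third argument is not visible in these snippets, so it is kept as an unused placeholder):

import pandas as pd

# Hypothetical sketch of data_processor.divide: slide a fixed-length window
# over the price DataFrame one row at a time. The third parameter's meaning
# is not shown above, so it is accepted and ignored here.
def divide(df: pd.DataFrame, window: int, _flag: int = 0):
    return [df.iloc[i:i + window] for i in range(len(df) - window + 1)]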
Example #2
def run():
    model = load_model('0.9392_HandyNet_1607802541.3999255')[0]
    cap = cv2.VideoCapture(0)

    while True:
        _, capture = cap.read()
        capture = cv2.flip(capture, 1)
        border = 100
        size = capture.shape[0]
        points = np.array([
            [border, border],
            [border, size - border],
            [size - border, size - border],
            [size - border, border],
        ])
        x, y, w, h = cv2.boundingRect(points)

        image = crop_square_region(capture, points)
        sign = predict_sign(model, image, threshold=0.5, verbose=True)
        group = find_sign_group(sign)

        capture = cv2.rectangle(capture, (x, y), (x + w, y + h), (0, 255, 0),
                                thickness=2)
        capture = cv2.putText(capture,
                              f"{sign} {str(group)}",
                              (10, capture.shape[0] - 10),
                              cv2.FONT_HERSHEY_SIMPLEX,
                              2, (0, 0, 255),
                              thickness=3)
        cv2.imshow('live feed', capture)
        cv2.waitKey(1)
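crop_square_region is not defined in this snippet. Since the rectangle drawn on the frame is just the bounding box of the four points, a plausible minimal version is a plain bounding-box crop:

import cv2
import numpy as np

# Hypothetical helper: crop the axis-aligned bounding box of `points`.
def crop_square_region(image: np.ndarray, points: np.ndarray) -> np.ndarray:
    x, y, w, h = cv2.boundingRect(points)
    return image[y:y + h, x:x + w]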
Example #3
def predict(code, start, end):
    test_data = cnn_data_generater.generate_predict_data(code, start, end)
    model = cnn.load_model(model_path, code)
    predicted = model.predict(test_data, verbose=1)
    predicted = list(map(lambda n: n[0], predicted))

    data = data_processor.load_data(data_path + str(code) + ".csv", start, end)
    data = data_processor.divide(data, DATA_PERIOD, 0)
    base_price = list(
        map(lambda n: n[DATA_PERIOD - 1],
            list(map(lambda n: n['CLOSE'].tolist(), data))))
    date = list(
        map(
            lambda n: (datetime.strptime(n[DATA_PERIOD - 1], "%Y-%m-%d") +
                       timedelta(days=DATA_DURATION)).strftime("%Y-%m-%d"),
            list(map(lambda n: n['DATE'].tolist(), data))))

    df = pd.DataFrame({
        'date': date,
        'predict': predicted,
        'base_price': base_price
    })
    df.to_csv("result.csv", index=False)
    fig = df.plot().get_figure()
    fig.savefig('../figure.png', dpi=600)
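The only non-obvious step above is the date arithmetic: each prediction is labelled with the last date of its input window shifted forward by DATA_DURATION days. In isolation:

from datetime import datetime, timedelta

# Shift a window-end date forward by the prediction horizon (5 days here).
end_of_window = datetime.strptime("2020-01-31", "%Y-%m-%d")
print((end_of_window + timedelta(days=5)).strftime("%Y-%m-%d"))  # 2020-02-05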
Example #4
def run():
    model = load_model('0.9392_HandyNet_1607802541.3999255')[0]
    test_dataset = TestSignLanguageDataset()

    predictions = model(test_dataset.images).argmax(dim=1)
    target = test_dataset.target

    cm = build_confusion_matrix(model, test_dataset)

    target_names = [chr(65 + i) for i in range(25)]  # letters A-Y
    target_names.pop(9)  # remove J (a motion-based sign)

    show_confusion_matrix(cm, target_names,
                          'Confusion matrix of the HandyNet network',
                          save_path="confusion_matrix.png")
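build_confusion_matrix is not shown. A minimal sketch of what it might do, assuming a PyTorch model and a dataset exposing .images and .target tensors over the 24 letter classes:

import torch

# Hypothetical sketch of build_confusion_matrix: run the model once over the
# whole test set and tally (true, predicted) pairs into a 24x24 matrix.
def build_confusion_matrix(model, dataset, num_classes=24):
    model.eval()
    with torch.no_grad():
        preds = model(dataset.images).argmax(dim=1)
    cm = torch.zeros(num_classes, num_classes, dtype=torch.long)
    for t, p in zip(dataset.target, preds):
        cm[t, p] += 1  # rows: true class, columns: predicted class
    return cm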
Example #5
def main(image_folder_path, real_label):
    print("RadhaKrishna")
    # load the cnn model
    cnn_model = cnn.load_model()
    # load the bayesian model
    bayesian_model, labels_list = bayesian_network.load_model()
    # for each image in the test path
    for file in sorted(glob.glob(image_folder_path + "*.jpg")):
        # extract the image name from the image path
        image_name = (file.split('/'))[-1]
        print("Image: " + image_name)
        # classify the image
        prediction = classify_image(image_folder_path, image_name, real_label,
                                    cnn_model, bayesian_model, labels_list)
Example #6
def main():
    # VLC control
    vlc_control = VLCController(DELAY=3)

    # Use the webcam (the only camera is the laptop's built-in one)
    webcam = cv2.VideoCapture(0)
    webcam.set(cv2.CAP_PROP_FRAME_WIDTH, 640)   # frame width in pixels
    webcam.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)  # frame height in pixels
    webcam.set(cv2.CAP_PROP_BRIGHTNESS, 75)     # brightness
    # Load CNN model
    model = load_model("0.9392_HandyNet_1607802541.3999255")[0]

    while True:
        # Capture image from webcam
        success, image = webcam.read()
        capture = cv2.flip(image.copy(), flipCode=1)
        sign = None

        # Get an image of just the hand
        try:
            image_hand = segmentation_contour(capture)
            cv2.imshow("Hand image", image_hand)
            # print("Image size: {}".format(image_hand.shape))
            sign = predict_sign(model, image_hand, threshold=0.7)
        except Exception as e:
            print(e)

        group = find_sign_group(sign)

        capture = cv2.putText(
            capture,
            f"{sign} {str(group)}",
            (10, capture.shape[0] - 10),
            cv2.FONT_HERSHEY_SIMPLEX,
            2,
            (0, 0, 255),
            thickness=3,
        )
        cv2.imshow("Image", capture)
        if cv2.waitKey(1) & 0xFF == ord("q"):
            break

        vlc_control.run(group)
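find_sign_group maps a predicted letter to a command group that VLCController.run acts on. Its actual mapping is not shown anywhere in these examples; the assignment below is purely illustrative:

# Hypothetical sketch of find_sign_group. The letter-to-group assignment
# here is an assumption, not the project's real mapping.
SIGN_GROUPS = {
    'A': 'play_pause',
    'B': 'volume_up',
    'C': 'volume_down',
}

def find_sign_group(sign):
    return SIGN_GROUPS.get(sign)  # None for unrecognized or absent signs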
Example #7
def main(iters, n, epochs, db, model_path, printn=100):
    for i in range(iters):
        try:
            model = cnn.load_model(model_path)
        except FileNotFoundError:
            print(f"No file found at {model_path}.")
            model = cnn.ValueNet()
        print("Get samples")
        with factory.serializer_factory.get_serializer("mongo_bulk",
                                                       db=db) as tree:
            samples = cnn.get_samples(tree, n=n)
        print("Convert to torch")
        x, y, w = cnn.samples_to_torch(samples)
        print("Train model")
        for epoch in range(epochs):
            y_pred, loss = cnn.train_epoch(model, x, y, w)
            if not epoch % printn:
                print(epoch, loss.item())
        torch.save(model.state_dict(), model_path)
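cnn.train_epoch is not shown either. A sketch of a single full-batch step it might perform, assuming a per-sample weight vector w and a weighted MSE objective; a real implementation would keep one optimizer alive across epochs instead of rebuilding it every call:

import torch

# Hypothetical sketch of cnn.train_epoch: one full-batch optimizer step with
# a per-sample-weighted MSE loss. Rebuilding the optimizer each call is a
# simplification; reuse a single optimizer across epochs in practice.
def train_epoch(model, x, y, w, lr=1e-3):
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    optimizer.zero_grad()
    y_pred = model(x)
    loss = (w * (y_pred - y) ** 2).mean()
    loss.backward()
    optimizer.step()
    return y_pred, loss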
Example #8
def colourise_video(video_name, n):
  video_path = '/videos/' + video_name
  save_path = os.path.splitext(video_path)[0] + '_n=' + str(n) + '.mp4'
  vid_out = vu.setup_writer(video_path, save_path) # Setup video writer to save output to file

  model = cnn.load_model(cnn.checkpoint_models_path + "full_model_256.hdf5") # Load CTCNN model

  shot_indices = sc.split_video(video_path) # Split input video by shots

  vid_in = cv2.VideoCapture(video_path)  # Import video

  for shot_number in trange(len(shot_indices), desc="Shots"):
    curr_shot_Lab = gts.read_group_frames_lab(shot_indices, shot_number, vid_in) # Read all frames of current shot in CIELAB format

    sample_frame_indices = []
    radius = int(np.floor(n/2)) # every sample frame should have a radius of n/2 intermediate frames in both directions
    for i in range(radius,len(curr_shot_Lab),n): # choose every nth frame to be a sample frame
      sample_frame_indices.append(i)

    for j in tqdm(sample_frame_indices, desc="Frame Bundles"):
      sample_frame = zhang.colorize(curr_shot_Lab[j], lab_only = True) # Colourise every nth frame using Zhang et al.'s CNN

      colour_a = sample_frame[:,:,1] # Extract a + b channels
      colour_b = sample_frame[:,:,2]
      for k in range(j - radius, j + radius + 1):  # For all frames in current bundle
        if k >= len(curr_shot_Lab):
          break  # bundle runs past the end of the shot
        if k != j:  # Intermediate frame: recolour it from the sample frame
          intermediate = curr_shot_Lab[k]
          intermediate_l = intermediate[:, :, 0]  # Extract L channel

          X_channels = [intermediate_l, colour_a, colour_b]
          X_image = np.stack(X_channels, axis=-1)  # Combine intermediate frame L channel with sample frame a+b channels

          new_frame = cnn.predict_lab(model, X_image)  # Put through CTCNN to smooth colour differences
          new_frame_rgb = cv2.cvtColor(new_frame, cv2.COLOR_Lab2BGR)  # Convert back to BGR to write to video
        else:  # Sample frame: already colourised above
          new_frame_rgb = cv2.cvtColor(sample_frame, cv2.COLOR_Lab2BGR)

        vid_out.write(new_frame_rgb)  # Save frame to video

  vid_in.release()
  vid_out.release() # Save final output video
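gts.read_group_frames_lab is assumed above to read the frames of one shot from the open capture and convert each to CIELAB. A sketch under the assumption that shot_indices[i] is the first frame index of shot i:

import cv2

# Hypothetical sketch of gts.read_group_frames_lab: seek to the start of the
# shot, read frames up to the next shot boundary, and convert BGR -> Lab.
def read_group_frames_lab(shot_indices, shot_number, vid_in):
    start = shot_indices[shot_number]
    end = (shot_indices[shot_number + 1]
           if shot_number + 1 < len(shot_indices) else None)
    frames = []
    vid_in.set(cv2.CAP_PROP_POS_FRAMES, start)
    while end is None or vid_in.get(cv2.CAP_PROP_POS_FRAMES) < end:
        ok, frame = vid_in.read()
        if not ok:
            break
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2Lab))
    return frames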
Example #9
def main():
    image = cv2.imread('sudoku.jpeg')

    processed_image = process_image(image)

    # if DEBUG: show_image(processed_image, 'thresh')

    contours = find_contours(processed_image)

    corners = find_corners(contours)

    warped = perspective_transform(image, corners)

    # if DEBUG: show_image(warped, 'warped')

    squares = grid(warped)

    digits = extract_digits(warped, squares)

    model = cnn.load_model()
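The snippet cuts off after loading the model. Of its helpers, grid is the most mechanical; a plausible sketch, assuming a square top-down view of the puzzle and cells returned as corner-point pairs for extract_digits to crop:

# Hypothetical sketch of grid(): split the warped board into 81 equal cells.
# The corner-pair return format is an assumption.
def grid(warped):
    side = warped.shape[0] // 9
    squares = []
    for r in range(9):
        for c in range(9):
            squares.append(((c * side, r * side),
                            ((c + 1) * side, (r + 1) * side)))
    return squares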
Example #10
from cv2 import imwrite, resize, INTER_NEAREST

from cnn import load_model
from data import get_training_data_4d
import numpy as np

model = load_model()
data = get_training_data_4d()

predictions = np.argmax(model.predict(data[1]), axis=1)
classes = np.argmax(data[3], axis=1)
i = 0

for image, true_class, predicted_class in zip(data[1], classes, predictions):
    if true_class != predicted_class:
        print(f"class {true_class} predicted as class {predicted_class}")
        img = image.reshape(28, 28) * -255 + 255  # map [0, 1] to [255, 0]: rescale and invert
        name = f"data\\wrongly_classified\\{i}_{true_class}as{predicted_class}.png"
        imwrite(name, resize(img, (280, 280), interpolation=INTER_NEAREST))
        i += 1
Example #11
def run():
    model = load_model('0.9392_HandyNet_1607802541.3999255')[0]
    test_realworld_images(model)
Example #12
## Place your hand into frame and press 'Q'
import cv2
import cnn
from time import sleep
import torch

model = cnn.CNN(1, 512, 6)
cap = cv2.VideoCapture(0)
model.load_state_dict(cnn.load_model())
modif = 50
neural_frame = None
while True:
    ret, frame = cap.read()
    rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)  # preview copy (display is commented out below)

    # Draw three sides of the capture box on the preview
    rgb[0:500, 100] = 0
    rgb[0:500, 500] = 0
    rgb[500, 100:500] = 0

    neural_frame = frame[0:500, 100:500]
    cv2.imshow('Partial Frame', neural_frame)

    #cv2.imshow('frame', rgb)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        cv2.imwrite('temp.jpg', neural_frame)
        image = cv2.imread('temp.jpg')
        t_img = cnn.preprcess_image(image)
        # print(t_img.size())
        with torch.no_grad():
            predict = model(t_img)
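cnn.preprcess_image (keeping the module's own spelling) is not shown. A plausible minimal version, assuming the model expects a single-channel float tensor with a batch dimension; the 64x64 input size is an assumption:

import cv2
import torch

# Hypothetical sketch of cnn.preprcess_image: grayscale, resize to the
# model's input size, scale to [0, 1], and add batch/channel dimensions.
def preprcess_image(image, size=64):
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    gray = cv2.resize(gray, (size, size))
    tensor = torch.from_numpy(gray).float() / 255.0
    return tensor.unsqueeze(0).unsqueeze(0)  # shape: (1, 1, size, size)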
Example #13
def __init__(self, name, model=None, **kwargs):
    self.model = model
    if not self.model:
        self.model = cnn.load_model()
    super().__init__(name, **kwargs)
Example #14
                    print("Yes", c, scs)
                else:
                    print("No", c, scs)
                    acc += 1
        acc = acc / 40
        print(k)
        print("------------ " + "True Positive Rate for this class: " + str(1-acc) + " -----------------")
        dec.append(1-acc)
        #time.sleep(1.5)
    dec = np.array(dec)
    print(dec.mean())
    return dec

import tensorflow as tf
graph = tf.get_default_graph()
net = cnn.load_model()
#net._make_predict_function()
feature=get_detect_from_db()
orb = cv.ORB_create()
bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
svms=[]
for i in range(16):
    with open('./SVM/svm_' + str(i), 'rb') as file:
        svms.append(pickle.load(file))

@app.route('/svm', methods=['POST'])
def svm():
    """
    bf = cv.BFMatcher(cv.NORM_HAMMING, crossCheck=True)
Example #15
    # get the bayesian and bayesian + cnn predictions for the image
    bayesian_label, bayesian_cnn_label, emotion_dict, emotion_cnn_dict = bayesian_network.inference(
        bayesian_model, labels_list, labels, cnn_label)

    # print("Faces detected: " + str(faces_detected))
    # print("Real Label: " + str(real_label))
    # print("CNN Label: " + str(cnn_label))
    # print("Bayesian Label: " + str(bayesian_label))
    # print("Bayesian + CNN Label: " + str(bayesian_cnn_label))

    return (classes[real_label], classes[str(cnn_label)],
            classes[str(bayesian_label)], classes[str(bayesian_cnn_label)],
            faces_detected)


# load the cnn model
cnn_model = cnn.load_model()
# load the bayesian model
bayesian_model, labels_list = bayesian_network.load_model()


# function to evaluate the pipeline on a given directory
def evaluate(image_folder_path, real_label):
    # print("RadhaKrishna")
    # get the count of total number of files in the directory
    _, _, files = next(os.walk(image_folder_path))
    file_count = len(files) - 1
    # list to store the predictions
    predictions = []
    # set count = 1
    i = 1