Example #1
def scutfbp_df(queue):
    """Preprocess and augment SCUT-FBP faces to data/. Returns pandas dataframe"""
    scutfbp_df = pd.read_excel("Rating_Collection/Attractiveness label.xlsx")

    # Convert type of #Image column to str
    scutfbp_df["#Image"] = scutfbp_df["#Image"].astype(str)
    # Drop column Standard Deviation
    scutfbp_df = scutfbp_df.drop(columns="Standard Deviation")

    for face in os.listdir("Data_Collection"):
        if face.endswith(".jpg"):
            base = os.path.splitext(face)[0]
            # Regex to find numbers at end of string
            img_num = re.match(".*?([0-9]+)$", base).group(1)

            try:
                preprocess.resize("Data_Collection/{0}".format(face), "data/{0}".format(face))
                preprocess.hflip("data/{0}".format(face), "data/{0}-F.jpg".format(base))
                preprocess.add_noise("data/{0}".format(face), "data/{0}-N.jpg".format(base))
            except Exception:
                # Drop rows whose image failed preprocessing
                scutfbp_df = scutfbp_df[scutfbp_df["#Image"] != img_num]

    flipped_df = scutfbp_df.copy()
    noisy_df = scutfbp_df.copy()
    flipped_df["#Image"] = "SCUT-FBP-" + flipped_df["#Image"] + "-F.jpg"
    noisy_df["#Image"] = "SCUT-FBP-" + noisy_df["#Image"] + "-N.jpg"
    scutfbp_df["#Image"] = "SCUT-FBP-" + scutfbp_df["#Image"] + ".jpg"

    df = pd.concat([scutfbp_df, flipped_df, noisy_df], ignore_index=True)
    # Rename #Image -> Face and Attractiveness label -> Rating
    df.columns = ["Face", "Rating"]
    # Convert from 5 point scale to 10 point scale
    df["Rating"] *= 10.0 / 5.0

    queue.put(df)
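
The face examples on this page all lean on the same three helpers: preprocess.resize, preprocess.hflip, and preprocess.add_noise. Their implementations are not shown here; below is a minimal PIL-based sketch of what they plausibly do, with signatures inferred from the call sites (including the crop keyword that Example #5 passes) and everything else, such as output size and noise level, assumed:

import numpy as np
from PIL import Image

def resize(src, dst, size=(256, 256), crop=True):
    # Hypothetical: resize to a fixed square and save. The real helper
    # likely also detects and crops the face (crop kwarg unused here).
    Image.open(src).resize(size).save(dst)

def hflip(src, dst):
    # Horizontal mirror for augmentation.
    Image.open(src).transpose(Image.Transpose.FLIP_LEFT_RIGHT).save(dst)

def add_noise(src, dst, sigma=8.0):
    # Gaussian pixel noise for augmentation; sigma is a guess.
    arr = np.asarray(Image.open(src)).astype(np.float32)
    noisy = np.clip(arr + np.random.normal(0.0, sigma, arr.shape), 0, 255)
    Image.fromarray(noisy.astype(np.uint8)).save(dst)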
Example #2
def models_df(queue):
    """Preprocess and augment models.com faces to data/. Returns pandas dataframe"""
    imgs = []

    for i in range(1, 216):
        url = "http://models.com/newfaces/page/{0}".format(i)
        page = urllib.request.urlopen(url).read()
        soup = BeautifulSoup(page, "lxml")

        for tag in soup.findAll("img", {"class": "attachment-square"}):
            src = "http:{0}".format(tag["src"])
            base = uuid.uuid4().hex
            filename = base + ".jpg"

            urllib.request.urlretrieve(src, "data/{0}".format(filename))

            try:
                preprocess.resize("data/{0}".format(filename), "data/{0}".format(filename))
                preprocess.hflip("data/{0}".format(filename), "data/{0}-F.jpg".format(base))
                preprocess.add_noise("data/{0}".format(filename), "data/{0}-N.jpg".format(base))
            except Exception:
                os.remove("data/{0}".format(filename))
                continue

            imgs.append({"Face": filename, "Rating": 10})
            imgs.append({"Face": "{0}-F.jpg".format(base), "Rating": 10})
            imgs.append({"Face": "{0}-N.jpg".format(base), "Rating": 10})

    df = pd.DataFrame(imgs)
    queue.put(df)
Example #3
def usfaces_df(queue):
    """Preprocess and augment US Face Database faces to data/. Returns pandas dataframe"""
    usfaces_df = pd.read_excel("Full Attribute Scores/demographic & others labels/demographic-others-labels.xlsx")
    usfaces_df = usfaces_df[["Filename", "Attractive"]]

    usfaces_df = usfaces_df.drop_duplicates(["Filename"])

    for face in usfaces_df["Filename"]:
        base = os.path.splitext(face)[0]

        try:
            preprocess.resize("10k US Adult Faces Database/Face Images/{0}".format(face), "data/{0}".format(face))
            preprocess.hflip("data/{0}".format(face), "data/{0}-F.jpg".format(base))
            preprocess.add_noise("data/{0}".format(face), "data/{0}-N.jpg".format(base))
        except Exception:
            usfaces_df = usfaces_df[usfaces_df.Filename != face]

    flipped_df = usfaces_df.copy()
    noisy_df = usfaces_df.copy()
    flipped_df["Filename"] = flipped_df["Filename"].str[:-4] + "-F.jpg"
    noisy_df["Filename"] = noisy_df["Filename"].str[:-4] + "-N.jpg"

    df = pd.concat([usfaces_df, flipped_df, noisy_df], ignore_index=True)
    df.columns = ["Face", "Rating"]
    df["Rating"] *= 10.0 / 5.0

    queue.put(df)
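
Examples #1-#3 (and #5-#6 below) end with queue.put(df) instead of a return, which suggests each is meant to run as a worker process. A hypothetical driver, assuming multiprocessing (the orchestration names here are illustrative, not from the source):

import multiprocessing as mp
import pandas as pd

if __name__ == "__main__":
    queue = mp.Queue()
    workers = [mp.Process(target=f, args=(queue,))
               for f in (scutfbp_df, models_df, usfaces_df)]
    for w in workers:
        w.start()
    # Drain the queue before join(): a child process cannot exit while a
    # large DataFrame is still buffered in its queue pipe.
    frames = [queue.get() for _ in workers]
    for w in workers:
        w.join()
    df = pd.concat(frames, ignore_index=True)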
Example #4
def main() -> None:
    (x_train_s, x_ref, y_ref), _ = dataset.get_fasion_mnist()
    x_train_s = preprocess.resize(x_train_s)
    x_ref = preprocess.resize(x_ref)

    np.random.seed(0)
    train(x_train_s, x_ref, y_ref, 40)
Example #5
def eccv_df(queue):
    """Preprocess and augment Gray et al. dataset to data/. Returns pandas dataframe"""
    root = ET.parse("eccv2010_beauty_data/hotornot_face_all.xml").getroot()

    rows = []
    for child in root:
        filename = os.path.split(child.attrib["filename"])[-1]
        base = os.path.splitext(filename)[0]

        try:
            preprocess.resize(
                "eccv2010_beauty_data/{0}".format(child.attrib["filename"]),
                "data/{0}".format(base + ".jpg"),
                crop=False,
            )
            preprocess.hflip("data/{0}".format(filename), "data/{0}-F.jpg".format(base))
            preprocess.add_noise("data/{0}".format(filename), "data/{0}-N.jpg".format(base))
        except Exception:
            continue

        childs.append([base + "-F.jpg", float(child.attrib["score"])])
        childs.append([base + "-N.jpg", float(child.attrib["score"])])
        childs.append([base + ".jpg", float(child.attrib["score"])])

    df = pd.DataFrame(rows, columns=["Face", "Rating"])
    df["Rating"] += 4
    df["Rating"] *= 10.0 / 8.0

    queue.put(df)
Example #6
def chicago_df(queue):
    """Preprocess and augment Chicago faces to data/. Returns pandas dataframe"""
    chicago_df = pd.read_excel("CFD Version 2.0/CFD 2.0 Norming Data and Codebook.xlsx", skiprows=4)
    chicago_df = chicago_df[["Target", "Attractive"]]

    for folder in os.listdir("CFD Version 2.0/CFD 2.0 Images"):
        if folder == ".DS_Store":
            continue

        for face in os.listdir("CFD Version 2.0/CFD 2.0 Images/{0}".format(folder)):
            # Neutral faces only
            if face.endswith("N.jpg"):
                # Keep the target only if preprocessing succeeds (one face detected)
                try:
                    preprocess.resize(
                        "CFD Version 2.0/CFD 2.0 Images/{0}/{1}".format(folder, face), "data/{0}.jpg".format(folder)
                    )
                    preprocess.hflip("data/{0}.jpg".format(folder), "data/{0}-F.jpg".format(folder))
                    preprocess.add_noise("data/{0}.jpg".format(folder), "data/{0}-N.jpg".format(folder))
                except Exception:
                    chicago_df = chicago_df[chicago_df.Target != folder]

    flipped_df = chicago_df.copy()
    noisy_df = chicago_df.copy()
    flipped_df["Target"] = flipped_df["Target"] + "-F.jpg"
    noisy_df["Target"] = noisy_df["Target"] + "-N.jpg"
    chicago_df["Target"] = chicago_df["Target"] + ".jpg"

    df = pd.concat([chicago_df, flipped_df, noisy_df], ignore_index=True)
    # Rename Target -> Face and Attractive -> Rating
    df.columns = ["Face", "Rating"]
    # Convert from 7 point scale to 10 point scale
    df["Rating"] *= 10.0 / 7.0

    queue.put(df)
Example #7
def main() -> None:
    (x_train_s, _, _), (x_test_s, x_test_b) = dataset.get_fasion_mnist()
    x_train_s = preprocess.resize(x_train_s)
    x_test_s = preprocess.resize(x_test_s)
    x_test_b = preprocess.resize(x_test_b)

    network = tf.keras.models.load_model(
        "_data/model.h5", custom_objects={"original_loss": original_loss})
    network.summary()

    predict(x_train_s, x_test_s, x_test_b, network)
Example #8
def load_image_test(image_file):
    input_image, real_image = load(image_file)
    input_image, real_image = resize(input_image, real_image, IMG_HEIGHT,
                                     IMG_WIDTH)
    input_image, real_image = normalize(input_image, real_image)

    return input_image, real_image
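
This loader follows the TensorFlow pix2pix tutorial pattern. The resize and normalize helpers it calls are defined alongside it in that pattern; roughly as below (a sketch from memory, not the verbatim source):

import tensorflow as tf

def resize(input_image, real_image, height, width):
    # Nearest-neighbor keeps the paired images pixel-aligned after resizing.
    input_image = tf.image.resize(
        input_image, [height, width],
        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    real_image = tf.image.resize(
        real_image, [height, width],
        method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
    return input_image, real_image

def normalize(input_image, real_image):
    # Map [0, 255] pixels to [-1, 1], matching a tanh generator output.
    input_image = (input_image / 127.5) - 1
    real_image = (real_image / 127.5) - 1
    return input_image, real_image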
Example #9
def telemetry(sid, data):
    if data:
        # The current steering angle of the car
        steering_angle = data["steering_angle"]
        # The current throttle of the car
        throttle = data["throttle"]
        # The current speed of the car
        speed = data["speed"]
        # The current image from the center camera of the car
        imgString = data["image"]
        image = Image.open(BytesIO(base64.b64decode(imgString)))
        image_array = np.asarray(image)

        image_array = preprocess.crop(image_array, 0.35, 0.1)
        image_array = preprocess.resize(image_array, new_dimension=(64, 64))

        transformed_image_array = image_array[None, :, :, :]

        steering_angle = float(
            model.predict(transformed_image_array, batch_size=1))

        throttle = controller.update(float(speed))

        print(steering_angle, throttle)
        send_control(steering_angle, throttle)

        # save frame
        if args.image_folder != '':
            timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
            image_filename = os.path.join(args.image_folder, timestamp)
            image.save('{}.jpg'.format(image_filename))
    else:
        # NOTE: DON'T EDIT THIS.
        sio.emit('manual', data={}, skip_sid=True)
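
Both telemetry handlers on this page (this one and Example #14) are variants of Udacity's drive.py and assume a python-socketio server plus a send_control helper that is not shown. A sketch of the missing pieces, under that assumption:

import socketio

sio = socketio.Server()

def send_control(steering_angle, throttle):
    # Push the predicted controls back to the simulator client.
    sio.emit(
        "steer",
        data={"steering_angle": str(steering_angle), "throttle": str(throttle)},
        skip_sid=True,
    )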
Example #10
    def do_preprocess(self, img):
        x = np.copy(img)

        if self.properties["autocrop"]["enabled"]:
            x = preprocess.autocrop(x)

        if x is None:
            return None

        x = preprocess.blur(
            x, gaussian_blur=self.properties["gaussian_blur"], median_blur=self.properties["median_blur"]
        )

        if self.properties["grey"]["enabled"]:
            x = preprocess.grey(x)

        if self.properties["resize"]["enabled"]:
            x = preprocess.resize(x, (self.properties["resize"]["width"], self.properties["resize"]["height"]))
        elif self.properties["scale_max"]["enabled"]:
            x = preprocess.scale_max(x, self.properties["scale_max"]["width"], self.properties["scale_max"]["height"])

        if self.properties["convert_to_matrix_colors"]["enabled"]:
            (matrix, cluster_centers_, labels, background_label) = improc.color.Matrix_scikit_kmeans(
                x, self.properties["convert_to_matrix_colors"]["number_of_colors"]
            )
            x = improc.color.Image_from_matrix(
                matrix,
                self.properties["convert_to_matrix_colors"]["height"],
                self.properties["convert_to_matrix_colors"]["width"],
            )

        return x
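
The properties schema these do_preprocess variants expect can be read directly off the lookups in the code. A minimal configuration that exercises this version (the values are placeholders, and the shape of the blur settings is an assumption):

properties = {
    "autocrop": {"enabled": True},
    "gaussian_blur": 3,   # assumed: forwarded as-is to preprocess.blur
    "median_blur": 3,     # assumed: forwarded as-is to preprocess.blur
    "grey": {"enabled": True},
    "resize": {"enabled": True, "width": 64, "height": 64},
    "scale_max": {"enabled": False, "width": 640, "height": 480},
    "convert_to_matrix_colors": {
        "enabled": False, "number_of_colors": 8, "width": 32, "height": 32,
    },
}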
Example #11
    def do_preprocess(self, img):
        x = np.copy(img)
        if x is not None and self.properties["autocrop"]["enabled"]:
            x = preprocess.autocrop(x)

        if x is not None:
            x = preprocess.blur(
                x, gaussian_blur=self.properties["gaussian_blur"], median_blur=self.properties["median_blur"]
            )

        if x is not None and self.properties["grey"]["enabled"]:
            x = preprocess.grey(x)

        if x is not None and self.properties["bitwise"]["enabled"]:
            x = preprocess.bitwise(x)

        if x is not None and self.properties["canny"]["enabled"]:
            x = preprocess.canny(x, self.properties["canny"]["threshold1"], self.properties["canny"]["threshold2"])

        if x is not None and self.properties["laplacian"]["enabled"]:
            x = preprocess.laplacian(x)

        if x is not None and self.properties["thresh"]["enabled"]:
            x = preprocess.thresh(x)

        if x is not None and self.properties["closing"]["enabled"]:
            x = preprocess.closing(x, self.properties["closing"]["width"], self.properties["closing"]["height"])

        if x is not None and self.properties["dilate"]["enabled"]:
            x = preprocess.dilate(
                x,
                self.properties["dilate"]["width"],
                self.properties["dilate"]["height"],
                self.properties["dilate"]["iterations"],
            )

        if x is not None and self.properties["outline_contour"]["enabled"]:
            x = preprocess.outline_contour(x)

        if x is not None and self.properties["resize"]["enabled"]:
            x = preprocess.resize(x, (self.properties["resize"]["width"], self.properties["resize"]["height"]))
        elif x is not None and self.properties["scale_max"]["enabled"]:
            x = preprocess.scale_max(x, self.properties["scale_max"]["width"], self.properties["scale_max"]["height"])

        if x is not None and self.properties["add_border"]["enabled"]:
            x = preprocess.add_border(
                x,
                border_size=self.properties["add_border"]["border_size"],
                color_value=self.properties["add_border"]["color_value"],
                fill_dimensions=self.properties["add_border"]["fill_dimensions"],
            )

        return x
Example #12
    def do_preprocess(self, img):
        x = np.copy(img)

        if self.properties["autocrop"]["enabled"]:
            x = preprocess.autocrop(x)

        if x is None:
            return None

        x = preprocess.blur(
            x,
            gaussian_blur=self.properties["gaussian_blur"],
            median_blur=self.properties["median_blur"]
        )

        if self.properties["grey"]["enabled"]:
            x = preprocess.grey(x)

        if self.properties["resize"]["enabled"]:
            x = preprocess.resize(
                x,
                (
                    self.properties["resize"]["width"],
                    self.properties["resize"]["height"]
                )
            )
        elif self.properties["scale_max"]["enabled"]:
            x = preprocess.scale_max(
                x,
                self.properties["scale_max"]["width"],
                self.properties["scale_max"]["height"]
            )

        if self.properties["convert_to_matrix_colors"]["enabled"]:
            (
                matrix, cluster_centers_, labels, background_label
            ) = improc.color.Matrix_scikit_kmeans(
                x,
                self.properties["convert_to_matrix_colors"]["number_of_colors"]
            )
            x = improc.color.Image_from_matrix(
                matrix,
                self.properties["convert_to_matrix_colors"]["height"],
                self.properties["convert_to_matrix_colors"]["width"]
            )

        return x
Example #13
def preprocess(im, augment=False):
    with tf.variable_scope("preprocess"):
        if augment:
            im = pp.random_resize(im, INPUT_SIZE)
            im = pp.random_flip(im)
            im = pp.random_distort_color(im)
            if GRAYSCALE: im = pp.grayscale(im)
            im = pp.random_op(im, [
                lambda _: _,  # identity: sometimes apply no filter at all
                pp.sharpen, pp.avg_blur, pp.unsharpen, pp.gauss_blur
            ])
        else:
            im = pp.resize(im, INPUT_SIZE)
            if GRAYSCALE: im = pp.grayscale(im)
        im = pp.normalize(im, 0.5, 0.5)
    return im
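
pp.normalize(im, 0.5, 0.5) is project-specific. Given the (mean, std) argument pair, a plausible reading is a shift-and-scale that maps [0, 1] pixels to [-1, 1]; a sketch under that assumption:

import tensorflow as tf

def normalize(im, mean, std):
    # Hypothetical: with mean=std=0.5 this maps [0, 1] inputs to [-1, 1].
    return (tf.cast(im, tf.float32) - mean) / std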
Example #14
def telemetry(sid, data):
    # The current steering angle of the car
    steering_angle = data["steering_angle"]

    # The current throttle of the car
    throttle = data["throttle"]

    # The current speed of the car
    speed = data["speed"]

    # The current image from the center camera of the car
    imgString = data["image"]
    image = Image.open(BytesIO(base64.b64decode(imgString)))
    image_array = np.asarray(image)

    image_array = preprocess.crop(image_array, 0.35, 0.1)
    image_array = preprocess.resize(image_array, new_dim=(64, 64))

    transformed_image_array = image_array[None, :, :, :]

    # This model currently assumes that the features of the model are just the images. Feel free to change this.

    steering_angle = float(model.predict(transformed_image_array,
                                         batch_size=1))
    # The driving model currently just outputs a constant throttle. Feel free to edit this.
    #throttle = 0.3
    throttle = 0.2
    if 5 < float(speed) < 10:
        throttle = 0.5

    print('{:.5f}, {:.1f}'.format(steering_angle, throttle))

    send_control(steering_angle, throttle)

    # save frame
    if args.image_folder != '':
        timestamp = datetime.utcnow().strftime('%Y_%m_%d_%H_%M_%S_%f')[:-3]
        image_filename = os.path.join(args.image_folder, timestamp)
        image.save('{}.jpg'.format(image_filename))
Example #15
def runBatch(video_reader, gps_dat, cam, output_base, start_frame, final_frame, left_lanes, right_lanes, ldist, rdist, tr):
    meters_per_point = 10
    points_fwd = 8
    frames_per_second = 50
    distances = GPSVelocities(gps_dat) / frames_per_second
    pts = GPSPos(gps_dat, cam, gps_dat[0, :]) # points wrt camera
    count = 0
    output_num = 0
    imgs = []
    labels = []
    success = True
    while True:
        (success, I, frame, P) = video_reader.getNextFrame()
        #P = np.eye(3)
        if count % 160 == 0:
            print(count)
        if not success or frame >= tr.shape[0]:
            print(success, frame, tr.shape[0])
            break
        if frame < start_frame or (final_frame != -1 and frame >= final_frame):
            continue

        important_frames = (outputDistances(distances, frame, meters_per_point, points_fwd, 2))
        if len(important_frames) < points_fwd:
            continue

        important_left = []
        important_right = []

        velocities = gps_dat[important_frames,4:7]
        velocities[:,[0, 1]] = velocities[:,[1, 0]]
        sideways = np.cross(velocities, np.array([0,0,1]), axisa=1)
        sideways /= np.sqrt((sideways ** 2).sum(-1))[..., np.newaxis]
        vel_start = ENU2IMU(np.transpose(velocities), gps_dat[0,:])
        vel_current = ENU2IMU(np.transpose(velocities), gps_dat[frame,:])
        sideways_start = GPSPosCamera(np.cross(vel_start, np.array([0,0,1]), axisa=0).transpose(), cam) # sideways vector wrt starting frame (camera)
        sideways_current = GPSPosCamera(np.cross(vel_current, np.array([0,0,1]), axisa=0).transpose(), cam) # sideways vector wrt starting frame (camera)
        sideways_start /= np.sqrt((sideways_start ** 2).sum(0))[np.newaxis, ...]
        sideways_current /= np.sqrt((sideways_current ** 2).sum(0))[np.newaxis, ...] # save sideways_current
        center = GPSPos(gps_dat[important_frames,:], cam, gps_dat[0, :]) # points wrt imu
        for ind in range(len(important_frames)):
            fr = important_frames[ind]
            min_val = max(fr - 50, 0)
            max_val = min(fr + 50, left_lanes.shape[0] - 1)

            l_distances = np.cross(left_lanes[min_val:max_val, 0:3] - np.transpose(center[:,ind]), np.transpose(sideways_start[:, ind]), axisa=1)
            l_distances = np.sqrt((l_distances ** 2).sum(-1))
            r_distances = np.cross(right_lanes[min_val:max_val, 0:3] - np.transpose(center[:,ind]), np.transpose(sideways_start[:, ind]), axisa=1)
            r_distances = np.sqrt((r_distances ** 2).sum(-1))
            important_left.append(np.argmin(l_distances)+min_val)
            important_right.append(np.argmin(r_distances)+min_val)
        important_left = np.array(important_left)
        important_right = np.array(important_right)
        max_idx = max(max(np.max(important_frames), np.max(important_left)), np.max(important_right))
        if max_idx >= left_lanes.shape[0] or max_idx >= right_lanes.shape[0]:
            print('maxing out')
            continue

        temp_left = np.linalg.solve(tr[frame, :, :], left_lanes[important_left, :].transpose()) # save temp_left [0:3,:]
        #temp_left[0:3, :] += sideways_current*0.8

        temp_gps = GPSPos(gps_dat[important_frames], cam, gps_dat[frame, :]) # save temp_gps [0:3,:]

        temp_right = np.linalg.solve(tr[frame, :, :], right_lanes[important_right, :].transpose()) # save temp_right [0:3,:]
        #temp_right[0:3, :] -= sideways_current*0.8

        outputs = []
        for i in range(temp_left.shape[1]):
            outputs.append(temp_left[0, i])
            outputs.append(temp_left[1, i])
            outputs.append(temp_left[2, i])
        for i in range(temp_gps.shape[1]):
            outputs.append(temp_gps[0, i])
            outputs.append(temp_gps[1, i])
            outputs.append(temp_gps[2, i])
        for i in range(temp_right.shape[1]):
            outputs.append(temp_right[0, i])
            outputs.append(temp_right[1, i])
            outputs.append(temp_right[2, i])

        for i in range(sideways_current.shape[1]):
            outputs.append(sideways_current[0, i])
            outputs.append(sideways_current[1, i])
            outputs.append(sideways_current[2, i])

        for i in range(P.shape[0]):
            for j in range(P.shape[1]):
                outputs.append(P[i, j])

        labels.append(outputs)
        
        
        reshaped = pp.resize(I, (240, 320))[0]
        imgs.append(reshaped)
        if len(imgs) == 960:
            merge_file = "%s_%d" % (output_base, output_num)
            pml.save_merged_file(merge_file, imgs, labels, imgRows=len(labels[0]))
            imgs = []
            labels = []
            output_num += 1

        count += 1
dataset_name = "stanford_dogs"
(ds_train, ds_val, ds_test), ds_info = tfds.load(dataset_name,
                                                 split=["train", "test[:50%]", "test[50%:100%]"],
                                                 with_info=True,
                                                 as_supervised=True)

# Number of dog breeds
NUM_CLASSES = ds_info.features["label"].num_classes
names = ds_info.features["label"].names
IMG_SIZE = 300
BATCH_SIZE = 100
size = (IMG_SIZE, IMG_SIZE)

ds_train, ds_val, ds_test = preprocess.resize(train=ds_train,
                                              val=ds_val,
                                              test=ds_test,
                                              size=size)


ds_train, ds_val, ds_test = preprocess.batch_create(train=ds_train,
                                                    val=ds_val,
                                                    test=ds_test,
                                                    NUM_CLASSES=NUM_CLASSES,
                                                    BATCH_SIZE=BATCH_SIZE,
                                                    BUFFER_SIZE=len(ds_test))

model, lr = post_process.build_model(NUM_CLASSES=NUM_CLASSES, IMG_SIZE=IMG_SIZE)

model.load_weights(post_process.checkpoint(num=0, IMG_SIZE=IMG_SIZE))

# es_callback = post_process.early_stopping()
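
Note that preprocess.resize here takes whole datasets rather than a single image, so it is a different helper from the ones in earlier examples. A minimal sketch of what it plausibly does with tf.data (hypothetical; only the keyword signature is taken from the call site):

import tensorflow as tf

def resize(train, val, test, size):
    # Map a per-element resize over each supervised (image, label) dataset.
    def _resize(image, label):
        return tf.image.resize(image, size), label
    return train.map(_resize), val.map(_resize), test.map(_resize)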
Example #17
def runBatch(video_reader, gps_dat, cam, output_base, start_frame, final_frame, left_lanes, right_lanes, ldist, rdist, tr):
    meters_per_point = 10
    points_fwd = 8
    frames_per_second = 50
    distances = GPSVelocities(gps_dat) / frames_per_second
    pts = GPSPos(gps_dat, cam, gps_dat[0, :]) # points wrt camera
    count = 0
    output_num = 0
    imgs = []
    labels = []
    success = True
    while True:
        #(success, I, frame, P) = video_reader.getNextFrame()
        (success, I) = video_reader.getNextFrame()
        frame = 10 * count
        #frame = count
        P = np.eye(3)
        if count % 160 == 0:
            print(count)
        if not success or frame >= tr.shape[0]:
            print(success, frame, tr.shape[0])
            break
        if frame < start_frame or (final_frame != -1 and frame >= final_frame):
            continue

        important_frames = (outputDistances(distances, frame, meters_per_point, points_fwd, 2))
        if len(important_frames) < points_fwd:
            continue

        important_left = []
        important_right = []

        velocities = gps_dat[important_frames,4:7]
        velocities[:,[0, 1]] = velocities[:,[1, 0]]
        sideways = np.cross(velocities, np.array([0,0,1]), axisa=1)
        sideways /= np.sqrt((sideways ** 2).sum(-1))[..., np.newaxis]
        vel_start = ENU2IMU(np.transpose(velocities), gps_dat[0,:])
        vel_current = ENU2IMU(np.transpose(velocities), gps_dat[frame,:])
        sideways_start = GPSPosCamera(np.cross(vel_start, np.array([0,0,1]), axisa=0).transpose(), cam) # sideways vector wrt starting frame (camera)
        sideways_current = GPSPosCamera(np.cross(vel_current, np.array([0,0,1]), axisa=0).transpose(), cam) # sideways vector wrt starting frame (camera)
        sideways_start /= np.sqrt((sideways_start ** 2).sum(0))[np.newaxis, ...]
        sideways_current /= np.sqrt((sideways_current ** 2).sum(0))[np.newaxis, ...] # save sideways_current
        center = GPSPos(gps_dat[important_frames,:], cam, gps_dat[0, :]) # points wrt imu
        for ind in range(len(important_frames)):
            fr = important_frames[ind]
            min_val = max(fr - 50, 0)
            max_val = min(fr + 50, left_lanes.shape[0] - 1)

            l_distances = np.cross(left_lanes[min_val:max_val, 0:3] - np.transpose(center[:,ind]), np.transpose(sideways_start[:, ind]), axisa=1)
            l_distances = np.sqrt((l_distances ** 2).sum(-1))
            r_distances = np.cross(right_lanes[min_val:max_val, 0:3] - np.transpose(center[:,ind]), np.transpose(sideways_start[:, ind]), axisa=1)
            r_distances = np.sqrt((r_distances ** 2).sum(-1))
            important_left.append(np.argmin(l_distances)+min_val)
            important_right.append(np.argmin(r_distances)+min_val)
        important_left = np.array(important_left)
        important_right = np.array(important_right)
        max_idx = max(max(np.max(important_frames), np.max(important_left)), np.max(important_right))
        if max_idx >= left_lanes.shape[0] or max_idx >= right_lanes.shape[0]:
            print('maxing out')
            continue

        temp_left = np.linalg.solve(tr[frame, :, :], left_lanes[important_left, :].transpose()) # save temp_left [0:3,:]
        #temp_left[0:3, :] += sideways_current*0.8

        temp_gps = GPSPos(gps_dat[important_frames], cam, gps_dat[frame, :]) # save temp_gps [0:3,:]

        temp_right = np.linalg.solve(tr[frame, :, :], right_lanes[important_right, :].transpose()) # save temp_right [0:3,:]
        #temp_right[0:3, :] -= sideways_current*0.8

        #gps_vals = warpPoints(P, PointsMask(temp_gps[0:3, :], cam)[0:2]) # save P
        #left_vals = warpPoints(P, PointsMask(temp_left[0:3, :], cam)[0:2])
        #right_vals = warpPoints(P, PointsMask(temp_right[0:3, :], cam)[0:2])
        #gps_vals = (gps_vals / 4).astype(np.int32)
        #left_vals = (left_vals / 4).astype(np.int32)
        #right_vals = (right_vals / 4).astype(np.int32)
        #left_vals = left_vals.clip(0,500)
        #right_vals = right_vals.clip(0,500)
        #gps_vals = gps_vals.clip(0,500)
        #gps_vals[0, gps_vals[0, :] >= 320] = 319
        #gps_vals[1, gps_vals[1, :] >= 240] = 239
        #left_vals[0, left_vals[0, :] >= 320] = 319
        #left_vals[1, left_vals[1, :] >= 240] = 239
        #right_vals[0, right_vals[0, :] >= 320] = 319
        #right_vals[1, right_vals[1, :] >= 240] = 239
        #outputs = []
        
        # scale down column numbers by 16 to aid bucketing but only scale down
        # row numbers by 4 to aid visualization
        #for i in xrange(points_fwd):
        #    outputs.append(left_vals[0, i] / 4)
        #    outputs.append(gps_vals[0, i] / 4)
        #    outputs.append(right_vals[0, i] / 4)
        #    outputs.append(left_vals[1, i])
        #    outputs.append(gps_vals[1, i])
        #    outputs.append(right_vals[1, i])
        #labels.append(outputs)

        outputs = []
        for i in range(temp_left.shape[1]):
            outputs.append(temp_left[0, i])
            outputs.append(temp_left[1, i])
            outputs.append(temp_left[2, i])
        for i in range(temp_gps.shape[1]):
            outputs.append(temp_gps[0, i])
            outputs.append(temp_gps[1, i])
            outputs.append(temp_gps[2, i])
        for i in range(temp_right.shape[1]):
            outputs.append(temp_right[0, i])
            outputs.append(temp_right[1, i])
            outputs.append(temp_right[2, i])

        for i in range(sideways_current.shape[1]):
            outputs.append(sideways_current[0, i])
            outputs.append(sideways_current[1, i])
            outputs.append(sideways_current[2, i])

        for i in range(P.shape[0]):
            for j in range(P.shape[1]):
                outputs.append(P[i, j])

        labels.append(outputs)
        
        
        reshaped = pp.resize(I, (240, 320))[0]
        imgs.append(reshaped)
        if len(imgs) == 960:
            merge_file = "%s_%d" % (output_base, output_num)
            pml.save_merged_file(merge_file, imgs, labels, imgRows=len(labels[0]))
            imgs = []
            labels = []
            output_num += 1

        count += 1
Example #18
    print("Percent of Live Birth Classified as Non-Pregnancies: {}% ".format(
        100 * sum(np.logical_and(y == 2, yhat == 0)) / sum(y == 2)))
    print("Percent of Live Birth Classified as SAB: {}%".format(
        100 * sum(np.logical_and(y == 2, yhat == 1)) / sum(y == 2)))
    print("\n")
    print("Pregnancy Accuracy (SAB or Live Birth): {}%".format(
        100 * sum(np.logical_and(y != 0, yhat != 0)) / sum(y != 0)))


##MODEL RUNNING

#Preprocesses the image data
all_images, names = preprocess.readImages(
    '/Users/jaredgeller/Desktop/Work/Stanford/Year 3/Quarter 2/IVF_Project/part_1/'
)
all_images = preprocess.resize(all_images, 224)
namesDf = preprocess.processName(names)
sheetDf = preprocess.processXLSXData(
    "/Users/jaredgeller/Desktop/Work/Stanford/Year 3/Quarter 2/IVF_Project/2018 FRESH TRANSFERS NAMES REMOVED.xlsx"
)
fullDf, y = preprocess.mergeData(namesDf, sheetDf)

#Gets training set (first 200 images)
X_train = all_images[:200]
Y_train = y[:200]
X_test = all_images[200:]
Y_test = y[200:]

#Train(X_train=X_train, Y_train=Y_train, alpha=0.001, n_epochs=100, print_every=1, batch_size=32, is_training=True)
Test(X_test=all_images, Y_test=y)
Example #19
    for r in each:
        for c in r:
            bordered_each[rowInd][colInd] = c
            colInd += 1
        colInd = borderSize
        rowInd += 1
    return bordered_each


print("printing chars: \n")
numberPlate = []
for each in segmentation.characters:
    each = np.floor(255 * each)
    row, col = each.shape[:2]
    each = borderize(each, 5)
    each = preprocess.resize(each, (40, 40), anti_aliasing_sigma=True)
    each = np.floor(each)
    each = abs(each - 255)
    plot_image(each)
    each = np.reshape(each, 1600)
    numberPlate.append(one_time_test(each))
rightplate_string = ''
column_list_copy = segmentation.column_list[:]
segmentation.column_list.sort()

for each in segmentation.column_list:
    rightplate_string += numberPlate[column_list_copy.index(each)]

segmentation.characters = []
segmentation.column_list = []
print(rightplate_string)
Example #20
if __name__ == "__main__":
	dictionary = {}
	start_time = time.time()
	spell.create_dictionary(dictionary,"./all_medical_terms.txt")
	run_time = time.time() - start_time
	print('%.2f seconds to run' % run_time)

	filename = parser.parse_args().filename
	#Load image and change channels
	image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)

	#Pre-processing = Cropping + Binarization
	imageEdges = pp.edgesDet(image, 200, 250)
	closedEdges = cv2.morphologyEx(imageEdges, cv2.MORPH_CLOSE, np.ones((5, 11)))
	pageContour = pp.findPageContours(closedEdges, pp.resize(image))
	pageContour = pageContour.dot(pp.ratio(image))
	newImage = pp.perspImageTransform(image, pageContour)

	#Saving image to show status in the app
	save_filename = filename[:-4]+"_1"+filename[-4:]
	cv2.imwrite(save_filename, cv2.cvtColor(newImage, cv2.COLOR_BGR2RGB))

	##Detect words using google-ocr
	if (parser.parse_args().google_ocr):
		entities,bBoxes = go.convert(save_filename)
		detected_filename = "input.txt"
		with open(detected_filename, 'w') as outfile:
			for i in entities:
				outfile.write(i)
				outfile.write("\n")
Example #21
    def do_preprocess(self, img):
        x = np.copy(img)
        if x is not None and self.properties["autocrop"]["enabled"]:
            x = preprocess.autocrop(x)

        if x is not None:
            x = preprocess.blur(
                x,
                gaussian_blur=self.properties["gaussian_blur"],
                median_blur=self.properties["median_blur"]
            )

        if x is not None and self.properties["grey"]["enabled"]:
            x = preprocess.grey(x)

        if x is not None and self.properties["bitwise"]["enabled"]:
            x = preprocess.bitwise(x)

        if x is not None and self.properties["canny"]["enabled"]:
            x = preprocess.canny(
                x,
                self.properties["canny"]["threshold1"],
                self.properties["canny"]["threshold2"]
            )

        if x is not None and self.properties["laplacian"]["enabled"]:
            x = preprocess.laplacian(x)

        if x is not None and self.properties["thresh"]["enabled"]:
            x = preprocess.thresh(x)

        if x is not None and self.properties["closing"]["enabled"]:
            x = preprocess.closing(
                x,
                self.properties["closing"]["width"],
                self.properties["closing"]["height"]
            )

        if x is not None and self.properties["dilate"]["enabled"]:
            x = preprocess.dilate(
                x,
                self.properties["dilate"]["width"],
                self.properties["dilate"]["height"],
                self.properties["dilate"]["iterations"]
            )

        if x is not None and self.properties["outline_contour"]["enabled"]:
            x = preprocess.outline_contour(x)

        if x is not None and self.properties["resize"]["enabled"]:
            x = preprocess.resize(
                x,
                (
                    self.properties["resize"]["width"],
                    self.properties["resize"]["height"]
                )
            )
        elif x is not None and self.properties["scale_max"]["enabled"]:
            x = preprocess.scale_max(
                x,
                self.properties["scale_max"]["width"],
                self.properties["scale_max"]["height"]
            )

        if x is not None and self.properties["add_border"]["enabled"]:
            x = preprocess.add_border(
                x,
                border_size=self.properties["add_border"]["border_size"],
                color_value=self.properties["add_border"]["color_value"],
                fill_dimensions=self.properties["add_border"]["fill_dimensions"]
            )

        return x