Code example #1
File: fernandez.py Project: bdrydyk/glitch3d
 def matthew_curve(self, obj, time, scale=0.2):
   # Sample a random parametric curve into a list of (x, y, z) vertices.
   fx, fy, fz = self.rand_curve()
   verts = [(fx(t), fy(t), fz(t)) for t in helpers.pitched_array(0, time, 0.2)]
   bpy.context.scene.objects.active = obj
   bpy.ops.object.select_all(action='DESELECT')
   # Duplicate the base object along the curve, alternating two scales and
   # rotating each copy incrementally around Z.
   for idx, coord in enumerate(verts[0::self.MESH_OCCURENCE]):
     new_obj = helpers.duplicate_object(obj)
     new_obj.select = True
     new_obj.location = coord
     new_obj.scale = (0.02, 0.02, 0.02) if idx % 2 == 0 else (0.05, 0.05, 0.05)
     new_obj.rotation_euler.z += idx * (2 * math.pi) / len(verts)
     bpy.context.scene.objects.active = new_obj
   bpy.ops.object.join()
   res = bpy.context.object
   res.name = 'fernandez'
   helpers.resize(res)
   helpers.center(res)
   helpers.decimate(res)
   helpers.assign_material(res, helpers.random_material(self.MATERIALS_NAMES))
   # Build a support mesh that traces the full curve as a chain of edges.
   support = helpers.create_mesh('fernandez_support', verts, [], (0, 0, 0), [[v, v + 1] for v in range(0, len(verts) - 1)])
   helpers.resize(support)
   helpers.center(support)
   helpers.extrude(support)
   helpers.assign_material(support, helpers.random_material(self.MATERIALS_NAMES))
   return res
Code example #2
File: fernandez.py Project: devops-god/glitch3d
 def matthew_curve(self, obj, time, scale=0.2):
   fx, fy, fz = self.rand_curve()
   i = time
   verts = helpers.parametric_curve(fx, fy, fz, time, 1)
   # Resample over a shorter time span until the instance count fits the limit.
   while len(verts[0::self.MESH_OCCURENCE]) > self.MESH_NUMBER_LIMIT:
     i -= 10
     print(i)
     verts = helpers.parametric_curve(fx, fy, fz, i, 1)
   self.SCENE.objects.active = obj
   bpy.ops.object.select_all(action='DESELECT')
   for idx, coord in enumerate(verts[0::self.MESH_OCCURENCE]):
     new_obj = helpers.duplicate_object(obj)
     new_obj.select = True
     new_obj.location = coord
     new_obj.scale = (0.02, 0.02, 0.02) if idx % 2 == 0 else (0.05, 0.05, 0.05)
     new_obj.rotation_euler.z += idx * (2 * math.pi) / len(verts)
     self.SCENE.objects.active = new_obj
   bpy.ops.object.join()
   res = self.SCENE.objects.active
   res.name = 'fernandez'
   # Build the support mesh under its own name so it does not clobber
   # the joined object assigned to res above.
   edges = [[v, v + 1] for v in range(0, len(verts) - 1)]
   support = helpers.create_mesh('fernandez_support', verts, [], (0, 0, 0), edges)
   helpers.resize(support)
   helpers.extrude(support)
   return res
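Both variants above reduce to the same idea: evaluate the three coordinate functions (fx, fy, fz) at discrete values of t and treat the results as vertices. `helpers.pitched_array` and `helpers.parametric_curve` are project helpers whose implementations are not shown; a minimal sketch of the assumed sampling, runnable outside Blender:

import math

# Assumed behavior of the project's sampler: evaluate the three coordinate
# functions at evenly spaced values of t in [0, t_max].
def parametric_curve(fx, fy, fz, t_max, step):
    n = int(t_max / step) + 1
    return [(fx(i * step), fy(i * step), fz(i * step)) for i in range(n)]

# Example: a helix sampled over [0, 10].
verts = parametric_curve(math.cos, math.sin, lambda t: 0.1 * t, 10, 1)
print(len(verts))  # 11 vertices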
Code example #3
    def _glimp_patch(self, img, location, image_size):
        # TODO: this is inaccurate when the image size is an odd number.
        glimpse_images = None
        for i in range(self.num_zoom_image):
            current_img = img
            glimpse_half_size = int(self.retina_size * .5 * self.scale**i)
            top = int(location[0] - glimpse_half_size)
            bottom = int(location[0] + glimpse_half_size)
            left = int(location[1] - glimpse_half_size)
            right = int(location[1] + glimpse_half_size)

            if top < 0 or left < 0 or bottom > image_size or right > image_size:
                pad_dims = (
                    glimpse_half_size,
                    glimpse_half_size,
                    glimpse_half_size,
                    glimpse_half_size,
                )
                current_img = F.pad(current_img, pad_dims, "constant", 0)
                top += glimpse_half_size
                bottom += glimpse_half_size
                left += glimpse_half_size
                right += glimpse_half_size
            # `append` and `resize` are project helpers here: accumulate the
            # crop and rescale it to the retina size.
            glimpse_images = append(
                glimpse_images,
                resize(current_img[:, top:bottom, left:right],
                       self.retina_size))
        return glimpse_images
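The core trick in `_glimp_patch` is the padding: when a glimpse would cross the image border, the whole image is padded by the glimpse half-size and the crop coordinates shift by the same amount, so the slice is always in bounds. A minimal standalone sketch of that step (assuming a CHW float tensor, as the `[:, top:bottom, left:right]` slicing implies):

import torch
import torch.nn.functional as F

def extract_glimpse(img, center, half_size):
    # img: (C, H, W); center: (row, col). Padding both spatial dims by
    # half_size guarantees the crop stays in bounds after shifting.
    padded = F.pad(img, (half_size, half_size, half_size, half_size), "constant", 0)
    top, left = center[0], center[1]  # original (center - half_size) plus half_size pad
    return padded[:, top:top + 2 * half_size, left:left + 2 * half_size]

img = torch.arange(100.0).reshape(1, 10, 10)
print(extract_glimpse(img, (0, 0), 3).shape)  # torch.Size([1, 6, 6])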
Code example #4
def facial_pt_extractor(pic_path):
    ## Since the helper methods live in helpers.py, we just need to import them:
    # def rect_to_bb(rect):
    #     x = rect.left()
    #     y = rect.top()
    #     w = rect.right() - x
    #     h = rect.bottom() - y
    #     return (x, y, w, h)

    # def shape_to_np(shape, dtype="int"):
    #     coords = np.zeros((68, 2), dtype=dtype)
    #     for i in range(0, 68):
    #         coords[i] = (shape.part(i).x, shape.part(i).y)
    #
    #     return coords
    #
    # def resize(image, width=1200):
    #     r = width * 1.0 / image.shape[1]
    #     dim = (width, int(image.shape[0] * r))
    #     resized = cv2.resize(image, dim, interpolation=cv2.INTER_AREA)
    #     return resized

    image = cv2.imread(pic_path)
    image = resize(image, width=1200)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    rects = detector(gray, 1)

    coordinates = []
    for (i, rect) in enumerate(rects):
        shape = predictor(gray, rect)
        shape = shape_to_np(shape)
        for (x, y) in shape:
            coordinates.append(x)
            coordinates.append(y)
    return coordinates
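`detector`, `predictor`, `shape_to_np`, and `resize` are module-level names (the commented block above shows the helper shapes). A sketch of the setup this function assumes; the landmark-model filename matches the one used in code example #8:

import cv2
import dlib
from helpers import shape_to_np, resize  # the helpers referenced above

detector = dlib.get_frontal_face_detector()
predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")

coords = facial_pt_extractor("face.jpg")  # 68 (x, y) pairs flattened per face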
Code example #5
File: sign_detector.py Project: tqiu8/asl-cv
def sliding_window(model, img, stepSize, scale, min_scale=0):
    """TODO: implement sliding window approach"""

    #dimensions of window
    windowWidth = 32
    windowHeight = 24

    #initialize list of best boxes
    boxes = []

    #we start with the unscaled image
    newimg = img
    newimgscale = 1

    # while scaled image is larger than window
    while len(newimg) > windowHeight and len(
            newimg[0]) > windowWidth and newimgscale > min_scale:

        #initialize variables to find best box
        max_confidence = -1
        detected_box = []

        # slide a window across the image
        for y in range(0, len(newimg) - windowHeight, stepSize):
            for x in range(0, len(newimg[0]) - windowWidth, stepSize):

                # yield the current window
                subimage = newimg[y:y + windowHeight, x:x + windowWidth]
                # get probability that hand is contained in current window
                hogvec = helpers.convertToGrayToHOG(
                    helpers.resize(subimage, (128, 128)))
                confidence = model.predict_proba([hogvec])

                # if the current window is better than our best window so far, update best window
                if confidence[0][1] > max_confidence:
                    max_confidence = confidence[0][1]
                    detected_box = [x, y, max_confidence, newimgscale]

        #get box coordinates in original image
        this_box = [0, 0, 0, 0, 0]
        this_box[0] = detected_box[0] // detected_box[3]
        this_box[1] = detected_box[1] // detected_box[3]
        this_box[2] = this_box[0] + (windowWidth // detected_box[3])
        this_box[3] = this_box[1] + (windowHeight // detected_box[3])
        this_box[4] = detected_box[2]

        #add best box for this scale to list of best boxes
        boxes.append(this_box)
        #scale image
        newimgscale *= scale
        newimg = rescale(img, newimgscale)

    #perform NMS (don't worry too much about the details of this)
    box = helpers.non_max_suppression_fast(np.array(boxes), .4)

    return box[0]
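An illustrative call, assuming `model` is any binary classifier exposing predict_proba (for example an sklearn SVC trained with probability=True on HOG vectors of hands); the step size and scale factor below are arbitrary:

import cv2

# model: a pre-trained classifier with predict_proba, e.g. SVC(probability=True)
img = cv2.imread("frame.jpg")
x1, y1, x2, y2, conf = sliding_window(model, img, stepSize=8, scale=0.75)
print("best box:", (x1, y1, x2, y2), "confidence:", conf)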
Code example #6
def preprocess(img_dir, padding=True):
    # img_h and img_w are module-level constants (target height and width).
    img = cv2.imread(img_dir, 1)
    img = resize(img, img_h, always=True)
    # cv2.imread returns BGR, so convert with COLOR_BGR2GRAY.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    img = cv2.adaptiveThreshold(img, 255, cv2.ADAPTIVE_THRESH_MEAN_C,
                                cv2.THRESH_BINARY_INV, 71, 17)
    if padding:
        img = cv2.copyMakeBorder(img,
                                 0,
                                 0,
                                 0,
                                 img_w - img.shape[1],
                                 cv2.BORDER_CONSTANT,
                                 value=0)

    # Transpose to (width, height), add a channel axis, and scale to [-1, 1].
    return img.swapaxes(0, 1)[:, :, None] / 255 * 2 - 1
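A hedged usage sketch; `img_h`, `img_w`, and `resize` are module-level names in the original file, so the values below are illustrative only (the project's `resize` helper must also be importable):

img_h, img_w = 64, 512  # assumed network input height and width

x = preprocess("line.png")
print(x.shape)  # (img_w, img_h, 1), values scaled to [-1, 1]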
Code example #7
File: app.py Project: chpmrc/pico-prism
def main():
    try:
        url = request.args.get('url')
        op = request.args.get('op')
        width = int(request.args.get('width', 0))
        height = int(request.args.get('height', 0))
        # bool() of any non-empty string is True, so parse the flag explicitly.
        crop = request.args.get('crop', '').lower() in ('1', 'true', 'yes')
        gravity = request.args.get('gravity', 'center')
        if op == 'resize':
            return send_file(img2file(
                resize(url, width, height, crop=crop, gravity=gravity)),
                             mimetype='image/jpeg')
        else:
            return send_file(img2file(url2img(url)), mimetype='image/jpeg')
    except Exception:
        logger.error(traceback.format_exc())
        return Response(
            "Something went wrong (invalid width/height?). Please read the docs below:\n{}"
            .format(DOCS),
            status=400,
            mimetype='text/plain')
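A client-side sketch of the resize endpoint, assuming the handler above is mounted at the root path and the service runs locally on port 5000:

import requests

resp = requests.get(
    "http://localhost:5000/",
    params={
        "url": "https://example.com/photo.jpg",
        "op": "resize",
        "width": 320,
        "height": 240,
        "crop": "true",
        "gravity": "center",
    },
)
with open("resized.jpg", "wb") as f:
    f.write(resp.content)  # the endpoint replies with image/jpeg bytes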
Code example #8
def run():
    print("Reach Position 1")
    detector = dlib.get_frontal_face_detector()
    predictor = dlib.shape_predictor("shape_predictor_68_face_landmarks.dat")
    fishface = Loader.load_model_fish("googleCKPlus.xml")
    video = cv2.VideoCapture(0)
    current = 0
    model = "20180402-114759"
    print("Reach Position 2")
    with tf.Graph().as_default():
        # print(tf.get_default_graph())
        print("Reach Position 3")
        with tf.Session() as sess:
            # Load the model

            ## we need to load the model first, then load each layer
            Loader.load_model(model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            print("Reach Position 4")
            while True:
                ret, frame = video.read()
                frame = helpers.resize(frame, width=1200)
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                # take the copy here; otherwise the crop can come out empty
                # when the face is too big
                temp = copy.deepcopy(frame)

                rects = detector(gray, 1)
                print("Running.....")
                for (i, rect) in enumerate(rects):
                    shape = predictor(gray, rect)
                    shape = helpers.shape_to_np(shape)
                    (x, y, w, h) = helpers.rect_to_coordinate(rect)
                    # draw rectangle
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)

                    # draw circle
                    for (x1, y1) in shape:
                        cv2.circle(frame, (x1, y1), 2, (0, 0, 255), -1)
                    # to prevent empty frame
                    try:
                        temp = temp[y:y + h, x:x + w]
                        temp_160 = misc.imresize(temp, (160, 160),
                                                 interp='bilinear')
                        # Snap by the camera save by the time stamp
                        # cv2.imwrite("./camera_photo/{}.png".format(datetime.fromtimestamp(time.time())), temp)
                        # print("SNAP!!!!!!!!!!!!! GIVE A SMILE")
                        if temp_160.ndim == 2:
                            temp_160 = helpers.to_rgb_from2(temp_160)

                        x1, y1, a1 = temp_160.shape
                        temp_re = temp_160.reshape([1, x1, y1, a1])
                        # we put the cropped image to the FaceNet, input shape(1,160,160,3)
                        feed_dict = {
                            images_placeholder: temp_re,
                            phase_train_placeholder: False
                        }
                        # emb return the facial feature of shape (1,512)
                        emb = sess.run(embeddings, feed_dict=feed_dict)
                        print("Network running....")

                    except ValueError:
                        pass
                try:
                    tag = helpers.calculation(emb[0])
                    cv2.putText(frame, "{}".format(tag), (x - 10, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)

                    # out = cv2.resize(temp, (350, 350))
                    gray = cv2.cvtColor(
                        temp, cv2.COLOR_BGR2GRAY)  # Convert image to grayscale
                    # out=misc.imresize(gray, (350, 350), interp='bilinear')
                    out = cv2.resize(gray, (350, 350))
                    pred, conf = fishface.predict(out)
                    # write on img
                    info1 = 'Guessed emotion: ' + emotions[pred]
                    cv2.putText(frame, info1, (10, 20),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255, 100, 0))
                    print(tag)
                    print(emotions[pred])

                except UnboundLocalError:
                    pass
                # we put the processed frame back to the camera
                rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2BGRA)
                cv2.imshow('frame', rgb)
                # read the key once per frame: 'q' quits, 'p' snaps a picture
                key = cv2.waitKey(1) & 0xFF
                if key == ord('q'):
                    break
                if key == ord('p'):
                    cv2.imwrite('capture{}.jpg'.format(current), frame)
                    current += 1

    video.release()
    cv2.destroyAllWindows()
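`helpers.calculation(emb[0])` is not shown; given the surrounding comments it presumably matches the 512-d FaceNet embedding against stored embeddings of known people. A plausible sketch of such a nearest-neighbor lookup (the database layout and the threshold are assumptions):

import numpy as np

# one stored 512-d embedding per known person (assumed layout)
known_embs = {"alice": np.load("alice.npy"), "bob": np.load("bob.npy")}

def calculation(emb, threshold=1.1):
    # return the closest stored identity, or "unknown" if nothing is
    # within the distance threshold
    best_name, best_dist = "unknown", threshold
    for name, known in known_embs.items():
        dist = np.linalg.norm(emb - known)
        if dist < best_dist:
            best_name, best_dist = name, dist
    return best_name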
Code example #9
def calculate_feature(names):
    # output log file
    log_file = open("Uploader_info.log", "w")
    old_stdout = sys.stdout
    sys.stdout = log_file
    img_list = []
    ppp = -1
    error_list = []
    with tf.Graph().as_default():
        with tf.Session() as sess:
            # we need to load the model first, then load each layer
            Loader.load_model(model)
            images_placeholder = tf.get_default_graph().get_tensor_by_name(
                "input:0")
            embeddings = tf.get_default_graph().get_tensor_by_name(
                "embeddings:0")
            phase_train_placeholder = tf.get_default_graph(
            ).get_tensor_by_name("phase_train:0")
            for name in names:
                ppp += 1
                print("\n" + name)

                # cv2.imread returns None for unreadable files, which makes
                # helpers.resize raise an AttributeError
                try:
                    # name=os.path.join('./Team/', name)
                    img = cv2.imread(name)
                    img = helpers.resize(img, width=1200)
                except AttributeError:
                    print("error, cannot find {} ".format(name))
                    error_list.append(ppp)
                    continue

                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

                # run the dlib face detector on the grayscale image
                rects = detector(gray, 1)
                if (len(rects)) == 0:
                    print("error, cannot detect face in {} ".format(name))
                    error_list.append(ppp)
                    continue
                print("Running.....on {}".format(name))
                for (i, rect) in enumerate(rects):
                    try:
                        if (len(rects)) > 1:
                            raise ValueError
                        shape = predictor(gray, rect)
                        shape = helpers.shape_to_np(shape)
                        (x, y, w, h) = helpers.rect_to_coordinate(rect)

                        img = img[y:y + h, x:x + w]
                        img = misc.imresize(img, (160, 160), interp='bilinear')
                        cv2.imwrite("./database_snap/{}".format(name[7:]), img)

                        img_list.append(img)

                        print(name + " Success!!!!!!!!!!!!!!!")

                    # if there is more than one face, add the image to the error list
                    except ValueError:
                        print("error, {} have more than one faces!!!".format(
                            name))
                        error_list.append(ppp)
                        break

            all_img = np.stack(img_list, axis=0)
            # feed all cropped images to FaceNet at once, input shape (N, 160, 160, 3)
            feed_dict = {
                images_placeholder: all_img,
                phase_train_placeholder: False
            }
            # embs holds one 512-d facial feature vector per image, shape (N, 512)
            embs = sess.run(embeddings, feed_dict=feed_dict)

            # restore stdout and close the log file
            sys.stdout = old_stdout
            log_file.close()

    return all_img.tolist(), embs.tolist(), error_list
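An illustrative call; `detector`, `predictor`, `model`, and `Loader` are module-level names set up as in code example #8:

import glob

names = sorted(glob.glob("./Team/*.jpg"))  # note: name[7:] above strips "./Team/"
imgs, embs, errors = calculate_feature(names)
print(len(embs), "embeddings computed,", len(errors), "files skipped")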
Code example #10
import os
import cv2
import numpy as np
from imutils import paths  # assumed source of paths.list_images
from sklearn.preprocessing import LabelBinarizer
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Flatten, Dense
from helpers import resize_to_fit as resize

# initialize the data and labels
data = []
labels = []

for image_file in paths.list_images("extracted_letters"):
    # load the image and convert it to grayscale
    img = cv2.cvtColor(cv2.imread(image_file), cv2.COLOR_BGR2GRAY)
    # resize the image to 20x20
    img = resize(img, 20, 20)
    # add a third dimension to the image for use with Keras
    img = np.expand_dims(img, axis=2)
    # the label is the name of the containing folder
    label = image_file.split(os.path.sep)[-2]
    # add the image and the label to the training dataset
    data.append(img)
    labels.append(label)

# convert to numpy arrays to make the data easier to work with
data = np.array(data, dtype="float") / 255.0
labels = np.array(labels)
# split the data into training and test sets
(X_train, X_test, Y_train, Y_test) = train_test_split(data,
                                                      labels,
                                                      test_size=0.25)
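The snippet is cut off here. Given the imports at the top, a plausible continuation that one-hot encodes the labels and trains a small convnet on the 20x20x1 letter images (layer sizes and epoch count are assumptions, not the original author's values):

# one-hot encode the folder-name labels
lb = LabelBinarizer().fit(Y_train)
Y_train = lb.transform(Y_train)
Y_test = lb.transform(Y_test)

# small convnet over the 20x20 grayscale letters
model = Sequential()
model.add(Conv2D(20, (5, 5), padding="same", activation="relu",
                 input_shape=(20, 20, 1)))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(500, activation="relu"))
model.add(Dense(len(lb.classes_), activation="softmax"))

model.compile(loss="categorical_crossentropy", optimizer="adam",
              metrics=["accuracy"])
model.fit(X_train, Y_train, validation_data=(X_test, Y_test),
          batch_size=32, epochs=10)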
Code example #11
def shape_detector(image, min_pixels=85, return_img=False):

    # load the image and resize it to a smaller factor so that
    # the shapes can be approximated better
    resized = resize(image, width=300)
    ratio = image.shape[0] / float(resized.shape[0])

    # convert the resized image to grayscale, blur it slightly,
    # and threshold it
    gray = cv2.cvtColor(resized, cv2.COLOR_BGR2GRAY)
    blurred = cv2.GaussianBlur(gray, (5, 5), 0)
    thresh = cv2.threshold(blurred, 60, 255, cv2.THRESH_BINARY)[1]

    # find contours in the thresholded image and initialize the
    # shape detector
    cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_SIMPLE)
    cnts = grab_contours(cnts)
    sd = ShapeDetector()

    # variables to count shapes
    num_cir = 0
    num_tri = 0
    num_sqr = 0
    unkn = 0

    # loop over the contours
    if return_img:
        for c in cnts:
            area = cv2.contourArea(c)
            if area > min_pixels:
                M = cv2.moments(c)
                cX = int((M["m10"] / (M["m00"] + 1e-7)) * ratio)
                cY = int((M["m01"] / (M["m00"] + 1e-7)) * ratio)
                shape = sd.detect(c)
                # multiply the contour (x, y)-coordinates by the resize ratio,
                # then draw the contours and the name of the shape on the image
                c = c.astype("float")
                c *= ratio
                c = c.astype("int")
                cv2.drawContours(image, [c], -1, (0, 255, 0), 2)
                cv2.putText(image, shape, (cX, cY), cv2.FONT_HERSHEY_SIMPLEX,
                            0.5, (0, 0, 255), 2)

                # count shapes
                if shape == 'circle':
                    num_cir += 1
                elif shape == 'triangle':
                    num_tri += 1
                elif shape == 'square':
                    num_sqr += 1
                else:
                    unkn += 1

        # aggregate counts once the contour loop finishes
        num_shapes = np.array([num_sqr, num_cir, num_tri])

        return image, num_shapes
    else:
        for c in cnts:
            area = cv2.contourArea(c)
            if area > min_pixels:
                shape = sd.detect(c)

                # count shapes
                if shape == 'circle':
                    num_cir += 1
                elif shape == 'triangle':
                    num_tri += 1
                elif shape == 'square':
                    num_sqr += 1
                else:
                    unkn += 1

        num_shapes = np.array([num_sqr, num_cir, num_tri])

        return num_shapes
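An illustrative call; `resize`, `grab_contours`, and `ShapeDetector` are imutils/pyimagesearch-style helpers not shown in the snippet:

import cv2

image = cv2.imread("shapes.png")
annotated, counts = shape_detector(image, return_img=True)
print("squares, circles, triangles:", counts)
cv2.imwrite("annotated.png", annotated)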
Code example #12
File: search.py Project: apop08/imageSearch
# the snippet begins mid-script: `ap` is an argparse.ArgumentParser whose
# --query, --index, --dataset, and --relevant arguments are read below
args = vars(ap.parse_args())

# initialize the image descriptor and results montage
desc = ResNetDescriptor()
montage = ResultsMontage((240, 320), 5, 20)
relevant = json.loads(open(args["relevant"]).read())

# load the relevant queries dictionary and look up the relevant results for the
# query image
queryFilename = args["query"][args["query"].rfind("/") + 1:]
queryRelevant = relevant[queryFilename]

# load the query image, display it, and describe it
print("[INFO] describing query...")
query = helpers.image_preprocessor(args["query"])
cv2.imshow("Query", helpers.resize(tf.image.convert_image_dtype(query, dtype=tf.uint8), width=320))
features = desc.describe(query)

# perform the search
print("[INFO] searching...")
searcher = Searcher(args["index"])
results = searcher.search(features, numResults=20)

# loop over the results
for (i, (score, resultID)) in enumerate(results):
    # load the result image and display it
    print("[INFO] {result_num}. {result} - {score:.2f}".format(result_num=i + 1, result=resultID,
                                                               score=score))
    result = cv2.imread("{}/{}".format(args["dataset"], resultID))
    montage.add_result(result, text="#{}".format(i + 1), highlight=resultID in queryRelevant)
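The snippet ends before the montage is displayed. Assuming ResultsMontage exposes the assembled image through a `montage` attribute (as the pyimagesearch helper of the same name does), a typical closing step would be:

# show the assembled results montage once all results are added
cv2.imshow("Results", montage.montage)
cv2.waitKey(0)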