Example 1
def run_sim(df,
            start,
            end,
            sim,
            models=[],
            tsvars=[],
            spatvars=[],
            transformvars=[],
            transformvars_post=[]):

    nunits = len(df.loc[start].index)
    tsstreams = [streamers.init_order(nunits, tsvar) for tsvar in tsvars]
    # Seed the streamers
    for stream in tsstreams:
        for value, streamer in zip(df.loc[start - 1, stream['name']].values,
                                   stream['streamers']):
            streamer.seed(value)

    # load the weight matrices
    for sdict in spatvars:
        with open(sdict['path_weight'], 'rb') as p:
            w = pickle.load(p)
            #print(sdict['name'], "loaded", sdict['path_weight'])
        sdict.update({'w': w})

    for t in range(start, end + 1):

        # advance each time-series streamer one step using last period's values
        for stream in tsstreams:
            update = streamers.tick(stream['streamers'],
                                    df.loc[t - 1, stream['var']].values)
            df.loc[t, stream['name']] = update

        # compute spatial lags using the preloaded pysal weight matrices
        for sdict in spatvars:
            update = pysal.lag_spatial(sdict['w'], df.loc[t,
                                                          sdict['var']].values)
            df.loc[t, sdict['name']] = update

        for transform in transformvars:
            df = apply_transform(df, transform)

        for model in models:
            outputs, varnames = model.predict(sim=sim, data=df.loc[t])  # .ix was removed from pandas; .loc matches the usage above
            for output, varname in zip(outputs, varnames):
                df.loc[t, varname] = output

        for transform in transformvars_post:
            df = apply_transform(df, transform)

    return df
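A minimal way to exercise run_sim as given, with nothing beyond pandas: the (time, unit) panel shape is inferred from the df.loc[t] access pattern, and the dict keys mentioned in the comments come from how the loops read them; every concrete name here is illustrative, not from the original project.

import pandas as pd

# hypothetical (time, unit) panel; run_sim reads cross-sections via df.loc[t]
idx = pd.MultiIndex.from_product([range(0, 11), range(3)], names=['t', 'unit'])
df = pd.DataFrame({'y': 0.0}, index=idx)

# with every spec list left empty the inner loops are no-ops, so the call runs
# without the project's streamers module, fitted models, or pickled weights;
# a real tsvars entry would look like {'name': 'tlag1_y', 'var': 'y'}, and a
# spatvars entry would add a 'path_weight' pointing at a pickled pysal W
out = run_sim(df, start=1, end=10, sim=0)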
Example 2
def transform():
    # read the uploaded image from the multipart form field "file"
    imagefile = request.files["file"].read()
    # persist it to a fixed scratch path, presumably read by transforms.apply_transform
    with open("1.jpeg", "wb+") as fp:
        fp.write(imagefile)
    # the filter to apply comes from the "type" query parameter
    filter_type = request.args.get('type')
    result_name = transforms.apply_transform(filter_type)
    # note: the response is always declared image/gif, whatever apply_transform produced
    return send_file(result_name, mimetype='image/gif')
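The view above carries no route decorator in this listing, so here is one hypothetical way to mount and call it; the endpoint path, filter name, and port are all made up for illustration.

from flask import Flask

app = Flask(__name__)
app.add_url_rule('/transform', view_func=transform, methods=['POST'])

# client side: upload the image as multipart field "file" and choose the
# filter via the "type" query parameter, e.g.
#   curl -X POST -F "file=@portrait.jpeg" "http://localhost:5000/transform?type=sepia"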
Example 3
def get_face_coordinates_system(landmarks, preview_window = None):
    """returns face center, xscale, yscale, rotationmatrix and landmarks in that coordinate system"""
    leftAcc = np.array([0,0],dtype=np.float64)
    for i in LEFT_EYE_INDICES:
        leftAcc += landmarks[i]
    
    rightAcc = np.array([0,0], dtype=np.float64)
    for i in RIGHT_EYE_INDICES:
        rightAcc += landmarks[i]
    #calculate eyes cog
    leftEyePos = leftAcc / EYE_INDICES_COUNT
    rightEyePos = rightAcc / EYE_INDICES_COUNT

    #align eyes
    dx = rightEyePos[0] - leftEyePos[0]
    dy = rightEyePos[1] - leftEyePos[1]

    #TODO why subtract 180?
    angle = np.degrees(np.arctan2(dy,dx)) - 180
    eyesC = (rightEyePos + leftEyePos)/2
    #calculate center based on two main axes
    rotM = cv2.getRotationMatrix2D((eyesC[0],eyesC[1]), angle, 1)
    rotatedLandmarks =  apply_transform(landmarks, rotM)

    face_top = np.array(rotatedLandmarks[FACE_AXIS_TOP_INDEX])
    face_bottom = np.array(rotatedLandmarks[FACE_AXIS_BOTTOM_INDEX])
    face_left = np.array(rotatedLandmarks[FACE_AXIS_LEFT_INDEX])
    face_right = np.array(rotatedLandmarks[FACE_AXIS_RIGHT_INDEX])
    vertical_axis_c = (face_top + face_bottom) / 2
    horizontal_axis_c = (face_left + face_right) / 2
    #take the face center as the Y of the vertical axis and the X of the horizontal one
    #TODO use the axes' intersection instead?
    vertical_axis_len = np.linalg.norm(face_top - face_bottom)
    horizontal_axis_len = np.linalg.norm(face_left - face_right)

    #this is a wrong assumption
    face_center = np.array((horizontal_axis_c[0], vertical_axis_c[1]))

    #face_center = np.average(rotatedLandmarks, axis=0)
    #calculate landmark positions relative to the center, scaled so that the
    #two main axes run between [-1, 1] (eyebrows will go above 1 in this case)
    localLandmarks = []
    for rl in rotatedLandmarks:
        localLandmarks.append(
            (np.array(rl) - face_center) / (horizontal_axis_len, vertical_axis_len))

    if preview_window:
        preview_window.clear()
        #multiply by 100 because scatter points are too large for a [-1, 1] scale
        preview_window.scatter(np.array(localLandmarks)[:,0]*100,-np.array(localLandmarks)[:,1]*100)
        preview_window.add_patch(matplotlib.patches.Circle((0,0), 1))


    #return (face_center, horizontal_axis_len, vertical_axis_len, rotM, localLandmarks)
    #this is a kludge: supposed to return the matrix, but since it's not invertible, the angle will do
    return (face_center, horizontal_axis_len, vertical_axis_len, angle, localLandmarks)
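None of the examples on this page includes apply_transform itself. Judging from how Examples 3, 6, and 7 use it, it applies a cv2-style 2x3 affine (or 3x3 homography) matrix to a list of 2D points; the following is a plausible self-contained sketch of that behavior, not the project's actual implementation.

import numpy as np

def apply_transform_sketch(points, M):
    """Apply an affine (2x3) or projective (3x3) matrix to (x, y) points."""
    pts = np.asarray(points, dtype=np.float64)
    homog = np.hstack([pts, np.ones((pts.shape[0], 1))])  # lift to homogeneous coords
    out = homog @ np.asarray(M, dtype=np.float64).T
    if out.shape[1] == 3:                                 # projective case: divide by w
        out = out[:, :2] / out[:, 2:3]
    return out

# e.g. rotating three points by 90 degrees about the origin:
M = np.array([[0.0, -1.0, 0.0],
              [1.0,  0.0, 0.0]])
print(apply_transform_sketch([(1, 0), (0, 1), (1, 1)], M))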
Example 4
def handlePosition(position, coos, svg, role_of):
    ids = []
    if Debug:
        print "handlePosition", position.getAttribute('id'), coos
    [translation, angle] = extract_transform(position.getAttribute('transform'))
    new_translation = view_mapping(translation)
    position.setAttribute('transform', make_transform(new_translation, angle))
    svg = printAttr('g', position, ('id', 'transform'), svg)
    role_of = extract_role(position, role_of)
    for child in position.childNodes:
        if child.nodeName == 'rect':
            [id, coos, svg] = handleRect(child, coos, svg)
            if Debug: 
                print "after handleRect:", coos
            ids.append(id)
        if child.nodeName == 'label':
            svg = handleLabel(child, svg)
    for cid in cids:
        if cid in ids:
            print('before handlePosition', cid, coos[cid], translation, angle)
    for id in ids:
        coos = apply_transform(id, new_translation, angle, coos)
    for cid in cids:
        if cid in ids:
            print('after handlePosition', cid, coos[cid], translation, angle)
    svg.append('</g>')
    return [ids, coos, svg, role_of]
Example 5
def handlePositionGroup(group, coos, svg, role_of):
    ids = []
    if Debug:
        print "handlePositionGroup", coos
    svg = printAttr('g', group, ('id', 'transform'), svg)
    for child in group.childNodes:
        if child.nodeName == 'position':
            [new_ids, coos, svg, role_of] = handlePosition(child, coos, svg, role_of)
            ids += new_ids
    [translation, angle] = extract_transform(group.getAttribute('transform'))
    new_translation = view_mapping(translation)
    group.setAttribute('transform', make_transform(new_translation, angle))
#    for cid in cids:
#        print 'posgroup', cid, 'before', coos[cid], translation, angle
    for id in ids: 
        coos = apply_transform(id, new_translation, angle, coos)
    for cid in cids:
        if cid in ids:
            print('posgroup', cid, 'result', coos[cid], translation, angle)
    svg.append('</g>')
    return [coos, svg, role_of]
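Examples 4 and 5 use a different apply_transform: it rewrites the stored coordinates of one element after its group's translate/rotate has changed. Since extract_transform, make_transform, and view_mapping are not shown either, the following is only a guess at that bookkeeping, assuming coos maps an id to an (x, y) pair and angle is in degrees.

import math

def apply_transform_guess(id, translation, angle, coos):
    x, y = coos[id]
    a = math.radians(angle)
    # rotate about the origin first, then translate, mirroring how an SVG
    # "translate(...) rotate(...)" transform applies its rightmost part first
    rx = x * math.cos(a) - y * math.sin(a)
    ry = x * math.sin(a) + y * math.cos(a)
    coos[id] = (rx + translation[0], ry + translation[1])
    return coos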
Example 6
def landmarks_to_image_space(landmarks, rotM, faceCenter, scaleX, scaleY, preview_window = None):
    """moves landmarks from local face space to target face's image space for later triangulation and wrapping"""
    #opencv's 2d rotation matrix cannot be inverted with np.linalg.inv alone (it is 2x3), so just use the reverse angle for now
    #invRot = np.linalg.inv(rotM)
    invRot = cv2.getRotationMatrix2D((0, 0), -rotM, 1)  # rotM is actually the angle (see the kludge note in get_face_coordinates_system)
    inv_rot_landmarks = apply_transform(landmarks, invRot)
    target_image_landmarks = []
    for local_landmark in inv_rot_landmarks:
        #rescale landmarks up
        target_landmark = np.array(local_landmark) * (scaleX, scaleY)
        #calculate landmark position relative to target face center
        target_landmark = target_landmark + faceCenter
        #TODO see this approach's effect on eye positions
        target_image_landmarks.append(target_landmark)
        if preview_window:
            preview_window.add_patch(matplotlib.patches.Circle(target_landmark, 2, color='#FF0000'))
    
    
    return target_image_landmarks
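The negative-angle trick works because rotating by -angle about the same center exactly undoes rotating by +angle (OpenCV also offers cv2.invertAffineTransform for the general 2x3 case). A quick numeric check, reusing the apply_transform_sketch from Example 3:

import cv2
import numpy as np

pts = np.array([(10.0, 0.0), (0.0, 10.0)])
fwd = cv2.getRotationMatrix2D((0, 0), 30, 1)    # +30 degrees about the origin
back = cv2.getRotationMatrix2D((0, 0), -30, 1)  # -30 degrees about the origin
round_trip = apply_transform_sketch(apply_transform_sketch(pts, fwd), back)
print(np.allclose(round_trip, pts))             # True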
Example 7
def match_to_model_face(source_landmarks):
    '''Assumes the source is as well aligned as possible, or at least passed
    through the pose-correction algorithm. Returns an idealized 3D landmark
    set matched to the model face.'''
    leftAcc = np.array([0, 0], dtype=np.float64)
    for i in utils.LEFT_EYE_INDICES:
        leftAcc += source_landmarks[i]

    rightAcc = np.array([0, 0], dtype=np.float64)
    for i in utils.RIGHT_EYE_INDICES:
        rightAcc += source_landmarks[i]
    #calculate eyes cog
    leftEyePos = leftAcc / utils.EYE_INDICES_COUNT
    rightEyePos = rightAcc / utils.EYE_INDICES_COUNT

    #align eyes
    dx = rightEyePos[0] - leftEyePos[0]
    dy = rightEyePos[1] - leftEyePos[1]

    #TODO why subtract 180?
    angle = np.degrees(np.arctan2(dy, dx)) - 180
    eyesC = (rightEyePos + leftEyePos) / 2
    #calculate center based on two main axes
    rotM = cv2.getRotationMatrix2D((eyesC[0], eyesC[1]), angle, 1)
    rotatedLandmarks = apply_transform(source_landmarks, rotM)

    face_top = np.array(rotatedLandmarks[utils.FACE_AXIS_TOP_INDEX])
    face_bottom = np.array(rotatedLandmarks[utils.FACE_AXIS_BOTTOM_INDEX])
    face_left = np.array(rotatedLandmarks[utils.FACE_AXIS_LEFT_INDEX])
    face_right = np.array(rotatedLandmarks[utils.FACE_AXIS_RIGHT_INDEX])
    vertical_axis_c = (face_top + face_bottom) / 2
    horizontal_axis_c = (face_left + face_right) / 2
    #here the face center is taken as the nose tip (below), not the axes' crossing
    #TODO use the axes' intersection instead?
    vertical_axis_len = np.linalg.norm(face_top - face_bottom)
    horizontal_axis_len = np.linalg.norm(face_left - face_right)

    face_center = np.array((rotatedLandmarks[utils.NOSE_TIP][0],
                            rotatedLandmarks[utils.NOSE_TIP][1]))

    #face_center = np.average(rotatedLandmarks, axis=0)
    #calculate landmark positions based on center
    localLandmarks = []

    for rl in rotatedLandmarks:
        localLandmarks.append((np.array(rl) - face_center))

    key_landmarks = np.array([
        localLandmarks[utils.NOSE_TIP], localLandmarks[utils.CHIN],
        localLandmarks[utils.LEFT_EYE_LEFT],
        localLandmarks[utils.RIGHT_EYE_RIGHT],
        localLandmarks[utils.MOUTH_LEFT], localLandmarks[utils.MOUTH_RIGHT]
    ], dtype=np.float64)

    #now transform the points so that the key ones match the ideal model
    transform, _ = cv2.findHomography(key_landmarks,
                                      aproximation_3d_face[:, :2])
    #transform = cv2.getAffineTransform(key_landmarks, aproximation_3d_face[:,:2])
    localLandmarks = apply_transform(localLandmarks, transform)
    ideal_3d_landmarks = np.array([(x, y, -160) for x, y in localLandmarks])

    #likely need to do this for the whole landmark set if this is to work at all
    ideal_3d_landmarks[utils.NOSE_TIP][2] = 0
    ideal_3d_landmarks[utils.CHIN][2] = -65
    ideal_3d_landmarks[utils.LEFT_EYE_LEFT][2] = -135
    ideal_3d_landmarks[utils.RIGHT_EYE_RIGHT][2] = -135
    ideal_3d_landmarks[utils.MOUTH_LEFT][2] = -125
    ideal_3d_landmarks[utils.MOUTH_RIGHT][2] = -125

    return ideal_3d_landmarks
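The homography step above pins six key landmarks onto a canonical model face (aproximation_3d_face, defined elsewhere in the project). A self-contained illustration of what cv2.findHomography does there, on made-up correspondences:

import cv2
import numpy as np

src = np.array([[0, 0], [100, 0], [100, 100], [0, 100], [50, 50], [25, 75]],
               dtype=np.float64)
dst = src * 0.5 + (10, 20)                   # a known scale-and-shift transform
H, _ = cv2.findHomography(src, dst)          # least-squares fit over all six points
mapped = cv2.perspectiveTransform(src.reshape(-1, 1, 2), H).reshape(-1, 2)
print(np.allclose(mapped, dst))              # True: the fitted H maps src onto dst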