Example #1
def do_POST(self):
    try:
        ctype, pdict = cgi.parse_header(self.headers['content-type'])
        if ctype == 'application/json':
            length = int(self.headers['content-length'])
            post_values = json.loads(self.rfile.read(length))
        else:
            self.send_error(415, "Only JSON data is supported.")
            return
        if self.path == "/api/cutWords":
            words = post_values["data"]
            topN = post_values["topN"]
            cutWords = cutWord.apply(words, topN)
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write(json.dumps(cutWords).encode())
        elif self.path == "/api/recommendation":
            # process data
            coders = post_values["candidates"]
            me = post_values["matching"]
            matchers = matching.match(coders, me)
            self.send_response(200)
            self.send_header('Content-type', 'application/json')
            self.end_headers()
            self.wfile.write(json.dumps(matchers).encode())
    except Exception as e:
        print(e)
        self.send_error(
            401, 'URL not found or data format error: %s' % self.path)
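For context, a minimal client sketch for the handler above, assuming the server is listening locally on port 8000 (hypothetical host and port) and that the third-party requests package is available; it posts JSON to the /api/cutWords route and prints the decoded response:

import requests  # third-party HTTP client, assumed installed

payload = {"data": "text to segment", "topN": 10}
# requests sets Content-Type: application/json, which the handler checks for
resp = requests.post("http://localhost:8000/api/cutWords", json=payload)
print(resp.json())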
Example #2
def main():

    # Known-song dict: each key is a song number (starting at zero), each value is a list of samples
    with open("song_fingerprints.pkl", mode="rb") as opened_file:
        known_songs = pickle.load(opened_file)

    # Rebuild the known-song dict via saveSongs.songSave() (this replaces the pickled copy loaded above)
    known_songs = saveSongs.songSave()

    # Spectrogram of the mic input; records 20 seconds for now
    # (a shorter clip, e.g. 10 seconds, was not giving accurate matches)
    mic_spec = get_mic_input.rec(20)

    sorted_b = np.sort(mic_spec.flatten())
    cutoff = sorted_b[int(.77 * len(sorted_b))]
    mic_peaks = find_peaks.local_peaks(mic_spec, cutoff,
                                       20)  # Find peaks of the unknown samples

    mic_fingerprint = fingerprints.sample_fingerprint(
        mic_peaks)  # Fingerprint the unknown samples
    # print("Known Songs")
    # print(known_songs)
    known_songs_1 = [song[0][0] for song in known_songs.values()]
    # print("known songs values")
    # print(known_songs_1)
    print(matching.match(mic_fingerprint, database=known_songs))
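As an aside, the sort-and-index cutoff above is essentially the 77th percentile of the spectrogram magnitudes; a shorter equivalent (a sketch, assuming mic_spec is a NumPy array, and ignoring the small difference due to interpolation) would be:

import numpy as np

mic_spec = np.random.rand(512, 256)   # stand-in for the real spectrogram
cutoff = np.percentile(mic_spec, 77)  # roughly the same threshold as sorting and indexing at 0.77 * len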
Example #3
def match():
    
    '''Crux of the application.
    Not a sophisticated algorithm, but it matches up the students in the roster.
    '''
    courseNum = request.args.get('courseNum')
    pid = request.args.get('pid')
    try:
        conn = queries.getConn('c9')
        curs = conn.cursor(MySQLdb.cursors.DictCursor)
        roster = queries.roster(conn, courseNum)
        matches = matching.match(roster)
        groupNums = list()
        for match in matches:
            
            allGroups = queries.allGroups(conn)
            groupNum = matching.groupNum(allGroups)
            check = curs.execute('''insert into groups(groupNum, pid, courseNum)
            values(%s, %s, %s)''',[groupNum, pid, courseNum])
            curs.execute('''insert into groupForPset(groupNum, bnumber)
            values(%s, %s)''',[groupNum, match])
        
            if matches[match]:
                curs.execute('''insert into groupForPset(groupNum, bnumber)
                values(%s, %s)''',[groupNum, matches[match]])
            groupNums.append(groupNum)
        return jsonify( {'error': False, 'match': matches, 'groupNum': groupNums})
    except Exception as err:
        return jsonify( {'error': True, 'err': str(err) } )
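Judging from the loop above, matching.match(roster) is assumed to return a dict mapping each student to an assigned partner, with a falsy value for a student left unpaired; a toy illustration of consuming that assumed shape (all ids hypothetical):

matches = {"b001": "b017", "b042": None}   # hypothetical result shape
for student, partner in matches.items():
    members = [student] + ([partner] if partner else [])
    print("group:", members)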
Example #4
def match_function():
    flash("MATCHING FACE IN PROGRESS!!!!")
    UserID = session['user_id']
    UserWorking = session['user_name']
    print(UserID, UserWorking)

    cur.execute("INSERT INTO logs(UserID, UserWorking) VALUES(%s,%s)",
                (UserID, UserWorking))
    db.commit()

    matching.match()
    if session['user_type'] == 'Admin':
        return render_template("adminedit.html")
    elif session['user_type'] == 'Guard':
        return render_template("guard.html")
    else:
        return render_template("no-account.html")
Example #5
def map2():
    if request.method == "POST":
        req = request.form
        scoords = req['scoords']
        ecoords = req['ecoords']
        # print(scoords, ecoords)
        # use parameterized queries so user-supplied values are escaped by the driver
        cursor.execute("SELECT * FROM `users` WHERE `email` = %s AND `password` = %s", (email, password))
        user = cursor.fetchone()
        pooling = 1
        cursor.execute("UPDATE `users` SET pooling = %s WHERE email = %s", (pooling, email))
        cursor.execute("UPDATE `users` SET starting_coords = %s WHERE email = %s", (scoords, email))
        cursor.execute("UPDATE `users` SET ending_coords = %s WHERE email = %s", (ecoords, email))
        conn.commit()
    
    # cursor.execute("""SELECT * FROM `users`  WHERE `email` LIKE '{}' AND `password` LIKE '{}' """.format(email,password))
    # user=cursor.fetchone()
    # A = user[10]
    # B = user[11]
    # #print(A,B)
    # ll1=A.split(',')
    # ll2=B.split(',')
    # ll1=ll1[::-1]
    # ll2=ll2[::-1]
    # a1=(ll1[0),ll1[1])
    # a2=(ll2[0],ll2[1])
    # cursor.execute("""SELECT * FROM `users`""")
    # users=cursor.fetchall()
    # start={record[1]: record[10] for record in users}
    # end={record[1]: record[11] for record in users}
    # print(start)

    cursor.execute("""SELECT * FROM `users`  WHERE `email` LIKE '{}' AND `password` LIKE '{}' """.format(email,password))
    user=cursor.fetchone()
    cursor.execute("""SELECT * FROM `users` """)
    records = cursor.fetchall()
    u1=(user[1],user[7])
    u2=(user[1],user[8])
    d1= {record[1]: record[7] for record in records}
    d2= {record[1]: record[8] for record in records}
    ss=matching.match(u2,d2)
    print(ss)
    # #latitudes and longitudes respectively (a1,a2),(b1,b2)
    

    
    #     li=matching.personality(u1,d1)

    #     if len(li)>4:
    #         mate=matching.match(u3,d3)
    #     else:
    #         mate=matching.match(u2,d2)    
    cursor.execute("""SELECT * FROM `users`  WHERE `name` LIKE '{}' """.format(ss))
    usr_phn=cursor.fetchone()
    phn=usr_phn[4]
    pooling=0
    cursor.execute("""UPDATE `users` set pooling='{}' WHERE email='{}' """.format(pooling,email))
    return render_template('map2.html',ss=ss,phn=phn)
Example #6
def match(self, other, subst):
    """
    Try to extend subst to a match from self to other. Return True
    on success, False otherwise. In the False case, subst is
    unchanged.
    """
    if self.isNegative() != other.isNegative():
        return False
    return match(self.atom, other.atom, subst)
Example #7
def main():
    img_ref = cv2.imread('../input/target.jpeg')
    overlay = cv2.imread('../input/overlay.jpg')
    cap = cv2.VideoCapture('../input/input.mp4')

    video_array = []

    # extracting descriptors for reference image
    kp1, kpo1, des1 = utils.extract_features(img_ref)

    frame_counter = 0
    total_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    while cap.isOpened():
        # reading each frame of the video
        ret, frame = cap.read()

        # save the video when reading is over
        if ret is False or (cv2.waitKey(1) & 0xFF == ord('q')):
            out = cv2.VideoWriter(
                '../output/output.avi', cv2.VideoWriter_fourcc(*'DIVX'), 15,
                (video_array[0].shape[1], video_array[0].shape[0]))

            # save each frame to video
            for i in range(len(video_array)):
                out.write(video_array[i])
            out.release()

            break

        # extracting descriptors for each frame of the video
        kp2, kpo2, des2 = utils.extract_features(frame)

        # matching the image descriptors
        matches, matches_pos = matching.match(des1, des2)

        # skip the frame if there are not at least three matches
        if len(matches) < 3:
            continue

        # obtaining the final affine matrix
        affine_matrix = affine.affine_transformation_estimation(
            kp1, kp2, matches_pos)

        # pasting the overlying image on each frame
        final_frame = utils.pasting_overlay(img_ref, frame, overlay,
                                            affine_matrix)

        video_array.append(final_frame)
        frame_counter += 1
        print('processing frame', frame_counter, 'from', total_frames)
Example #8
def matching(gg):
    n = max([max(u, v) for u, v, w in gg])
    G = [[] for i in range(n + 1)]
    for u, v, w in gg:
        G[u].append((v, w))
        G[v].append((u, w))
    mtc = match(G)
    maxw = 0
    used = [False] * (n + 1)
    for u, v, w in gg:
        # count each matched edge once and add its weight
        if mtc[u] == v and mtc[v] == u and not used[u] and not used[v]:
            used[u] = used[v] = True
            maxw += w
    return maxw
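A toy call for the wrapper above (a sketch; the exact result depends on the underlying match(), which is assumed to return a partner array mtc with mtc[u] == v for matched pairs):

gg = [(1, 2, 5), (2, 3, 4), (3, 4, 3)]   # hypothetical weighted edges (u, v, w)
print(matching(gg))                      # sums w over edges whose endpoints are matched to each other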
Example #9
def execute(question):
    paragraphs = find_related_context(question)
    if paragraphs is not None:

        answer_array = m.mrcqa_batch(paragraphs, question)
        # make response
        data = []
        if answer_array:
            answers = answer_array[0]
            for i in range(len(answers)):
                answer = answers[i]
                context = paragraphs[i]
                start_index = get_start(context, answer)
                entity = matching.match(answer)
                json_object = {
                    'context': context,
                    'answer_start': start_index[0],
                    'answer_end': start_index[0] + start_index[1],
                    'entity': [entity]
                }
                data.append(json_object)
        print(data)
        return data
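For reference, each element appended to data above has the following shape (all values hypothetical, including the entity label returned by matching.match):

sample = {
    'context': 'Paris is the capital of France.',
    'answer_start': 0,
    'answer_end': 5,
    'entity': ['Paris']
}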
Example #10
def output_rows(category, size):
    with open('catalog_dict.json') as json_data:
        catalog = json.load(json_data)
    with open('inv_catalog_dict.json') as json_data1:
        inv_catalog = json.load(json_data1)
    sample = create_sample(category, size)
    rows = []
    header = ["S NO.", "ASIN", "Title", "Brand", "Category", "Description", "Output"]
    rows.append(header)
    j = 1
    i = 1
    with open('combined.csv') as csvfile:
        csv_reader = csv.reader(csvfile, delimiter=',')
        for row in csv_reader:
            if i in sample:
                output = matching.match(row[4], catalog, inv_catalog)
                rows.append([str(j), row[0], row[1], row[2], row[3], row[4], output])
                j = j + 1
            i = i + 1
    return rows
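A short follow-up sketch showing one way to persist the rows returned above (the category, sample size, and output filename are hypothetical):

import csv

rows = output_rows("Electronics", 100)
with open("sampled_rows.csv", "w", newline="") as f:
    csv.writer(f).writerows(rows)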
Example #11
def test_matching(self):
    g = [("apple", "banana", 1),
         ("peanut", "banana", 2)]
    matches, left_remain, right_remain = matching.match(g)
    self.assertTrue(matches == [("apple", "banana")])
    self.assertTrue(left_remain == ["peanut"])
Example #12
    sc.measure_all_stabilizers()
    lc.add()

    # Decode
    lc.decode()

    # Round of perfect detection to eliminate stray errors
    if PERFECT_LAST_ROUND:
        lc.reset()
        sc.measure_all_stabilizers()
        lc.add()
        anyons_star, anyons_plaq = lc.find_anyons_all()
        match_star = matching.match(distance,
                                    anyons_star,
                                    topology,
                                    "star",
                                    time=0,
                                    weights=weights)
        match_plaq = matching.match(distance,
                                    anyons_plaq,
                                    topology,
                                    "plaq",
                                    time=0,
                                    weights=weights)
        sc.correct_error("star", match_star, cycles)
        sc.correct_error("plaq", match_plaq, cycles)

    # Check for errors in decoding and correcting
    sc.measure_all_stabilizers()
    if (sc.qubits[:, sc.tags != "Q"] == -1).any():
        print("FAILURE CORRECTING")
Example #13
from matching import match, obj, _, com, mat, case, UnmatchError, _x, _xs, isGlobal, cond
import matching
import inspect
import sys
# tests on the base types int, str, tuple, list, bool

# base value to base value test
assert (match(1, 1))
assert (match('haha', 'haha'))
assert (match(1 + 2, 3))
assert (match(True, True))
assert (match((1, 2), (1, 2)))

assert (not match(1, 'haha'))
assert (not match('lala', 'haha'))
assert (not match(1 + 2, 5))
assert (not match(True, False))
assert (not match((1, 2), (1, 4)))

# one is type, another is value
assert (match(int, 1))
assert (match(str, 'haha'))
assert (match(str, 'haha' + 'lala'))
assert (match(bool, True))
assert (match(bool, False))
assert (match(list, [1, 2, 3, 4]))
assert (match(list, ['haha', 1, 2, 3, 'lala']))
assert (match(tuple, (1, 2)))
assert (match(tuple, ('haha', 'lala')))
assert (match(1, int))
assert (match('haha', str))
Example #14
            j += 1
        i += 1

    return stitched


if __name__ == "__main__":
    img1 = cv2.imread(sys.argv[1], 0)
    h1, w1 = img1.shape
    img2 = cv2.imread(sys.argv[2], 0)
    h2, w2 = img2.shape
    fname = sys.argv[1].strip().split(
        ".")[0] + "_" + sys.argv[2].strip().split(".")[0] + ".txt"
    files = os.listdir(os.getcwd())
    if fname not in files:
        matches = m.match(sys.argv[1], sys.argv[2])
        src_points = []
        dst_points = []
        i = 0
        while i < len(matches):
            src_points.append(matches[i])
            temp = (matches[i + 1][0] - w1, matches[i + 1][1])
            dst_points.append(temp)
            i += 2
    else:
        src_points, dst_points = read_matches(fname, w1)
    H = cv2.findHomography(np.asarray(src_points),
                           np.asarray(dst_points),
                           method=0)

    identity = np.asmatrix([[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]])
Example #15
    ridge_orientation = gabor.get_orientation_map(normalized)
    ridge_frequency = gabor.get_frequency_map(normalized)
    a = np.mean(ridge_frequency)
    roi = enhancement.roi_mask(normalized)
    image = gabor.gabor_filter(normalized,
                               ridge_orientation,
                               ridge_frequency,
                               block_size=32,
                               gabor_kernel_size=16)

    # extract ROI
    image = np.where(roi == 1.0, image, 1.0)
    binarized = enhancement.binarization(image)
    thinned = enhancement.ridge_thinning(binarized)
    return thinned, ridge_orientation, ridge_frequency


if __name__ == '__main__':
    with open("../res/minutiae_indice_dx_luigi_1", "rb") as f:
        minutiae1 = pickle.load(f)

    with open("../res/minutiae_indice_dx_luigi_2", "rb") as f:
        minutiae2 = pickle.load(f)

    minutiae1, minutiae11 = minutiae1[0], minutiae1[1]
    minutiae2, minutiae22 = minutiae2[0], minutiae2[1]

    # matching
    msg = matching.match(minutiae1, minutiae2, minutiae11, minutiae22)
    print(msg)
Example #16
File: test.py Project: ablimit/ship
from matching import match
user_answer = "sweet utilized task chemistry Light and carbon dioxide water plant are transformed into stored metabolic energy through photosynthesis in the chloroplasts of the cell."

stored_answers = ["Photosynthesis is a process used by plants and other organisms to convert light energy, normally from the sun, into chemical energy that can be used to fuel the organisms' activities. Carbohydrates, such as sugars, are synthesized from carbon dioxide and water"]

print(match(stored_answers, user_answer))