Code example #1
File: run_submitted_code.py Project: amakelov/learn
def run(student_MIT_email):
    datasource = ['ciphertext']

    # Open logging file
    f = open('log/evaluation_run_submitted_code.py.log','a')
    path = './test/' + student_MIT_email
    sys.path.append(path)
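    # adding the student's folder to sys.path lets the `import decode` below
    # pick up that student's submitted decode.py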
    print 'Running: ' + student_MIT_email

    for source in datasource:
        # Create filenames
        output_fname = 'output_' + student_MIT_email + '_' + source + '.txt';
        input_fname = 'ciphers_and_messages/' + source + '.txt'

        # Get ciphertext
        ciphertext = get_text(input_fname)

        # Write to log
        f.write(source + ': Started...\n')

        # Actually run the file
        f.write('trying\n')
        print path
        try:
            import decode
            decode.decode(ciphertext, path + '/' + output_fname)
            f.write('Done!\n')
            print 'done'
        except Exception as e:
            f.write('Exception!\n')
            print 'exception:', e
Code example #2
def makechoise():
    seleccion = 0
    print '''Options:
0.- Exit
1.- Download an episode
2.- Download subtitles only
3.- Log in with your account
4.- Log in as a guest
5.- Start a manual queue
6.- Settings
7.- Auto-grab links and start download
'''
    try:
        seleccion = int(input("> "))
    except:
        try:
            os.system('cls')
        except:
            try:
                os.system('clear')
            except:
                pass
        print "ERROR: Invalid option."
        makechoise()
    if seleccion == 1 :
        ultimate.ultimate(raw_input('Please enter Crunchyroll video URL:\n'), '', '')
    elif seleccion == 2 :
        decode.decode(raw_input('Please enter Crunchyroll video URL:\n'))
    elif seleccion == 3 :
        username = raw_input(u'Username: ')
        # the original line is masked in the source dump; getpass is an assumed
        # replacement for the hidden password prompt
        password = getpass.getpass('Password (don\'t worry, the password is being typed but hidden): ')
        login.login(username, password)
        makechoise()
    elif seleccion == 4 :
        login.login('', '')
        makechoise()
    elif seleccion == 5 :
        queueu('./queue.txt')
        makechoise()
    elif seleccion == 6 :
        settings_()
        makechoise()
    elif seleccion == 7 :
        autocatch()
        queueu('./queue.txt')
    elif seleccion == 8 :
        import debug
    elif seleccion == 0 :
        sys.exit()
    else:
        try:
            os.system('cls')
        except:
            try:
                os.system('clear')
            except:
                pass
        print "ERROR: Invalid option."
        makechoise()
Code example #3
def makechoise():
    seleccion = 0
    print '''Options:
0.- Exit
1.- Download Anime
2.- Download Subtitle only
3.- Login
4.- Login As Guest
5.- Download an entire Anime(Autocatch links)
6.- Run Queue
7.- Settings
'''
    try:
        seleccion = int(input("> "))
    except:
        try:
            os.system('cls')
        except:
            try:
                os.system('clear')
            except:
                pass
        print "ERROR: Invalid option."
        makechoise()
    if seleccion == 1 :
        ultimate.ultimate(raw_input('Please enter Crunchyroll video URL:\n'), '', '')
    elif seleccion == 2 :
        decode.decode(raw_input('Please enter Crunchyroll video URL:\n'))
    elif seleccion == 3 :
        username = raw_input(u'Username: ')
        # the original line is masked in the source dump; getpass is an assumed
        # replacement for the hidden password prompt
        password = getpass.getpass('Password (don\'t worry, the password is being typed but hidden): ')
        login.login(username, password)
        makechoise()
    elif seleccion == 4 :
        login.login('', '')
        makechoise()
    elif seleccion == 5 :
        autocatch()
        queueu('.\\queue.txt')
    elif seleccion == 6 :
        queueu('.\\queue.txt')
    elif seleccion == 7 :
        settings_()
        makechoise()
    elif seleccion == 8 :
        import debug
    elif seleccion == 0 :
        sys.exit()
    else:
        try:
            os.system('cls')
        except:
            try:
                os.system('clear')
            except:
                pass
        print "ERROR: Invalid option."
        makechoise()
Code example #4
File: main.py Project: meecoder/dummyphone-app
def go():
	test = input("Press 1 for encoding and 2 for decoding, then press Enter. ")
	
	if(test == "1"):
		encode()
	elif(test == "2"):
		decode()
	else:
		print("Please type 1 or 2. Try again.")
		go()
Code example #5
File: urban.py Project: xnrand/melonbot
def getDefn(msg):
    m = re.search('^ (.*?)((\[|\()([\d]+)(\]|\)))?\s*$', msg)
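    # group(1) captures the search term; group(4) is an optional definition
    # index written as [n] or (n)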
    if m:
        word = m.group(1).replace(' ', '+').strip()
        pos = m.group(4)
        if word:
            try:
                content = decode(urlopen('http://urbandictionary.com/define.php?term=' + word).read())
            except:
                return '.: could not reach server :.'
            if pos is None:
                m = re.search('meaning\'>\s(.*?)\s</div>', content)
                if m:
                    defn = m.group(1)
                else:
                    return '.: that word is not defined :.'
            else:
                pos = int(pos) - 1
                m = re.findall('meaning\'>\s(.*?)\s</div>', content)
                if m:
                    try:
                        defn = m[pos]
                    except:
                        return 'entry ' + str(pos + 1) + ' does not exist'
                else:
                    return '.: that word is not defined :.'
            defn = re.sub('<.*?>', '', defn)
            return re.sub('\r', ' ↲', html.unescape(defn))
        else:
            return '.: no search term :.'
Code example #6
File: remotecontrol.py Project: baloothebear4/remote
 def _decode2(self, d):
     a = []
     for i in range(len(d)):
         a.append(d[i])
     r = decode.decode( a, len(d), 0)
     if verbose: print "--+ :)"
     return r
Code example #7
File: fpga.py Project: ledyba/CheckPassword80
def receiveFPGA(devname):
	com = serial.Serial(
		port=devname,
		baudrate=230400,
		bytesize=8,
		parity=serial.PARITY_NONE,
		stopbits=serial.STOPBITS_ONE,
		timeout=None,
		xonxoff=False,
		rtscts=False,
		writeTimeout=None,
		dsrdtr=False)
	data = None
	print("Receiving...");
	while True:
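		# framing as read below: a 0xFF start byte, then PASSLEN payload bytes
		# whose values index into TABLE; an ENDSTRING payload ends the stream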
		#first = ord((serial.to_bytes(com.read(1)))[0])
		first = list(com.read(1))[0]
		if first != 255: #first character
			print("oops.Invalid seq: {0}".format(first));
			continue
		#data = serial.to_bytes(com.read(PASSLEN));
		#data = map((lambda x: ord(x)), data);
		data = list(com.read(PASSLEN))
		if data == ENDSTRING:
			return;
		decoded='';
		for c in data:
			decoded += TABLE[c];
		decrypt = decode(decoded, ENCODED);
		print("{0}:{1}".format(decoded, decrypt[0:ANSLEN], decrypt[ANSLEN:]==decoded))
Code example #8
File: scipherd.py Project: china-richway2/scipher
    def process_decode(self, infile, outfile):
        inf = io.open(infile, 'r', encoding='utf-8')
        # search until blank line:
        header = ""
        header_lines = []
        for line in inf:
            line = line.rstrip()
            # we hit a blank line, and we have at least one line already
            if not line and len(header_lines) > 0:
                break
            header_lines.append(line)
            header = " ".join(header_lines)

        (conf_name, mask, version, ls_len) = decode.decode_conf_name(header)
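        # the header names the cipher configuration; version selects a
        # pre-built decode state, and unknown versions are skipped below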
        s = self.states.decode_states.get(version, None)
        if s is None:
            inf.close()
            return

        body_text = ""
        for line in inf:
            body_text += line
        inf.close()

        state = decode.DecodeState(s.common, conf_name, mask,
                                   s.header_grammar, s.body_grammar, {},
                                   s.space_before, s.space_after, decode.Done())
        msg = decode.decode(header, body_text, state, ls_len)

        outf = io.open(outfile, 'w', encoding='utf-8')
        outf.write(msg)
        outf.close()
Code example #9
File: bitly.py Project: xnrand/melonbot
def shorten(url, nick):
    try:
        content = loads(decode(urlopen('https://api-ssl.bitly.com/v3/shorten?access_token=' + access_token +
                                       '&longUrl=' + url.strip()).read()))
    except:
        return '.: could not reach server :.'
    try:
        return nick + ': ' + content['data']['url']
    except:
        return content['status_txt']
Code example #10
File: directionsAPI.py Project: kevinkuan0/eaas-demo
 def save_point_csv(self, f):
     f.write('RouteID,SegmentID,PointID,latitude,longitude\n')
     for rid in range(len(self.j['routes'])):
         ss = self.steps(rid)
         for sid in range(len(ss)):
             sss = ss[sid]
             polyline = decode(sss['polyline']['points'])
             latlngs = [(p[1], p[0]) for p in polyline[0:50]]
             for pid in range(len(latlngs)):
                 f.write('%d,%d,%d,%f,%f\n' % ((rid, sid, pid) + latlngs[pid]))
Code example #11
File: directionsAPI.py Project: kevinkuan0/eaas-demo
 def save_path_csv(self, f):
     f.write('RouteID,SegmentID,PointID,latlng1,latlng2\n')
     for rid in range(len(self.j['routes'])):
         ss = self.steps(rid)
         for sid in range(len(ss)):
             sss = ss[sid]
             polyline = decode(sss['polyline']['points'])
             latlngs = ['%fx%f' % (p[1], p[0]) for p in polyline[0:50]]
             for pid in range(len(latlngs) - 1):
                 f.write('%d,%d,%d,%s,%s\n' % (rid, sid, pid, latlngs[pid], latlngs[pid+1]))
Code example #12
File: refwork.py Project: xnrand/melonbot
def getDefn(word):
    try:
        content = decode(urlopen('http://dictionary.reference.com/browse/' + word.replace(' ', '+').strip()).read())
    except:
        return '.: that entry does not exist :.'
    m = re.search('def-content">(.*?)</div>', content, re.DOTALL)
    if m:
        defn = re.sub('<.*?>', '', m.group(1))
        return re.sub('\n', '', html.unescape(defn))
    else:
        return '.: no definitions available :.'
Code example #13
File: fcr.py Project: xnrand/melonbot
def getList():
    try:
        content = decode(urlopen('http://freechampionrotation.com').read())
    except:
        return '.: could not reach server :.'
    try:
        m = re.findall('<h1>(.*?)<', content)
        if m:
            return ', '.join(m)
    except:
        return '.: could not find free champion rotation data :.'
Code example #14
File: snarf.py Project: xnrand/melonbot
def getTitle(msg, response):
    link = re.search('((http|https)://[^ ]+)', msg)
    if link:
        try:
            content = decode(urlopen(Request(link.group(0), headers={'User-Agent': 'Mozilla/5.0'}), timeout = 8).read())
        except:
            return response
        match = re.search('<(T|t)itle>(.*?)</(T|t)itle>', content, re.DOTALL)
        if match:
            title = re.sub('\r', '', match.group(2))
            return re.sub('\n', '', html.unescape(title.strip()))
    return response
Code example #15
File: untitled0.py Project: kevinkuan0/eaas-demo
def getDSVFile(lat1, lng1, lat2, lng2, avdHigh):
    d = DirAPI()
    #d.load_old_json()
    #d.get_new_json(avoid_highways=False)
    d.get_new_json(
        origin=(lat1,lng1), 
        destination=(lat2, lng2),
        avoid_highways=avdHigh,
        alternatives=False)
        
    sample_distance = 100
    
    f = open('file', 'w')
    #f2 = open('highway_100_height', 'w')
    #f2.write('SegmentID,Segment2ID,EPointID,latitude,longitude,elevation,resolution\n')
    
    #for step in d.steps(0):
    for sid in range(len(d.steps(0))):
        step = d.steps(0)[sid]
    
        velocity = float(step['distance']['value']) / float(step['duration']['value'])
        path_ = decode(step['polyline']['points'])
        path_ = [(p[1], p[0]) for p in path_]
        spath_ = ['%f,%f' % p for p in path_]
        
        for i in range(0, len(path_), 50):
            i2 = min(len(path_), i+50)
            path = path_[i:i2]
            distance = [vincenty(path[j], path[j+1]).m for j in range(len(path) - 1)]
            mileage = sum(distance)
            
            if mileage < sample_distance: break
            samples = int(floor(mileage / sample_distance) + 1)
            
            e = ElevationAPI()
            e.get_new_json(path = '|'.join(spath_[i:i2]), samples = samples)
            
            assert len(e.points()) == samples   
            
            e_distance = [vincenty(e.latlng(ei), e.latlng(ei+1)).m for ei in range(samples-1)]
            e_height = [e.elev(ei) - e.elev(ei+1) for ei in range(samples-1)]
            e_slope = [atan2(e_height[ei], e_distance[ei]) for ei in range(samples-1)]
            
            for ei in range(samples - 1):
                f.write('%d\t%f\t%f\n' % (e_distance[ei], e_slope[ei], velocity))
    #            f.flush()
    
    #        for ei in range(samples):
    #            f2.write('%d,%d,%d,%f,%f,%f,%f\n' % (sid, i, ei, e.lat(ei), e.lng(ei), e.elev(ei), e.res(ei)))
    #            print '%d\t%f\t%f\n' % (e_distance[ei], e_slope[ei], velocity)
    
    f.close()
Code example #16
File: refwork.py Project: xnrand/melonbot
def getSyn(word):
    try:
        content = decode(urlopen('http://www.thesaurus.com/browse/' + word.replace(' ', '+').strip()).read())
    except:
        return '.: could not reach server :.'
    m = re.findall('"text">(.*?)</span>\s*<s', content)
    if m:
        if len(m) > 10:
            return ', '.join(m[:10])
        else:
            return ', '.join(m)
    else:
        return '.: no synonyms available :.'
Code example #17
File: geolocation.py Project: xnrand/melonbot
def locate(ip, nick):
    try:
        content = decode(urlopen("http://ip-api.com/csv/" + ip.strip()).read())
    except:
        return ".: could not reach server :."
    m = re.search("success,(.*?),.*?,.*?,(.*?),(.*?),.*?,.*?,.*?,(.*?),(.*?),", content)
    if m:
        result = (
            ": [ \x02City\x02: %s | \x02Region\x02: %s | \x02Country\x02: %s | \x02Timezone\x02: %s | \x02ISP\x02: %s ]"
            % (m.group(3), m.group(2), m.group(1), m.group(4), m.group(5))
        )
        return nick + result.replace('"', "")
    else:
        return ".: invalid ip address :."
Code example #18
File: reddit.py Project: xnrand/melonbot
def getThread(msg, redditList, channel):
    match = re.search('^ ([\w\d\_]+)\s*([\d]+)?\s*$', msg)
    if match:
        subreddit = match.group(1)
        pos = match.group(2)
    else:
        return None
    try:
        content = loads(decode(urlopen(Request('http://www.reddit.com/r/' + subreddit + '.json',
                                             headers={'User-Agent': 'melonbot 1.0 (used by /u/<handle_here>'})).read()))
    except:
        return '.: could not reach server :.'
    data = content['data']['children']
    if len(data) == 0:
        return '.: there doesn\'t seem to be anything here :.'
    for t in data[:]:
        try:
            if t['data']['stickied']:
                data.remove(t)
        except:
            return '.: there doesn\'t seem to be anything here :.'
    if not pos:
        if subreddit not in redditList[channel]:
            redditList[channel][subreddit] = [1, None]
        if redditList[channel][subreddit][1]:
            if (datetime.now() - redditList[channel][subreddit][1]).total_seconds() > 1800:
                redditList[channel][subreddit][0] = 1
        pos = redditList[channel][subreddit][0]
        redditList[channel][subreddit] = [pos + 1, datetime.now()]
        if redditList[channel][subreddit][0] > len(data):
            redditList[channel][subreddit][0] = 1
    else:
        redditList[channel][subreddit] = [int(pos) + 1, datetime.now()]
        if redditList[channel][subreddit][0] > len(data):
            redditList[channel][subreddit][0] = 1
    pos = int(pos) - 1
    try:
        title = [data[pos]['data']['title'], data[pos]['data']['url'], data[pos]['data']['id']]
    except:
        return 'entry ' + str(pos + 1) + ' does not exist'
    if data[pos]['data']['over_18']:
        if 'nsfw' not in title[0].lower():
            title[0] = '[NSFW] ' + title[0]
    if not data[pos]['data']['is_self']:
        return str(pos + 1) + ') ' + title[0] + '  ::  ' + title[1] + '  ::  ' + \
               'http://redd.it/' + title[2]
    else:
        return str(pos + 1) + ') ' + title[0] + '  ::  ' + title[1]
Code example #19
File: avahi-test.py Project: hatstand/dacp
def pair(address, port, pair):
  merged = StringIO.StringIO()
  merged.write(pair)
  for c in pin:
    merged.write(c)
    merged.write("\x00")

  found = md5.new(merged.getvalue()).hexdigest()
  print 'MD5: %s' % found.upper()

  url = 'http://' + address + ':' + str(port) + '/pair?pairingcode=' + found.upper() + '&servicename=' + service_name
  print url

  reply = urllib2.urlopen(url).read()
  decoded = decode.decode([c for c in reply], len(reply), 0)
  print decoded
Code example #20
File: client.py Project: celskeggs/codeday-fall-2014
def threadbody():
    assert parse4(getall(4, s)) == 0xD007D074
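    # the assert consumes a 4-byte magic value; after that, each message is a
    # 6-byte header (4-byte length + 2-byte type id, via parse4/parse2)
    # followed by `length` payload bytes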
    while 1:
        header = getall(6, s)
        length = parse4(header[0:4])
        typeid = parse2(header[4:6])
        data = getall(length, s)
        if typeid == 0x0102:
            if data in dictionary:
                del dictionary[data]
        elif typeid == 0x0204:
            namelen = ord(data[0])
            key = data[1:namelen+1]
            body = data[namelen+1:]
            dictionary[key] = decode.decode(body)
        elif typeid == 0x0306:
            chatlines.put(data)
        elif typeid == 0x0408:
            global local_id
            local_id = ord(data[0])
        else:
            raise Exception("unhandled data command: %d" % typeid)
Code example #21
File: disassembler.py Project: twotymz/sms
    if len (args) == 0 :
        print 'Usage: disassembler.py rom'
        exit (1)

    sms.loadRom (sys.argv[1])

    while len (_queue) > 0 :

        pc = _queue[0]
        del _queue[0]

        _labels.append (pc)

        while pc < len (sms.rom) :

            decoded = decode.decode (pc)

            if pc not in _instructions :
                _instructions[pc] = decoded

            #print '{0:04X} \t{1:06X} {2}'.format (pc, (decoded['prefix'] << 8) | decoded['opcode'], decoded['mnemonic'])
            pc += decoded['bytes']

            if decoded['prefix'] == 0x00 :

                # JMP
                if decoded['opcode'] == 0xC3 :
                    if decoded['immediate'] not in _queue and decoded['immediate'] not in _labels :
                        _queue.append (decoded['immediate'])
                        break
Code example #22
File: response.py Project: baloothebear4/remote
 def _decode2(self, d):
     a = []
     for i in range(len(d)):
         a.append(d[i])
     return decode.decode(a, len(d), 0)
Code example #23
                                                     [1 - args.weight]],
                                             hash_length=args.hash,
                                             verbose=args.verbose,
                                             num_iters=args.it,
                                             uncertainty=args.uncertainty)

            pr = prs[-1]

        elif args.weight_model is not None or args.weight != 1.0:
            pr, priors, weights, combined_priors = decode(
                data.input,
                model,
                sess,
                branch_factor=args.branch,
                beam_size=args.beam,
                weight=[[args.weight], [1 - args.weight]],
                out=None,
                hash_length=args.hash,
                weight_model_dict=weight_model_dict,
                verbose=args.verbose,
                gt=data.target if args.gt else None,
                weight_model=weight_model)
        else:
            pr = (data.input > 0.5).astype(int)

        # Save output
        if args.save is not None:
            np.save(os.path.join(args.save, fn.replace('.mid', '_pr')), pr)
            np.savetxt(os.path.join(args.save, fn.replace('.mid', '_pr.csv')),
                       pr)
            if (args.weight_model is not None
Code example #24
File: test.py Project: amakelov/learn
    output_filenames.append('./test_output/deciphered_' + str(i) + '.txt')


ciphers = [utils.random_cipher() for i in range(4)]
ciphertexts = [utils.encipher(ciphers[i], plaintexts[i]) for i in range(4)]


"""
STATISTICS
"""
test_mode = True
if test_mode:
    for text in plaintexts:
        symbol_freqs = utils.symbol_freq(text, sort=True)
        freqs_followed_by = utils.freq_followed_by(text, ' ', sort=True)
        most_frequent_words = utils.frequent_word_freqs(text, sort=True)
        #print "5 most frequent symbols: {}".format(symbol_freqs[:5])
        #print "Top by freq followed by space: {}".format(freqs_followed_by[:4])
        print "Most frequent words: {}".format(most_frequent_words[:5])


    accuracies = []

    for i in range(4):
        f = decode(ciphertexts[i], output_filenames[i])
        accuracy = utils.accuracy(f, ciphertexts[i], plaintexts[i])
        print "Accuracy: {}".format(accuracy)
        accuracies.append(accuracy)
    print accuracies

Code example #25
 def testDecode(self):
     clearText = 'the quick vsjdjshsd'
     whitespace = self.words_to_whitespace(clearText)
     self.assertEqual(decode(StringIO(whitespace)), clearText)
Code example #26
File: avr.py Project: ITikhonov/avremu
####################################################

def reset():
	setSP(0x8fe)


from time import time

reset()

start=time()
while True:
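	# fetch-decode-execute loop: try a 16-bit decode first and fall back to the
	# 32-bit decode32() using the next flash word when decode() returns nothing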
	A.run()
	x=A.FLASH[A.PC]
	if verbose: print '(%08u %04x:'%(A.CLOCKS,A.PC*2,),
	y=decode(x); A.PC+=1
	if not y:
		y=decode32(x,A.FLASH[A.PC])
		if A.SKIPNEXT: A.CLOCKS+=1
		A.PC+=1

	if A.SKIPNEXT:
		print 'SKIP'
		A.SKIPNEXT=False
		continue

	if verbose: print y

	#A.store()
	cl=locals()['avr_'+y[0]](*y[1])
	#A.compare()
Code example #27
def reconstruct(scandir='../scans_undistort/manny/grab_0_u/', thresh=0.015):
    def _intersect_matlab(a, b):
        a1, ia = np.unique(a, return_index=True)
        b1, ib = np.unique(b, return_index=True)
        aux = np.concatenate((a1, b1))
        aux.sort()
        c = aux[:-1][aux[1:] == aux[:-1]]
        return c, ia[np.isin(a1, c)], ib[np.isin(b1, c)]

    def _find_index_good(goodpixels):
        assert (np.shape(goodpixels) == (H, W))
        # return a 1D index array of goodpixels
        ret = [[], []]

        for i in range(H):
            for j in range(W):
                if goodpixels[i][j]:
                    ret[0].append(j)
                    ret[1].append(i)

        return np.array(ret)

    # read calibration data saved from last calibration run
    with open("../cache/C0_CALIB.pkl", "rb") as c0:  # right
        R_mat, R_rvec, R_tvec, R_dist = pickle.load(c0)

    with open("../cache/C1_CALIB.pkl", "rb") as c1:  # left
        L_mat, L_rvec, L_tvec, L_dist = pickle.load(c1)

    # set calibration data selection index
    SELECT = 2

    ######################################################
    # start reconstruction
    R_h, R_h_good = dc.decode(scandir + 'frame_C0_', 0, 19, thresh)
    R_v, R_v_good = dc.decode(scandir + 'frame_C0_', 20, 39, thresh)
    L_h, L_h_good = dc.decode(scandir + 'frame_C1_', 0, 19, thresh)
    L_v, L_v_good = dc.decode(scandir + 'frame_C1_', 20, 39, thresh)

    # save image size info
    assert (np.shape(R_h) == np.shape(L_v))
    H, W = np.shape(R_h)

    # combine horizontal and vertical by bit shift + and operation
    L_h_shifted = np.left_shift(L_h.astype(int), 10)
    R_h_shifted = np.left_shift(R_h.astype(int), 10)
    L_C = np.bitwise_or(L_h_shifted, L_v.astype(int))
    R_C = np.bitwise_or(R_h_shifted, R_v.astype(int))

    L_good = np.logical_and(L_v_good, L_h_good)
    R_good = np.logical_and(R_v_good, R_h_good)

    # now perform background subtraction
    R_color = dc.im2double(
        cv2.imread(scandir + 'color_C0_01.png', cv2.IMREAD_COLOR))
    R_background = dc.im2double(
        cv2.imread(scandir + 'color_C0_00.png', cv2.IMREAD_COLOR))
    L_color = dc.im2double(
        cv2.imread(scandir + 'color_C1_01.png', cv2.IMREAD_COLOR))
    L_background = dc.im2double(
        cv2.imread(scandir + 'color_C1_00.png', cv2.IMREAD_COLOR))

    R_colormap = abs(R_color - R_background)**2 > thresh
    L_colormap = abs(L_color - L_background)**2 > thresh

    R_ok = np.logical_or(R_colormap[:, :, 0], R_colormap[:, :, 1])
    R_ok = np.logical_or(R_colormap[:, :, 2], R_ok)
    L_ok = np.logical_or(L_colormap[:, :, 0], L_colormap[:, :, 1])
    L_ok = np.logical_or(L_colormap[:, :, 2], L_ok)

    R_good = np.logical_and(R_ok, R_good)
    L_good = np.logical_and(L_ok, L_good)

    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(15, 20))
    ax1.imshow(L_C * L_good, cmap='jet')
    ax1.set_title('Left')
    ax2.imshow(R_C * R_good, cmap='jet')
    ax2.set_title('Right')
    plt.show()

    # find coordinates of pixels that were successfully decoded
    # R_coord, L_coord in 1D indices
    R_coord = _find_index_good(R_good)
    L_coord = _find_index_good(L_good)

    # pull out CODE values at successful pixels
    # (notice this is a little bit different from MATLAB)
    R_C_good = R_C[R_good]
    L_C_good = L_C[L_good]

    # perform intersection
    matched, iR, iL = _intersect_matlab(R_C_good, L_C_good)

    # get pixel coordinates of pixels matched
    # change R_coord, L_coord to 2D first
    xR = R_coord[:, iR]
    xL = L_coord[:, iL]

    # Now, triangulate the matched pixels using the first calibration result
    camL = (L_mat, L_rvec[SELECT], L_tvec[SELECT])
    camR = (R_mat, R_rvec[SELECT], R_tvec[SELECT])

    X = tr.triangulate(xL, xR, camL, camR)

    # Display triangulation result
    fig = plt.figure()
    ax = Axes3D(fig)
    ax.scatter(X[0, :], X[1, :], X[2, :])
    ax.view_init(45, 0)

    ax.set_xlabel('x axis')
    ax.set_ylabel('y axis')
    ax.set_zlabel('z axis')

    plt.show()

    # save to MATLAB file for easier 3D viewing
    import scipy.io
    scipy.io.savemat('../cache/reconstructed.mat', mdict={'X': X})

    # return reconstruction result for meshing
    return [X, xL, xR, L_color, R_color]
Code example #28
 def decodeObject(dictionary):
     dictionary["Value"] = decode(dictionary["Value"])
     return dictionary
Code example #29
from decode import decode
from encode import encode

message = "100010001"
encoded = encode(message)
wrongMessage = "000000000"
rec = decode(wrongMessage + encoded)
print(rec)
print(message == rec)
Code example #30
    args = parser.parse_args()
    data_root = pathlib.Path(args.data_dir)
    serial_dir = pathlib.Path(args.serial_dir)
    pred_dir = pathlib.Path(args.pred_dir)

    pred_dir.mkdir(parents=True, exist_ok=True)
    test_dir = data_root / args.test_file

    uncollated_pred_path = pred_dir / "pred.json"
    uncollated_pred_path_decode = pred_dir / "decode.json"
    uncollated_pred_path_tsv = pred_dir / "pred.tsv"

    allennlp_command = [
        "allennlp", "predict",
        str(serial_dir),
        str(test_dir), "--predictor dygie", "--include-package dygie",
        "--use-dataset-reader", "--output-file",
        str(uncollated_pred_path), "--cuda-device", args.device
    ]
    subprocess.run(" ".join(allennlp_command), shell=True, check=True)

    in_data = load_jsonl(str(uncollated_pred_path))
    out_data = decode(in_data)
    save_jsonl(out_data, str(uncollated_pred_path_decode))
    dataset = document.Dataset.from_jsonl(str(uncollated_pred_path_decode))
    pred = format_dataset(dataset)
    pred.to_csv(str(uncollated_pred_path_tsv),
                sep="\t",
                float_format="%0.4f",
                index=False)
Code example #31
import encode
import decode
print(encode.encode('1001101', 2, 3))
print(decode.decode(encode.encode('100110100101', 3, 4)))
import cv2
x = cv2.imread('green.jpg')
print(x)
Code example #32
File: main.py Project: mmTareque01/Steganography
#
from PIL import Image
import encode
import decode

dataEncoder = encode.encode()
dataDecoder = decode.decode()  #decoder object


def main():
    a = int(
        input(":: Welcome to Steganography ::\n"
              "1. Encode\n2. Decode\n\n> "))
    if (a == 1):
        dataEncoder.encodeTextInImage()
        print("Your stegan is ready!!!!")

    elif (a == 2):
        hideData = dataDecoder.decodeTextFromImage()
        print("Decoded data : " + hideData)
    else:
        raise Exception("Enter correct input")


# Driver Code
if __name__ == '__main__':

    # Calling main function
    main()
Code example #33
 def decode(self, output_name):
     decode.decode(self.events, output_name)
Code example #34
def main():
    """
    #TODO: Perform outlined tasks in assignment, like loading alignment
    models, computing BLEU scores etc.

    (You may use the helper functions)

    It's entirely upto you how you want to write Task5.txt. This is just
    an (sparse) example.
    """

    ## Write Results to Task5.txt (See e.g. Task5_eg.txt for ideation). ##
    '''
    f = open("Task5.txt", 'w+')
    f.write(discussion)
    f.write("\n\n")
    f.write("-" * 10 + "Evaluation START" + "-" * 10 + "\n")

    for i, AM in enumerate(AMs):

        f.write(f"\n### Evaluating AM model: {AM_names[i]} ### \n")
        # Decode using AM #
        # Eval using 3 N-gram models #
        all_evals = []
        for n in range(1, 4):
            f.write(f"\nBLEU scores with N-gram (n) = {n}: ")
            evals = _get_BLEU_scores(...)
            for v in evals:
                f.write(f"\t{v:1.4f}")
            all_evals.append(evals)

        f.write("\n\n")

    f.write("-" * 10 + "Evaluation END" + "-" * 10 + "\n")
    f.close()
    '''
    scores = []
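    # train one language model and four alignment models over increasing
    # amounts of training data (1k / 10k / 15k / 30k sentence pairs)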
    LM = _getLM('/u/cs401/A2 SMT/data/Hansard/Training/', 'e', 'lme')
    AM1k, AM10k, AM15k, AM30k  = _getAM('/u/cs401/A2 SMT/data/Hansard/Training/', 1000, 5, 'am1k'), _getAM('/u/cs401/A2 SMT/data/Hansard/Training/', 10000, 5, 'am10k'), \
                                 _getAM('/u/cs401/A2 SMT/data/Hansard/Training/', 15000, 5, 'am15k'), _getAM('/u/cs401/A2 SMT/data/Hansard/Training/', 30000, 5, 'am30k')

    ams = [AM1k, AM10k, AM15k, AM30k]
    with open('/u/cs401/A2 SMT/data/Hansard/Testing/Task5.f') as fre, open(
            '/u/cs401/A2 SMT/data/Hansard/Testing/Task5.e') as enh, open(
                '/u/cs401/A2 SMT/data/Hansard/Testing/Task5.google.e') as eng:
        fsents, esents, esentsg = fre.readlines(), enh.readlines(
        ), eng.readlines()

    for i in range(len(fsents)):
        scores_inner = []
        fsent, refs = " ".join(preprocess(fsents[i], 'f').split()[1:-1]), [
            esents[i].strip(), esentsg[i].strip()
        ]
        for am in ams:
            decoded = decode(fsent, LM, am)
            print(decoded)
            print(refs)
            b1 = BLEU_score(decoded, refs, 1, True)
            b1_ = BLEU_score(decoded, refs, 1, False)
            b2 = BLEU_score(decoded, refs, 2, True)
            b2_ = BLEU_score(decoded, refs, 2, False)
            b3 = BLEU_score(decoded, refs, 3, True)

            b2 = b2 * (b1_**0.5)  # get the true BLEU score

            b3 = b3 * (b1_**(1 / 3)) * ((b2_**2)**(1 / 3))

            scores_inner.append([b1, b2, b3])
        scores.append(scores_inner)

    with open('Task5.txt', 'w') as f:
        f.write(discussion)
        for item in scores:
            f.write("%s\n" % item)
Code example #35
File: main.py Project: christopherwoodall/dcf77-1
def main():
    reader = DCF77(FILE)
    for minute in reader.run():
        decode(minute)
Code example #36
File: test_it.py Project: chrisesharp/aoc-2020
 def test_check_fields(self):
     code = "FBFBBFFRLR"
     self.assertEqual(44, decode_row(code))
     self.assertEqual(5, decode_column(code))
     self.assertEqual(357, decode(code))
Code example #37
from encode import encode
from decode import decode
from encrypt import encrypt
from decrypt import decrypt

s = input("Enter Any string to test: ")
key = input("Enter Any key in binary : ")
encoded_string = encode(s)
encrypted_string = encrypt(encoded_string, key)
decrypted_string = decrypt(encrypted_string, key)
decoded_string = decode(decrypted_string)
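
# round-trip check (added for illustration; assumes decode/decrypt invert
# encode/encrypt):
print(decoded_string == s)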
Code example #38
File: dacp-server.py Project: hatstand/dacp
def dec(str):
  return decode.decode([c for c in str], len(str), 0)
Code example #39
image_file = os.path.join('images',image_name)   #images/car3.jpg
image_detection = os.path.join('images',"dect_car3.jpg") #images/dect_car3.jpg

image = cv2.imread(image_file)  #read the image, images/car3.jpg
image_shape = image.shape[:2]

input_size = (416,416)

image_cp = preprocess_image(image)  # preprocess the image: resize, normalize, add a batch dimension at axis 0
tf_image = tf.placeholder(tf.float32,[1,input_size[0],input_size[1],3])  # define the input placeholder
model_output = darknet(tf_image)  # network output

output_sizes = input_size[0]//32, input_size[1]//32 # the feature map is the input downsampled by a factor of 32

# decode returns box coordinates (top-left and bottom-right corners), objectness confidence, and class confidence
output_decoded = decode(model_output=model_output,output_sizes=output_sizes, num_class=len(class_names),anchors=anchors)




with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())   # initialize TensorFlow global variables
    saver = tf.train.Saver()
    saver.restore(sess, model_path)  # restore the saved model into the current session
    bboxes, obj_probs, class_probs = sess.run(output_decoded, feed_dict={tf_image: image_cp})  # box coordinates, objectness confidence, class confidence


bboxes,scores,class_max_index = postprocess(bboxes,obj_probs,class_probs,image_shape=image_shape)   # post-process the candidates: keep boxes above the 0.5 threshold, then apply non-maximum suppression
colors = generate_colors(class_names)
img_detection = draw_detection(image, bboxes, scores, class_max_index, class_names, colors)  # draw the detections on the image
Code example #40
File: test_it.py Project: chrisesharp/aoc-2020
 def test_check_fields_4(self):
     code = "BBFFBBFRLL"
     self.assertEqual(102, decode_row(code))
     self.assertEqual(4, decode_column(code))
     self.assertEqual(820, decode(code))
Code example #41
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 31 12:12:22 2017

@author: tsalo
"""
import pandas as pd
import decode

df1 = pd.read_csv('clusters.csv')
df2 = pd.read_csv('terms.csv', index_col='id')

for c in sorted(df1['cluster'].unique()):
    sel_ids = df1.loc[df1['cluster']==c]['id'].values
    p_df = decode.decode(df2, sel_ids)
    p_df.to_csv('cluster{0}_pvalues.csv'.format(c), index=False)
Code example #42
File: test_it.py Project: chrisesharp/aoc-2020
 def test_check_fields_3(self):
     code = "FFFBBBFRRR"
     self.assertEqual(14, decode_row(code))
     self.assertEqual(7, decode_column(code))
     self.assertEqual(119, decode(code))
Code example #43
if arg.episode_number:
    epnum = arg.episode_number[0]
else:
    epnum = ''
if arg.guest:
    login.login('', '')
if arg.login:
    username = arg.login[0]
    password = arg.login[1]
    login.login(username, password)
if arg.debug:
    import debug
    sys.exit()
if arg.subs_only:
    if arg.url:
        decode.decode(page_url)
    else:
        decode.decode(raw_input('Please enter Crunchyroll video URL:\n'))
    sys.exit()
if arg.default_settings:
    defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa,
                    ilocalizecookies, ionlymainsub, iconnection_n_)
    sys.exit()
if arg.queue:
    queueu(arg.queue)
if arg.url and not arg.subs_only:
    ultimate.ultimate(page_url, seasonnum, epnum)
else:
    makechoise()

#print 'username'
Code example #44
File: test_it.py Project: chrisesharp/aoc-2020
 def test_check_fields_2(self):
     code = "BFFFBBFRRR"
     self.assertEqual(70, decode_row(code))
     self.assertEqual(7, decode_column(code))
     self.assertEqual(567, decode(code))
Code example #45
from safe import safe
from cezar import cezar
from random import randint
from decode import decode

# words from 0 to 420000

file = open("words.txt", "r")
list = []

for i in range(0, 420000):
    list.append(file.readline().lower())

gen = safe(list)

pas = gen.SelectPassword()

diff = randint(0, 20)

print("password before cezar is " + pas)

pas = cezar(pas, diff)

print("password after cezar is " + pas)

decod = decode(list, pas)

password = decod.dec()

print("word used: " + password)
Code example #46
def main():
    parser = create_arg_parser()
    args = parser.parse_args()
    if args.pseudo == "None":
        args.pseudo = None

    if not path.exists(args.out_dir):
        os.mkdir(args.out_dir)
        print("# Make directory: {}".format(args.out_dir))

    # Log
    model_id = create_model_id(args)
    log_dir = path.join(args.out_dir, model_id)
    if path.exists(log_dir):
        raise FileExistsError("'{}' Already exists.".format(log_dir))
    os.mkdir(log_dir)
    print(log_dir)
    set_log_file(log_dir, "train", model_id)
    log = StandardLogger(path.join(log_dir, "log-" + model_id + ".txt"))
    log.write(args=args, comment=model_id)
    write_args_log(args, path.join(log_dir, "args.json"))

    # Seed
    torch.manual_seed(args.seed)

    # Load Dataset
    data_train = load_dataset(args.train, args.data_size)
    data_pseudo = load_dataset(args.pseudo,
                               args.data_size) if args.pseudo else []
    if args.train_method == "concat":
        data_train += data_pseudo
    data_dev = load_dataset(args.dev, 100)

    data_train = NtcBucketIterator(
        data_train,
        args.batch_size,
        shuffle=True,
        multi_predicate=args.multi_predicate,
        zero_drop=args.zero_drop,
        bert=args.bert,
        loss_stop=args.loss_stop,
        load_cpu=args.load_cpu,
        mapping_pseudo_train=args.mapping_pseudo_train,
        bert_embed_file=args.train_bert_embed_file,
        pseudo_bert_embed_file=args.pseudo_bert_embed_file)
    data_dev = NtcBucketIterator(data_dev,
                                 args.batch_size,
                                 multi_predicate=args.multi_predicate,
                                 bert=args.bert,
                                 load_cpu=args.load_cpu,
                                 bert_embed_file=args.dev_bert_embed_file)
    if args.train_method == "pre-train":
        data_pseudo = NtcBucketIterator(
            data_pseudo,
            args.batch_size,
            shuffle=True,
            multi_predicate=args.multi_predicate,
            zero_drop=args.zero_drop,
            bert=args.bert,
            loss_stop=args.loss_stop,
            load_cpu=args.load_cpu,
            mapping_pseudo_train=args.mapping_pseudo_train,
            pseudo_bert_embed_file=args.pseudo_bert_embed_file)

    bert_vec_holder = None
    if args.epoch_shuffle:
        bert_vec_holder = BertVecHolder(
            train_json=args.train,
            train_hdf5=args.train_bert_embed_file,
            pseudo_json=args.pseudo,
            pseudo_hdf5=args.pseudo_bert_embed_file,
            data_size=args.data_size)

    word_embedding_matrix = pretrained_word_vecs(
        args.wiki_embed_dir, "/wordIndex.txt") if args.wiki else None
    model = PackedE2EStackedBiRNN(
        hidden_dim=args.hidden_dim,
        n_layers=args.n_layers,
        out_dim=4,
        embedding_matrix=word_embedding_matrix,
        fixed_word_vec=args.fixed_word_vec,
        multi_predicate=args.multi_predicate,
        use_wiki_vec=args.wiki,
        use_bert_vec=args.bert,
        bert_dim=BERT_DIM,
        train_bert_embed_file=args.train_bert_embed_file,
        dev_bert_embed_file=args.dev_bert_embed_file,
        pseudo_bert_embed_file=args.pseudo_bert_embed_file,
        load_cpu=args.load_cpu,
        dropout=args.dropout,
        embed_dropout=args.embed_dropout)

    if torch.cuda.is_available():
        model = model.cuda()

    # Training Method
    print("# Training Method: {}".format(args.train_method), flush=True)
    if args.train_method == "pre-train":
        pretrain_best_thresh = train(log_dir, data_pseudo, data_dev, model,
                                     model_id, args.max_epoch, args.pseudo_lr,
                                     args.pseudo_lr / 20, args.half_checkpoint,
                                     bert_vec_holder, "pretrained_")
        with open(path.join(log_dir, "best.pretrain_thresh"), "w") as fo:
            json.dump(pretrain_best_thresh, fo)
    best_thresh = train(log_dir, data_train, data_dev, model, model_id,
                        args.max_epoch, args.lr, args.lr / 20,
                        args.half_checkpoint, bert_vec_holder)
    with open(path.join(log_dir, "best.thresh"), "w") as fo:
        json.dump(best_thresh, fo)
    log.write_endtime()

    if args.decode:
        data_decode = load_dataset(
            args.test, 100) if args.test else load_dataset(args.dev, 100)
        data_decode = NtcBucketIterator(
            data_decode,
            args.batch_size,
            bert=args.bert,
            multi_predicate=args.multi_predicate,
            decode=True,
            load_cpu=args.load_cpu,
            bert_embed_file=args.test_bert_embed_file
            if args.test else args.dev_bert_embed_file)
        tag = "test" if args.test else "dev"
        if args.train_method == "pre-train":
            new_model_id = model_id + "-" + "-".join(
                str(i) for i in pretrain_best_thresh)
            model.load_state_dict(
                torch.load(log_dir + "/pretrained_model-" + model_id + ".h5"))
            if args.test:
                model.dev_bert_vec = h5py.File(args.test_bert_embed_file, "r")
            decode(log_dir, data_decode, "pretrained_" + tag, model,
                   new_model_id, pretrain_best_thresh)
        new_model_id = model_id + "-" + "-".join(str(i) for i in best_thresh)
        model.load_state_dict(
            torch.load(log_dir + "/model-" + model_id + ".h5"))
        decode(log_dir, data_decode, tag, model, new_model_id, best_thresh)
Code example #47
start = time.time()
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
end = time.time()

# show timing information on text prediction
print("[INFO] text detection took {:.6f} seconds".format(end - start))


# NMS on the the unrotated rects
confidenceThreshold = args['min_confidence']
nmsThreshold = 0.4

# decode the blob info
(rects, confidences, baggage) = decode(scores, geometry, confidenceThreshold)

offsets = []
thetas = []
for b in baggage:
    offsets.append(b['offset'])
    thetas.append(b['angle'])

##########################################################

functions = [nms.felzenszwalb.nms, nms.fast.nms, nms.malisiewicz.nms]

print("[INFO] Running nms.boxes . . .")

for i, function in enumerate(functions):
Code example #48
            print("Blue2")
        else:
            next_class = 3
            print("Green2")

        if not ts:
            if prev_class == 0:
                if next_class == 1:
                    print("Transmission started2")
                    m2 = "1"
                    ts = 1
                if next_class == 2:
                    print("Transmission started2")
                    m2 = "0"
                    ts = 1
        else:
            if prev_class == 3 and next_class == 1:
                m2 = m2 + "1"
            elif prev_class == 3 and next_class == 2:
                m2 = m2 + "0"
            if prev_class == 1 and next_class == 2:
                # print(m2[:-1])
                print("Transmission ended2")
                break
        prev_class = next_class

    print("The first message is: " + decode(m1[:-1]))
    print("The second message is: " + decode(m2[:-1]))

    cap.release()
    cv2.destroyAllWindows()
Code example #49
from decode import decode
import sqlite3
import geopandas as gpd

con = sqlite3.connect("ne.gpkg")
c = con.cursor()

c.execute("SELECT geom FROM out")
r = c.fetchall()
row = r[0]
b = row[0]

decode(b)
Code example #50
File: test.py Project: bcoppens/visual-cpu-uarch-sim
from decode import decode

# int f(int a, int b) {
#        int j = a + b;
#        return j + 2;
# }
# @O0, -march=rv32i

f = [
    0xfd010113,  #               addi    sp,sp,-48
    0x02812623,  #               sw      s0,44(sp)
    0x03010413,  #               addi    s0,sp,48
    0xfca42e23,  #               sw      a0,-36(s0)
    0xfcb42c23,  #               sw      a1,-40(s0)
    0xfdc42703,  #               lw      a4,-36(s0)
    0xfd842783,  #               lw      a5,-40(s0)
    0x00f707b3,  #               add     a5,a4,a5
    0xfef42623,  #               sw      a5,-20(s0)
    0xfec42783,  #               lw      a5,-20(s0)
    0x00278793,  #               addi    a5,a5,2
    0x00078513,  #               mv      a0,a5
    0x02c12403,  #               lw      s0,44(sp)
    0x03010113,  #               addi    sp,sp,48
    0x00008067,  #               ret
]

for i in f:
    inst = decode(i)
    print(str(inst))
Code example #51
            result = encode(file_name=arg_dict["input_file_name"],
                            output_name=arg_dict["output_file_name"],
                            encoding=arg_dict["encoding"],
                            base_value=arg_dict["base_value"])
            for i in range(arg_dict["recursive"] - 1):
                result = encode(file_name=arg_dict["output_file_name"],
                                output_name=arg_dict["output_file_name"],
                                encoding=arg_dict["encoding"],
                                base_value=arg_dict["base_value"])
                if result == -1:
                    raise Exception()

        elif arg_dict["method"] == "decode":
            for i in range(arg_dict["recursive"] - 1):
                result = decode(file_name=arg_dict["input_file_name"],
                                output_file_name=arg_dict["input_file_name"],
                                encoding=arg_dict["encoding"],
                                base_value=arg_dict["base_value"])
                if result == -1:
                    raise Exception()
            result = decode(file_name=arg_dict["input_file_name"],
                            output_file_name=arg_dict["output_file_name"],
                            encoding=arg_dict["encoding"],
                            base_value=arg_dict["base_value"])
        else:
            # Invalid method was given
            raise getopt.error(
                "Invalid method was given, enter 'encode' or 'decode' in the arguments"
            )

        if result == -1:
            # Operation failed
Code example #52
File: parseString.py Project: brabeeba/Parser
		def decodeObject(dictionary):
			dictionary["Value"] = decode(dictionary["Value"])
			return dictionary
Code example #53
File: main.py Project: Moodrammer/LZ77
imageFilePath = input("Enter the image file path without quotes: ")
file_name, file_ext = os.path.splitext(imageFilePath)

SlidingWindowSize = int(input("Enter the Sliding window size: "))
lookAheadBufferSize = int(input("Enter the lookAheadBuffer size: "))
SearchBufferSize = SlidingWindowSize - lookAheadBufferSize

# Selecting the mode of saving the encoded image either in one or two files according to the sliding window
singlefileMode = 0
if SlidingWindowSize < 256:
    singlefileMode = 1
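# (presumably a window under 256 lets match offsets fit in a single byte,
# allowing the encoded output to be packed into one file)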

originalImage = np.array(cv2.imread(imageFilePath, 0), dtype=np.uint8)
numberOfRows = originalImage.shape[0]
numberOfColumns = originalImage.shape[1]

flattenedImage = np.reshape(originalImage, (1, originalImage.size))
print("1. flattened image vector")
print(flattenedImage)

# encoding the image
print("2. encoding ..")
encode.encode(flattenedImage, SearchBufferSize, lookAheadBufferSize,
              singlefileMode)

# decoding
print("3. decoding ..")
decode.decode(numberOfRows, numberOfColumns, singlefileMode, file_ext)
print("DecodedImage file was generated in the project directory")
Code example #54
        while (hasMore == 1):
            payload['offset'] = nextOffset
            temp = request_data_vk(payload)
            temp = cut_trash(temp.text, r'{.*}')
            nextOffset = temp["nextOffset"]
            hasMore = temp["hasMore"]
            data.append(temp)
        with open(playlist_file, "w") as save_file:
            json.dump(data, save_file)
    else:
        print("# File playlist_file exist!")
        with open(playlist_file, 'r') as json_file:
            data = json_file.read()
        data = json.loads(data)

    # output data.json
    parser = HTMLParser()
    cnt = 0
    for i in data:
        for line in i["list"]:
            if line[2]:
                sys.stdout.write(
                    "ffmpeg -i " + "'" +
                    (decode.decode(line[2], line[1]) if decode.
                     check(line[2]) else line[2]) + "'" + " -codec copy " +
                    "'" + ' '.join(
                        getAllowName(parser.unescape(line[4] + " - " +
                                                     line[3])).split()) +
                    ".mp3" + "'" + "\n")
                cnt += 1
Code example #55
if arg.episode_number:
    epnum = arg.episode_number[0]
else:
    epnum = ''
if arg.guest:
    login.login('', '')
if arg.login:
    username = arg.login[0]
    password = arg.login[1]
    login.login(username, password)
if arg.debug:
    import debug
    sys.exit()
if arg.subs_only:
    if arg.url:
        decode.decode(page_url)
    else:
        decode.decode(raw_input('Please enter Crunchyroll video URL:\n'))
    sys.exit()
if arg.default_settings:
    defaultsettings(iquality, ilang1, ilang2, iforcesub, iforceusa, ilocalizecookies)
    sys.exit()
if arg.queue:
    queueu(arg.queue)
if arg.url and not arg.subs_only:
    ultimate.ultimate(page_url, seasonnum, epnum)
else:
    makechoise()


Code example #56
For this assignment, we will practice the use of imports to encrypt and decrypt messages.
The functions are already contained in the files.  Your job is to use them to encrypt and decrypt strings.  Good luck
'''

import encryption_key

import decode

import encode

#1 Decrypt this message using imports from the decode.py and encryption_key.py.  Make the result print in a friendly format that is easy for the user to understand. (10pt)
print("Problem 1")

encrypted_message = "¿®ª–ªÈٮϘT¤ÕEӘ¹âeC“íœÉÁϺŠ¢¡i¸–ºÇ—¿’"

message1 = decode.decode(encryption_key.key, encrypted_message)

print(message1)

#2 Encrypt your name and print the encrypted result.  Make the result print in a friendly format that is easy for the user to understand. (5pt)
print("\n Problem 2")

encrypted_name = "Austin Phillips O'Toole"


message2 = encode.encode(encryption_key.key, name)

print("The encrypted name is \"" + message2 + "\"")

#3 Decrypt the encrypted code from part 2 to ensure that it worked properly and print the result.  Make the result print in a friendly format that is easy for the user to understand. (5pt)
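
# A possible completion for problem 3 (not in the original file; assumes
# decode.decode with the same key inverts encode.encode):
print("\n Problem 3")

message3 = decode.decode(encryption_key.key, message2)

print("The decrypted name is \"" + message3 + "\"")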
Code example #57
File: pymsasid.py Project: autoscatto/Disasma
 def decode(self):
     return dec.decode(self)
Code example #58
def text_detection(image, east, min_confidence, width, height):
    # load the input image and grab the image dimensions
    image = cv2.imread(image)
    orig = image.copy()
    (origHeight, origWidth) = image.shape[:2]

    # set the new width and height and then determine the ratio in change
    # for both the width and height
    (newW, newH) = (width, height)
    ratioWidth = origWidth / float(newW)
    ratioHeight = origHeight / float(newH)

    # resize the image and grab the new image dimensions
    image = cv2.resize(image, (newW, newH))
    (imageHeight, imageWidth) = image.shape[:2]

    # define the two output layer names for the EAST detector model that
    # we are interested -- the first is the output probabilities and the
    # second can be used to derive the bounding box coordinates of text
    layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]

    # load the pre-trained EAST text detector
    # print("[INFO] loading EAST text detector...")
    net = cv2.dnn.readNet(east)

    # construct a blob from the image and then perform a forward pass of
    # the model to obtain the two output layer sets
    blob = cv2.dnn.blobFromImage(image,
                                 1.0, (imageWidth, imageHeight),
                                 (123.68, 116.78, 103.94),
                                 swapRB=True,
                                 crop=False)

    start = time.time()
    net.setInput(blob)
    (scores, geometry) = net.forward(layerNames)
    end = time.time()

    # show timing information on text prediction
    # print("[INFO] text detection took {:.6f} seconds".format(end - start))

    # NMS on the the unrotated rects
    confidenceThreshold = min_confidence
    nmsThreshold = 0.4

    # decode the blob info
    (rects, confidences, baggage) = decode(scores, geometry,
                                           confidenceThreshold)
    # print(len(rects))
    offsets = []
    thetas = []
    for b in baggage:
        offsets.append(b['offset'])
        thetas.append(b['angle'])

    ##########################################################

    # functions = [nms.felzenszwalb.nms, nms.fast.nms, nms.malisiewicz.nms]
    functions = [nms.felzenszwalb.nms]
    # print("[INFO] Running nms.boxes . . .")
    boxes = []
    for i, function in enumerate(functions):

        start = time.time()
        indicies = nms.boxes(rects,
                             confidences,
                             nms_function=function,
                             confidence_threshold=confidenceThreshold,
                             nsm_threshold=nmsThreshold)
        end = time.time()
        # print(indicies)
        indicies = np.array(indicies).reshape(-1)
        # print(indicies)
        drawrects = np.array(rects)[indicies]
        # print(drawrects)
        name = function.__module__.split('.')[-1].title()
        # print("[INFO] {} NMS took {:.6f} seconds and found {} boxes".format(name, end - start, len(drawrects)))

        drawOn = orig.copy()
        drawBoxes(drawOn, drawrects, ratioWidth, ratioHeight, (0, 255, 0), 2)

        # title = "nms.boxes {}".format(name)
        # cv2.imshow(title,drawOn)
        # cv2.moveWindow(title, 150+i*300, 150)

    # cv2.waitKey(0)

    # convert rects to polys
    polygons = utils.rects2polys(rects, thetas, offsets, ratioWidth,
                                 ratioHeight)
    # print(len(polygons[0][0]))

    # print("[INFO] Running nms.polygons . . .")

    for i, function in enumerate(functions):

        start = time.time()
        indicies = nms.polygons(polygons,
                                confidences,
                                nms_function=function,
                                confidence_threshold=confidenceThreshold,
                                nsm_threshold=nmsThreshold)
        end = time.time()

        indicies = np.array(indicies).reshape(-1)
        # print(indicies)
        drawpolys = np.array(polygons)[indicies]
        # print(drawpolys)
        name = function.__module__.split('.')[-1].title()

        # print("[INFO] {} NMS took {:.6f} seconds and found {} boxes".format(name, end - start, len(drawpolys)))

        drawOn = orig.copy()
        drawPolygons(drawOn, drawpolys, ratioWidth, ratioHeight, (0, 255, 0),
                     2)

        # title = "nms.polygons {}".format(name)
        # cv2.imshow(title,drawOn)
        # cv2.moveWindow(title, 150+i*300, 150)

    # cv2.waitKey(0)
    return drawpolys
Code example #59
	def testDecode(self):
		clearText = 'the quick vsjdjshsd'
		whitespace = self.words_to_whitespace(clearText)
		self.assertEqual(decode(StringIO(whitespace)), clearText)
Code example #60
def weight_search(params, num=0, verbose=False):
    global global_params
    print(params)
    sys.stdout.flush()

    # Parse params
    min_diff = params[0]
    history = int(params[1])
    num_layers = int(params[2])
    is_weight = params[3]
    features = params[4]

    warnings.filterwarnings("ignore", message="tick should be an int.")

    max_len = 30
    section = [0, max_len]

    # Load model
    model = model_dict['model']
    sess = model_dict['sess']

    # Get weight_model data
    pkl = data_dict['blending_data']

    X = pkl['X']
    Y = pkl['Y']
    D = pkl['D']
    max_history = pkl['history']
    features_available = pkl['features']
    with_onsets = pkl['with_onsets']

    # Filter data for min_diff
    X, Y = filter_data_by_min_diff(
        X, Y,
        np.maximum(D[:, 0], D[:, 1]) if with_onsets else D, min_diff)
    if len(X) == 0:
        print("No training data generated.")
        sys.stdout.flush()
        return 0.0

    # Filter X for desired input fields
    X = filter_X_features(X, history, max_history, features,
                          features_available, with_onsets)

    # Ablate X
    X = ablate(X, global_params['ablate'], with_onsets=with_onsets)

    history = min(history, max_history)
    if features and not features_available:
        features = False

    # Train weight model
    print("Training weight model")
    sys.stdout.flush()
    layers = []
    for i in range(num_layers):
        layers.append(10 if with_onsets else 5)

    weight_model = train_model(X,
                               Y,
                               layers=layers,
                               weight=is_weight,
                               with_onsets=with_onsets)

    # Save model
    global most_recent_model
    most_recent_model = {
        'model': weight_model,
        'history': history,
        'features': features,
        'weight': is_weight,
        'with_onsets': with_onsets,
        'ablate': global_params['ablate']
    }

    weight_model_name = get_filename(min_diff, history, num_layers, features,
                                     with_onsets, is_weight,
                                     global_params['step'])

    # Write out weight model
    with open(os.path.join(global_params['model_out'], weight_model_name),
              "wb") as file:
        pickle.dump(most_recent_model, file)

    # Evaluation
    results = {}
    frames = np.zeros((0, 3))
    notes = np.zeros((0, 3))

    for filename in sorted(glob.glob(os.path.join(data_dict['valid'],
                                                  "*.mid"))):
        print(filename)
        sys.stdout.flush()

        if global_params['step'] == 'beat':
            data = DataMapsBeats()
            data.make_from_file(filename,
                                global_params['beat_gt'],
                                global_params['beat_subdiv'],
                                section,
                                acoustic_model=global_params['acoustic'],
                                with_onsets=with_onsets)
        else:
            data = DataMaps()
            data.make_from_file(filename,
                                global_params['step'],
                                section,
                                acoustic_model=global_params['acoustic'],
                                with_onsets=with_onsets)

        # Decode
        input_data = data.input
        if with_onsets:
            input_data = np.zeros(
                (data.input.shape[0] * 2, data.input.shape[1]))
            input_data[:data.input.shape[0], :] = data.input[:, :, 0]
            input_data[data.input.shape[0]:, :] = data.input[:, :, 1]

        # Add noise
        input_data = add_noise_to_input_data(input_data, data_dict['noise'],
                                             data_dict['noise_gauss'])

        pr, priors, weights, combined_priors = decode(
            input_data,
            model,
            sess,
            branch_factor=5,
            beam_size=50,
            weight=[[0.8], [0.2]],
            out=None,
            hash_length=12,
            weight_model=weight_model,
            verbose=verbose,
            weight_model_dict=most_recent_model)

        # Evaluate
        if with_onsets:
            target_data = pm.PrettyMIDI(filename)
            corresp = data.corresp
            [P_f, R_f,
             F_f], [P_n, R_n, F_n
                    ], _, _ = compute_eval_metrics_with_onset(pr,
                                                              corresp,
                                                              target_data,
                                                              double_roll=True,
                                                              min_dur=0.05,
                                                              section=section)

        else:
            if global_params['step'] in [
                    'quant', 'event', 'quant_short', 'beat'
            ]:
                pr = convert_note_to_time(pr,
                                          data.corresp,
                                          data.input_fs,
                                          max_len=max_len)

            data = DataMaps()
            if global_params['step'] == "20ms" or with_onsets:
                data.make_from_file(filename,
                                    "20ms",
                                    section=section,
                                    with_onsets=False,
                                    acoustic_model="kelz")
            else:
                data.make_from_file(filename,
                                    "time",
                                    section=section,
                                    with_onsets=False,
                                    acoustic_model="kelz")
            target = data.target

            #Evaluate
            P_f, R_f, F_f = compute_eval_metrics_frame(pr, target)
            P_n, R_n, F_n = compute_eval_metrics_note(pr, target, min_dur=0.05)

        print(
            f"Frame P,R,F: {P_f:.3f},{R_f:.3f},{F_f:.3f}, Note P,R,F: {P_n:.3f},{R_n:.3f},{F_n:.3f}"
        )
        sys.stdout.flush()

        frames = np.vstack((frames, [P_f, R_f, F_f]))
        notes = np.vstack((notes, [P_n, R_n, F_n]))

        if F_n < global_params['early_exit']:
            print("Early stopping, F-measure too low.")
            sys.stdout.flush()
            return 0.0

    P_f, R_f, F_f = np.mean(frames, axis=0)
    P_n, R_n, F_n = np.mean(notes, axis=0)

    print(
        f"Frame P,R,F: {P_f:.3f},{R_f:.3f},{F_f:.3f}, Note P,R,F: {P_n:.3f},{R_n:.3f},{F_n:.3f}"
    )
    print(str(F_n) + ": " + str(params))
    sys.stdout.flush()
    return -F_n