def main():
    banner()
    start         = timer()
    dork          = 'inurl:"/component/tags/"'
    file_string   = '######## By MakMan ########\n'
    final_result  = []
    count         = 0
    print( '[+] Starting dork scanner for : ' + dork)
    sys.stdout.flush()
    #Calling dork_scanner from makman.py for 6 pages and 6 parallel processes
    search_result = dork_scanner( dork, '6', '6' )
    print( '[+] Total URLs found : ' + str( len( search_result ) ) )
    with open( 'urls.txt', 'a', encoding = 'utf-8' ) as ufile:
        ufile.write( '\n'.join( search_result ) )
    print( '[+] URLs written to urls.txt' )
    print( '\n[+] Trying Joomla SQL Injection exploit on ' + str( len( search_result ) ) + ' urls' )
    sys.stdout.flush()
    #Running 8 parallel processes for the exploitation
    with Pool(8) as p:
        final_result.extend( p.map( inject, search_result ) )
    for i in final_result:
        if not 'Not Vulnerable' in i and not 'Bad Response' in i:
            count += 1
            file_string += '\n'.join( i.split('~:')[:7] ) + '\n\n\n'
    #Writing vulnerable URLs in a file makman.txt
    with open( 'makman.txt', 'a', encoding = 'utf-8' ) as rfile:
        rfile.write( file_string )
    print( 'Total URLs Scanned    : ' + str( len( search_result ) ) )
    print( 'Vulnerable URLs Found : ' + str( count ) )
    print( 'Script Execution Time : ' + str ( timer() - start ) + ' seconds' )
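# Note: the example above imports dork_scanner() and inject() from makman.py,
# which is not shown here. Hypothetical stubs matching how the call sites use
# them (assumptions for illustration only, not the real makman.py code):
def dork_scanner(dork, pages, processes):
    """Assumed signature: return a list of URLs found for the given dork."""
    return []

def inject(url):
    """Assumed signature: return a '~:'-separated result string, or a string
    containing 'Not Vulnerable' / 'Bad Response' when the exploit fails."""
    return 'Not Vulnerable'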
Example #2
def main():

    # ********************************************************************
    # read and test input parameters
    # ********************************************************************

    print('Parallel Research Kernels version ') #, PRKVERSION
    print('Python Dense matrix-matrix multiplication: C = A x B')

    if len(sys.argv) != 3:
        print('argument count = ', len(sys.argv))
        sys.exit("Usage: ./transpose <# iterations> <matrix order>")

    iterations = int(sys.argv[1])
    if iterations < 1:
        sys.exit("ERROR: iterations must be >= 1")

    order = int(sys.argv[2])
    if order < 1:
        sys.exit("ERROR: order must be >= 1")

    print('Number of iterations = ', iterations)
    print('Matrix order         = ', order)

    # ********************************************************************
    # ** Allocate space for the input and transpose matrix
    # ********************************************************************

    A = numpy.fromfunction(lambda i,j: j, (order,order), dtype=float)
    B = numpy.fromfunction(lambda i,j: j, (order,order), dtype=float)
    C = numpy.zeros((order,order))

    for k in range(0,iterations+1):

        if k<1: t0 = timer()

        #C += numpy.matmul(A,B) # requires Numpy 1.10 or later
        C += numpy.dot(A,B)

    t1 = timer()
    dgemm_time = t1 - t0

    # ********************************************************************
    # ** Analyze and output results.
    # ********************************************************************

    checksum = numpy.linalg.norm(numpy.reshape(C,order*order),ord=1)

    ref_checksum = 0.25*order*order*order*(order-1.0)*(order-1.0)
    ref_checksum *= (iterations+1)

    epsilon=1.e-8
    if abs((checksum - ref_checksum)/ref_checksum) < epsilon:
        print('Solution validates')
        avgtime = dgemm_time/iterations
        nflops = 2.0*order*order*order
        print('Rate (MF/s): ',1.e-6*nflops/avgtime, ' Avg time (s): ', avgtime)
    else:
        print('ERROR: Checksum = ', checksum,', Reference checksum = ', ref_checksum,'\n')
        sys.exit("ERROR: solution did not validate")
Example #3
def main():
    start = timer()
    # Calling dork_scanner from makman.py for 15 pages and 4 parallel processes
    search_result = dork_scanner('intext:Developed by : iNET inurl:photogallery.php', '15', '4')
    file_string = '######## By MakMan ########\n'
    final_result = []
    count = 0
    # Running 8 parallel processes for the exploitation
    with Pool(8) as p:
        final_result.extend(p.map(inject, search_result))
    for i in final_result:
        if not 'Not Vulnerable' in i and not 'Bad Response' in i:
            count += 1
            print('------------------------------------------------\n')
            print('Url     : http:' + i.split(':')[1])
            print('User    : ' + i.split(':')[2])
            print('Version : ' + i.split(':')[3])
            print('------------------------------------------------\n')
            file_string = file_string + 'http:' + i.split(':')[1] + '\n' + i.split(':')[2] + '\n' + i.split(':')[3] + '\n\n\n'
    # Writing vulnerable URLs in a file makman.txt
    with open('makman.txt', 'a', encoding='utf-8') as file:
        file.write(file_string)
    print('Total URLs Scanned    : %s' % len(search_result))
    print('Vulnerable URLs Found : %s' % count)
    print('Script Execution Time : %s' % (timer() - start,))
Example #4
def trycontext(n=100000):
    while True:
        from time import perf_counter as timer
        from decimal import Decimal as D  # assumption: D is undefined in this snippet; Decimal fits its use below

        i = iter(range(n))
        t1 = timer()
        while True:
            try:
                next(i)
            except StopIteration:
                break
        t2 = timer()
        # print("small try", t2 - t1)
        i = iter(range(n))
        t3 = timer()
        try:
            while True:
                next(i)
        except StopIteration:
            pass
        t4 = timer()

        tsmall = D(t2) - D(t1)
        tbig = D(t4) - D(t3)

        fastest = "Small Try" if tsmall < tbig else "Big Try"

        # noinspection PyStringFormat
        print("small try %.8f" % tsmall, "big try %.8f" % tbig, "fastest:", fastest,
              "%%%.1f" % ((tsmall - tbig) / tsmall * 100))
Example #5
def simulator():
    res_slow = []
    res_fast = []

    clusters = []
    for size in range(2, 201):
        clusters.append(gen_random_clusters(size))

    # slow
    for clist in clusters:
        slow_start = timer()
        slow(clist)
        slow_end = timer()
        res_slow.append(slow_end - slow_start)

    # fast
    for clist in clusters:
        fast_start = timer()
        fast(clist)
        fast_end = timer()
        res_fast.append(fast_end - fast_start)


    x_axis = [num for num in range(2, 201)]
    plt.title('Comparison of efficiency in desktop python environment')
    plt.xlabel('size of random clusters')
    plt.ylabel('running time (seconds)')
    plt.plot(x_axis, res_slow, '-b', label='slow_closest_pair', linewidth=2)
    plt.plot(x_axis, res_fast, '-r', label='fast_closest_pair', linewidth=2)
    plt.legend(loc='upper left')
    plt.show()
Example #6
def main():
    print (Style.BRIGHT)
    banner()
    count        = 0
    start        = timer()
    file_string  = ''
    final_result = []
    # Make sure urls.txt is in the same directory
    try:
        with open( 'urls.txt' ) as f:
            search_result = f.read().splitlines()
    except FileNotFoundError:
        print( 'urls.txt not found in the current directory. Create your own or download from here. http://makman.tk/vb/urls.txt\n' )
        sys.exit(0)
    search_result = list( set( search_result ) )
    print (' [+] Executing Exploit for ' + Fore.RED + str( len( search_result ) ) + Fore.WHITE + ' Urls.\n')
    with Pool(8) as p:
        final_result.extend( p.map( inject, search_result ) )
    for i in final_result:
        if not 'Not Vulnerable' in i and not 'Bad Response' in i:
            count += 1
            file_string += '\n'.join( part.strip() for part in i.split( ':::' )[:4] )
            file_string = file_string + '\n------------------------------------------\n'
    # Writing Result in a file makman.txt
    with open( 'makman.txt', 'a', encoding = 'utf-8' ) as rfile:
        rfile.write( file_string )
    print( 'Total URLs Scanned    : ' + str( len( search_result ) ) )
    print( 'Vulnerable URLs Found : ' + str( count ) )
    print( 'Script Execution Time : ' + str ( timer() - start ) + ' seconds' )
Example #7
 def time_test(container, key_count, key_range, randrange=randrange, timer=timer):
     t1 = timer()
     for _i in range(key_count):
         keys = test_key % randrange(key_range)
         container[keys]
     t2 = timer()
     
     return t2 - t1
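# Hedged usage sketch for time_test() above: it assumes a module-level
# `test_key` format string (not shown in the snippet) and any mapping-like
# container; the names below are assumptions for illustration.
from random import randrange
from time import perf_counter as timer

test_key = 'key-%d'
sample_container = {test_key % i: i for i in range(1000)}
print('lookup time: %.6f s' % time_test(sample_container, 10000, 1000))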
Example #8
def _get_fps(self, frame):
    elapsed = int()
    start = timer()
    preprocessed = self.framework.preprocess(frame)
    feed_dict = {self.inp: [preprocessed]}
    net_out = self.sess.run(self.out, feed_dict)[0]
    processed = self.framework.postprocess(net_out, frame, False)
    return timer() - start
Example #9
def do_parse_test(times, func=quick_strptime):
    t1 = timer()
    stripped = map(str.strip, times)
    raw = list(takewhile(bool, stripped))
    fmt = ParseDateFormat(raw[0])
    list(func(date, fmt) for date in raw)
    t2 = timer()
    return t2 - t1
Example #10
def test_map(times, mock=quick_strptime, repeat=repeat):
    t1 = timer()
    stripped = map(str.strip, times)
    raw = list(takewhile(bool, stripped))
    fmt = ParseDateFormat(raw[0])
    list(map(mock, raw, repeat(fmt)))
    t2 = timer()
    return t2 - t1
Example #11
def test( *args ):
    start = timer()
    result = [ url for url in sitemap( *args )]
    elapsed = timer() - start
    print( 'Result: ', end='' )
    for r in result:
        print( GET_DUMMY.search( r ).group(), end=' ' )
    print()
    print( 'Elapsed:', elapsed * 1000, 'milliseconds' )
Example #12
def do_superfast_test(times, special_map=special_handler_map):
    t1 = timer()
    stripped = map(str.strip, times)
    raw = list(takewhile(bool, stripped))
    fmt = ParseDateFormat(raw[0])
    func = special_map[fmt]
    list(map(func, raw))
    t2 = timer()
    return t2 - t1
Example #13
def main(argv):
    # Read Config
    starttime = timer()
    iniFile = "input/halo_makeDerivs.ini"
    Config = ConfigParser.SafeConfigParser()
    Config.optionxform = str
    Config.read(iniFile)
    paramList = []
    fparams = {}
    cosmo = {}
    stepSizes = {}
    fparams['hmf_model'] = Config.get('general','hmf_model')
    fparams['exp_name'] = Config.get('general','exp_name')
    for (key, val) in Config.items('hmf'):
        if ',' in val:
            param, step = val.split(',')
            paramList.append(key)
            fparams[key] = float(param)
            stepSizes[key] = float(step)
        else:
            fparams[key] = float(val)
    # Make a separate list for cosmology to add to massfunction
    for (key, val) in Config.items('cosmo'):
        if ',' in val:
            param, step = val.split(',')
            paramList.append(key)
            cosmo[key] = float(param)
            stepSizes[key] = float(step)
        else:
            cosmo[key] = float(val)
    fparams['cosmo'] = cosmo

    for paramName in paramList:
        #Make range for each parameter
        #First test: x2 the range, x0.01 the stepsize
        if paramName in cosmo:
            start = fparams['cosmo'][paramName] - stepSizes[paramName]
            end   = fparams['cosmo'][paramName] + stepSizes[paramName]
        else:
            start = fparams[paramName] - stepSizes[paramName]
            end   = fparams[paramName] + stepSizes[paramName]
        width = stepSizes[paramName]*0.01
        paramRange = np.arange(start,end+width,width)
        for paramVal in paramRange: 
            if paramName in cosmo:
                params = fparams.copy()
                params['cosmo'][paramName] = paramVal
            else:
                params = fparams.copy()
                params[paramName] = paramVal
            print paramName,paramVal
            N = clusterNum(params)
            np.savetxt("output/step/"+fparams['exp_name']+'_'+fparams['hmf_model']+"_"+paramName+"_"+str(paramVal)+".csv",N,delimiter=",")

        #----------------------------
        endtime = timer()
        print "Time elapsed: ",endtime-starttime
Example #14
def run(cmd, timeout_sec):
    start = timer()
    with Popen(cmd, shell=True, stdout=PIPE, preexec_fn=os.setsid) as process:
        try:
            output = process.communicate(timeout=timeout_sec)[0]
        except TimeoutExpired:
            os.killpg(process.pid, signal.SIGINT) # send signal to the process group
            output = process.communicate()[0]
            print("DEBUG: process timed out: "+cmd)
    print('DEBUG: Elapsed seconds: {:.2f}'.format(timer() - start))
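# Hedged usage sketch for run() above; the snippet assumes these imports and a
# POSIX system (os.setsid / os.killpg are not available on Windows).
from subprocess import Popen, PIPE, TimeoutExpired
from time import perf_counter as timer
import os
import signal

if __name__ == '__main__':
    # Should print the timeout debug line after roughly two seconds.
    run('sleep 10', timeout_sec=2)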
Example #15
def timing_middleware(next, root, info, **args):
    start = timer()
    return_value = next(root, info, **args)
    duration = timer() - start
    logger.debug("{parent_type}.{field_name}: {duration} ms".format(
        parent_type=root._meta.name if root and hasattr(root, '_meta') else '',
        field_name=info.field_name,
        duration=round(duration * 1000, 2)
    ))
    return return_value
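# Hedged usage sketch: graphene accepts middleware functions with this
# signature via Schema.execute(). The Query type below is an assumption; only
# timing_middleware comes from the snippet above.
import graphene

class Query(graphene.ObjectType):
    hello = graphene.String()

    def resolve_hello(root, info):
        return 'world'

schema = graphene.Schema(query=Query)
result = schema.execute('{ hello }', middleware=[timing_middleware])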
Example #16
def search(identity, num_of_imgs):
    
    start = timer()
    
    #initialize the queue for multithreading
    queue = Queue.Queue(maxsize=0)
    
    #create query
    query = identity['name'].replace('_', ' ')
    query = query.split()
    query = '+'.join(query)
    
    #build a dictionary with search info
    data = {
            'query': query,
            'label': identity['label'],
            'num_of_imgs': num_of_imgs,
            'header': {'User-Agent': 'Mozilla/5.0'} 
    }
    
    #start threads
    threads = []
    if use_bing == 'true':
        
        bing_search = Thread(target=fetcher, args=(queue, 'bing', data))
        threads.append(bing_search)
        
    if use_aol == 'true':
        
        aol_search_1 = Thread(target=fetcher, args=(queue, 'aol1', data))
        aol_search_2 = Thread(target=fetcher, args=(queue, 'aol2', data))
        threads.append(aol_search_1)
        threads.append(aol_search_2)
        
    if use_yahoo == 'true':
        
        yahoo_search = Thread(target=fetcher, args=(queue, 'yahoo', data))
        threads.append(yahoo_search)
    
    
    for t in threads:
        t.start()
    
    for t in threads:
        t.join()
    
    #if queue is not empty, save urls to db (status = OK), else end script (STATUS = ERR_F)
    if not queue.empty():
        queue_size = queue.qsize()
        insert_urls(queue, identity, queue_size)
        print 'Collector terminated for identity: ' + identity['name'] + ' - Number of images: ' \
            + str(queue_size) + ' - Elapsed time: ' + str((timer() - start))
    else:
        update_identity_status(identity, 'ERR_F')
        print 'Collector terminated for identity: ' + identity['name'] + ' - Elapsed time: ' + str((timer() - start))     
Example #17
def benchmark(client, fn, num_transactions=200, num_workers=10, *args, **kwargs):
    start = timer()
    total_latency = 0

    with concurrent.futures.ThreadPoolExecutor(max_workers=num_workers) as executor:
        futures = [executor.submit(time_transaction(fn), client, *args, **kwargs) for _ in range(num_transactions)]

        latencies = [f.result() for f in concurrent.futures.as_completed(futures)]

        total_throughput = num_transactions / (timer() - start)

        return (latencies, total_throughput)
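# The time_transaction() helper used above is not shown in this snippet;
# Example #21 below suggests it wraps a transaction function and returns its
# latency. A hypothetical sketch under that assumption:
from time import perf_counter as timer

def time_transaction(fn):
    def timed(client, *args, **kwargs):
        start = timer()
        txn = client.transaction()      # assumed client API, as in Example #21
        fn(txn, *args, **kwargs)
        txn.commit()
        return timer() - start          # latency in seconds
    return timed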
Example #18
def inplace_speed():
    from time import perf_counter as timer
    from decimal import Decimal as D  # assumption: D is undefined in this snippet; Decimal fits its use below
    _map = map
    n = D(1)
    i = D(1)
    incr = i.__add__

    loops = 100000 * 100

    test_data = tuple(D(1) for _ in range(loops))

    t1 = timer()
    for n in test_data:
        n = incr(n)
    t2 = timer()

    t3 = timer()
    for n in test_data:
        n += i
    t4 = timer()

    t5 = timer()
    for num in _map(incr, test_data):
        a = num
    t6 = timer()

    t7 = timer()
    for num in test_data:
        a = num + i

    t8 = timer()

    print("Incr:", t2 - t1, "normal:", t4 - t3, 'map:', t6 - t5, "comp", t8 - t7)
Example #19
def get_result(worker_name, uuid, timeout=None, sg_url=SG_URL):
    """
    Wait for processing results and return RAW data structure.

    :param timeout: How many seconds to wait for a result.
    :param worker_name: name of the worker
    :param uuid: UUID of the task
    :param sg_url: URL of the service gateway
    """
    logger = getLogger(__name__)
    status = None
    logger.info("Getting result of processing request %s for %s",
                uuid, worker_name)
    status_url = urljoin(sg_url, "{}/status".format(worker_name))
    logger.debug("Status URL is %s", status_url)
    starttime = timer()
    logger.debug("Started at %s", starttime)
    if timeout:
        timeout = int(timeout)
        logger.info("Using %s as timeout value", timeout)
    while status != 'SUCCESS':
        logger.info("Waiting for success state")
        sleep(SUCCESS_WAIT_ITER_TIME)
        r_val = requests.get(status_url, params={'uuid': uuid})
        if r_val.status_code != 502:  # Ignore Bad gateway return codes...
            r_val.raise_for_status()
        else:
            logger.warning("BAD gateway response: %s", r_val)
        status = r_val.json()['status']
        logger.debug(r_val.text)
        logger.info("Status is %s", status)
        if status == "FAILURE":
            raise ServerError("Error: Remote server says: {}".
                              format(r_val.json()['result']['message']))
        elif status == "PROGRESS":
            progress = r_val.json()['result']['current']
            logger.info("Progress stated at %s%%", progress)
        elif status in ['EXPIRED', 'REVOKED']:
            raise ServerError("Error with task status: Contact administrator")

        latency = int(timer() - starttime)
        logger.info("Current latency is %s", latency)
        if timeout and latency > timeout:
            raise TimeOutError("Process did not succeed in required time")

    status_result = r_val.json()
    logger.debug("Result string is %s", status_result)
    return status_result
Example #20
def main():
    start = timer()
    # Empty List to store the Urls
    result = []
    arguments = docopt(__doc__, version='Google scraper')
    search = arguments['<search>']
    pages = arguments['<pages>']
    # Calling the function [pages] times.
    for page in range(0, int(pages)):
        # Getting the URLs in the list
        result.extend(get_urls(search, str(page * 10)))
    # Removing Duplicate URLs
    result = list(set(result))
    print(*result, sep='\n')
    print('\nTotal URLs Scraped : %s ' % str(len(result)))
    print('Script Execution Time : %s ' % (timer() - start,))
Example #21
 def wrapped(client, retry=False, *args, **kwargs):
     start = timer()
     success = False
     while success == False:
         try:
             txn = client.transaction()
             fn(txn, *args, **kwargs)
             txn.commit()
             success = True
         except TransactionFailureException:
             if not retry:
                 break
             else:
                 continue
     latency = timer() - start
     return latency
Example #22
def main():
    parser = ArgumentParser()

    parser.add_argument(
        '-s', '--host', default='http://127.0.0.1', help='tracker host')
    parser.add_argument(
        '-p', '--port', default=6000, help='tracker host port', type=int)
    parser.add_argument(
        '-n', '--users', default=1, help='how many users to create', type=int)

    options = parser.parse_args()

    urlbase = '{}:{}/'.format(options.host, options.port)

    session = requests.Session()

    for user in range(options.users):
        # username is just a random number
        new_username = str(random.randint(1e6, 1e7))
        # all new users have one password for simple debug
        new_password = '******'

        resp = session.post(urljoin(urlbase, 'auth/signup'),
                            json={'username': new_username,
                                  'password': new_password})

        resp.raise_for_status()

        token = resp.json()['access_token']
        session.auth = TrackerAuth(token)

        fake_data = create_fake_data()

        ts = timer()
        resp = session.post(
            urljoin(urlbase, 'events'),
            headers={'Content-Type': 'application/json'},
            data=json.dumps(fake_data, default=default))
        delta = timer() - ts

        resp.raise_for_status()

        print('{} events in {:.2f}s @ {:.2f} rps'
              .format(len(fake_data), delta, len(fake_data) / delta))
Example #23
def main():
    start = timer()
    result = []
    arguments = docopt(__doc__, version='MakMan Google Scrapper & Mass Exploiter')
    search = arguments['<search>']
    pages = arguments['<pages>']
    processes = int(arguments['<processes>'])
    ####Changes for Multi-Processing####
    make_request = partial(get_urls, search)
    pagelist = [str(x * 10) for x in range(0, int(pages))]
    with Pool(processes) as p:
        tmp = p.map(make_request, pagelist)
    for x in tmp:
        result.extend(x)
    ####Changes for Multi-Processing####
    result = list(set(result))
    print(*result, sep='\n')
    print('\nTotal URLs Scraped : %s ' % str(len(result)))
    print('Script Execution Time : %s ' % (timer() - start,))
Example #24
def search(grid, type, line):

    begin_search = timer()

    start, goal = get_start_and_goal(line, grid)
    # start, goal = [line, 0], [line + 20, 3320]

    if type == 'A':
        print 'A*..'
        a = astar.Astar(grid)
        path, map = a.pathfind(start, goal)

    elif type == 'jps':
        print 'A* + JPS...'
        j = jps.Jps(grid)
        path, map = j.pathfind(start, goal)

    print ' => path found in ' + str(timer() - begin_search) + ' s'

    return path, map
Example #25
def repeat(function, kwargs, title, count, verbose=True):

    t0 = timer()
    id_list = []
    while len(id_list) < count:
        func = deepcopy(function)
        kw = deepcopy(kwargs)
        id_list.append(func(**kw))
    t1 = timer()
    if verbose:
        print('%s inits of %s in %s secs' % (count, title, (t1 - t0)))
    rand_int = randomlab.random_integer(0, (count - 1))
    rand_item = id_list[rand_int]
    try:
        rand_str = str(rand_item)
        if verbose:
            print('Value of item[%s] of %s performance test is: %s' % (rand_int, title, rand_str))
    except:
        if verbose:
            print('Value returned by %s cannot be coerced into a string.' % title)
    return rand_item
Example #26
def downloader(identity, DATA_DIR):

    start = timer()
    # take image urls for an identity from db
    images = select_urls(identity)

    # start multithreading
    queue = Queue.Queue(maxsize=0)
    download_master(images, queue, identity)

    # if queue is not empty, save images to disk (status = DONE), else end script (STATUS = ERR_D)
    if not queue.empty():
        queue_size = queue.qsize()
        print identity["name"] + " - Saving to disk.."
        save(queue, identity, DATA_DIR)
        print "Donwload terminated for identity: " + identity["name"] + " Number of images saved: " + str(
            queue_size
        ) + " - Elapsed time: " + str((timer() - start))
    else:
        update_identity_status(identity, "ERR_D")
        print "Donwload failed for identity: " + identity["name"] + " - Elapsed time: " + str((timer() - start))
Example #27
def execute_size_euler(size, ws, nr, results):
    euler_graph = create_euler(size, 30)
    start = timer()
    euler(euler_graph, 0, [])
    time = timer()-start
    results[0].append(time)
    #ws["B"+str(nr)] = time

    euler_graph = create_euler(size, 50)
    start = timer()
    euler(euler_graph, 0, [])
    time = timer()-start
    results[1].append(time)
    #ws["C"+str(nr)] = time

    euler_graph = create_euler(size, 70)
    start = timer()
    euler(euler_graph, 0, [])
    time = timer()-start
    results[2].append(time)
Example #28
def execute_size_hamilton(size, ws, nr, results):
    hamilton_graph = create_hamilton(size, 30)
    start = timer()
    result = hamilton(hamilton_graph, 0, 1, [1] + [0]*(len(hamilton_graph)-1))
    if result=="False":
        print "LOL"
    time = timer()-start
    results[3].append(time)
    #ws["G"+str(nr)] = time

    hamilton_graph = create_hamilton(size, 50)
    start = timer()
    hamilton(hamilton_graph, 0, 1, [1] + [0]*(len(hamilton_graph)-1))
    time = timer()-start
    results[4].append(time)
    #ws["H"+str(nr)] = time

    hamilton_graph = create_hamilton(size, 70)
    start = timer()
    hamilton(hamilton_graph, 0, 1, [1] + [0]*(len(hamilton_graph)-1))
    time = timer()-start
    results[5].append(time)
Example #29
def generate_model_exons():
    tic = timer()
    cur = _con.cursor()
    genes = get_all_genes(cur)
    gene_count = 0
    ex_count = 0

    # delete old model exons/transcript if they exist
    cur.execute('''
        DELETE FROM exon
        WHERE transcript_id IN (
            SELECT id
            FROM transcript
            WHERE is_model=true
        );
        ''')
    cur.execute(
        '''
        DELETE FROM transcript WHERE is_model=true;
        ''', )

    for (gene_name, chrom, strand), gene_id in genes.items():
        cur.execute(
            '''
            SELECT chrom_start, chrom_end
            FROM exon
            WHERE transcript_id IN (
                SELECT id
                FROM transcript
                WHERE gene_id=%s AND is_model=false
            );
            ''', (gene_id, ))
        exon_coords = cur.fetchall()
        new_coords = interval_union(exon_coords)
        start_pos = min(i[0] for i in new_coords)
        end_pos = max(i[1] for i in new_coords)
        if strand == '-':
            new_coords.reverse()

        # insert transcript
        transcript_id = insert_transcript(
            {
                'start': start_pos,
                'end': end_pos,
                'attributes': {}
            },
            gene_id,
            None,
            cur,
            is_model=True)

        # insert exons
        for i, (start, end) in enumerate(new_coords, 1):
            insert_exon(
                {
                    'start': start,
                    'end': end,
                    'attributes': {
                        'exon_number': i
                    }
                }, transcript_id, cur)
            ex_count += 1

        gene_count += 1

    _con.commit()
    cur.close()
    toc = timer()
    print(f'generated\n\t{ex_count} exons\nacross\n\t{gene_count} genes')
    print(f'in {toc-tic} seconds')
Example #30
def join_words_builder(words: List[str]) -> str:
    """Joins words using a StringBuilder"""
    builder = StringBuilder()
    for word in words:
        builder.append(word)
    return builder.to_string()


def join_words_join(words: List[str]) -> str:
    """Joins words using str.join"""
    return ''.join(words)


test_words = ['hehe', 'haha', 'hoho', 'hawhaw', 'ehehe', 'muahahahahhaha'
              ] * 10000000

start = timer()
joined = join_words_cat(test_words)
stop = timer()
print(f'Runtime of join_words_cat was {stop - start:0.4f} seconds')

start = timer()
joined = join_words_builder(test_words)
stop = timer()
print(f'Runtime of join_words_builder was {stop - start:0.4f} seconds')

start = timer()
joined = join_words_join(test_words)
stop = timer()
print(f'Runtime of join_words_join was {stop - start:0.4f} seconds')
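# The timing code above relies on a StringBuilder class and a join_words_cat()
# function that are not shown; minimal sketches of what they presumably do:
from typing import List

class StringBuilder:
    """Accumulates parts in a list and joins them once in to_string()."""
    def __init__(self) -> None:
        self._parts: List[str] = []

    def append(self, part: str) -> None:
        self._parts.append(part)

    def to_string(self) -> str:
        return ''.join(self._parts)

def join_words_cat(words: List[str]) -> str:
    """Joins words by repeated concatenation (the slow baseline being timed)."""
    result = ''
    for word in words:
        result += word
    return result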
Example #31
    def get_magnitude_keys(self):
        return [
            'TICv8_Bmag', 'TICv8_e_Bmag', 'TICv8_Vmag', 'TICv8_e_Vmag',
            'TICv8_umag', 'TICv8_e_umag', 'TICv8_gmag', 'TICv8_e_gmag',
            'TICv8_rmag', 'TICv8_e_rmag', 'TICv8_imag', 'TICv8_e_imag',
            'TICv8_zmag', 'TICv8_e_zmag', 'TICv8_Jmag', 'TICv8_e_Jmag',
            'TICv8_Hmag', 'TICv8_e_Hmag', 'TICv8_Kmag', 'TICv8_e_Kmag',
            'TICv8_TWOMflag', 'TICv8_prox', 'TICv8_w1mag', 'TICv8_e_w1mag',
            'TICv8_w2mag', 'TICv8_e_w2mag', 'TICv8_w3mag', 'TICv8_e_w3mag',
            'TICv8_w4mag', 'TICv8_e_w4mag', 'TICv8_GAIAmag', 'TICv8_e_GAIAmag',
            'TICv8_Tmag', 'TICv8_e_Tmag'
        ]


if __name__ == '__main__':
    t0 = timer()
    cat = catalog()
    t1 = timer()
    print('took', t1 - t0, 's')
    print(cat.data)

# def load(tic_id=None, sector=None, keys=None):
#     f = '/Users/mx/Dropbox (Personal)/Science/TESS/TESS_SC_target_lists/unique_targets_S001-S023_obs_tic_gaia_banyan.csv.gz'
#     df = pd.read_csv(f, dtype=str, usecols=keys)

#     #::: filter by tic_id(s), select only requested rows
#     if tic_id is not None:
#         tic_id = [str(int(t)) for t in np.atleast_1d(tic_id)]
#         df = df.loc[df['TIC_ID'].isin(list(tic_id))]

#     #::: filter by sector(s), select only requested rows
Example #32
from pygame import *
from random import *
from time import time as timer

win_width = 1000
win_height = 400

win = display.set_mode((win_width, win_height))
display.set_caption('Plants')

ImegHero = 'Woodman.png'
ImeBack = 'Forest.png'
ImeAnemi = 'BigCliz.png'
img_bullet = 'Ball.png'

TimeNow = timer()
TimeHit = timer()

# clock = time.Clock()


class GameSprite(sprite.Sprite):
    def __init__(self, PLimage, playX, playY, sizeX, sizeY, speed):
        sprite.Sprite.__init__(self)

        self.image = transform.scale(image.load(PLimage), (sizeX, sizeY))
        self.speed = speed

        self.rect = self.image.get_rect()
        self.rect.x = playX
        self.rect.y = playY
Example #33
    existences[0] = tm.initial_cardinality
    ids = np.zeros((nsamples, nobjects), dtype=bool)
    tracker_structure = (samples, existences, ids)

    new_samples = np.empty(samples.shape)
    new_exist = np.empty(existences.shape)
    new_ids = np.empty(ids.shape, dtype=bool)
    new_tracker_structure = (new_samples, new_exist, new_ids)

    # unique labels, because id columns will be reused
    # assigned at first time object is recognized
    unique_ids = np.zeros((nobjects, ), dtype=int)
    unique_id_count = 0

    worst_time = 0.
    first_time = timer()

    for time in range(0, maxtime, skip):
        starttime = timer()
        measurements = data[times[time]:times[time + 1]]
        n_measurements = len(measurements)

        # predict
        tm.predict(samples)
        existences *= tm.survival(samples)
        #tm.debug(samples[existences > 1e-10])

        # entry
        # so this is hacky... but given the limited number of components
        # and the fact that they aren't easily merged, it is better to not
        # include entry components every single time
Example #34
 def timed_callback(self, name):
     s = timer()
     yield
     self.add(name, timer() - s)
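# timed_callback() above is a generator, so it is presumably meant to be used
# with contextlib.contextmanager. A self-contained sketch of that pattern (the
# Timings class here is an assumption, not the original code):
from contextlib import contextmanager
from time import perf_counter as timer

class Timings:
    def __init__(self):
        self.totals = {}

    def add(self, name, elapsed):
        self.totals[name] = self.totals.get(name, 0.0) + elapsed

    @contextmanager
    def timed_callback(self, name):
        s = timer()
        yield
        self.add(name, timer() - s)

stats = Timings()
with stats.timed_callback('step'):
    sum(range(100000))      # timed work goes here
print(stats.totals)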
Example #35
def gui_loop(device):
    global prev_gbu_counter_change_time
    do_print = True
    print_time = 0.0
    time = timer()
    handle_time = timer()
    write_time_capture = timer()
    prev_gbu_counter_change_time = timer()
    gbu_counter_change_time = timer()
    skip_write = 0
    prev_counter = 0
    send_stream_request_command_once = 1
    # cnt = None
    # prev_cnt = None
    # value = None
    global special_cmd
    # global print_flag

    while True:
        # Reset the counter
        if (do_print):
            print_time = timer()

        # Write to the device
#        if send_stream_request_command_once == 1:
#            send_stream_request_command_once = 0
#            if PRODUCT_ID == PRODUCT_ID_LAP_NEW_CAMERA:
#                print("enforce streaming of data with command 0x82"
# if device is attached enforce streaming of data.
# device.write(WRITE_DATA_CMD_START)

        if special_cmd == 'I':
            if PRODUCT_ID == PRODUCT_ID_STATION:
                WRITE_DATA = WRITE_DATA_CMD_START_0x304
            else:
                WRITE_DATA = WRITE_DATA_CMD_START
            device.write(WRITE_DATA)
            print("special_cmd Start")
            special_cmd = 0
#        elif special_cmd == 'S':
#            WRITE_DATA = WRITE_DATA_CMD_GET_BOARD_TYPE
#            device.write(WRITE_DATA)
#            print("special_cmd CMD_GET_BOARD_TYPE")
#            # print_flag = 1
#            special_cmd = 0
#        elif special_cmd == 'A':
#            WRITE_DATA = WRITE_DATA_CMD_A
#            print("special_cmd A -> keep Alive + fast BLE update (every 20 msec)")
#            special_cmd = 0
#        elif special_cmd == 'M':
#            WRITE_DATA = WRITE_DATA_CMD_M
#            print("special_cmd M -> moderate BLE update rate every 50 mSec")
#            special_cmd = 0
#        elif special_cmd == 'B':
#            WRITE_DATA = WRITE_DATA_CMD_B
#            device.write(WRITE_DATA)
#            print("special_cmd B -> set_BSL_mode  --- this will stop HID communication with this GUI")
#            special_cmd = 0
#        else:
#            WRITE_DATA = DEFAULT_WRITE_DATA

        cycle_time = timer() - time
        # print("cycle timer: %.10f" % cycle_time)

        # If not enough time has passed, sleep for SLEEP_AMOUNT seconds
        sleep_time = SLEEP_AMOUNT - (cycle_time)

        # Measure the time
        time = timer()
        # print(" ")

        # Read the packet from the device
        value = device.read(READ_SIZE, timeout=READ_TIMEOUT)

        # Update the GUI
        if len(value) >= READ_SIZE:
            # save into file:
            analog = [(int(value[i + 1]) << 8) + int(value[i])
                      for i in LAP_ANALOG_INDEX_LIST]
            channel_0 = analog[0]
            channel_1 = analog[1]
            channel_2 = analog[2]
            channel_3 = analog[3]
            channel_4 = analog[4]
            counter = (int(value[COUNTER_INDEX + 1]) << 8) + int(
                value[COUNTER_INDEX])
            count_dif = counter - prev_counter
            GBU_COUNTER_INDEX = 2 + 51  # need to add 2 since we have two bytes of lengths.
            gbu_counter = (int(value[GBU_COUNTER_INDEX + 1]) << 8) + int(
                value[GBU_COUNTER_INDEX])
            gbu_counter2 = (int(value[GBU_COUNTER_INDEX]) << 8) + int(
                value[GBU_COUNTER_INDEX + 1])
            global file1
            global prev_gbu_counter
            # global prev_gbu_counter_change_time
            #if count_dif > 1 :
            #    L = [ str(counter),",   ", str(clicker_analog), ", " , str(count_dif), " <<<<<--- " ,"\n" ]
            #else:
            #    L = [ str(counter),",   ", str(clicker_analog), ", " , str(count_dif), "\n" ]
            L = [
                str(channel_0), ",   ",
                str(channel_1), ", ",
                str(channel_2), ", ",
                str(channel_3), ", ",
                str(channel_4), "\n"
            ]
            # file1.writelines(L)

            # handler(value, do_print=do_print)
            # print("Received data: %s" % hexlify(value))
            Handler_Called = (timer() - handle_time)

            if Handler_Called > 0.002:
                # if Handler_Called > 0.02 :
                #print("handler called: %.6f" % Handler_Called)
                global print_every
                # if gbu_counter2 != prev_gbu_counter:
                # # check for 0x3f35ff0f then the rest
                # if value[0] == 0x3f and value[1] == 0x35 and value[2] == 0xff and value[3] == 0x0f:
                # print_every = 200
                # delta_time = timer() - prev_gbu_counter_change_time
                # gbu_counter_change_time = timer()

                # print_every = print_every + 1
                if print_every >= 200:
                    pass
                    # print_every = 0
                    # print("delta_time: %.1f" % delta_time, end="")
                    # L1 =  [str(delta_time), "\n"]
                    # delta_time_str = "%.1f" %delta_time  # save only 1 digit after decimal point
                    # event_time = get_time()
                    # L1 =  [delta_time_str, "    ",event_time,  "\n"]
                    # # global FILE1_PATH
                    # # file1 = open(FILE1_PATH,"w")
                    # file1.writelines(L1)
                    # # file1.close()
                    # # print("  Received data: %s" % str(gbu_counter))
                    # # print("  Received data: %06x" % gbu_counter)
                    # # print("  Received data: %06x " % gbu_counter2 + "%f" %v)
                    # print("  Received data: %06d" % gbu_counter2 + "    time: %s" %event_time)
                    # print("  Received data: %s" % hexlify(value))


#                                    10                  20                  30                  40
#                 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9
#Received data: b'3f2e64006400640064006400640064006400640064006400640064006400010101010101010101010101000000010101761dbd7fcabf5a0fafe1f3cb94ace35f'

                if value[0] == 0x3f and value[1] == 0x2e and value[
                        30] == 0x01 and value[31] == 0x01:
                    pass
                    # prev_gbu_counter = gbu_counter2
                    # prev_gbu_counter_change_time = gbu_counter_change_time
                for n in range(30, 41):
                    if value[n] != 0x01:
                        st = get_date_time_milisec()
                        print(st, end=" ")
                        print(" pressed: ", n)
                        L1 = [st, "   pressed: ", str(n), "\n"]
                        file1.writelines(L1)

            # print("time: %.6f" % time)
            handle_time = timer()
            prev_counter = counter

        # Update the do_print flag
        do_print = (timer() - print_time) >= PRINT_TIME
Example #36
        # Create optimizer object.
        opt = SANelderMead(prob.f, debug=0, maxiter=100000)

        # Install custom reporter plugin. Print dot for 500 evaluations.
        opt.installPlugin(
            MyReporter(prob.name, 1000, concise=True, printImprovement=False))

        # If problem has a custom initial simplex, set it.
        if len(probdef) == 1:
            opt.reset(prob.initial)
        else:
            # Custom simplex (for McKinnon)
            opt.reset(probdef[1])

        # Start timing, run, measure time.
        dt = timer()
        opt.run()
        dt = timer() - dt

        # Write number of function evaluations.
        print(" %d evaluations" % opt.niter)

        # Calculate initial and final gradient
        gini = prob.g(prob.initial)
        gend = prob.g(opt.x)

        # Store results for this problem.
        result = {
            'i': opt.niter,
            'x': opt.x,
            'f': opt.f,
Example #37
 def timed_Module(self, name):
     s = timer()
     yield
     self.add(name, timer() - s)
Example #38
        state = row[1]
        city2 = row[2]
        state2 = row[3]
        origin_city = city + "," + state
        destination_city = city2 + "," + state2
        urls.append("https://maps.googleapis.com/maps/api/distancematrix/json" \
        "?origins=%s&destinations=%s&units=imperial&key=%s" % (origin_city, destination_city, gkey))
        counter = counter + 1


#make api requests
def dist_matrix(url):
    try:
        response = requests.get(url).json()
        return url, response["rows"][0]["elements"][0]["duration"]["text"]
    except Exception as e:
        return url, None, e


#if url list is built, start pooling and calling function
if counter >= 950:
    start = timer()
    print("Timer started")
    results = ThreadPool(20).imap_unordered(dist_matrix, urls)
    for url, time in enumerate(results):
        print(url, time)
    print("Elapsed Time: %s" % (timer() - start, ))

else:
    print(counter)
Example #39
def menu():
    while True:
        print('01. Listar Cidades')
        print('02. Listar Pontos Turísticos')
        print('03. Distancia entre cidade e ponto turístico')
        print('04. Mostrar Lista de Adjacencias')
        print('05. Comparar Dijkstra e Bellman-Ford')
        print('06. Encerrar')
        print(
            '\nA função 3 foi modificada para incluir o algoritmo de Bellman-Ford'
        )
        print(
            'A função 5 compara Bellman-Ford e Dijkstra em todos os nós de forma geral\n'
        )

        func = int(input('Selecione uma função(1~6): '))
        if func == 1:
            print(' ')
            for cidade in nodes:
                print('[{}]: {}'.format(cidade, nodes[cidade]))
                if cidade == 11:
                    break
            print(' ')
        elif func == 2:
            print(' ')
            for x in range(13, 30):
                print('[{}]: {}'.format(x, nodes[x]))
            print(' ')
        elif func == 3:
            partida = int(input('Selecione a cidade de partida(1 - 11): '))
            destino = int(
                input('Selecione o ponto turistico de destino(12 - 28): '))
            print(' ')
            print('Dijkstra:')
            t0 = timer()
            dijkstra(partida, destino)
            t1 = timer()
            print(' ({:.10f} s)'.format(t1 - t0))
            print('Bellman Ford:')
            t2 = timer()
            bellmanFord(partida, destino, False)
            t3 = timer()
            print(dist[destino], 'km', end=' ')
            print('({:.10f} s)'.format(t3 - t2))
            print(" ")
        elif func == 4:
            print(" ")
            printGrafo(grafo)
            print(" ")
        elif func == 5:
            print('\nLista de distancias do ponto 1:\n')
            t0 = timer()
            for i in nodes:
                dijkstra(1, i, False)
            t1 = timer()
            print(dijDist)
            print('Dijkstra: {:.10f} segundos\n'.format(t1 - t0))
            t2 = timer()
            bellmanFord(1)
            t3 = timer()
            print(dist[1:])
            print('Bellman-Ford: {:.10f} segundos\n'.format(t3 - t2))
        elif func == 6:
            return
        else:
            print('Função não definida!\n')
Example #40
def acceptor_epoch(sess,
                   acceptor,
                   data_generator,
                   rnn_structure,
                   input_length,
                   y_dim,
                   x_dim,
                   t_timer,
                   stateful=True,
                   training=True):
    """
    One epoch for either training or validation
    :param sess: TensorFlow session
    :param acceptor: acceptor instance
    :param data_generator: iterator for generating batches
    :param input_length: number of look back steps used for future prediction 
    :param y_dim: targets dimension
    :param x_dim: inputs dimension
    :param t_timer: in case running time is too long
    :param training: boolean. true is training, false is validation or prediction
    :return: averaged_loss for the epoch
    """
    # one epoch for either training or prediction
    num_layers = len(rnn_structure)
    total_loss = 0
    total_samples = 0
    timeout = False  # initialized here; checked after the batch loop below

    # Batch Generator returns a tuple of (data, sequence length, keys, current size)
    # data: np 3-D array [record, time, feature]
    # sequence length: np 1-D array [record]
    # keys: np 1-D array [record]
    # current size: scalar number of samples in current batch
    for batch, seqlen, keys, current_size in data_generator:
        if timer() - t_timer > timeLimit:
            timeout = True
            print("Training timeout during batching at epoch:", epoch)
            break

        init_state = np.zeros((num_layers, 2, current_size,
                               max(i[0] for i in rnn_structure)))
        # initialize data slicer, iterator for slicing each batch to fit the acceptor network
        data_slicer = DataSlicer(batch,
                                 seqlen,
                                 input_length,
                                 y_dim,
                                 x_dim,
                                 random=random_slice)

        # steps within a batch
        for x, y, x_length in data_slicer:
            if timer() - t_timer > time_limit:
                timeout = True
                print("Training timeout during slicing at epoch:", epoch)
                break

            num_effective_sample = (x_length > 0).sum()
            total_samples += num_effective_sample

            # take one step and calculate loss
            preds, loss, final_state = acceptor_step(sess=sess,
                                                     acceptor=acceptor,
                                                     x=x,
                                                     y=y,
                                                     seqlen=x_length,
                                                     init_state=init_state,
                                                     training=training)

            # accumulate training loss
            if training:
                total_loss += loss * num_effective_sample
            else:
                total_loss += loss

            # initial state for the next GD update step
            init_state = np.zeros((num_layers, 2, current_size,
                                   max(i[0] for i in rnn_structure)))

            # if the rnn is stateful the initial state for the next training step should be the final state of the current training step
            if stateful:
                for i in range(len(final_state)):
                    c = final_state[i][0]  # LSTM cell state
                    h = final_state[i][1]  # LSTM hidden state
                    n_row, n_col = c.shape
                    init_state[i][0][:n_row, :n_col] = c
                    init_state[i][1][:n_row, :n_col] = h

    # if training is stopped due to timeout
    if timeout:
        # TODO: raise timeout exception
        raise Exception("Timeout!")
    return total_loss, total_samples
Example #41
def acceptor_train(sess,
                   acceptor,
                   data_generator,
                   num_epochs,
                   rnn_structure,
                   input_length,
                   y_dim,
                   x_dim,
                   valid_data_generator=None,
                   verbose=True,
                   time_limit=float("Inf"),
                   stateful=True,
                   random_slice=True):
    """
    Train the acceptor
    :param sess: TensorFlow session
    :param acceptor: acceptor instance
    :param data_generator: iterator for generating batches
    :param data_slicer: iterator for slicing each batch to fit the acceptor network
    :param num_epochs: number of epochs to run for training the network
    :param rnn_structure: define the structure of the acceptor. list of state tuples. Each tuple is a combination of state size and dropout keep rate. [(9, .9), (20, .7)] means two stacked layers of 9 hidden units in first layer with 0.9 dropout KEEP rate and 20 units in second layer of 0.7 dropout KEEP rate.
    :param input_length: number of look back steps used for future prediction
    :param y_dim: targets dimension
    :param x_dim: inputs dimension
    :param valid_data_generator: iterator for generating validation batches
    :param verbose: for debug and monitor purpose
    :param time_limit: in case running time is too long
    :param stateful: boolean indicating a stateful acceptor or not
    :param random_slice: boolean indication random slice the batches or not
    :return: training_losses, validation_losses
    """
    # training statistics
    training_losses = []
    validation_losses = []

    # initialize the timer, in case the training time is too long
    t_timer = timer()
    timeout = False

    # start training
    epoch = 0

    # extract the number of RNN layers from rnn_structure
    num_layers = len(rnn_structure)

    while epoch <= num_epochs:
        if timer() - t_timer > time_limit:
            timeout = True
            print("Training timeout at beginning of epoch:", epoch)
            break

        # take one epoch
        averaged_loss = acceptor_epoch(sess=sess,
                                       acceptor=acceptor,
                                       data_generator=data_generator,
                                       rnn_structure=rnn_structure,
                                       input_length=input_length,
                                       y_dim=y_dim,
                                       x_dim=x_dim,
                                       t_timer=t_timer,
                                       stateful=stateful,
                                       training=True)

        # training statistics
        training_losses.append(averaged_loss)

        # when one epoch is done, print training loss, test with validation
        if verbose:
            print("Average training loss for Epoch{}, loss:{}".format(
                epoch, averaged_loss))

        # if there are data for validation
        if valid_data_generator:
            averaged_valid_loss = acceptor_epoch(
                sess=sess,
                acceptor=acceptor,
                data_generator=valid_data_generator,
                rnn_structure=rnn_structure,
                input_length=input_length,
                y_dim=y_dim,
                x_dim=x_dim,
                t_timer=t_timer,
                stateful=stateful,
                training=False)
            # training statistics
            validation_losses.append(averaged_valid_loss)
            print("Average validation loss for Epoch{}, loss:{}".format(
                epoch, averaged_valid_loss))

        # next epoch
        epoch += 1
    return training_losses, validation_losses
Example #42
def camera(self):
    file = self.FLAGS.demo
    SaveVideo = self.FLAGS.saveVideo

    if self.FLAGS.track:
        if self.FLAGS.tracker == "deep_sort":
            from deep_sort import generate_detections
            from deep_sort.deep_sort import nn_matching
            from deep_sort.deep_sort.tracker import Tracker
            metric = nn_matching.NearestNeighborDistanceMetric(
                "cosine", 0.2, 100)
            tracker = Tracker(metric)
            encoder = generate_detections.create_box_encoder(
                os.path.abspath(
                    "deep_sort/resources/networks/mars-small128.ckpt-68577"))
        elif self.FLAGS.tracker == "sort":
            from sort.sort import Sort
            encoder = None
            tracker = Sort()
    if self.FLAGS.BK_MOG and self.FLAGS.track:
        fgbg = cv2.bgsegm.createBackgroundSubtractorMOG()

    if file == 'camera':
        file = 0
    else:
        assert os.path.isfile(file), \
        'file {} does not exist'.format(file)

    camera = skvideo.io.VideoCapture(file)

    if file == 0:
        self.say('Press [ESC] to quit video')

    assert camera.isOpened(), \
    'Cannot capture source'

    if self.FLAGS.csv:
        f = open('{}.csv'.format(file), 'w')
        writer = csv.writer(f, delimiter=',')
        writer.writerow(['frame_id', 'track_id', 'x', 'y', 'w', 'h'])
        f.flush()
    else:
        f = None
        writer = None
    if file == 0:  #camera window
        cv2.namedWindow('', 0)
        _, frame = camera.read()
        height, width, _ = frame.shape
        cv2.resizeWindow('', width, height)
    else:
        _, frame = camera.read()
        height, width, _ = frame.shape

    if SaveVideo:
        if file == 0:  #camera window
            fps = 1 / self._get_fps(frame)
            if fps < 1:
                fps = 1
        else:
            fps = get_fps_rate(file)

        output_file = 'output_{}'.format(file)
        if os.path.exists(output_file):
            os.remove(output_file)

        videoWriter = skvideo.io.VideoWriter(output_file,
                                             fps=fps,
                                             frameSize=(width, height))
        videoWriter.open()

    # buffers for demo in batch
    buffer_inp = list()
    buffer_pre = list()

    elapsed = 0
    start = timer()
    self.say('Press [ESC] to quit demo')
    #postprocessed = []
    # Loop through frames
    n = 0
    while camera.isOpened():
        elapsed += 1
        _, frame = camera.read()
        if frame is None:
            print('\nEnd of Video')
            break
        if self.FLAGS.skip != n:
            n += 1
            continue
        n = 0
        if self.FLAGS.BK_MOG and self.FLAGS.track:
            fgmask = fgbg.apply(frame)
        else:
            fgmask = None
        preprocessed = self.framework.preprocess(frame)
        buffer_inp.append(frame)
        buffer_pre.append(preprocessed)
        # Only process and imshow when queue is full
        if elapsed % self.FLAGS.queue == 0:
            feed_dict = {self.inp: buffer_pre}
            net_out = self.sess.run(self.out, feed_dict)
            for img, single_out in zip(buffer_inp, net_out):
                if not self.FLAGS.track:
                    postprocessed = self.framework.postprocess(single_out,
                                                               img,
                                                               save=False)
                else:
                    postprocessed = self.framework.postprocess(
                        single_out,
                        img,
                        frame_id=elapsed,
                        csv_file=f,
                        csv=writer,
                        mask=fgmask,
                        encoder=encoder,
                        tracker=tracker,
                        save=False)
                if SaveVideo:
                    videoWriter.write(postprocessed)

            # Clear Buffers
            buffer_inp = list()
            buffer_pre = list()

        if elapsed % 5 == 0:
            sys.stdout.write('\r')
            sys.stdout.write('{0:3.3f} FPS'.format(elapsed /
                                                   (timer() - start)))
            sys.stdout.flush()

    sys.stdout.write('\n')
    if SaveVideo:
        videoWriter.release()
    if self.FLAGS.csv:
        f.close()
    camera.release()
Example #43
def ns_fit(datadir):

    #::: init
    config.init(datadir)

    #::: show initial guess
    show_initial_guess()

    #::: settings
    nlive = config.BASEMENT.settings[
        'ns_nlive']  # (default 500) number of live points
    bound = config.BASEMENT.settings[
        'ns_bound']  # (default 'single') use MutliNest algorithm for bounds
    ndim = config.BASEMENT.ndim  # number of parameters
    sample = config.BASEMENT.settings[
        'ns_sample']  # (default 'auto') random walk sampling
    tol = config.BASEMENT.settings[
        'ns_tol']  # (defualt 0.01) the stopping criterion

    #::: run
    if config.BASEMENT.settings['ns_modus'] == 'static':
        logprint('\nRunning Static Nested Sampler...')
        logprint('--------------------------')
        t0 = timer()

        if config.BASEMENT.settings['multiprocess']:
            with closing(
                    Pool(processes=(config.BASEMENT.
                                    settings['multiprocess_cores']))) as pool:
                logprint('\nRunning on',
                         config.BASEMENT.settings['multiprocess_cores'],
                         'CPUs.')
                sampler = dynesty.NestedSampler(
                    ns_lnlike,
                    ns_prior_transform,
                    ndim,
                    pool=pool,
                    queue_size=config.BASEMENT.settings['multiprocess_cores'],
                    bound=bound,
                    sample=sample,
                    nlive=nlive)
                sampler.run_nested(dlogz=tol, print_progress=True)

        else:
            sampler = dynesty.NestedSampler(ns_lnlike,
                                            ns_prior_transform,
                                            ndim,
                                            bound=bound,
                                            sample=sample,
                                            nlive=nlive)
            sampler.run_nested(dlogz=tol, print_progress=True)

        t1 = timer()
        timedynesty = (t1 - t0)
        logprint("\nTime taken to run 'dynesty' (in static mode) is {} hours".
                 format(int(timedynesty / 60. / 60.)))

    elif config.BASEMENT.settings['ns_modus'] == 'dynamic':
        logprint('\nRunning Dynamic Nested Sampler...')
        logprint('--------------------------')
        t0 = timer()

        if config.BASEMENT.settings['multiprocess']:
            with closing(
                    Pool(processes=config.BASEMENT.
                         settings['multiprocess_cores'])) as pool:
                logprint('\nRunning on',
                         config.BASEMENT.settings['multiprocess_cores'],
                         'CPUs.')
                sampler = dynesty.DynamicNestedSampler(
                    ns_lnlike,
                    ns_prior_transform,
                    ndim,
                    pool=pool,
                    queue_size=config.BASEMENT.settings['multiprocess_cores'],
                    bound=bound,
                    sample=sample)
                sampler.run_nested(nlive_init=nlive,
                                   dlogz_init=tol,
                                   print_progress=True)

        else:
            sampler = dynesty.DynamicNestedSampler(ns_lnlike,
                                                   ns_prior_transform,
                                                   ndim,
                                                   bound=bound,
                                                   sample=sample)
            sampler.run_nested(nlive_init=nlive, dlogz_init=tol, print_progress=True)

        t1 = timer()
        timedynestydynamic = (t1 - t0)
        logprint("\nTime taken to run 'dynesty' (in dynamic mode) is {} hours".
                 format(int(timedynestydynamic / 60. / 60.)))

    #::: pickle-save the 'results' class
    results = sampler.results
    with open(os.path.join(config.BASEMENT.outdir, 'save_ns.pickle'),
              'wb') as f:
        pickle.dump(results, f)
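# A minimal follow-up sketch (not part of the snippet above): reload the pickled
# results later and inspect dynesty's final log-evidence estimate; assumes the
# same config.BASEMENT.outdir layout used above.
with open(os.path.join(config.BASEMENT.outdir, 'save_ns.pickle'), 'rb') as f:
    reloaded_results = pickle.load(f)
print(reloaded_results.logz[-1])  # cumulative log-evidence at the final iteration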
Example #44
def __init__(self, summarize_every=5, disabled=False):
    self.last_tick = timer()
    self.logs = OrderedDict()
    self.summarize_every = summarize_every
    self.disabled = disabled
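# A hypothetical companion method (not shown in the source), sketching one way
# such a profiler could record named intervals with timer() using the fields
# initialised above; `tick` and its bookkeeping are illustrative assumptions.
def tick(self, name):
    if self.disabled:
        return
    self.logs.setdefault(name, []).append(timer() - self.last_tick)  # duration since last tick
    self.last_tick = timer()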
Example #45
def camera(self):
    global current_count
    file = self.FLAGS.demo
    SaveVideo = self.FLAGS.saveVideo
    if file == 'camera':
        file = 0
    else:
        assert os.path.isfile(file), \
            'file {} does not exist'.format(file)

    camera = cv2.VideoCapture(file)

    if file == 0:
        self.say('Press [ESC] to quit demo')

    assert camera.isOpened(), \
        'Cannot capture source'

    if file == 0:  # camera window
        cv2.namedWindow('', 0)
        _, frame = camera.read()
        height, width, _ = frame.shape
        cv2.resizeWindow('', width, height)
    else:
        _, frame = camera.read()
        height, width, _ = frame.shape

    if SaveVideo:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        if file == 0:  # camera window
            fps = 1 / self._get_fps(frame)
            if fps < 1:
                fps = 1
        else:
            fps = round(camera.get(cv2.CAP_PROP_FPS))
        videoWriter = cv2.VideoWriter('video.avi', fourcc, fps,
                                      (width, height))

    # buffers for demo in batch
    buffer_inp = list()
    buffer_pre = list()

    elapsed = int()
    start = timer()
    self.say('Press [ESC] to quit demo')
    # Loop through frames
    i = 0
    count = 0
    while camera.isOpened():
        elapsed += 1
        _, frame = camera.read()
        i += 1
        if frame is None:
            print('\nEnd of Video')
            break
        if i % 20 == 0:
            preprocessed = self.framework.preprocess(frame)
            buffer_inp.append(frame)
            buffer_pre.append(preprocessed)
            # Only process and imshow when queue is full
            if elapsed % self.FLAGS.queue == 0:
                feed_dict = {self.inp: buffer_pre}
                net_out = self.sess.run(self.out, feed_dict)
                for img, single_out in zip(buffer_inp, net_out):
                    postprocessed, count = self.framework.postprocess(
                        single_out, img, False)
                    current_count = count
                    if SaveVideo:
                        videoWriter.write(postprocessed)
                    # if file == 0:  # camera window
                    #     cv2.imshow('', postprocessed)
                # Clear Buffers
                buffer_inp = list()
                buffer_pre = list()

            if elapsed % 5 == 0:
                sys.stdout.write('\r')
                sys.stdout.write('{0:3.3f} FPS\n'.format(elapsed /
                                                         (timer() - start)))
                sys.stdout.flush()
            if file == 0:  # camera window
                choice = cv2.waitKey(1)
                if choice == 27: break
        # else:
        #     cv2.imshow('', frame)
        #     cv2.waitKey(1)
    sys.stdout.write('\n')
    if SaveVideo:
        videoWriter.release()
    camera.release()
    if file == 0:  # camera window
        cv2.destroyAllWindows()
Example #46
		
		# Create optimizer object.
		opt=SANelderMead(prob.f, debug=0, maxiter=100000)
		
		# Install custom reporter plugin. Print a dot every 1000 evaluations.
		opt.installPlugin(MyReporter(prob.name, 1000, concise=True, printImprovement=False))
		
		# If problem has a custom initial simplex, set it. 
		if len(probdef)==1:
			opt.reset(prob.initial)
		else:
			# Custom simplex (for McKinnon) 
			opt.reset(probdef[1])
		
		# Start timing, run, measure time. 
		dt=timer()
		opt.run()
		dt=timer()-dt
		
		# Write number of function evaluations. 
		print(" %d evaluations" % opt.niter)
		
		# Calculate initial and final gradient
		gini=prob.g(prob.initial)
		gend=prob.g(opt.x)
		
		# Store results for this problem. 
		result={ 
			'i': opt.niter, 
			'x': opt.x, 
			'f': opt.f, 
Example #47
clock = time.Clock()
FPS = 60
pl1 = Player("yes1.png",30,200,4,25,100)
pl2 = Player("yes2.png",520,200,4,25,100)
ball = Gamesprite("мячик.png",240,200,4,100,75)
x = 3
y = 3
cccc = 0
ccc = 0
cc = randint(1,2)
c = 0
if cc == 2:
    x *= -1
else:
    x *= 1
last_time = timer()


score1 = font.render(str(s1),True,(2,4,5))
score2 = font.render(str(s2),True,(6,7,8))
while game:
    for e in event.get():
        if e.type == QUIT:
            game = False

    if finish != True:
Example #48
def verify(self):
    if abs(self.time - timer()) > self.MaxVideoTime:
        self.imagespath, self.videopath, self.time = self.path_saves()
        self.videoWriter = self.video_partition()
        self.time = timer()
Example #49
def initSetConfig(self):
    """Reset the timer before setting configuration (typically at startup)."""
    self.timeStart = timer()
Example #50
@contextmanager
def total_timer(msg):
    """ A context manager which adds the time spent inside to TotalTimer. """
    start = timer()
    yield
    t = timer() - start
    _TOTAL_TIMER_DATA[msg].feed(t)
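# A small usage sketch (`do_work` is a hypothetical placeholder): everything
# inside the with-block is timed and fed to the accumulator under its label.
with total_timer('forward_pass'):
    do_work()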
Example #51
def reset_timer(self):
    self.last_tick = timer()
Example #52
def main():

    # client = Client(processes = False) # threads ?
    client = Client()
    size = 10000000
    # size       = 20
    # shards     = 20
    # shards     = 6
    # shards     = 1
    shards = 12
    shape = [size]
    lat = np.random.rand(size) * 180.0 - 90.0
    lon = np.random.rand(size) * 360.0 - 180.0
    resolution_ = 8
    resolution = np.full(shape, resolution_, dtype=np.int64)

    # print('lat shape: ',lat.shape)

    print('')
    serial_start = timer()
    s_sids = ps.from_latlon(lat, lon, resolution_)
    s_sidsstr = [hex16(s_sids[i]) for i in range(len(s_sids))]
    serial_end = timer()
    # print('0 s_sids: ',s_sids)
    print('time s_sids: ', serial_end - serial_start)

    def w_from_latlon(llr):
        # print('')
        # print('llr:  ',llr)
        sids = ps.from_latlon(llr[0], llr[1], int(llr[2][0]))
        # print('sids: ',sids)
        # print('')
        return sids

    # def w_from_latlon1(lat,lon,res):
    #     return ps.from_latlon(np.array([lat],dtype=np.double)\
    #                            ,np.array([lon],dtype=np.double)\
    #                            ,int(res))
    # sid        = ps.from_latlon(lat,lon,resolution)
    # sid        = client.map(w_from_latlon1,lat,lon,resolution) # futures

    dask_start = timer()
    shard_size = int(size / shards)
    shard_bins = np.arange(shards + 1) * shard_size
    shard_bins[-1] = size

    # print('---')
    # print('shards:     ',shards)
    # print('shard_size: ',shard_size)
    # print('shard_bins: ',shard_bins)
    # print('---')
    lat_shards = [lat[shard_bins[i]:shard_bins[i + 1]] for i in range(shards)]
    lon_shards = [lon[shard_bins[i]:shard_bins[i + 1]] for i in range(shards)]
    res_shards = [
        resolution[shard_bins[i]:shard_bins[i + 1]] for i in range(shards)
    ]

    llr_shards = []
    for i in range(shards):
        llr_shards.append([lat_shards[i], lon_shards[i], res_shards[i]])

    # print('llr_shards len: ',len(llr_shards))
    # print('llr_shards: ',llr_shards)

    ## future = client.submit(func, big_data)    # bad
    ##
    ## big_future = client.scatter(big_data)     # good
    ## future = client.submit(func, big_future)  # good

    # sid        = client.map(w_from_latlon,llr_shards) # futures

    big_future = client.scatter(llr_shards)
    sid = client.map(w_from_latlon, big_future)  # futures

    # print('0 sid:  ',sid)
    # print('9 len(sid): ',len(sid))
    # for i in range(shards):
    #     print(i, ' 10 sid: ',sid[i])
    #     print(i, ' 11 sid: ',sid[i].result())

    # print('15 sid:    ',[type(i) for i in sid])

    sid_cat = np.concatenate([i.result() for i in sid])
    sidsstr = [hex16(sid_cat[i]) for i in range(len(sid_cat))]
    dask_end = timer()
    # print('2 sids: ',sids)
    sids = sid_cat

    print('')
    # for i in range(size-20,size):
    for i in np.array(np.random.rand(20) * size, dtype=np.int64):
        print("%09i" % i, sidsstr[i], s_sidsstr[i], ' ', sids[i] - s_sids[i])

    print('')
    print('dask total threads:  ', sum(client.nthreads().values()))
    print('size:                ', size)
    print('shards:              ', shards)
    print('')
    print('time sids:           ', dask_end - dask_start)
    print('time s_sids:         ', serial_end - serial_start)
    print('parallel speed up:   ',
          (serial_end - serial_start) / (dask_end - dask_start))

    client.close()
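# Side note on the pattern above: Dask's Client.gather resolves a whole list of
# futures in one call, so the concatenation step could equivalently be written
# (a sketch, reusing the same names as above):
#
#     sid_cat = np.concatenate(client.gather(sid))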
Example #53
#Let's keep the format from bridge_played_hands:
#name of the folder = first three digits of the lin files
for directory in [str(s) for s in range(423,534+1)]:
	if not os.path.exists(directory):
		os.makedirs(directory)

def fetch_lin_file(url):
	#name of the folder = first three digits of the lin files
	filename = url[-5:-2]+"/"+url[-5:]

	try:
		response = urlopen(url)
		# urlopen().read() returns bytes, so write the .lin file in binary mode
		with open(filename + '.lin', 'wb') as f:
			f.write(response.read())
		return url, None

	except Exception as e:
		return url, e

#download them in parallel
#based on https://stackoverflow.com/questions/16181121/a-very-simple-multithreading-parallel-url-fetching-without-queue
start = timer()
results = ThreadPool(20).imap_unordered(fetch_lin_file, urls)#20 requests at a time
for url, error in results:
    if error is None:
        print("%r fetched in %ss" % (url[-5:], timer() - start))
    else:
        print("Error fetching %r: %s" % (url, error))
print("Elapsed Time: %s" % (timer() - start,))
Example #54
# ===============================================
# Preparing files for network
# Drop indices for neutral reviews
to_drop = [2]
data_validation = data_validation[~data_validation['Polarity'].isin(to_drop)]
data_test = data_test[~data_test['airline_sentiment'].isin(to_drop)]

# Tweets corresponding to training, validation and test set
tweets_training = data_training['Tweet']
tweets_validation = data_validation['Tweet']
tweets_test = data_test['text']

# Create vocabulary from reduced tweets and dump it for easy read out.
# It takes ~2 s for creating a vocabulary for 1.6M reduced tweets.
t_start = timer()
# Equivalent of looping over the individual terms
flat_list = [word for tweet in tweets_training for word in tweet.split()]
# vocab = [*set(flat_list), ]  # Using set and converting to a list
vocab = Counter()
vocab.update(flat_list)
t_stop = timer()
print("Time to create vocabulary for %d Tweets is %0.5f s" %
      (len(tweets_training), t_stop - t_start))
# Output in terminal
# Time to create vocabulary for 1000 Tweets is 0.00122 s
# Time to create vocabulary for 1600000 Tweets is 1.76170 s

# Reducing limits with heapq package
t_start = timer()
red_word = 10000  # Number of reduced words in the vocabulary
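# A minimal sketch of the reduction step the comment above hints at (assumes
# the vocab Counter and red_word defined in this snippet): keep only the most
# frequent words.
import heapq
top_words = heapq.nlargest(red_word, vocab, key=vocab.get)
# vocab.most_common(red_word) would give the same words together with their counts.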
Example #55
def reset(self):
    self.start = timer()
Example #56
def stop_timer_for(self, i, score):
    self.measurements[i][TIME] = timer() - self.t0
    self.measurements[i][R1] = score[0]
    self.measurements[i][R2] = score[1]
    self.measurements[i][R4] = score[2]
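# A hypothetical counterpart (not shown in the source): a start method that
# would set the self.t0 reference used above; purely illustrative.
def start_timer(self):
    self.t0 = timer()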
Example #57
                os.makedirs(this)

    requiredDirectories = [FLAGS.imgdir, FLAGS.binary,
                           FLAGS.backup, os.path.join(FLAGS.imgdir, 'out')]
    if FLAGS.summary:
        requiredDirectories.append(FLAGS.summary)

    _get_dir(requiredDirectories)
    tfnet = TFNet(FLAGS)

    model = masknet.create_model()
    model.summary()
    model.load_weights("weights.hdf5")

    elapsed = int()
    start = timer()
    tfnet.say('Press [ESC] to quit demo')

        preprocessed = tfnet.framework.preprocess(frame)
        buffer_inp.append(frame)
        buffer_pre.append(preprocessed)

        # Only process and imshow when queue is full
        if elapsed % FLAGS.queue == 0:
            feed_dict = {tfnet.inp: buffer_pre}
            net_out = tfnet.sess.run(
                [tfnet.out, tfnet.my_c2, tfnet.my_c3, tfnet.my_c4, tfnet.my_c5], feed_dict)
            my_c2 = net_out[1]
            my_c3 = net_out[2]
            my_c4 = net_out[3]
            my_c5 = net_out[4]
Example #58
data_validation = data_validation[~data_validation['Polarity'].isin(to_drop)]
data_test = data_test[~data_test['airline_sentiment'].isin(to_drop)]

# Tweets corresponding to training, validation and test set
tweets_training = data_training['Tweet']
tweets_validation = data_validation['Tweet']
tweets_test = data_test['text']

# ===============================
# Preparing for inputs for embedding model
# Some Constants
VocLen = 10000  # Number of words to keep in the dictionary
MaxLen = 24
time_step = 50  # Same as vector dimension of GloVe Embedding

t_start = timer()  # Clock start

tokenizer = Tokenizer(num_words=VocLen)
tokenizer.fit_on_texts(tweets_training)

# Save token for future use
with open('models/tokenizer_10k.pickle', 'wb') as handle:
    pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)

# # Load tokenizer
# # Will be used in fast loading this routine.
# with open('models/tokenizer_10k.pickle', 'rb') as handle:
#     tokenizer = pickle.load(handle)

# Converting sequences of tweets
X_train_seq = tokenizer.texts_to_sequences(tweets_training)
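# A sketch of the step that typically follows (the snippet is truncated here):
# pad/clip the integer sequences to MaxLen so they can feed an embedding layer.
# Assumes Keras' pad_sequences; the names mirror the snippet above.
from keras.preprocessing.sequence import pad_sequences
X_train_pad = pad_sequences(X_train_seq, maxlen=MaxLen)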
Example #59
def timed_callback(self, name):
    s = timer()
    yield
    self.add(name, timer() - s)
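# A small usage sketch (assumes timed_callback is wrapped with
# contextlib.contextmanager and `prof` is an instance of the class above;
# `encode_batch` is a hypothetical placeholder):
with prof.timed_callback('encode'):
    encode_batch()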
Example #60
def main():

    # ********************************************************************
    # read and test input parameters
    # ********************************************************************

    print('Parallel Research Kernels version ')  #, PRKVERSION
    print('Python Sparse matrix-vector multiplication')

    if len(sys.argv) != 4:
        print('argument count = ', len(sys.argv))
        sys.exit(
            "Usage: ./sparse.py <# iterations> <2log grid size> <stencil radius>"
        )

    iterations = int(sys.argv[1])
    if iterations < 1:
        sys.exit("ERROR: iterations must be >= 1")

    lsize = int(sys.argv[2])
    if lsize < 0:
        sys.exit("ERROR: lsize must be >= 0")
    size = 2**lsize
    size2 = size**2

    radius = int(sys.argv[3])
    if radius < 1:
        sys.exit("ERROR: Stencil radius should be positive")
    if size < (2 * radius + 1):
        sys.exit("ERROR: Stencil radius exceeds grid size")

    stencil_size = 4 * radius + 1
    sparsity = (4. * radius + 1.) / size2
    nent = size2 * stencil_size

    print('Number of iterations = ', iterations)
    print('Matrix order         = ', size2)
    print('Stencil diameter     = ', 2 * radius + 1)
    print('Sparsity             = ', sparsity)

    # ********************************************************************
    # Initialize data and perform computation
    # ********************************************************************

    matrix = numpy.zeros(nent, dtype=float)
    colIndex = numpy.zeros(nent, dtype=int)
    vector = numpy.zeros(size2, dtype=float)
    result = numpy.zeros(size2, dtype=float)
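    # Note: offset() used below is not defined in this snippet.  A minimal
    # sketch consistent with how it is called (mapping grid point (i, j) on a
    # 2**lsize x 2**lsize grid to a linear row index) could be:
    #
    #     def offset(i, j, lsize):
    #         return i + (j << lsize)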

    for row in range(size2):
        i = int(row % size)
        j = int(row / size)
        elm = row * stencil_size
        colIndex[elm] = offset(i, j, lsize)
        for r in range(1, radius + 1):
            colIndex[elm + 1] = offset((i + r) % size, j, lsize)
            colIndex[elm + 2] = offset((i - r + size) % size, j, lsize)
            colIndex[elm + 3] = offset(i, (j + r) % size, lsize)
            colIndex[elm + 4] = offset(i, (j - r + size) % size, lsize)
            elm += 4
        # sort colIndex to make sure the compressed row accesses vector elements in increasing order
        colIndex[row * stencil_size:(row + 1) * stencil_size] = sorted(
            colIndex[row * stencil_size:(row + 1) * stencil_size])
        for k in range(0, stencil_size):
            elm = row * stencil_size + k
            matrix[elm] = 1.0 / (colIndex[elm] + 1)

    for k in range(iterations + 1):

        if k < 1: t0 = timer()

        # fill vector
        vector += numpy.fromfunction(lambda i: i + 1.0, (size2, ))

        # do the actual matrix-vector multiplication
        for row in range(0, size2):
            result[row] += numpy.dot(
                matrix[stencil_size * row:stencil_size * (row + 1)],
                vector[colIndex[stencil_size * row:stencil_size * (row + 1)]])

    t1 = timer()
    sparse_time = t1 - t0

    #******************************************************************************
    #* Analyze and output results.
    #******************************************************************************

    reference_sum = 0.5 * nent * (iterations + 1) * (iterations + 2)

    vector_sum = numpy.linalg.norm(result, ord=1)

    epsilon = 1.e-8

    # verify correctness
    if abs(vector_sum - reference_sum) < epsilon:
        print('Solution validates')
        flops = 2 * nent
        avgtime = sparse_time / iterations
        print('Rate (MFlops/s): ', 1.e-6 * flops / avgtime, ' Avg time (s): ',
              avgtime)
    else:
        print('ERROR: Vector sum = ', vector_sum, ', Reference vector sum = ',
              reference_sum)
        sys.exit()