Example #1
def ip_in_subnet(ip, subnet):
    """Does IP exists in a given subnet utility. Returns a boolean"""
    ipaddr = int(''.join(['%02x' % int(x) for x in ip.split('.')]), 16)
    netstr, bits = subnet.split('/')
    netaddr = int(''.join(['%02x' % int(x) for x in netstr.split('.')]), 16)
    mask = (0xffffffff << (32 - int(bits))) & 0xffffffff
    return (ipaddr & mask) == (netaddr & mask)
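A quick sanity check of the helper above; the standard library's ipaddress module (Python 3.3+) gives the same answer and can serve as a cross-check:

# hypothetical usage; values chosen for illustration
print(ip_in_subnet('192.168.1.42', '192.168.1.0/24'))  # True
print(ip_in_subnet('192.168.2.1', '192.168.1.0/24'))   # False

import ipaddress
print(ipaddress.ip_address('192.168.1.42') in ipaddress.ip_network('192.168.1.0/24'))  # True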
Example #2
def make_video(events, t0=0.0, t1=None, dt_frame=0.01, tau=0.01):
    if t1 is None:
        t1 = events["t"].max()

    ts = events["t"]
    dt = 1e-3
    nt = int((t1 - t0) / dt) + 1
    # nt = min(nt, 1000)  # cap at 1000 for now

    image = np.zeros((128, 128))
    images = np.zeros((nt, 128, 128))

    for i in range(nt):
        # --- decay image
        image *= np.exp(-dt / tau) if tau > 0 else 0
        # image *= 0

        # --- add events
        ti = t0 + i * dt
        add_to_image(image, events[close(ts, ti)])

        images[i] = image

    # --- average in frames
    nt_frame = int(dt_frame / dt)
    nt_video = int(nt / nt_frame)

    video = np.zeros((nt_video, 128, 128))
    for i in range(nt_video):
        slicei = slice(i * nt_frame, (i + 1) * nt_frame)
        video[i] = np.sum(images[slicei], axis=0)

    return video
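make_video relies on two helpers, close and add_to_image, that are not shown in this snippet. A minimal sketch of what they plausibly do, assuming events is a NumPy structured array with fields t, x, y and a polarity field p (these names and semantics are assumptions, not the original implementation):

import numpy as np

def close(ts, ti, atol=0.5e-3):
    # mask of events whose timestamp falls in the current 1 ms bin (assumed semantics)
    return np.abs(ts - ti) < atol

def add_to_image(image, events):
    # accumulate assumed +1/-1 polarities at the event pixel coordinates
    for e in events:
        image[int(e["y"]), int(e["x"])] += e["p"]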
Example #3
    def post(self):

        if not check_logged_in():
            self.redirect(users.create_login_url(self.request.uri))
            return

        # Grab user
        user = get_user()

        # Grab the payload from POST request and today's month
        session_dict = json.loads(self.request.body)

        # Pre-process received data, including escaping HTML chars
        safe_session_dict = {} # Escaped / processed data will go in here

        safe_session_dict['user'] = user
        safe_session_dict['duration'] = int(session_dict.get('duration', 0))
        safe_session_dict['goal'] = int(session_dict.get('goal', 0))
        safe_session_dict['reached_goal'] = bool(session_dict.get('reachedGoal', False))

        # Begin validity check
        # validity_check = self.verify_data_integrity(safe_session_dict)

        new_session = Session()

        # Populate the entity with cleaned dictionary values and save
        new_session.populate(**safe_session_dict)
        new_session.put()

        # Let user know this was a success!
        response = {"status": "success"}
        self.response.write(json.dumps(response))
Example #4
def get_adjacency_lists(in_file):
    edges = {}
    verts = {}
    edge_count = 0
    with open(in_file) as f:
        for line in f.readlines():
            vertex = line.split()
            v1 = int(vertex[0])
            for v2_s in vertex[1:]:
                v2 = int(v2_s)
                if v2 > v1:
                    # avoid adding duplicated edges in the loaded graph
                    verts.setdefault(v1, set()).add(edge_count)  # edges in v1
                    verts.setdefault(v2, set()).add(edge_count)  # edges in v2

                    edges[edge_count] = [v1, v2]
                    edge_count += 1

    return edges, verts
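The expected input is one whitespace-separated line per vertex: the vertex id followed by its neighbours. A small hypothetical run (file name and contents invented for illustration):

# graph.txt contains:
# 1 2 3
# 2 1 3
# 3 1 2
edges, verts = get_adjacency_lists('graph.txt')
print(edges)  # {0: [1, 2], 1: [1, 3], 2: [2, 3]}
print(verts)  # {1: {0, 1}, 2: {0, 2}, 3: {1, 2}}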
Example #5
    def post(self):

        if not check_logged_in():
            self.redirect(users.create_login_url(self.request.uri))
            return

        # Grab user and user data
        user = get_user()
        user_data = UserData.query(UserData.user == user).fetch()[0]

        # Flips to true if any updates were made
        updated = False

        # Grab the payload from POST request and today's month
        user_dict = json.loads(self.request.body)

        if user_dict.get('duration'):
            user_data.last_duration = int(user_dict.get('duration'))
            updated = True

        if user_dict.get('goal'):
            user_data.last_goal = int(user_dict.get('goal'))
            updated = True

        if updated:
            user_data.put()

        # Let user know this was a success!
        response = {"status": "success"}
        self.response.write(json.dumps(response))        
Example #6
	def route(self, minz):
		try:
			self.paths = []
			self.sub_terminal_collision_lines()
			visited = set()
			for index in xrange(1, len(self.terminals)):
				visited |= set([(int(self.terminals[index - 1][2][0]+0.5), int(self.terminals[index - 1][2][1]+0.5), z) for z in xrange(self.pcb.depth)])
				ends = [(int(self.terminals[index][2][0]+0.5), int(self.terminals[index][2][1]+0.5), z) for z in xrange(self.pcb.depth)]
				self.pcb.mark_distances(self.pcb.routing_flood_vectors, self.radius, self.via, self.gap, visited, ends)
				ends = [(self.pcb.get_node(node), node) for node in ends]
				ends.sort()
				_, end = ends[0]
				path = [end]
				while path[-1] not in visited:
					nearer_nodes = self.pcb.all_not_shorting(self.pcb.all_nearer_sorted, \
								(self.pcb.routing_path_vectors, path[-1], end, self.pcb.dfunc), path[-1], self.radius, self.via, self.gap)
					next_node = next(nearer_nodes)
					if minz:
						for node in nearer_nodes:
							if node[2] == path[-1][2]:
								next_node = node
								break
					path.append(next_node)
				visited |= set(path)
				self.paths.append(path)
				self.pcb.unmark_distances()
			self.paths = self.optimise_paths(self.paths)
			self.add_paths_collision_lines()
			self.add_terminal_collision_lines()
			return True
		except StopIteration:
			self.pcb.unmark_distances()
			self.remove()
			return False
Example #7
def main():
    ptt_dir = '/tmp2/GorsachiusMelanolophus/ptt_posts_new/no_sponsored/'
    imgs_dir = '/tmp2/GorsachiusMelanolophus/ptt_imgs/no_sponsored/'
    start = int(sys.argv[1])
    end = int(sys.argv[2])
    fp = open('../img_num/' + str(start)+ '.txt', 'a')
    for i in range(start, end):
        try:
            post_path = ptt_dir + str(i) + '.p'
            post = pickle.load(open(post_path, 'rb'))
            url = ptt_url + post['href']
            webpage = get_webpage(url)
            imgs, blog_url = parse_post(webpage)
            if imgs:
                print(f'{i}:{len(imgs)}', file=fp)
                save(imgs, imgs_dir + str(i))
            elif blog_url:
                webpage = get_webpage(blog_url)
                imgs = get_imgs_blog(webpage)
                if imgs:
                    print(f'{i}:{len(imgs)}', file=fp)
                    save(imgs, imgs_dir + str(i))
        except KeyboardInterrupt:
            return 0
        except Exception as e:
            print(e)
Example #8
def search_csv(request, database_name=settings.MONGO_DB_NAME,
                collection_name=settings.MONGO_MASTER_COLLECTION,
                skip=0, sort=None, limit=settings.MONGO_LIMIT, return_keys=(), query={}):
    
    result = prepare_search_results(request, database_name=database_name,
                collection_name=collection_name, sort=sort, skip=skip,
                limit=limit, return_keys=return_keys, query=query)

    #print result.keys()

    if int(result['code']) == 200:
        listresults=result['results']
        if settings.RESPECT_SOCIAL_GRAPH:
            listresults = filter_social_graph(request, listresults)
            len_results = len(listresults)
            if len_results < result['num_results']:
                result['omitted-results'] = result['num_results'] - len_results

        keylist = []
        for i in listresults:
            for j in i.keys():
                if j not in keylist:
                    keylist.append(j)


        return convert_to_csv(keylist, listresults)

    else:
        jsonresults=to_json(result)
        return HttpResponse(jsonresults, status=int(result['code']),
                            mimetype="application/json")
Example #9
 def equ(*args):
     Visor = '' + N1.get()
     if Visor.find('²') != -1:
         try:
             A = int(Visor[:Visor.find('x²')])
         except ValueError:
             A = 1
         B = int(Visor[ Visor.find('²')+1 : Visor.find('x', Visor.find('²'))])
         C = int(Visor[ Visor.find('x', Visor.find('²'))+1 : Visor.find('=')])+int(Visor[ Visor.find('=')+1:])
         D = B**2 - 4 * A * C
             #D = B**2 – 4 * A * C  looks identical to the line above, but the minus here is an en dash, not a hyphen, so Python rejects it
         if D > 0:
             X1 = ((-1*B)+sqrt(D))/(2*A)
             X2 = ((-1*B)-sqrt(D))/(2*A)
             Resultado = ('X\' = ',X1,'X\" = ',X2)
             N1.set(Resultado)
 
         elif D == 0:
             X = ((-1*B)+sqrt(D))/(2*A)
             Resultado = ('X\' = ',X,'X\" = ',X)
             N1.set(Resultado)
         else:
             N1.set('Não há raízes reais')
         Historico.set(Historico.get() + Visor + ' = ' + N1.get() + '\n')                 
     else:
         pass
Example #10
 def __helpsort(self, h1, h2):
     if int(h1[2]) < int(h2[2]):
         return -1
     elif int(h1[2]) > int(h2[2]):
         return 1
     else:
         return 0
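This is a Python 2 cmp-style comparator. Under Python 3, where sort() no longer accepts a cmp function, the same ordering can be expressed with a key function or by wrapping the comparator; a standalone sketch with made-up row data:

from functools import cmp_to_key

def helpsort(h1, h2):  # same comparison logic, shown outside the class
    return (int(h1[2]) > int(h2[2])) - (int(h1[2]) < int(h2[2]))

rows = [('a', 'b', '10'), ('c', 'd', '2')]     # hypothetical data
print(sorted(rows, key=cmp_to_key(helpsort)))  # [('c', 'd', '2'), ('a', 'b', '10')]
print(sorted(rows, key=lambda h: int(h[2])))   # same ordering, idiomatic Python 3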
Example #11
def search_json(request, database_name=settings.MONGO_DB_NAME,
                collection_name=settings.MONGO_MASTER_COLLECTION,
                skip=0, limit=settings.MONGO_LIMIT, sort=None, return_keys=(),
                query={}):
    result = prepare_search_results(request, database_name=database_name,
                collection_name=collection_name, skip=skip, sort=sort,
                limit=limit, return_keys=return_keys, query=query)

    if int(result['code'])==200:
        listresults=result['results']

    else:
        response = json.dumps(result, indent =4)
        return HttpResponse(response, status=int(result['code']),
                            mimetype="application/json")

    if settings.RESPECT_SOCIAL_GRAPH:
        listresults=filter_social_graph(request, listresults)


        len_results=len(listresults)
        if len_results < result['num_results']:
            result['omitted-results'] = result['num_results'] - len_results
            result['results']=listresults

        jsonresults=to_json(result)
        return HttpResponse(jsonresults, status=int(result['code']),
                            mimetype="application/json")
    else:
        jsonresults=to_json(normalize_results(result))
        return HttpResponse(jsonresults, status=int(result['code']),mimetype="application/json")
Example #12
 def _altCrop(self, source, desc):
     """PIL image for the described crop"""
     w, h = source.size
     return source.crop((int(w * desc[PHO.x1].toPython()),
                         int(h * desc[PHO.y1].toPython()),
                         int(w * desc[PHO.x2].toPython()),
                         int(h * desc[PHO.y2].toPython())))
Example #13
    def _load_zabbix_config(self,config_file):
        # Load zabbix agent configuration as default values
        # Default values are set in self._config
        # - ServerActive (default: 127.0.0.1)
        # - LogFile (default: /tmp/zabbix_agentd.log)
        # - DebugLevel (default: 3, Allowed: 0-4)
        #               0 -> logging.NOTSET
        #               1 -> logging.CRITICAL
        #               2 -> logging.ERROR
        #               3 -> logging.WARNING
        #               4 -> logging.DEBUG
        # - Timeout (default: 3, Allowed: 1-30)
        tmp_config = configobj.ConfigObj(config_file, list_values=False)

        if 'ServerActive' in tmp_config:
            tmp_server = tmp_config['ServerActive'][0] \
                         if isinstance(tmp_config['ServerActive'], list) \
                         else tmp_config['ServerActive']
            self._config['server'], self._config['port'] = tmp_server.split(':') \
                         if ":" in tmp_server else (tmp_server, 10051)

        if 'LogFile' in tmp_config:
            self._config['log_output'] = tmp_config['LogFile']

        if 'DebugLevel' in tmp_config:
            self._config['log_level'] = int(tmp_config['DebugLevel'])

        if 'Timeout' in tmp_config:
            self._config['timeout'] = int(tmp_config['Timeout'])
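For reference, a minimal zabbix_agentd.conf fragment that this parser would pick up (values invented for illustration); with list_values=False, configobj returns each right-hand side as a plain string:

import configobj

with open('/tmp/zabbix_agentd.conf', 'w') as f:  # hypothetical config
    f.write('ServerActive=zabbix.example.com:10051\n'
            'LogFile=/var/log/zabbix_agentd.log\n'
            'DebugLevel=3\n'
            'Timeout=10\n')

cfg = configobj.ConfigObj('/tmp/zabbix_agentd.conf', list_values=False)
print(cfg['ServerActive'])     # 'zabbix.example.com:10051'
print(int(cfg['DebugLevel']))  # 3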
Example #14
    def load_goal_image(self):
        '''
        Load the finish-flag image used to mark the goal
        and scale it to the display's pixel size.
        '''

        box_width_meters = 32.*0.2
        box_length_meters = 32.*0.2

        flag_image = pygame.image.load('resources/finishFlag.png')

        (flag_image_width, flag_image_height) = flag_image.get_size()

        [x_pixel_1, y_pixel_1] = self.convert_position_to_image_pixel(0, 0)
        [x_pixel_2, y_pixel_2] = self.convert_position_to_image_pixel(box_length_meters, 0)

        desired_box_width_pixels = float(x_pixel_2 - x_pixel_1)

        scale_down_ratio = desired_box_width_pixels/(flag_image_width/2.)

        new_size = (int(round(scale_down_ratio*flag_image_width)), int(round(scale_down_ratio*flag_image_height)))

        flag = pygame.transform.smoothscale(flag_image,new_size)

        self.goal_image = flag

        return
Example #15
def event_update_participant_status(profile, event):
    if request.is_xhr:
        if profile.userid not in g.user.user_organizations_owned_ids():
            abort(403)
        participantid = int(request.form['participantid'])
        status = int(request.form['status'])
        participant = Participant.query.get(participantid)

        if participant.event != event:
            abort(403)
        if participant.status == PARTICIPANT_STATUS.WITHDRAWN:
            abort(403)
        if participant.status != status:
            participant.status = status
            try:
                text_message = unicode(getattr(event, (participants_email_attrs[status] + '_text')))
                text_message = text_message.replace("*|FULLNAME|*", participant.user.fullname)
                message = unicode(getattr(event, participants_email_attrs[status]))
                message = message.replace("*|FULLNAME|*", participant.user.fullname)
                if message and g.user.email:
                    send_email(sender=(g.user.fullname, g.user.email), to=participant.email,
                    subject="%s - Hacknight participation status" % event.title , body=text_message, html=message)
            except KeyError:
                pass
            db.session.commit()
        return "Done"
    abort(403)
Example #16
 def output_shape(self):
     channels, height, width = self.input_shape
     out_height = (height - self.pool_shape[0]) / self.stride + 1
     out_width = (width - self.pool_shape[1]) / self.stride + 1
     assert out_height % 1 == 0
     assert out_width % 1 == 0
     return channels, int(out_height), int(out_width)
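A quick check of the arithmetic, assuming a layer whose input_shape, pool_shape and stride are set as below; the assertions reject configurations where the pooling window does not tile the input exactly:

# 64 channels, 224x224 input, 2x2 pooling with stride 2 (illustrative values)
channels, height, width = 64, 224, 224
pool_shape, stride = (2, 2), 2
out_height = (height - pool_shape[0]) / stride + 1  # 112.0
out_width = (width - pool_shape[1]) / stride + 1    # 112.0
assert out_height % 1 == 0 and out_width % 1 == 0
print(channels, int(out_height), int(out_width))    # 64 112 112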
Example #17
    def load_small_box_image(self):
        '''
        Load the small box image used for display
        and scale it to the display's pixel size.
        '''

        box_width_meters = 32.*0.3
        box_length_meters = 32.*0.4

        box_image = pygame.image.load('resources/waterOffset.png')

        (box_image_width, box_image_height) = box_image.get_size()

        [x_pixel_1, y_pixel_1] = self.convert_position_to_image_pixel(0, 0)
        [x_pixel_2, y_pixel_2] = self.convert_position_to_image_pixel(box_length_meters, 0)

        desired_box_width_pixels = float(x_pixel_2 - x_pixel_1)

        scale_down_ratio = desired_box_width_pixels/(box_image_width/2.)

        new_size = (int(round(scale_down_ratio*box_image_width)), int(round(scale_down_ratio*box_image_height)))

        box = pygame.transform.smoothscale(box_image,new_size)

        self.small_box_image = box

        return
Example #18
def hamiltonkreis(graph):
    kreisExistiert = False
    startKnoten = graph[0][0]
    wege = [[startKnoten]]
    wegeErweitert = []
    zaehler = 0
    while wege != [] and not kreisExistiert: 
        aktuellerWeg = wege[0]
        wege = wege[1:]

        letzterKnoten = aktuellerWeg[len(aktuellerWeg)-1]
        # determine the neighbours of letzterKnoten
        knotenGefunden = False
        
        if len(aktuellerWeg) == len(graph):
            # test whether the path can be closed into a Hamiltonian cycle
            zaehler = zaehler + 1
            

            listeNachbarknoten = graph[int(letzterKnoten)-1][1]
            
            # test whether there is an edge back to startKnoten
            if startKnoten in listeNachbarknoten:
                kreisExistiert = True
                #print(aktuellerWeg+[startKnoten])
        else:
            listeNachbarknoten = graph[int(letzterKnoten)-1][1]
            
            for nachbarKnoten in listeNachbarknoten:
                zaehler = zaehler + 1
                if nachbarKnoten not in aktuellerWeg:
                    erweiterterWeg = aktuellerWeg + [nachbarKnoten]
                    wege = [erweiterterWeg] + wege
    return (kreisExistiert, zaehler)
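The function expects graph as a list of [node, neighbour_list] pairs where node i sits at index i-1. A hypothetical run on a 4-cycle:

# nodes 1-4 arranged in a square: 1-2, 2-3, 3-4, 4-1
graph = [[1, [2, 4]], [2, [1, 3]], [3, [2, 4]], [4, [1, 3]]]
exists, steps = hamiltonkreis(graph)
print(exists)  # True - the square itself is a Hamiltonian cycle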
Example #19
    def test_command_line_ignores_draft_page(self):
        # we need to create a superuser (the db is empty)
        User.objects.create_superuser('djangocms', '*****@*****.**', '123456')

        create_page("The page!", "nav_playground.html", "en", published=False)

        pages_from_output = 0
        published_from_output = 0

        with StdoutOverride() as buffer:
            # Now we don't expect it to raise, but we need to redirect IO
            com = publisher_publish.Command()
            com.handle_noargs()
            lines = buffer.getvalue().split('\n') #NB: readlines() doesn't work

        for line in lines:
            if 'Total' in line:
                pages_from_output = int(line.split(':')[1])
            elif 'Published' in line:
                published_from_output = int(line.split(':')[1])

        self.assertEqual(pages_from_output,0)
        self.assertEqual(published_from_output,0)

        self.assertEqual(Page.objects.public().count(), 0)
Example #20
def page_extract(start, end, SUBSECTION):

    PDF_IN = PdfFileReader(open(PDF_DIR, 'rb'))

#    for i in xrange(PDF_IN.numPages): # for all pages
    for i in range(int(start) - 1, int(end)):

        output = PdfFileWriter()
        output.addPage(PDF_IN.getPage(i))
        
        base, name_ext = os.path.split(PDF_DIR)
        name, ext      = os.path.splitext(name_ext)
        PDF_OUT        = '{}{}'.format(TMP_DIR, '{}-{}{}'.format(name, str(i).zfill(6), ext))
        
        with open(PDF_OUT, 'wb') as outputStream:
            output.write(outputStream)
        
        gs_pdf_to_png(PDF_OUT)
        os.remove(PDF_OUT)
    
    png_list = group(os.listdir(TMP_DIR), 2)
    for tup in png_list:
        print tup
        card_front = os.path.join(TMP_DIR, tup[0])
        card_back  = os.path.join(TMP_DIR, tup[1])
        make_cards(card_front, card_back, SUBSECTION)
Example #21
def checkHash(hashnum):
        checkByte = 0
        flag = 0

        HashStr = '%s' % hashnum
        length = len(HashStr)

        for i in range(0, length):
            Re = int(HashStr[(length - 1) - i])
            if (1 == (flag % 2)):
                Re += Re
                Re = int((Re / 10) + (Re % 10))

            checkByte += int(Re)
            flag = flag + 1

        checkByte %= 10
        if (0 != checkByte):
            checkByte = 10 - checkByte
            if (1 == (flag % 2)):
                if (1 == (checkByte % 2)):
                    checkByte += 9

                checkByte >>= 1
        return '7%s%s' % (checkByte, HashStr)
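checkHash computes a Luhn-style check digit (doubling every second digit from the right) and prefixes the result with '7'. A quick illustration:

print(checkHash(1234))  # '761234' - 6 is the Luhn check digit of 1234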
Example #22
 def crop(self, left, top, right, bottom):
     self.image = self.image.crop((
         int(left),
         int(top),
         int(right),
         int(bottom)
     ))
Example #23
 def RandomGraph(self, nodes, edges, maxweight = 100.0):
     """
     Generates a graph of random edges.
     
     @param nodes: list of nodes or number of nodes in the random graph
     @param edges: number of edges to generate in the random graph
     @type edges: integer
     @param maxweight: maximum weight of each edge. default = 100.0
     @type maxweight: float
     """
     import random
     nodes_size = 0
     if type(nodes) == int:
         adjacency = [range(nodes)]
         nodes_size = nodes
         for node in range(nodes):
             adjacency.append([0 for x in range(nodes)])
     elif type(nodes) == list:
         adjacency = nodes
         nodes_size = len(nodes)
         for node in range(nodes_size):
             adjacency.append([0 for x in range(nodes_size)])
     else: raise FunctionParameterTypeError('nodes can only be a list \
             or integer')
     count = 0
      while count < edges:
         edge = (int(random.uniform(0, nodes_size)) + 1, 
                 int(random.uniform(0, nodes_size)),
                 int(random.uniform(0, 1) * maxweight))
         if adjacency[edge[0]][edge[1]] == 0:
             adjacency[edge[0]][edge[1]] = edge[2]
             count = count + 1
     self.makeGraphFromAdjacency(adjacency)
Example #24
    def _on_headers(self, data):
        data = native_str(data.decode("latin1"))
        first_line, _, header_data = data.partition("\n")
        match = re.match("HTTP/1.[01] ([0-9]+) ([^\r]*)", first_line)
        assert match
        code = int(match.group(1))
        self.headers = HTTPHeaders.parse(header_data)
        if 100 <= code < 200:
            self._handle_1xx(code)
            return
        else:
            self.code = code
            self.reason = match.group(2)

        if "Content-Length" in self.headers:
            if "," in self.headers["Content-Length"]:
                # Proxies sometimes cause Content-Length headers to get
                # duplicated.  If all the values are identical then we can
                # use them but if they differ it's an error.
                pieces = re.split(r',\s*', self.headers["Content-Length"])
                if any(i != pieces[0] for i in pieces):
                    raise ValueError("Multiple unequal Content-Lengths: %r" %
                                     self.headers["Content-Length"])
                self.headers["Content-Length"] = pieces[0]
            content_length = int(self.headers["Content-Length"])
        else:
            content_length = None

        if self.request.header_callback is not None:
            # re-attach the newline we split on earlier
            self.request.header_callback(first_line + _)
            for k, v in self.headers.get_all():
                self.request.header_callback("%s: %s\r\n" % (k, v))
            self.request.header_callback('\r\n')

        if self.request.method == "HEAD" or self.code == 304:
            # HEAD requests and 304 responses never have content, even
            # though they may have content-length headers
            self._on_body(b"")
            return
        if 100 <= self.code < 200 or self.code == 204:
            # These response codes never have bodies
            # http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.3
            if ("Transfer-Encoding" in self.headers or
                    content_length not in (None, 0)):
                raise ValueError("Response with code %d should not have body" %
                                 self.code)
            self._on_body(b"")
            return

        if (self.request.use_gzip and
                self.headers.get("Content-Encoding") == "gzip"):
            self._decompressor = GzipDecompressor()
        if self.headers.get("Transfer-Encoding") == "chunked":
            self.chunks = []
            self.stream.read_until(b"\r\n", self._on_chunk_length)
        elif content_length is not None:
            self.stream.read_bytes(content_length, self._on_body)
        else:
            self.stream.read_until_close(self._on_body)
Example #25
    def next(self):
        """
        Increment the iterator and yield the new value. Also, store the
        current value for use in the comparison function.
        """
        if not self.finishedReadingFile:

            try:
                line = self.theFile.next()
                cols = line.strip().split("\t")
                chrom = cols[0]

                # Where possible, convert chromosome names into
                # integers for sorting. If not possible, use
                # original names.
                try:
                    chrom = int(chrom.upper().strip("CHR"))
                except ValueError:
                    pass

                pos = int(cols[1])
                heapq.heappush(self.heap, (chrom, pos, line))

            except StopIteration:
                self.finishedReadingFile = True

        if len(self.heap) != 0:
            # Now take the top line
            self.chrom, self.pos, self.line = heapq.heappop(self.heap)
        else:
            raise StopIteration
Exemplo n.º 26
0
def check_compatible(client, min_version=None, max_version=None):
    """Checks if a kazoo client is backed by a zookeeper server version.

    This check will verify that the zookeeper server version that the client
    is connected to satisfies a given minimum version (inclusive) and
    maximum (inclusive) version range. If the server is not in the provided
    version range then an exception is raised indicating this.
    """
    server_version = None
    if min_version:
        server_version = tuple((int(a) for a in client.server_version()))
        min_version = tuple((int(a) for a in min_version))
        if server_version < min_version:
            pretty_server_version = ".".join([str(a) for a in server_version])
            min_version = ".".join([str(a) for a in min_version])
            raise exc.IncompatibleVersion("Incompatible zookeeper version"
                                          " %s detected, zookeeper >= %s"
                                          " required" % (pretty_server_version,
                                                         min_version))
    if max_version:
        if server_version is None:
            server_version = tuple((int(a) for a in client.server_version()))
        max_version = tuple((int(a) for a in max_version))
        if server_version > max_version:
            pretty_server_version = ".".join([str(a) for a in server_version])
            max_version = ".".join([str(a) for a in max_version])
            raise exc.IncompatibleVersion("Incompatible zookeeper version"
                                          " %s detected, zookeeper <= %s"
                                          " required" % (pretty_server_version,
                                                         max_version))
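A usage sketch, assuming a connected kazoo client whose server_version() returns a tuple of ints such as (3, 4, 10); the host and version bounds are invented for illustration:

from kazoo.client import KazooClient

client = KazooClient(hosts='127.0.0.1:2181')  # hypothetical ensemble
client.start()
# raises exc.IncompatibleVersion unless 3.4.0 <= server version <= 3.6.99
check_compatible(client, min_version=(3, 4, 0), max_version=(3, 6, 99))
client.stop()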
Example #27
  def IncrementVersion(self):
    """Updates the version file by incrementing the patch component."""
    if not self.incr_type or self.incr_type not in self.VALID_INCR_TYPES:
      raise VersionUpdateException('Need to specify the part of the version to'
                                   ' increment')

    if self.incr_type == 'chrome_branch':
      self.chrome_branch = str(int(self.chrome_branch) + 1)

    # Increment build_number for 'chrome_branch' incr_type to avoid
    # crbug.com/213075.
    if self.incr_type in ('build', 'chrome_branch'):
      self.build_number = str(int(self.build_number) + 1)
      self.branch_build_number = '0'
      self.patch_number = '0'
    elif self.incr_type == 'branch' and self.patch_number == '0':
      self.branch_build_number = str(int(self.branch_build_number) + 1)
    else:
      self.patch_number = str(int(self.patch_number) + 1)

    return self.VersionString()
Example #28
    def mouseMoveEvent (self, event):
        if self.moving:
            _myDigitalNewWidth, _siblingWidth = Endstop.mouseMoveEvent (self, event)

            _lhs = _myDigitalNewWidth
            _middle = self.layoutWidth - _myDigitalNewWidth - _siblingWidth
            _rhs = _siblingWidth

            self._parent.sliderBarLayout.setStretchFactor (self, _lhs)
            self._parent.sliderBarLayout.setStretchFactor (self.slider, _middle)
            self._parent.sliderBarLayout.setStretchFactor (self.sibling, _rhs)

            _oldStart = self._parent.startPosition
            _oldFinish = self._parent.finishPosition
            self._parent.startPosition = int (((_lhs - self.endstopWidth) * (self._parent._steps - 1.0) / (self.layoutWidth - 2.0 * self.endstopWidth)))
            self._parent.finishPosition = self._parent._steps - int (((_rhs - self.endstopWidth) * (self._parent._steps - 1.0) / (self.layoutWidth - 2.0 * self.endstopWidth))) - 1
            self._parent.slider.setRange (self._parent.startPosition, self._parent.finishPosition)
            self._parent.lower.setText (self.root.tss.series () [0].getAllTimes () [self._parent.startPosition])

            if (_oldStart != self._parent.startPosition) or (_oldFinish != self._parent.finishPosition):
                self.root.redrawAll ()

                if (self._parent.finishPosition - self._parent.startPosition) < 20:
                    self._parent.slider.setTickInterval (1)
                else:
                    self._parent.slider.setTickInterval (0)
Example #29
def get_messages(request, pk):
    error_code = 0
    error_text = ''
    channel = 'channel_' + str(pk)
    SSE.redis_channel = channel
    if request.is_ajax():
        r = redis.Redis('127.0.0.1')
        messages_list = []
        count = 0
        messages = r.lrange(SSE.redis_channel, 0, r.llen(SSE.redis_channel))
        for message in messages:
            messages_list.append(json.loads(message.replace('\'', '\"').replace('u"', '"')))
        for obj in messages_list:
            if int(request.user.profile.pk) not in obj['read']:
                i = messages_list.index(obj)
                obj['read'].append(int(request.user.profile.pk))
                r.lset(SSE.redis_channel, i, json.dumps(obj))
                count += 1
        if len(messages_list) > 20:
            if count == 0:
                messages_list = messages_list[-20:]
            else:
                messages_list = messages_list[-count-5:]
        return HttpResponse(json.dumps({'error_code': error_code, 'error_text': error_text, 'messages': messages_list}), content_type="application/json")
    return render(request, 'accounts/chat.html')
Example #30
File: popup.py Project: fuinha/kiwi
    def _on__button_press_event(self, window, event):
        # If we're clicking outside of the window
        # close the popup
        toplevel = event.window.get_toplevel()
        parent = self.main_widget.get_parent_window()
        if toplevel != parent:
            self.popdown()
            return True

        # Gtk 2.x
        if hasattr(self, 'allocation'):
            out = self.allocation.intersect(
                gtk.gdk.Rectangle(x=int(event.x), y=int(event.y),
                                  width=1, height=1))
        else:
            rect = gtk.gdk.Rectangle()
            (rect.x, rect.y,
             rect.width, rect.height) = (int(event.x), int(event.y), 1, 1)
            out = gtk.gdk.Rectangle()
            self.intersect(rect, out)

        if (out.x, out.y, out.width, out.height) == (0, 0, 0, 0):
            self.popdown()
            return True

        return False
Example #31
SLEEP_TIME_1 = 600
SLEEP_TIME_2 = 600

client.connect()
if not client.is_user_authorized():
    client.send_code_request(phone)
    client.sign_in(phone, input('Enter the code: '))

users = []
with open(r"C:\Users\R15H4B\Desktop\myscrappermachine\members5.csv", encoding='UTF-8') as f:
    rows = csv.reader(f,delimiter=",",lineterminator="\n")
    next(rows, None)
    for row in rows:
        user = {}
        user['username'] = row[0]
        user['id'] = int(row[1])
        user['access_hash'] = int(row[2])
        user['name'] = row[3]
        users.append(user)

chats = []
last_date = None
chunk_size = 200
groups = []

result = client(GetDialogsRequest(
    offset_date=last_date,
    offset_id=0,
    offset_peer=InputPeerEmpty(),
    limit=chunk_size,
    hash=0
))
Example #32
                npart = self.getPartition(sample)

                similarity.append(self.computeSimilarity(opart, npart))

            data.append(
                [s, resilience,
                 np.mean(similarity),
                 np.std(similarity)])

        # Save
        with open(self.sname, 'a') as f:
            writer = csv.writer(f, delimiter=',')
            for d in data:
                writer.writerow([self.tname, self.name] + d)
                print(d)


if __name__ == '__main__':
    mode = int(sys.argv[1])
    fname = sys.argv[2]
    sname = sys.argv[3]
    name = sys.argv[4]
    tname = sys.argv[5]

    if mode == 0:
        anomaly = Anomaly(fname, sname, name, tname)
        anomaly.run()
    elif mode == 1:
        com = KCommunity(fname, sname, name, tname)
        com.run()
Example #33
 formant_extract['word_duration'] = formant_extract[
     'word_end_time'] - formant_extract['word_start_time']
 formant_extract['seg_duration'] = formant_extract[
     'seg_end_time'] - formant_extract['seg_start_time']
 formant_extract = formant_extract[[
     'trans_id', 'word_start_time', 'word_end_time', 'word_duration',
     'word_SWG', 'seg_number', 'seg_start_time', 'seg_end_time',
     'seg_duration', 'segment_SWG', 'diphthong_orthography', 'var_code',
     'word_German', 'word_lemma', 'word_stem', 'POS_tag'
 ]]
 # # # read formant
 formant_file_list = [
     file for file in os.listdir(formant_path) if file.endswith('.Formant')
 ]
 formant_file_list = sorted(
     formant_file_list,
     key=lambda x: (int(x.split('-')[0][1:]), int(x.split('-')[1]),
                    int(x.split('-')[3]), int(x.split('_')[1][:-8])))
 for formant_file in formant_file_list:
     print(formant_file[:-8])
     formant_raw = read_formant(formant_path + formant_file)
     formant_raw['trans_id'] = formant_file[:-8]
     # print(formant_raw.columns)
     # list all the formant time, F1, F2 within that time frame
     formant = formant_raw[['trans_id', 'time(s)', 'F1(Hz)', 'F2(Hz)']]
     formant = formant.rename(columns={
         "time(s)": "time",
         "F1(Hz)": "F1Hz",
         "F2(Hz)": "F2Hz"
     })  # rename column for SQL
     formant_swg = formant_extract[
         formant_extract['trans_id'] ==
Example #34
 def useExistingFolder(self, output_path, sample_name):
     self.folder_path = output_path + sample_name + "/"
     self.sample_name = sample_name
     self.bs_y = int(sample_name[-9:-5])
     self.bs_x = int(sample_name[-4:])
Example #35
    #choose ctdlist for each mooring
    if moornum==7:
        ctdlist=glob.glob(datadir+'OSNAP2016recovery/MCTD_Data_CF/MAT/CF'+str(moornum)+'*mat_ilebras.mat')
    else:
        ctdlist=glob.glob(datadir+'OSNAP2016recovery/MCTD_Data_CF/NetCDF/*CF'+str(moornum)+'*.nc')
    if moornum==7:
        ctdlist=hstack((ctdlist,glob.glob(datadir+'OSNAP2016recovery/RBR/CF'+str(moornum)+'*xr420*_ilebras.mat')))
    #load in each sal,tmp set
    for dd in ctdlist:
        if moornum==7:
            dat = io.loadmat(dd)
            tmp_hrly=hrly_ave([float(tt) for tt in dat['temp'][:].flatten()],aveconst)
            sal_hrly=hrly_ave([float(ss) for ss in dat['psal'][:].flatten()],aveconst)
            prs_hrly=hrly_ave(list(dat['pres'][:].flatten()),aveconst)
            time_hrly=dat['dtnum'][:].flatten()[::aveconst][:len(prs_hrly)]
            date_hrly=array([datetime.datetime(1,1,1)+datetime.timedelta(days=int(tt-366)) for tt in time_hrly])

        else:
            dat = Dataset(dd, 'r')
            time_hrly=array(dat.variables['TIME'][:])[::aveconst][:-1]
            tmp_hrly=hrly_ave([float(tt) for tt in dat.variables['TEMP'][:].flatten()],aveconst)[:len(time_hrly)]
            sal_hrly=hrly_ave([float(ss) for ss in dat.variables['PSAL'][:].flatten()],aveconst)[:len(time_hrly)]
            prs_hrly=hrly_ave(list(dat.variables['PRES'][:].flatten()),aveconst)[:len(time_hrly)]
            date_hrly=array([datetime.datetime(1950,1,1)+datetime.timedelta(days=int(tt)) for tt in time_hrly])

        prskey=int(round(nanmean(prs_hrly)/50.0)*50.0)
        timekey=mean(time_hrly)

        sal[moornum][prskey]=sal_hrly
        tmp[moornum][prskey]=gsw.pt0_from_t(sal_hrly,tmp_hrly,prs_hrly)
        prs[moornum][prskey]=prs_hrly
Example #36
os.makedirs(experiment_generation_root, exist_ok=True)

# The dev and test counts are held fixed
dev_count = 20000
test_count = 300000

## Start with the language variants
# m is the maximum stack depth
for m in [3, 5]:
  # k is the number of types of brackets
  for k in [2, 8, 32, 128]:
    # train_count is the number of training samples
    #for train_count in [1000, 10000, 100000, 1000000, 10000000]:
    for train_count in [2e3, 2e4, 2e5, 2e6, 2e7]:
      # calculate the training min and max sequence lengths
      train_count = int(train_count)
      train_min = 1 #6*m*(m-2)+20
      train_max = 8*m*(m-2)+60
      # calculate the testing min and max sequence lengths
      test_min = train_max+1
      test_max = int(train_max*2)
      # determine the corpus paths
      data_root = 'data'
      train_path = os.path.join(data_root, 'k{}_m{}_tr{}.train'.format(k,m,train_count))
      dev_path = os.path.join(data_root, 'k{}_m{}_tr{}.dev'.format(k,m,train_count))
      test_path = os.path.join(data_root, 'k{}_m{}_tr{}.test'.format(k,m,train_count))
      # fill in the templates in the language generation config template
      language_generation_text = open(language_generation_template).read()
      language_generation_text = re.sub('__BRACKET_TYPES__', str(k), language_generation_text)
      language_generation_text = re.sub('__TRAIN_MAX_LEN__', str(train_max), language_generation_text)
      language_generation_text = re.sub('__TRAIN_MIN_LEN__', str(train_min), language_generation_text)
Example #37
                    v = currentEvaluator.evaluator
                    finalEvaluator.board = position
            else:
                position = "".join(InvertBoard([i for i in position]))
                currentEvaluator = MinMax(position, depth, True)
                if v > currentEvaluator.evaluator:
                    v = currentEvaluator.evaluator
                    finalEvaluator.board = position

        finalEvaluator.evaluator = v
    else:
        finalEvaluator.evaluator = staticEstimationImprovedOpening([i for i in board])
        statesReached += 1
    return finalEvaluator


if __name__ == '__main__':

    file1 = open(sys.argv[1], "r")
    file2 = open(sys.argv[2], "w")
    depth = int(sys.argv[3])

    inBoard = file1.read()
    val = MinMax(inBoard, depth, True)
    file2.write(val.board)

    print("Input Position: "+inBoard+" Output Position: "+val.board)
    print("Position Evaluated by static estimation: "+str(statesReached))
    print("MINMAX estimate: "+str(val.evaluator))

Example #38
import seaborn as sns

data['processing_time'] = data['processing_time'].apply(
    lambda x: x / np.timedelta64(24, 'h'))
ax = sns.boxplot(y="processing_time", x="start_time_window", data=data)
fig = ax.get_figure()
fig.set_size_inches(8, 5.5)
fig.savefig("2.png", dpi=100)

df = data[['processing_time', 'Agency']].groupby('Agency').mean()
df = df.apply(lambda x: x.sort_values(ascending=False))
df.plot(kind='bar', figsize=(8, 5.5))
plt.savefig("3.png", dpi=100)
plt.clf()

df = data[['processing_time', 'Agency',
           'Borough']].groupby(['Agency', 'Borough']).mean()
df = df.apply(lambda x: x.sort_values(ascending=False))
df = df.unstack('Borough')
COL_NUM = 2
ROW_NUM = 3
fig, axes = plt.subplots(ROW_NUM, COL_NUM, figsize=(12, 12))
for i, (label, col) in enumerate(df.iteritems()):
    ax = axes[int(i / COL_NUM), i % COL_NUM]
    col = col.sort_values(ascending=False)[:15]
    col.plot(kind='barh', ax=ax)
    ax.set_title(label[1])
plt.tight_layout()
plt.savefig("4.png", dpi=100)
plt.clf()
Example #39
        try_query = True
        while try_query:
            try:
                r = requests.get(api_url)
                data = r.json()
                main_website_id = data['main_site']
            except Exception, e:
                if not (try_no <= 10 and self.retry):
                    raise e
                else:
                    try_no += 1
                    time.sleep(1)
            else:
                try_query = False

        return int(main_website_id)

    def get_products_total_account(self, member_id):
        total = 0

        api_url = urljoin_rfc(self.host,
                              '/api/get_products_total_account.json')
        api_url = add_or_replace_parameter(api_url, 'member_id',
                                           str(member_id))
        api_url = add_or_replace_parameter(api_url, 'api_key', self.api_key)

        try_no = 1
        try_query = True
        while try_query:
            try:
                r = requests.get(api_url)
Example #40
import modfibnocci

a = int(input("enter the "))
modfibnocci.fibonacci(a)
Example #41
def f(line,cate_dict):
    txt = valid_jsontxt(line.strip())
    ob = json.loads(txt)
    if type(ob) == type(1.0): return None
    if ob.has_key("ret") and "ERRCODE_QUERY_DETAIL_FAIL::宝贝不存在" in ob["ret"]:
        return None
    data = ob.get('data',"-")
    apiStack = data.get("apiStack",[])
    if apiStack == []:
        return None
    value_json = apiStack[0].get("value")
    if value_json == "": return None
    value_ob = json.loads(valid_jsontxt(value_json))
    if value_ob["data"]["itemControl"]["unitControl"].has_key("offShelfUrl"):
        is_online = 2
    else:
        is_online = 1
    if data == "-": return None
    itemInfoModel = data.get('itemInfoModel',"-")
    if itemInfoModel == "-": return None
    location = valid_jsontxt(itemInfoModel.get('location','-'))
    item_id = itemInfoModel.get('itemId','-')
    if item_id == "-": return None
    title = itemInfoModel.get('title','-').replace("\n","")
    favor = itemInfoModel.get('favcount','0')
    categoryId = itemInfoModel.get('categoryId','-')
    root_cat_id = cate_dict.get(categoryId,["-","-","-"])[1]
    cat_name = cate_dict.get(categoryId,["-","-","-"])[0]
    root_cat_name = cate_dict.get(categoryId,["-","-","-"])[2]
    trackParams = data.get('trackParams',{})
    BC_type = trackParams.get('BC_type','-')
    if BC_type != 'B' and BC_type != 'C': BC_type = "-"
    brandId = trackParams.get('brandId','-')
    # brand_name = brand_dict.get(brandId,"-")
    brand_name = "-"
    # item_info = "-"
    props = data.get('props',[])
    item_info_list = []
    for v in props:
        item_info_list.append(valid_jsontxt(v.get('name',"-")).replace(":","").replace(",","") \
                     +":" + valid_jsontxt(v.get('value',"-")).replace(":","").replace(",",""))
        if valid_jsontxt('品牌') in valid_jsontxt(v.get('name',"-")) and brand_name == "-" :
            brand_name = v.get('value',"-")
    item_info = ",".join(item_info_list)
    value = parse_price(value_ob["data"]["itemInfoModel"]["priceUnits"])
    price = value[0]
    if int(price) > 160000:
        price = 1.0
    price_zone = value[1]
    seller = data.get('seller',{})
    seller_id = seller.get('userNumId','-')
    shopId = seller.get('shopId','-')
    ts = "1471881600"
    off_time = "-"
    if is_online == 2: off_time = ts  # if the item is already off the shelf, record the removal time; otherwise keep "-". Patch: the removal time is temporarily the ingestion time and will later be joined with earlier data to produce the accurate removal time
    sku_info = "-"
    # skuProps = data.get("apiStack",{}).get("skuModel",{}).get("","-")
    # if skuProps != "-":
    # is_online = "-"
    result = []
    result.append(item_id)
    result.append(title)
    result.append(categoryId)
    result.append(cat_name)
    result.append(root_cat_id)
    result.append(root_cat_name)
    result.append(brandId)
    result.append(brand_name)
    result.append(BC_type)
    result.append(str(price))
    result.append(price_zone)
    result.append(str(is_online))
    result.append(off_time)
    result.append(str(favor))
    result.append(seller_id)
    result.append(shopId)
    result.append(location)
    result.append(item_info)
    result.append(sku_info)
    result.append(ts)
    return (item_id,result)
Example #42
    def check(self, instance):
        name = instance.get('name', None)
        tags = instance.get('tags', [])
        exact_match = _is_affirmative(instance.get('exact_match', True))
        search_string = instance.get('search_string', None)
        ignore_ad = _is_affirmative(instance.get('ignore_denied_access', True))
        pid = instance.get('pid')
        pid_file = instance.get('pid_file')
        collect_children = _is_affirmative(
            instance.get('collect_children', False))
        user = instance.get('user', False)
        try_sudo = instance.get('try_sudo', False)

        if self._conflicting_procfs:
            self.warning(
                'The `procfs_path` defined in `process.yaml` is different from the one defined in '
                '`datadog.conf` This is currently not supported by the Agent. Defaulting to the '
                'value defined in `datadog.conf`:{}'.format(
                    psutil.PROCFS_PATH))
        elif self._deprecated_init_procfs:
            self.warning(
                'DEPRECATION NOTICE: Specifying `procfs_path` in `process.yaml` is deprecated. '
                'Please specify it in `datadog.conf` instead')

        if not isinstance(search_string,
                          list) and pid is None and pid_file is None:
            raise ValueError(
                '"search_string" or "pid" or "pid_file" parameter is required')

        # FIXME 8.x remove me
        if search_string is not None:
            if "All" in search_string:
                self.warning(
                    'Deprecated: Having "All" in your search_string will greatly reduce the '
                    'performance of the check and will be removed in a future version of the agent.'
                )

        if name is None:
            raise KeyError('The "name" of process groups is mandatory')

        if search_string is not None:
            pids = self.find_pids(name,
                                  search_string,
                                  exact_match,
                                  ignore_ad=ignore_ad)
        elif pid is not None:
            # we use Process(pid) as a means to search, if pid not found
            # psutil.NoSuchProcess is raised.
            pids = self._get_pid_set(pid)
        elif pid_file is not None:
            try:
                with open(pid_file, 'r') as file_pid:
                    pid_line = file_pid.readline().strip()
                    pids = self._get_pid_set(int(pid_line))
            except IOError as e:
                # pid file doesn't exist, assuming the process is not running
                self.log.debug('Unable to find pid file: {}'.format(e))
                pids = set()
        else:
            raise ValueError(
                'The "search_string" or "pid" options are required for process identification'
            )

        if collect_children:
            pids.update(self._get_child_processes(pids))

        if user:
            pids = self._filter_by_user(user, pids)

        proc_state = self.get_process_state(name, pids, try_sudo)

        # FIXME 8.x remove the `name` tag
        tags.extend(['process_name:{}'.format(name), name])

        self.log.debug('ProcessCheck: process {} analysed'.format(name))
        self.gauge('system.processes.number', len(pids), tags=tags)

        if len(pids) == 0:
            self.warning("No matching process '{}' was found".format(name))

        for attr, mname in iteritems(ATTR_TO_METRIC):
            vals = [x for x in proc_state[attr] if x is not None]
            # skip []
            if vals:
                if attr == 'run_time':
                    self.gauge('system.processes.{}.avg'.format(mname),
                               sum(vals) / len(vals),
                               tags=tags)
                    self.gauge('system.processes.{}.max'.format(mname),
                               max(vals),
                               tags=tags)
                    self.gauge('system.processes.{}.min'.format(mname),
                               min(vals),
                               tags=tags)

                # FIXME 8.x: change this prefix?
                else:
                    self.gauge('system.processes.{}'.format(mname),
                               sum(vals),
                               tags=tags)

        for attr, mname in iteritems(ATTR_TO_METRIC_RATE):
            vals = [x for x in proc_state[attr] if x is not None]
            if vals:
                self.rate('system.processes.{}'.format(mname),
                          sum(vals),
                          tags=tags)

        self._process_service_check(name, len(pids),
                                    instance.get('thresholds', None), tags)
Example #43
    def build(self, config):
        '''
            build index from scratch
        '''
        operation_method = config.get("index_operation", "new").lower()

        gallery_images, gallery_docs = split_datafile(
            config['data_file'], config['image_root'], config['delimiter'])

        # when removing data from the index, we do not need to extract features
        if operation_method != "remove":
            gallery_features = self._extract_features(gallery_images, config)
        assert operation_method in [
            "new", "remove", "append"
        ], "Only append, remove and new operation are supported"

        # vector.index: faiss index file
        # id_map.pkl: use this file to map id to image_doc
        if operation_method in ["remove", "append"]:
            # if remove or append, vector.index and id_map.pkl must exist
            assert os.path.exists(os.path.join(
                config["index_dir"], "vector.index"
            )), "The vector.index does not exist in {} when 'index_operation' is not None".format(
                config["index_dir"])
            assert os.path.exists(os.path.join(
                config["index_dir"], "id_map.pkl"
            )), "The id_map.pkl does not exist in {} when 'index_operation' is not None".format(
                config["index_dir"])
            index = faiss.read_index(
                os.path.join(config["index_dir"], "vector.index"))
            with open(os.path.join(config["index_dir"], "id_map.pkl"),
                      'rb') as fd:
                ids = pickle.load(fd)
            assert index.ntotal == len(ids), \
                "data number in index is not equal to that in id_map"
        else:
            if not os.path.exists(config["index_dir"]):
                os.makedirs(config["index_dir"], exist_ok=True)
            index_method = config.get("index_method", "HNSW32")

            # for the IVF method, calculate the number of IVF lists automatically
            if index_method == "IVF":
                index_method = index_method + str(
                    min(int(len(gallery_images) // 8), 65536)) + ",Flat"

            # for binary index, add B at head of index_method
            if config["dist_type"] == "hamming":
                index_method = "B" + index_method

            #dist_type
            dist_type = faiss.METRIC_INNER_PRODUCT if config[
                "dist_type"] == "IP" else faiss.METRIC_L2

            #build index
            if config["dist_type"] == "hamming":
                index = faiss.index_binary_factory(config["embedding_size"],
                                                   index_method)
            else:
                index = faiss.index_factory(config["embedding_size"],
                                            index_method, dist_type)
                index = faiss.IndexIDMap2(index)
            ids = {}

        if config["index_method"] == "HNSW32":
            logger.warning(
                "The HNSW32 method dose not support 'remove' operation")

        if operation_method != "remove":
            # calculate id for new data
            start_id = max(ids.keys()) + 1 if ids else 0
            ids_now = (
                np.arange(0, len(gallery_images)) + start_id).astype(np.int64)

            # only train when new index file
            if operation_method == "new":
                if config["dist_type"] == "hamming":
                    index.add(gallery_features)
                else:
                    index.train(gallery_features)

            if not config["dist_type"] == "hamming":
                index.add_with_ids(gallery_features, ids_now)

            for i, d in zip(list(ids_now), gallery_docs):
                ids[i] = d
        else:
            if config["index_method"] == "HNSW32":
                raise RuntimeError(
                    "The index_method: HNSW32 dose not support 'remove' operation"
                )
            # remove ids in id_map, remove index data in faiss index
            remove_ids = list(
                filter(lambda k: ids.get(k) in gallery_docs, ids.keys()))
            remove_ids = np.asarray(remove_ids)
            index.remove_ids(remove_ids)
            for k in remove_ids:
                del ids[k]

        # store faiss index file and id_map file
        if config["dist_type"] == "hamming":
            faiss.write_index_binary(
                index, os.path.join(config["index_dir"], "vector.index"))
        else:
            faiss.write_index(
                index, os.path.join(config["index_dir"], "vector.index"))

        with open(os.path.join(config["index_dir"], "id_map.pkl"), 'wb') as fd:
            pickle.dump(ids, fd)
Example #44
a=int(input())
b=str(input())
result=0
for i in b:
    result += int(i)
print(result)
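The loop above is the spelled-out form of a common one-liner over the digit string:

b = '12345'                    # hypothetical input
print(sum(int(i) for i in b))  # 15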
Example #45
# https://www.codechef.com/JUNE20B/problems/EOEO

for _ in range(int(input())):
    ts = int(input())
    binary = bin(ts)[2:]
    # print(binary)
    # print(int(binary, 2))
    JerryWins = 0
    if int(binary[len(binary) - 1]) == int(1):
        JerryWins = ts // 2
    else:
        binary = binary.strip("0")
        if len(binary) > 1:
            JerryWins = int(binary, 2) // 2
    print(JerryWins)
Example #46
def twitter_download(url,
                     output_dir='.',
                     merge=True,
                     info_only=False,
                     **kwargs):
    html = get_html(url)
    screen_name = r1(r'data-screen-name="([^"]*)"', html) or \
        r1(r'<meta name="twitter:title" content="([^"]*)"', html)
    item_id = r1(r'data-item-id="([^"]*)"', html) or \
        r1(r'<meta name="twitter:site:id" content="([^"]*)"', html)
    page_title = "{} [{}]".format(screen_name, item_id)

    try:  # extract images
        urls = re.findall(r'property="og:image"\s*content="([^"]+:large)"',
                          html)
        assert urls
        images = []
        for url in urls:
            url = ':'.join(url.split(':')[:-1]) + ':orig'
            filename = parse.unquote(url.split('/')[-1])
            title = '.'.join(filename.split('.')[:-1])
            ext = url.split(':')[-2].split('.')[-1]
            size = int(get_head(url)['Content-Length'])
            images.append({
                'title': title,
                'url': url,
                'ext': ext,
                'size': size
            })
        size = sum([image['size'] for image in images])
        print_info(site_info, page_title, images[0]['ext'], size)

        if not info_only:
            for image in images:
                title = image['title']
                ext = image['ext']
                size = image['size']
                url = image['url']
                print_info(site_info, title, ext, size)
                download_urls([url], title, ext, size, output_dir=output_dir)

    except:  # extract video
        # always use i/cards or videos url
        if not re.match(r'https?://twitter.com/i/', url):
            url = r1(r'<meta\s*property="og:video:url"\s*content="([^"]+)"',
                     html)
            if not url:
                url = 'https://twitter.com/i/videos/%s' % item_id
            html = get_content(url)

        data_config = r1(r'data-config="([^"]*)"', html) or \
            r1(r'data-player-config="([^"]*)"', html)
        i = json.loads(unescape_html(data_config))
        if 'video_url' in i:
            source = i['video_url']
            if not item_id: page_title = i['tweet_id']
        elif 'playlist' in i:
            source = i['playlist'][0]['source']
            if not item_id: page_title = i['playlist'][0]['contentId']
        elif 'vmap_url' in i:
            vmap_url = i['vmap_url']
            vmap = get_content(vmap_url)
            source = r1(r'<MediaFile>\s*<!\[CDATA\[(.*)\]\]>', vmap)
            if not item_id: page_title = i['tweet_id']
        elif 'scribe_playlist_url' in i:
            scribe_playlist_url = i['scribe_playlist_url']
            return vine_download(scribe_playlist_url,
                                 output_dir,
                                 merge=merge,
                                 info_only=info_only)

        if source.endswith('.mp4'):
            urls = [source]
        else:
            urls = extract_m3u(source)
        size = urls_size(urls)
        mime, ext = 'video/mp4', 'mp4'

        print_info(site_info, page_title, mime, size)
        if not info_only:
            download_urls(urls, page_title, ext, size, output_dir, merge=merge)
Exemplo n.º 47
0
plt.rcParams.update({"lines.linewidth": 5})
plt.rcParams.update({"lines.markersize": 10})


# Get environment
env = gridworld.GridWorld1


# ########################## Question 1.4 #########################################################################

# Discount factor
gamma = 0.95

# Determination of Tmax
delta = 0.2
Tmax = int(-np.log(delta) / (1 - gamma))
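# Added note: with rewards bounded by 1, the discounted return ignored after T
# steps is at most gamma**T / (1 - gamma); asking for a tail of order delta
# gives T >= log(delta) / log(gamma) ~= -log(delta) / (1 - gamma), hence
# Tmax = int(-log(0.2) / 0.05) = 32 here.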


# The v-function and q-function values to be used for question 4
v_q4 = [0.87691855, 0.92820033, 0.98817903, 0.00000000, 0.67106071, -0.99447514, 0.00000000, -0.82847001, -0.87691855,
        -0.93358351, -0.99447514]

# Estimate mu
nest_mu0 = 1000
mu0_mc = exo2.mc_estimate_mu0(env, nest_mu0)

# Define the policy
pol = [0, 0, 0, 0, 3, 0, 0, 0, 0, 0, 3]

# Set range for plot of J and J^pi
nmax = 10000
Exemplo n.º 48
0
import pyaudio
import wave

# Typical capture settings (assumed values; the original snippet relied on
# CHUNK, FORMAT, CHANNELS and RATE being defined elsewhere).
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 1
RATE = 44100
RECORD_SECONDS = 5
WAVE_OUTPUT_FILENAME = "record_and_plot.wav"

p = pyaudio.PyAudio()

stream = p.open(format=FORMAT,
                channels=CHANNELS,
                rate=RATE,
                input=True,
                frames_per_buffer=CHUNK)

print("* recording")

frames = []

for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
    data = stream.read(CHUNK)
    frames.append(data)

print("* done recording")

stream.stop_stream()
stream.close()
p.terminate()

wf = wave.open(WAVE_OUTPUT_FILENAME, 'wb')
wf.setnchannels(CHANNELS)
wf.setsampwidth(p.get_sample_size(FORMAT))
wf.setframerate(RATE)
wf.writeframes(b''.join(frames))
wf.close()
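Given the output filename, a plotting step presumably followed. A minimal sketch, assuming numpy and matplotlib are available and 16-bit mono samples as configured above, that reads the file back and plots the waveform:

import wave
import numpy as np
import matplotlib.pyplot as plt

wf = wave.open(WAVE_OUTPUT_FILENAME, 'rb')
rate = wf.getframerate()
samples = np.frombuffer(wf.readframes(wf.getnframes()), dtype=np.int16)
wf.close()

t = np.arange(len(samples)) / rate  # time axis in seconds
plt.plot(t, samples)
plt.xlabel("time (s)")
plt.ylabel("amplitude")
plt.show()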
Exemplo n.º 49
0
def bruker_read_files(param_file, data_path, sub_scan_num='1'):
    """
    Reads parameters files of from Bruker raw data imaging format.
    It parses the files 'acqp', 'method', 'reco', 'visu_pars' and 'subject'.
    Even if only 'visu_pars' is relevant for the conversion to nifti, having a more general parser has turned out
    to be useful in many cases (e.g. in PV5.1 to check).
    :param param_file: file parameter, must be a string in the list ['acqp', 'method', 'reco', 'visu_pars', 'subject'].
    :param data_path: path to data.
    :param sub_scan_num: number of the sub-scan folder where usually the 'reco' and 'visu_pars' parameter files
    are stored.
    :return: dict_info dictionary with the parsed information from the input file.
    """
    if param_file.lower() == 'reco':
        if os.path.exists(jph(data_path, 'pdata', '1', 'reco')):
            f = open(jph(data_path, 'pdata', '1', 'reco'), 'r')
        else:
            print('File {} does not exist'.format(jph(data_path, 'pdata', '1', 'reco')))
            return {}
    elif param_file.lower() == 'acqp':
        if os.path.exists(jph(data_path, 'acqp')):
            f = open(jph(data_path, 'acqp'), 'r')
        else:
            print('File {} does not exist'.format(jph(data_path, 'acqp')))
            return {}
    elif param_file.lower() == 'method':
        if os.path.exists(jph(data_path, 'method')):
            f = open(jph(data_path, 'method'), 'r')
        else:
            print('File {} does not exist'.format(jph(data_path, 'method')))
            return {}
    elif param_file.lower() == 'visu_pars':
        if os.path.exists(jph(data_path, 'pdata', str(sub_scan_num), 'visu_pars')):
            f = open(jph(data_path, 'pdata', str(sub_scan_num), 'visu_pars'), 'r')
        elif os.path.exists(jph(data_path, str(sub_scan_num), 'pdata', '1', 'visu_pars')):
            f = open(jph(data_path, str(sub_scan_num), 'pdata', '1', 'visu_pars'), 'r')
        else:
            print('File {} does not exist'.format(jph(data_path, 'pdata', str(sub_scan_num), 'visu_pars')))
            return {}
    elif param_file.lower() == 'subject':
        if os.path.exists(jph(data_path, 'subject')):
            f = open(jph(data_path, 'subject'), 'r')
        else:
            print('File {} does not exist'.format(jph(data_path, 'subject')))
            return {}
    else:
        raise IOError("param_file input must be the string 'reco', 'acqp', 'method', 'visu_pars' or 'subject'")

    dict_info = {}
    lines = f.readlines()

    for line_num in range(len(lines)):
        '''
        The relevant information is in the lines containing '##'.
        Parameters with array values have their shape specified between (), with the values on the next line(s).
        Values on the next line(s) are parsed into lists or np.ndarray, depending on whether they contain
        characters or only numbers.
        '''

        line_in = lines[line_num]

        if '##' in line_in:

            if ('$' in line_in) and ('(' in line_in) and ('<' not in line_in):
                # A:
                splitted_line = line_in.split('=')
                # name of the variable contained in the row, and shape:
                var_name = var_name_clean(splitted_line[0][3:])

                done = False
                indian_file = ''
                pos = line_num
                sh = splitted_line[1]
                # this is not the shape of the vector but the beginning of a full vector.
                if sh.replace(' ', '').endswith(',\n'):
                    sh = sh.replace('(', '').replace(')', '').replace('\n', '').strip()
                    indian_file += sh
                    sh = None
                # this is not the shape of the vector but a full vector.
                elif sh.replace(' ', '').endswith(')\n') and '.' in sh:
                    sh = sh.replace('(', '').replace(')', '').replace('\n', '').strip()
                    indian_file += sh
                    sh = None
                # this is finally the shape of the vector that will start in the next line.
                else:
                    sh = sh.replace('(', '').replace(')', '').replace('\n', '').strip()
                    sh = [int(num) for num in sh.split(',')]

                while not done:

                    pos += 1
                    # collect the indian file: info related to the same variable can appear on multiple rows.
                    line_to_explore = lines[pos]  # tell/seek do not work on line iterators...

                    if ('##' in line_to_explore) or ('$$' in line_to_explore):
                        # indian file is over
                        done = True

                    else:
                        # we store the rows in the indian file all in the same string.
                        indian_file += line_to_explore.replace('\n', '').strip() + ' '

                dict_info[var_name] = indians_file_parser(indian_file, sh)

            elif ('$' in line_in) and ('(' not in line_in):
                # B:
                splitted_line = line_in.split('=')
                var_name = var_name_clean(splitted_line[0][3:])
                indian_file = splitted_line[1]

                dict_info[var_name] = indians_file_parser(indian_file)

            elif ('$' not in line_in) and ('(' in line_in):
                # C:
                splitted_line = line_in.split('=')
                var_name = var_name_clean(splitted_line[0][2:])

                done = False
                indian_file = splitted_line[1].strip() + ' '
                pos = line_num

                while not done:
                    pos += 1
                    # collect the indian file: info related to the same variable can appear on multiple rows.
                    line_to_explore = lines[pos]  # tell/seek do not work on line iterators...
                    if ('##' in line_to_explore) or ('$$' in line_to_explore):
                        # indian file is over
                        done = True
                    else:
                        # we store the rows in the indian file all in the same string.
                        indian_file += line_to_explore.replace('\n', '').strip() + ' '

                dict_info[var_name] = indians_file_parser(indian_file)

            elif ('$' not in line_in) and ('(' not in line_in):
                # D:
                splitted_line = line_in.split('=')
                var_name = var_name_clean(splitted_line[0])
                indian_file = splitted_line[1].replace('=', '').strip()
                dict_info[var_name] = indians_file_parser(indian_file)

            else:
                # General case: take it as a simple string.
                splitted_line = line_in.split('=')
                var_name = var_name_clean(splitted_line[0])
                dict_info[var_name] = splitted_line[1].replace('(', '').replace(')', '').replace('\n', ''). \
                                                       replace('<', '').replace('>', '').replace(',', ' ').strip()

        else:
            # line does not contain any 'assignable' variable, so this information is not included in the info.
            pass

    return dict_info
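A hypothetical usage sketch (the study path below is illustrative, not from the original source):

# '/data/study_1' stands in for a real Bruker scan directory.
acqp_info = bruker_read_files('acqp', '/data/study_1')
visu_info = bruker_read_files('visu_pars', '/data/study_1', sub_scan_num='1')
if visu_info:
    print(sorted(visu_info.keys())[:5])  # peek at a few parsed parameter names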
Exemplo n.º 50
0
def isPrime(n):
    # Trial division up to sqrt(n) is enough to find a factor.
    if n < 2:
        return False
    for i in range(2, int(n ** 0.5) + 1):
        if n % i == 0:
            return False
    return True

n = int(input())
a = []
for i in range(n):
    temp = list(map(int,input().split()))
    a.append(temp)

# Collect the anti-diagonal elements of the matrix.
b = []
for i in range(len(a)):
    b.append(a[i][len(a) - (i + 1)])

# Multiply together the primes found on the anti-diagonal.
c = 1
for i in b:
    if isPrime(i):
        c = c * i

print(c % 1000003)
Exemplo n.º 51
0
def atoi(string):
    """Converts a string to an integer according to the locale settings."""
    return int(delocalize(string))
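For context, `delocalize` lives in the `locale` module (Python 3.5+): it normalizes a localized numeric string into a form `int()` can parse. A small demo, assuming the de_DE locale is installed on the system:

import locale
from locale import delocalize

locale.setlocale(locale.LC_NUMERIC, 'de_DE.UTF-8')  # assumed to be available
print(delocalize('1.234'))  # '1234' -- the dot is a thousands separator in de_DE
print(atoi('1.234'))        # 1234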
Exemplo n.º 52
0
def dataloader(args):
    """Return the dataloaders for the selected dataset.
    Supported datasets:
    - MNIST
    - FashionMNIST
    - CIFAR10
    - CIFAR100
    - SVHN
    - CelebA (https://drive.google.com/drive/folders/0B7EVK8r0v71pTUZsaXdaSnZBZ
      zg?resourcekey=0-rJlzl934LzC-Xp28GeIBzQ)
    - STL10
    - LSUN
    - Fake data

    Parameters
    ----------
    args : argparse.Namespace
        Parsed arguments. The fields used are `dataset` (name of the selected
        dataset), `batch_size` (minibatch size), `img_resize`, `img_size` and,
        for LSUN only, `lsun_classes`.

    Returns
    -------
    tr_set:
        Dataloader for the training set.
    te_set:
        Dataloader for the test set.

    """

    # resize images or not
    if args.img_resize:
        transform3c = transforms.Compose([
            transforms.Resize(args.img_size),
            transforms.CenterCrop(args.img_size),  # if H != W
            transforms.ToTensor(),
            transforms.Normalize((.5, .5, .5), (.5, .5, .5))])
        transform1c = transforms.Compose([
            transforms.Resize(args.img_size),
            transforms.CenterCrop(args.img_size),  # if H != W
            transforms.ToTensor(),
            transforms.Normalize((0.5,), (0.5,))])  # mean/std must be sequences
    else:
        transform3c = transforms.Compose([transforms.ToTensor(),
                                         transforms.Normalize((.5, .5, .5),
                                                              (.5, .5, .5))])
        transform1c = transforms.Compose([transforms.ToTensor(),
                                          transforms.Normalize((0.5,), (0.5,))])
    # create dataloaders
    datapath, dataset_name, batch_size = 'data', args.dataset, args.batch_size
    if dataset_name == 'mnist':  # handwritten digits, (1, 28, 28)
        tr_set = thv.datasets.MNIST(datapath, train=True, download=True,
                                    transform=transform1c)
        te_set = thv.datasets.MNIST(datapath, train=False, download=True,
                                    transform=transform1c)
    elif dataset_name == 'fashion-mnist':  # fashion (Zalando), (1, 28, 28)
        tr_set = thv.datasets.FashionMNIST(datapath, train=True, download=True,
                                           transform=transform1c)
        te_set = thv.datasets.FashionMNIST(datapath, train=False,
                                           download=True,
                                           transform=transform1c)
    elif dataset_name == 'cifar10':  # 10-class image recognition, (3, 32 32)
        tr_set = thv.datasets.CIFAR10(datapath, train=True, download=True,
                                      transform=transform3c)
        te_set = thv.datasets.CIFAR10(datapath, train=False, download=True,
                                      transform=transform3c)
    elif dataset_name == 'cifar100':  # 100-class image recognition, (3, 32 32)
        tr_set = thv.datasets.CIFAR100(datapath, train=True, download=True,
                                       transform=transform3c)
        te_set = thv.datasets.CIFAR100(datapath, train=False, download=True,
                                       transform=transform3c)
    elif dataset_name == 'svhn':  # digit recognition, (3, 32, 32)
        tr_set = thv.datasets.SVHN(os.path.join(datapath, 'SVHN'),
                                   split='train', download=True,
                                   transform=transform3c)
        te_set = thv.datasets.SVHN(os.path.join(datapath, 'SVHN'),
                                   split='test', download=True,
                                   transform=transform3c)
    elif dataset_name == 'celeba':  # celebrity face, (3, 218, 178)
        celeba = dset.ImageFolder(root='data/celeba', transform=transform3c)
        tr_len = int(len(celeba) * 0.8)
        te_len = len(celeba) - tr_len
        tr_set, te_set = torch.utils.data.random_split(celeba,
                                                       [tr_len, te_len])
    elif dataset_name == 'stl10':  # 10-class image recognition, (3, 96, 96)
        tr_set = thv.datasets.STL10(datapath, split='train', download=True,
                                    transform=transform3c)
        te_set = thv.datasets.STL10(datapath, split='test', download=True,
                                    transform=transform3c)
    elif dataset_name == 'lsun':
        tr_classes = [c + '_train' for c in args.lsun_classes.split(',')]
        te_classes = [c + '_test' for c in args.lsun_classes.split(',')]
        tr_set = dset.LSUN(root='data/lsun', classes=tr_classes)
        te_set = dset.LSUN(root='data/lsun', classes=te_classes)
    elif dataset_name == 'fake':
        tr_set = dset.FakeData(
                               image_size=(3, args.img_size, args.img_size),
                               transform=transforms.ToTensor())
        te_set = dset.FakeData(size=1024,
                               image_size=(3, args.img_size, args.img_size),
                               transform=transforms.ToTensor())
    else:
        raise ValueError('Unknown dataset: {}'.format(dataset_name))
    tr_set = DataLoader(tr_set, batch_size=batch_size, shuffle=True,
                        drop_last=True)
    te_set = DataLoader(te_set, batch_size=batch_size, shuffle=True,
                        drop_last=True)
    args.img_channels = 1 if dataset_name in ['mnist', 'fashion-mnist'] else 3
    if not args.img_resize:  # use original size
        if dataset_name in ['mnist', 'fashion-mnist']:
            args.img_size = 28
        elif dataset_name in ['cifar10', 'cifar100', 'svhn']:
            args.img_size = 32
        elif dataset_name == 'celeba':
            args.img_size = [218, 178]
        elif dataset_name == 'stl10':
            args.img_size = 96
    return tr_set, te_set
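A minimal usage sketch, assuming the imports the function relies on (torch, torchvision as thv, etc.) and using an argparse.Namespace to stand in for parsed command-line arguments (attribute names inferred from the function body):

from argparse import Namespace

args = Namespace(dataset='mnist', batch_size=64, img_resize=False,
                 img_size=28, lsun_classes='bedroom')
tr_set, te_set = dataloader(args)
images, labels = next(iter(tr_set))
print(images.shape)  # expected: torch.Size([64, 1, 28, 28])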
Exemplo n.º 53
0
def DoAnalysis():
    cond_runs = {}
    with open('Results/bi_runs_4.csv', 'r', encoding='utf-8-sig') as f, \
        open('Results/temp.csv', 'w') as fout:
        for textline in f:
            if textline.startswith('#'):
                print("Excluding line: " + textline)
                continue
            if textline.find(',') < 0:
                print("Excluding line: " + textline)
                continue
            fields = textline.rstrip().split(',')
            if len(fields) < 6:
                fields.append('3')
            cond_method_name, cond_method_interface, result_dataSize, result_relCard, result_elapsedTime, result_recordCount = fields
            if result_recordCount != '3':
                print("Excluding line: " + textline)
                continue
            result = RunResult(dataSize=int(result_dataSize),
                               relCard=int(result_relCard),
                               elapsedTime=float(result_elapsedTime),
                               recordCount=int(result_recordCount))
            if cond_method_name not in cond_runs:
                cond_runs[cond_method_name] = []
            cond_runs[cond_method_name].append(result)
            fout.write(
                "%s,%s,%d,%d,%f,%d\n" %
                (cond_method_name, cond_method_interface, result.dataSize,
                 result.relCard, result.elapsedTime, result.recordCount))
    CondResult = collections.namedtuple("CondResult", [
        "name", "interface", "run_count", "b0", "b0_low", "b0_high", "b1",
        "b1_low", "b1_high", "s2", "s2_low", "s2_high"
    ])
    summary_status = ''
    regression_status = ''
    if True:
        cond_results = []
        confidence = 0.95
        summary_status += "%s,%s,%s,%s,%s,%s,%s,%s\n" % (
            'Method', 'Interface', 'NumRuns', 'relCard', 'Elapsed Time',
            'stdev', 'rl', 'rh')
        regression_status += '%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s\n' % (
            'Method', 'Interface', 'b0_low', 'b0', 'b0_high', 'b1_low', 'b1',
            'b1_high', 's2_low', 's2', 's2_high')
        # f.write(("%s,%s,%s,"+"%s,%s,%s,"+"%s,%s,%s,"+"%s,%s,%s\n")%(
        #     'RawMethod', 'interface', 'run_count',
        #     'b0', 'b0 lo', 'b0 hi',
        #     'b1M', 'b1M lo', 'b1M hi',
        #     's2', 's2 lo', 's2 hi'))
        # f.write(("%s,%s,%s,"+"%s,%s,%s,"+"%s,%s\n")% (
        #     'RawMethod', 'interface', 'run_count',
        #     'relCard', 'mean', 'stdev',
        #     'rl', 'rh'
        # ))
        for name in cond_runs:
            print("Looking to analyze %s" % name)
            cond_method = [x for x in implementation_list if x.name == name][0]
            times = cond_runs[name]
            size_values = set(x.relCard for x in times)
            for relCard in size_values:
                ar = [x.elapsedTime for x in times if x.relCard == relCard]
                numRuns = len(ar)
                mean = numpy.mean(ar)
                stdev = numpy.std(ar, ddof=1)
                rl, rh = scipy.stats.norm.interval(confidence,
                                                   loc=mean,
                                                   scale=stdev /
                                                   math.sqrt(len(ar)))
                # f.write(("%s,%s,"+"%d,%d,"+"%f,%f,%f,%f\n")%(
                #     name, cond_method.interface,
                #     numRuns, relCard,
                #     mean, stdev, rl, rh
                # ))
                summary_status += "%s,%s,%d,%d,%f,%f,%f,%f\n" % (
                    name, cond_method.interface, numRuns, relCard, mean, stdev,
                    rl, rh)
            x_values = [float(x.relCard) for x in times]
            y_values = [float(x.elapsedTime) for x in times]
            (b0, (b0_low, b0_high)), (b1, (b1_low,b1_high)), (s2, (s2_low,s2_high)) = \
                linear_regression(x_values, y_values, confidence)
            result = CondResult(name=cond_method.name,
                                interface=cond_method.interface,
                                run_count=len(times),
                                b0=b0,
                                b0_low=b0_low,
                                b0_high=b0_high,
                                b1=b1,
                                b1_low=b1_low,
                                b1_high=b1_high,
                                s2=s2,
                                s2_low=s2_low,
                                s2_high=s2_high)
            cond_results.append(result)
            # f.write(("%s,%s,%d,"+"%f,%f,%f,"+"%f,%f,%f,"+"%f,%f,%f\n")%(
            #     cond_method.name, cond_method.interface, result.run_count,
            #     result.b0, result.b0_low, result.b0_high,
            #     result.b1*1e+6, result.b1_low*1e+6, result.b1_high*1e+6,
            #     result.s2, result.s2_low, result.s2_high))
            regression_status += '%s,%s,%f,%f,%f,%f,%f,%f,%f,%f,%f\n' % (
                cond_method.name, cond_method.interface, result.b0_low,
                result.b0, result.b0_high, result.b1_low, result.b1,
                result.b1_high, result.s2_low, result.s2, result.s2_high)
    with open('Results/bi_results_5.csv', 'w') as f:
        f.write(summary_status)
        f.write("\n")
        f.write(regression_status)
        f.write("\n")
Exemplo n.º 54
0
def count_baselineJ(
    df: pd.DataFrame,
    days_in_past: int,
    days_in_future: int,
    method: str = "HW",
    detectors: list = None,
    alpha: float = 0.1,
    beta: float = 0.1,
    gamma: float = 0.2,
    kern=None,
) -> pd.DataFrame:

    """Produces a DataFrame where the count and baseline can be compared for use
        in scan statistics

    Args:
        df: Dataframe of JamCam data
        days_in_past: Integer past days to train forecast one
        days_in_future: Days in future produce a baseline too and record count for
        method: Forecast method to use for baseline, default is "HW" for Holt-Winters, option for GP
        detectors: List of detectors to look at

    Returns:
        Dataframe of counts and baseline along with detector data

        """

    t_min = df["measurement_start_utc"].min()
    t_max = df["measurement_end_utc"].max()

    print("Input dataframe contains data spanning {} to {}.".format(t_min, t_max))

    if detectors is None:
        detectors = df["detector_id"].drop_duplicates().to_numpy()

    prediction_start = df["measurement_end_utc"].iloc[-1] - np.timedelta64(
        days_in_future * 24, "h"
    )

    train_data = df[df["measurement_end_utc"] <= prediction_start]
    test_data = df[df["measurement_end_utc"] > prediction_start]

    avail_past_days = int(len(train_data["measurement_end_utc"].unique()) / 16)
    if avail_past_days < days_in_past:
        print(
            "Input dataframe only contains {} days' worth of data before the prediction period.".format(
                avail_past_days
            ),
            "Setting days_in_past = {}.".format(avail_past_days),
        )
        days_in_past = avail_past_days  # actually apply the fallback the message announces

    print(
        "Using data from {} to {}, to forecast counts between {} and {} for {} detectors using {} method...".format(
            t_min, prediction_start, prediction_start, t_max, len(detectors), method
        )
    )

    if method == "HW":
        y = holt_wintersJ(
            train_data,
            days_in_past,
            days_in_future,
            alpha=alpha,
            beta=beta,
            gamma=gamma,
            detectors=detectors,
        )

    if method == "GP":
        y = GP_forecast(
            train_data,
            days_in_past=days_in_past,
            days_in_future=days_in_future,
            detectors=detectors,
            kern=kern,
        )

    sd = []

    print("Forecasting complete.")

    for detector in detectors:

        sd.append(test_data[test_data["detector_id"] == detector])

    sample_test_data = pd.concat(sd)

    forecast_df = y.merge(
        sample_test_data,
        on=[
            "lon",
            "lat",
            "measurement_end_utc",
            "detector_id",
            "measurement_start_utc",
        ],
        how="left",
    )
    forecast_df = forecast_df.rename(
        columns={
            "n_vehicles_in_interval_x": "baseline",
            "n_vehicles_in_interval_y": "count",
        }
    )

    # Make baseline values non-negative
    negative = len(forecast_df[forecast_df["baseline"] < 0]["baseline"])
    if negative > 0:
        print("Setting {} negative baseline values to zero.\n".format(negative))
        forecast_df["baseline"] = forecast_df["baseline"].apply(
            lambda x: np.max([0, x])
        )
        forecast_df["baseline_upper"] = forecast_df["baseline_upper"].apply(
            lambda x: np.max([0, x])
        )
        forecast_df["baseline_lower"] = forecast_df["baseline_lower"].apply(
            lambda x: np.max([0, x])
        )

    # T = pd.date_range(
    #     start=Y["measurement_end_utc"].min() - np.timedelta64(3, "h"),
    #     end=Y["measurement_end_utc"].max() + np.timedelta64(5, "h"),
    #     freq="H",
    # )
    # dets = Y["detector_id"].unique()
    # mux = pd.MultiIndex.from_product(
    #     [dets, T], names=("detector_id", "measurement_end_utc")
    # )
    # Y = Y.set_index(["detector_id", "measurement_end_utc"])
    # Y = Y.reindex(mux)

    # Y = Y.reset_index()

    return forecast_df
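A hypothetical call, assuming `df` holds JamCam counts with the columns the function references:

# df is assumed to contain 'detector_id', 'lon', 'lat', 'measurement_start_utc',
# 'measurement_end_utc' and 'n_vehicles_in_interval' columns.
forecast_df = count_baselineJ(df, days_in_past=7, days_in_future=1, method="HW")
print(forecast_df[["detector_id", "count", "baseline"]].head())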
Exemplo n.º 55
0
import os
from flask import Flask, render_template
from comic import xkcd_app

app = Flask(__name__, template_folder='./comic/templates')

@app.route("/")
def xkcd_comic():
    xkcd_data = xkcd_app.getRandomXkcdImage()
    if xkcd_data is None:
        return '<html><body><a href="/xkcd">Random XKCD Comic</a></body></html>'
    else:
        return render_template("index.html", data=xkcd_data)

if __name__ == "__main__":
    app.run(host='0.0.0.0', port=int(os.getenv('PORT', 5000)))
Exemplo n.º 56
0
def optionChanges(o, m):
    # This function allows the player to make changes to their activated measures, by browsing and selecting available options.
    #
    # This function expects an array of OptionCategory objects (o) to be passed, and an array of Measures objects (m) with at least one item.
    #
    # This function returns a new Measures object with totals, based on the player's selections.
    #
    oldMeasures = m[0]
    newMeasures = copy.deepcopy(m[0])  # deep copy (needs `import copy`) so edits don't also mutate oldMeasures
    while True:
        print("\n\n")
        print(f"{cl.b.cyan}{cl.f.black}- MEASURE SELECTION ------------------------------------------------------------------------------------------{cl.r}")
        print("Categories:")
        for c in o:
            print(f"({c.key}) {c.title} - {len(c.list)} option(s)")
        print("")
        print("You can:")
        print(
            f"- Enter a Category ID (e.g. 'T') to {cl.f.cyan}browse the options in that category{cl.r},")
        print(
            f"- Type an Option ID (e.g. 'T1') to {cl.f.green}quickly activate that option{cl.r},")
        print(
            f"- or just press ENTER to {cl.f.red}exit Measure Selection and go back{cl.r}.")
        print("")
        selection = input(f"{cl.f.cyan}Category or ID: {cl.r}")
        print("\n")
        if selection == "":
            print(f"{cl.f.red}Exiting measure selection.{cl.r}")
            break
        elif (re.search("^([A-Z][0-9]+)$", selection, re.IGNORECASE) != None):
            # This regular expression searches for Option IDs only (letter and a number).
            catNo = 0
            for c in o:
                # Get category first
                if c.key == selection[0].upper():
                    # Check if there are any items in the category
                    if (len(c.list) > 0):
                        # If yes, check if the item specified exists
                        if (len(c.list) > int(selection[1:])):
                            print(
                                f"({selection.upper()}) {c.list[int(selection[1:])].name}\n     {c.list[int(selection[1:])].desc}")
                            print("Is this the measure you want to activate?")
                            while True:
                                confirm = input(
                                    f"{cl.f.cyan}Confirm selection ({cl.f.green}Y{cl.f.cyan}/{cl.f.red}N{cl.f.cyan}): {cl.r}")
                                if ("Y" in confirm.upper()):
                                    print(
                                        f"{cl.f.green}{c.list[int(selection[1:])].name} activated!{cl.r}")
                                    for item in c.list:
                                        item.actv = False
                                    c.list[int(selection[1:])].actv = True
                                    newMeasures.optn[catNo] = c.list[int(
                                        selection[1:])]
                                    break
                                else:
                                    print(
                                        f"{cl.f.red}Cancelled.{cl.r} {c.list[int(selection[1:])].name} was not activated.")
                                    input(
                                        "Press ENTER to go back to the categories list.")
                                    break
                        else:
                            print(
                                f"{cl.f.red}That option ({selection.upper()}) does not exist.{cl.r} Please try again.")
                            input(
                                "Press ENTER to go back to the categories list.")
                            break
                    else:
                        # If no, say so
                        print(
                            f"{cl.f.red}There are no options in the {c.title} category.{cl.r}")
                        input("Press ENTER to go back to the categories list.")
                        break
                else:
                    catNo = catNo + 1
                    continue
        elif (re.search("^([A-Z])$", selection, re.IGNORECASE) != None):
            # This regular expression searches for Category keys only (letter)
            found = False
            for c in o:
                # Get category first
                if c.key == selection.upper():
                    found = True
                    n = 0
                    # Check if there are any items in the category
                    if (len(c.list) > 0):
                        # If yes, list them
                        print(f"Options in {c.title} category:\n")
                        for i in c.list:
                            print(
                                f"({selection.upper()}{n}) {i.name} {f'{cl.f.green}[ACTIVE]{cl.r}' if i.actv else ''}\n     {i.desc}")
                            n = n + 1
                        selection = input(
                            f"{cl.f.cyan}Enter an ID to activate it, or press ENTER to go back: {cl.r}")
                        if (re.search("^([A-Z][0-9]+)$", selection, re.IGNORECASE) != None):
                            # This regular expression searches for Option IDs only (letter and a number).
                            catNo = 0
                            for c in o:
                                # Get category first
                                if c.key == selection[0].upper():
                                    # Check if there are any items in the category
                                    if (len(c.list) > 0):
                                        # If yes, check if the item specified exists
                                        if (len(c.list) > int(selection[1:])):
                                            print(
                                                f"({selection.upper()}) {c.list[int(selection[1:])].name}\n     {c.list[int(selection[1:])].desc}")
                                            print(
                                                "Is this the measure you want to activate?")
                                            while True:
                                                confirm = input(
                                                    f"{cl.f.cyan}Confirm selection ({cl.f.green}Y{cl.f.cyan}/{cl.f.red}N{cl.f.cyan}): {cl.r}")
                                                if ("Y" in confirm.upper()):
                                                    print(
                                                        f"{cl.f.green}{c.list[int(selection[1:])].name} activated!{cl.r}")
                                                    for item in c.list:
                                                        item.actv = False
                                                    c.list[int(
                                                        selection[1:])].actv = True
                                                    newMeasures.optn[catNo] = c.list[int(
                                                        selection[1:])]
                                                    break
                                                else:
                                                    print(
                                                        f"{cl.f.red}Cancelled.{cl.r} {c.list[int(selection[1:])].name} was not activated.")
                                                    input(
                                                        "Press ENTER to go back to the categories list.")
                                                    break
                                        else:
                                            print(
                                                f"{cl.f.red}That option ({selection.upper()}) does not exist.{cl.r} Please try again.")
                                            input(
                                                "Press ENTER to go back to the categories list.")
                                            break
                                    else:
                                        # If no, say so
                                        print(
                                            f"{cl.f.red}There are no options in the {c.title} category.{cl.r}")
                                        input(
                                            "Press ENTER to go back to the categories list.")
                                        break
                                else:
                                    catNo = catNo + 1
                                    continue
                        else:
                            print("Going back to Category list.")
                            break
                    else:
                        # If no, say so
                        print(
                            f"{cl.f.red}There are no options in the {c.title} category.{cl.r}")
                        input("Press ENTER to go back to the categories list.")
                        break
            if not found:
                print(
                    f"{cl.f.red}That category ({selection.upper()}) does not exist.{cl.r}")
                input("Press ENTER to go back to the categories list.")
        else:
            print(
                f"{cl.f.red}'{selection}' doesn't look like a valid ID.{cl.r} Please try again.")
            input("Press ENTER to go back to the categories list.")
            # no break here: fall through to the top of the loop so the player can try again
    # Calculate the new totals and return.
    return newMeasures
Exemplo n.º 57
0
# Reconstructed head of this truncated example: the TrieNode/Trie definitions
# below are inferred from the prefix() method and the contacts() driver.
class TrieNode:
	def __init__(self):
		self.children = [None] * 26  # one slot per lowercase letter
		self.branch = 0              # number of inserted words passing through this node
		self.isEnd = False

class Trie:
	def __init__(self):
		self.root = TrieNode()

	def insert(self, word):
		node = self.root
		for char in word:
			i = ord(char) - ord('a')
			if node.children[i] is None:
				node.children[i] = TrieNode()
			node = node.children[i]
			node.branch += 1
		node.isEnd = True

	def prefix(self,word):
		node = self.root
		for char in word:
			i = ord(char) - ord('a')
			if node.children[i] is None:
				return 0
			node = node.children[i]
		return node.branch

def contacts(queries):
	t = Trie()
	result = []
	for q in queries:
		if q[0] == 'add':
			t.insert(q[1])
		else:
			result.append( t.prefix(q[1]) )
	return result

if __name__ == '__main__':
	queries_rows = int(input())
	queries = []

	for _ in range(queries_rows):
		queries.append(input().rstrip().split())

	print( contacts(queries) )
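A quick sanity check with the classic add/find sequence:

# Expected output: [2, 0] -- two stored contacts start with 'hac', none with 'hak'.
print(contacts([['add', 'hack'], ['add', 'hackerrank'],
                ['find', 'hac'], ['find', 'hak']]))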
Exemplo n.º 58
0
#!/usr/bin/env python3
import time
import pysoc
import bubble

l = list(reversed(range(int(1e5))))
print("== Python ==")
t_start = time.time()
l1 = bubble.bubble_sort(l)
t_end = time.time()
py_elapsed = t_end - t_start
print(f"> {py_elapsed} seconds")

l = list(reversed(range(int(1e5))))
print("== C ==")
t_start = time.time()
l2 = pysoc.bubble_sort(l)
t_end = time.time()
c_elapsed = t_end - t_start
print(f"> {c_elapsed} seconds")
print(f"\n> C is {py_elapsed / c_elapsed} times faster")
Exemplo n.º 59
0
def plot_energy(
    ax,
    energy,
    kind,
    bfmi,
    figsize,
    textsize,
    fill_alpha,
    fill_color,
    fill_kwargs,
    plot_kwargs,
    bw,
    legend,
    backend_kwargs,
    show,
):
    """Bokeh energy plot."""
    if backend_kwargs is None:
        backend_kwargs = {}

    backend_kwargs = {
        **backend_kwarg_defaults(("dpi", "plot.bokeh.figure.dpi")),
        **backend_kwargs,
    }
    dpi = backend_kwargs.pop("dpi")

    figsize, _, _, _, line_width, _ = _scale_fig_size(figsize, textsize, 1, 1)

    fill_kwargs = {} if fill_kwargs is None else fill_kwargs
    plot_kwargs = {} if plot_kwargs is None else plot_kwargs
    plot_kwargs.setdefault("line_width", line_width)
    if kind == "hist":
        legend = False

    if ax is None:
        ax = create_axes_grid(
            1,
            figsize=figsize,
            squeeze=True,
            backend_kwargs=backend_kwargs,
        )

    _colors = [
        prop for _, prop in zip(
            range(10), cycle(mpl_rcParams["axes.prop_cycle"].by_key()
                             ["color"]))
    ]
    if (fill_color[0].startswith("C")
            and len(fill_color[0]) == 2) and (fill_color[1].startswith("C")
                                              and len(fill_color[1]) == 2):
        fill_color = tuple(
            (_colors[int(color[1:]) % 10] for color in fill_color))
    elif fill_color[0].startswith("C") and len(fill_color[0]) == 2:
        fill_color = tuple([_colors[int(fill_color[0][1:]) % 10]] +
                           list(fill_color[1:]))
    elif fill_color[1].startswith("C") and len(fill_color[1]) == 2:
        fill_color = tuple(
            list(fill_color[1:]) + [_colors[int(fill_color[0][1:]) % 10]])

    series = zip(
        fill_alpha,
        fill_color,
        ("Marginal Energy", "Energy transition"),
        (energy - energy.mean(), np.diff(energy)),
    )

    labels = []

    if kind == "kde":
        for alpha, color, label, value in series:
            fill_kwargs["fill_alpha"] = alpha
            fill_kwargs["fill_color"] = vectorized_to_hex(color)
            plot_kwargs["line_alpha"] = alpha
            plot_kwargs["line_color"] = vectorized_to_hex(color)
            _, glyph = plot_kde(
                value,
                bw=bw,
                label=label,
                fill_kwargs=fill_kwargs,
                plot_kwargs=plot_kwargs,
                ax=ax,
                legend=legend,
                backend="bokeh",
                backend_kwargs={},
                show=False,
                return_glyph=True,
            )
            labels.append((
                label,
                glyph,
            ))

    elif kind == "hist":
        hist_kwargs = plot_kwargs.copy()
        hist_kwargs.update(**fill_kwargs)

        for alpha, color, label, value in series:
            hist_kwargs["fill_alpha"] = alpha
            hist_kwargs["fill_color"] = vectorized_to_hex(color)
            hist_kwargs["line_color"] = None
            hist_kwargs["line_alpha"] = alpha
            _histplot_bokeh_op(
                value.flatten(),
                values2=None,
                rotated=False,
                ax=ax,
                hist_kwargs=hist_kwargs,
                is_circular=False,
            )

    else:
        raise ValueError(f"Plot type {kind} not recognized.")

    if bfmi:
        for idx, val in enumerate(e_bfmi(energy)):
            bfmi_info = Label(
                x=int(figsize[0] * dpi * 0.58),
                y=int(figsize[1] * dpi * 0.73) - 20 * idx,
                x_units="screen",
                y_units="screen",
                text=f"chain {idx:>2} BFMI = {val:.2f}",
                render_mode="css",
                border_line_color=None,
                border_line_alpha=0.0,
                background_fill_color="white",
                background_fill_alpha=1.0,
            )

            ax.add_layout(bfmi_info)

    if legend and label is not None:
        legend = Legend(
            items=labels,
            location="center_right",
            orientation="horizontal",
        )
        ax.add_layout(legend, "above")
        ax.legend.click_policy = "hide"

    show_layout(ax, show)

    return ax
Exemplo n.º 60
0
import datetime

def timestamp_now_ns():
    # datetime yields a float of seconds; scale to 100 ns units before the int()
    # truncation, then multiply back so the last two nanosecond digits (beyond
    # float precision) are zeroed.
    timestamp = datetime.datetime.now().timestamp()
    timestamp = int(timestamp * 10**7) * 10**2
    return timestamp
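On Python 3.7+ the float round-trip can be avoided entirely with time.time_ns(); a hedged equivalent (hypothetical name) that keeps the same 100 ns truncation:

import time

def timestamp_now_ns_v2():
    # time_ns() returns integer nanoseconds since the epoch; floor to 100 ns
    # steps to match the truncation of the original implementation.
    return time.time_ns() // 100 * 100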