Example #1
def GetVariablesAndDofs():
	
	variables = []
	dofs = []
	dofsWithReactions = []
	
	# Loop through all loaded modules
	for aModuleKey in sys.modules.keys():
		
		# Get all members that are 'free functions'
		aModule = sys.modules[aModuleKey]
		allMembers = inspect.getmembers(aModule, inspect.isfunction)
		
		# Loop through all members
		for aMember in allMembers:
			
			# The variable 'aMember' is a tuple:
			# (function name, function object)
			# use aMember[0] to compare the name of the function,
			# and use aMember[1]() to invoke the function if it's
			# what we're searching for.
			if(aMember[0] == "Variables"):
				variables += aMember[1]() # invoke method
			elif(aMember[0] == "Dofs"):
				dofs += aMember[1]() # invoke method
			elif(aMember[0] == "DofsWithReactions"):
				dofsWithReactions += aMember[1]() # invoke method
	
	variables = OrderedDict.fromkeys(variables).keys()
	dofs = OrderedDict.fromkeys(dofs).keys()
	dofsWithReactions = OrderedDict.fromkeys(dofsWithReactions).keys()
	
	return (variables, dofs, dofsWithReactions)
Example #2
 def __init__(self, graph, s):
     assert isinstance(graph, (Graph, DiGraph))
     self.graph = graph
     self.s = s
     # dict.fromkeys takes the default value positionally, not as a keyword
     self.marked = OrderedDict.fromkeys(self.graph.vertices, False)
     self.edge_to = OrderedDict.fromkeys(self.graph.vertices, None)
     self.__dfs(s)
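The private __dfs helper is referenced but not shown; a minimal sketch consistent with the marked/edge_to bookkeeping above (graph.adjacent(v) is an assumed adjacency accessor, not part of the snippet) might be:

 def __dfs(self, v):
     # mark v, then walk unvisited neighbours, remembering the edge taken
     self.marked[v] = True
     for w in self.graph.adjacent(v):  # assumed accessor
         if not self.marked[w]:
             self.edge_to[w] = v
             self.__dfs(w)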
Example #3
    def to_representation(self, user):
        now = timezone.now()
        one_day_from_now = now + datetime.timedelta(hours=24)

        reviews = (
            get_users_reviews(user)
            .filter(next_review_date__range=(now, one_day_from_now))
            .annotate(hour=TruncHour("next_review_date", tzinfo=timezone.utc))
            .annotate(date=TruncDate("next_review_date", tzinfo=timezone.utc))
            .values("streak", "date", "hour")
            .annotate(review_count=Count("id"))
            .order_by("date", "hour")
        )
        expected_hour = now.hour
        hours = [hour % 24 for hour in range(expected_hour, expected_hour + 24)]

        retval = OrderedDict.fromkeys(hours)

        for key in retval.keys():
            retval[key] = OrderedDict.fromkeys([level.name for level in KwSrsLevel], 0)

        for review in reviews:
            found_hour = review["hour"].hour
            while found_hour != expected_hour:
                expected_hour = (expected_hour + 1) % 24
            streak = review["streak"]
            srs_level = STREAK_TO_SRS_LEVEL_MAP_KW[streak].name
            retval[expected_hour][srs_level] += review["review_count"]

        real_retval = [
            [count for srs_level, count in hourly_count.items()]
            for hour, hourly_count in retval.items()
        ]
        return real_retval
Example #4
  def __init__(self, distro, repository_url, name, version, version_patch):
    self.packages = []
    self.distro = distro
    self.repository_url = repository_url
    package = self._parse_package_file(
      self._get_package_xml_url(repository_url, name, version))
    self.name = package.name
    self.version = package.version
    self.version_patch = version_patch
    self.licenses = package.licenses
    self.run_dependencies = list(OrderedDict.fromkeys([dependency.name for dependency in package.run_depends]))
    self.build_dependencies = list(OrderedDict.fromkeys([dependency.name for dependency in package.build_depends + package.buildtool_depends]))
    # Build dependencies already added:
    if 'git' in self.build_dependencies: self.build_dependencies.remove('git')
    if 'cmake' in self.build_dependencies: self.build_dependencies.remove('cmake')

    # Remove HTML tags from description
    self.description = re.sub('<[^<]+?>', '', package.description)
    # Put it on one line to motivate packagers to make shorter descriptions
    self.description = re.sub('\n', ' ', self.description)
    # Multiple consecutive spaces turned into one
    self.description = re.sub('([ ]+)', ' ', self.description)
    # Only take the first sentence (keep short description)
    self.description = self.description.split(".")[0] + "."
    # Handle quotes
    self.description = self.description.replace('"', '').replace('`', '').replace('&quot;', '').replace('\'','')
Example #5
    def _create_grid(self):

        from bempp.api import GridFactory
        from collections import OrderedDict

        vertices = self._impl.vertices
        elements = self._impl.elements

        factory = GridFactory()

        # dict.fromkeys takes the default value positionally, not as a keyword
        self._element_file_to_insertion_indices = OrderedDict.fromkeys(elements.keys(), -1)
        self._vertex_file_to_insertion_indices = OrderedDict.fromkeys(vertices.keys(), -1)

        vertex_insertion_count = 0
        element_insertion_count = 0

        for key in elements:
            elem = elements[key]
            elem_vertex_keys = []
            for vertex_key in elem['data']:
                if self._vertex_file_to_insertion_indices[vertex_key] == -1:
                    factory.insert_vertex(vertices[vertex_key])
                    self._vertex_file_to_insertion_indices[vertex_key] = vertex_insertion_count
                    self._vertex_insertion_indices_to_file.append(vertex_key)
                    vertex_insertion_count += 1
                elem_vertex_keys.append(self._vertex_file_to_insertion_indices[vertex_key])
            factory.insert_element(elem_vertex_keys, domain_index=elem['domain_index'])
            self._element_file_to_insertion_indices[key] = element_insertion_count
            self._element_insertion_indices_to_file.append(key)
            element_insertion_count += 1

        return factory.finalize()
Example #6
    def _preset(self):
        # Generate every rotation of each rectangle.
        tmp = []
        for rect in self.__arr_rects:
            t = deque(rect)
            for _ in range(len(t)):
                t.rotate(1)
                tmp.append(list(t))
        self.__arr_rects = tmp

        # Order by rectangle area and length, width.
        self.__arr_order_rects = OrderedDict.fromkeys(sorted([rect[0] * rect[1] for rect in self.__arr_rects]), 1)
        for rect in self.__arr_rects:
            item = rect.pop()
            tmp = sorted(rect)
            tmp.append(item)
            self.__arr_order_rects[rect[0] * rect[1]] = tmp
        print(self.__arr_order_rects)

        self.__arr_res = [rect[2] for _, rect in self.__arr_order_rects.items()]
        # Re-key the dict by ordinal position.
        tmp = deepcopy(self.__arr_order_rects)
        self.__arr_order_rects = OrderedDict.fromkeys(range(len(tmp)))
        for index, value in enumerate(tmp.values()):
            self.__arr_order_rects[index] = value
Example #7
def distinctPowers():
    array = []
    for i in range(2, 101):
        for j in range(2, 101):
            array.append(i ** j)  # exact integer power; math.pow loses precision at this size
    # deduplicate while preserving order, then count the distinct powers
    print len(list(OrderedDict.fromkeys(array)))
Example #8
def keyPreProcess(key):
    if type(key) is not str:
        print 'Please choose a word as a key next time'
        sys.exit(0)

    key = key.replace(" ", "")
    key = key.upper()  # change the key to uppercase
    key = ''.join([i for i in key if not i.isdigit()])
    keyArray = list(key)  # represent the key as an array

    # I and J share a cell in a Playfair matrix, so the keyword
    # cannot contain both.
    if 'I' in keyArray and 'J' in keyArray:
        print 'J & I both can not coexist in the keyword'
        sys.exit(0)

    # Remove duplicate characters while keeping the original order.
    keyArray = list(OrderedDict.fromkeys(key))
    newKey = ''.join([str(x) for x in keyArray])

    key = newKey
    print 'You entered key as ' + key
    # Up to this point the key has been cleaned of duplicate
    # characters; now build the Playfair matrix from it.

    print '\n============Starting to generate the playfair matrix===========\n'

    # create a list from A to Z
    matr = list(string.ascii_uppercase)

    # join the key and the alphabet, then drop duplicates in order
    newKeyArray = keyArray + matr
    newKey = ''.join([str(x) for x in newKeyArray])
    keyArray = list(OrderedDict.fromkeys(newKey))  # ordered characters without duplicates
    keyArray.remove('J')  # remove J so that only I appears in the matrix
    print 'Playfair matrix as an array'
    print keyArray

    n = 5
    pfm = [keyArray[i:i+n] for i in range(0, len(keyArray), n)]  # 1D to 2D for printing
    print '\nThe playfair keyword:\n'
    print('\n'.join([''.join(['{:4}'.format(item) for item in row])
          for row in pfm]))

    return keyArray
Example #9
def student(request, dept_code):
    context_dict = {}
    try:
        dept = Department.objects.get(dept_code=dept_code)
        context_dict['Department'] = dept
    except:
        raise Http404
    try:
        btech = Student.objects.filter(dept = dept_code, degree = 1).values('year_of_admission')
        idd = Student.objects.filter(dept = dept_code, degree = 2).values('year_of_admission')

        btech_years = list()
        idd_years = list()
        
        for i in btech: btech_years.append(i['year_of_admission'])
        for i in idd: idd_years.append(i['year_of_admission'])
        
        btech_years = sorted(list(OrderedDict.fromkeys(btech_years)),reverse=True)
        idd_years = sorted(list(OrderedDict.fromkeys(idd_years)),reverse=True)
        
        students_list_btech = list()
        for year in btech_years:
            students_list_btech.append(Student.objects.filter(dept=dept_code, degree=1, year_of_admission=year).order_by('roll_no'))

        students_list_idd = list()
        for year in idd_years:
            students_list_idd.append(Student.objects.filter(dept=dept_code, degree=2, year_of_admission=year).order_by('roll_no'))

        headings_btech  = [ "B.Tech Part - I",
                            "B.Tech Part - II",
                            "B.Tech Part - III",
                            "B.Tech Part - IV" ]
        headings_idd    = [ "IDD Part - I",
                            "IDD Part - II",
                            "IDD Part - III",
                            "IDD Part - IV",
                            "IDD Part - V" ]

        #Every value in each counter needs to be different.
        counter1 = [1,2,3,4]
        counter2 = [11,22,33,44,55]
        full_list_btech = izip_longest(  
                                counter1,
                                headings_btech,
                                students_list_btech  )
        full_list_idd   = izip_longest(  
                                counter2,
                                headings_idd,
                                students_list_idd  )
        context_dict['full_list_btech'] = full_list_btech
        context_dict['full_list_idd'] = full_list_idd

    except:
        raise Http404
    return render(request,'student.html',context_dict)
Example #10
 def unique_parameters(self):
     # We actually need to remove duplicates from the list of parameters
     # (and their corresponding gradients) in order to support reusing
     # the same layer at multiple places in the graph,
     # e.g. do weight sharing.
     params, grads = self.parameters()
     return (
         list(_OrderedDict.fromkeys(params).keys()),
         list(_OrderedDict.fromkeys(grads).keys()),
     )
Example #11
def StatusReport():

	try:
		if(ProxyCounter <= 0):
			print("\nNo proxies found!\n")
		else:
			print("\n\n\t---------------------------------------\n\tEstimated Proxies: %d\n\n" % ProxyCounter )

			exportToFile = raw_input("\tExport to file? (Y/n): ");
			if("N" in exportToFile or "n" in exportToFile):
				pass
			else:
				SaveFile()

			# prompt to view the list of results
			if(NoSuccessCounter > 0):
				viewList = raw_input( "\n\tView website statistics? (y/N): ");
				if("y" in viewList or "Y" in viewList):
					global SucceededWebsites
					global NoSuccessWebsites
					# remove duplicate entries from the lists
					uniqueSW = list(OrderedDict.fromkeys(SucceededWebsites))
					uniqueNW = list(OrderedDict.fromkeys(NoSuccessWebsites))
					
					print("\n\tWebsites without proxies:\n\t---------------------------------------\n")
					for url in uniqueNW:
						Log("\t" + url,"red")
						
					print("\n\n\tWebsites with proxies:\n\t---------------------------------------\n")
					for url in uniqueSW:
						Log("\t" + url,"green")
						
					exportToFile = raw_input("\n\tExport succeeded websites to file? (y/N): ")
					if(exportToFile == "Y" or exportToFile == "y"):
						with open("output/Succeeded websites.txt","w+") as f:
							urlList = []
							# read all lines from the file (if it exists)
							for urlFromFile in f.readlines():
								urlList.append(urlFromFile)
							# append all succeeded websites to the list
							for sucUrl in uniqueSW:
								urlList.append(sucUrl)
							
							unique = list(OrderedDict.fromkeys(urlList))
							for url in unique:
								f.write(url + os.linesep)

						print("\n\tFile has been saved!\n\tYou can find it under 'output/Succeeded websites.txt'\n")

			# wait until we hit enter to continue because the call to Menu will clear the screen
		raw_input("\n\tHit enter to return to the main menu...")
	except KeyboardInterrupt:
		sys.exit()
	Menu()
	return
Example #12
  def __init__(self, distro, repository_url, name, version, version_patch):
    self.packages = []
    self.distro = distro
    self.repository_url = repository_url
    self.repository_name = self.repository_url.split('/')[-1].split('.')[0]
    package = self._parse_package_file(
      self._get_package_xml_url(repository_url, name, version))
    self.name = package.name
    self.version = package.version
    self.version_patch = version_patch
    self.package_release = str(int(version_patch) + 1)
    self.licenses = package.licenses
    self.run_dependencies = list(OrderedDict.fromkeys([dependency.name for dependency in package.run_depends]))
    self.build_dependencies = list(OrderedDict.fromkeys([dependency.name for dependency in package.build_depends + package.buildtool_depends]))

    # Tarball
    self.tarball_url = "%s/archive/release/%s/%s/%s-%s.tar.gz" \
                        % (self.repository_url.replace('.git',''),
                           self.distro.name, self.name,
                           self.version, self.version_patch)
    self.tarball_dir = "%s-release-%s-%s-%s-%s" \
                        % (self.repository_name, self.distro.name, self.name,
                           self.version, self.version_patch)

    # This may be the case for some metapackages
    self.is_virtual = False

    # Build dependencies already added:
    if 'git' in self.build_dependencies:
      self.build_dependencies.remove('git')
    if 'cmake' in self.build_dependencies:
      self.build_dependencies.remove('cmake')

    # Remove HTML tags from description
    self.description = re.sub('<[^<]+?>', '', package.description)
    # Put it on one line to motivate packagers to make shorter descriptions
    self.description = re.sub('\n', ' ', self.description)
    # Convert tabs to spaces
    self.description = re.sub('\t', ' ', self.description)
    # Multiple consecutive spaces turned into one
    self.description = re.sub('([ ]+)', ' ', self.description)
    # Only take the first sentence (keep short description)
    self.description = re.split("\. |\.$", self.description)[0] + "."
    # Handle quotes
    self.description = self.description.replace('"', '').replace('`', '').replace('&quot;', '').replace('\'','')

    # Website URL
    self.site_url = "http://www.ros.org/"
    for url in package.urls:
      if url.type == "website":
        # Some maintainers provide wrong URLs...
        url.url = re.findall('http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]'
                             '|(?:%[0-9a-fA-F][0-9a-fA-F]))+', url.url)
        if url.url:
          self.site_url = url.url[0]
Example #13
def _combinator(iterable1, iterable2):
    # Create 'OrderedSet's
    i1 = tuple(OrderedDict.fromkeys(iterable1))
    i2 = tuple(OrderedDict.fromkeys(iterable2))
    # Decide which one to repeat
    if len(i1) < len(i2):
        i1 = cycle(i1)
    else:
        i2 = cycle(i2)
    # Iterate through the values
    for value1, value2 in zip(i1, i2):
        yield value1, value2
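A usage sketch with illustrative inputs: after de-duplication the shorter iterable is cycled, so pairing runs until the longer one is exhausted.

list(_combinator([1, 1, 2], ['a', 'b', 'c', 'a']))
# -> [(1, 'a'), (2, 'b'), (1, 'c')]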
Example #14
 def __init__(self, names, Type, logOrder, isList=False):
     #self.names = rospy.get_param(names)
     self.names = names
     if isList:
         # fromkeys would make every name share a single list object,
         # so build a fresh list per name instead
         self.states = OrderedDict((name, [0.0 for elem in logOrder]) for name in self.names)
     else:
         self.states = OrderedDict.fromkeys(self.names, Type())  # one shared Type() instance

     self.Type = Type
     self.stateMux = Lock()
     self.logOrder = logOrder
     self.isList = isList
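The shared-default pitfall that the comment above guards against, shown in isolation:

d = OrderedDict.fromkeys(['a', 'b'], [])
d['a'].append(1)
assert d['b'] == [1]  # both keys alias the same list object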
Example #15
    def __init__(self, graph):
        assert isinstance(graph, Graph)
        self.graph = graph
        self.count = 0
        self.marked = OrderedDict.fromkeys(self.graph.vertices, False)
        self.connections = OrderedDict.fromkeys(self.graph.vertices, None)

        for v in self.graph.vertices:
            if self.marked[v] is True:
                continue
            self.__dfs(v)
            self.count += 1
Example #16
    def test_object(self):
        self.assertEqual(ubjdumpb({}), OBJECT_START + OBJECT_END)
        self.assertEqual(ubjdumpb({'a': None}, container_count=True), (OBJECT_START + CONTAINER_COUNT + TYPE_UINT8 +
                                                                       b'\x01' + TYPE_UINT8 + b'\x01' +
                                                                       'a'.encode('utf-8') + TYPE_NULL))
        self.check_enc_dec({})
        # negative length
        with self.assertRaises(DecoderException):
            ubjloadb(OBJECT_START + CONTAINER_COUNT + ubjdumpb(-1))
        with self.assertRaises(EncoderException):
            ubjdumpb({123: 'non-string key'})
        with self.assertRaises(EncoderException):
            ubjdumpb({'fish': type(list)})
        # invalid key size type
        with self.assertRaises(DecoderException):
            ubjloadb(OBJECT_START + TYPE_NULL)
        # invalid key size, key too short, key invalid utf-8, no value
        for suffix in (b'\x81', b'\x01', b'\x01' + b'\xfe', b'\x0101'):
            with self.assertRaises(DecoderException):
                ubjloadb(OBJECT_START + TYPE_INT8 + suffix)
        self.check_enc_dec({'longkey1' * 65: 1})
        self.check_enc_dec({'longkey2' * 4096: 1})

        obj = {'int': 123,
               'longint': 9223372036854775807,
               'float': 1.25,
               'hp': Decimal('10e15'),
               'char': 'a',
               'str': 'here is a string',
               'unicode': u(r'\u00a9 with extended\u2122'),
               '': 'empty key',
               u(r'\u00a9 with extended\u2122'): 'unicode-key',
               'null': None,
               'true': True,
               'false': False,
               'array': [1, 2, 3],
               'bytes_array': b'1234',
               'object': {'another one': 456, 'yet another': {'abc': True}}}
        for opts in ({'container_count': False}, {'container_count': True}):
            self.check_enc_dec(obj, **opts)

        # dictionary key sorting
        obj1 = OrderedDict.fromkeys('abcdefghijkl')
        obj2 = OrderedDict.fromkeys('abcdefghijkl'[::-1])
        self.assertNotEqual(ubjdumpb(obj1), ubjdumpb(obj2))
        self.assertEqual(ubjdumpb(obj1, sort_keys=True), ubjdumpb(obj2, sort_keys=True))

        # custom mapping class
        with self.assertRaises(TypeError):
            ubjloadb(TYPE_NULL, object_pairs_hook=list)
        self.assertEqual(ubjloadb(ubjdumpb(obj1), object_pairs_hook=OrderedDict), obj1)
Example #17
def reorder_items(items):
    argkeys_cache = {}
    items_by_argkey = {}
    for scopenum in range(0, scopenum_function):
        argkeys_cache[scopenum] = d = {}
        items_by_argkey[scopenum] = item_d = defaultdict(deque)
        for item in items:
            keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum))
            if keys:
                d[item] = keys
                for key in keys:
                    item_d[key].append(item)
    items = OrderedDict.fromkeys(items)
    return list(reorder_items_atscope(items, argkeys_cache, items_by_argkey, 0))
Example #18
def GetPoints(Entity, Precision, CheckDirection='No'):
    #Points = []
    if Entity.dxftype == "LINE":
        point1 = Entity.start
        point1 = tuple([round(x, Precision) for x in point1])
        point2 = Entity.end
        point2 = tuple([round(x, Precision) for x in point2])
        return [point1, point2]
    elif Entity.dxftype == "CIRCLE":
        point = Entity.center
        point = tuple([round(x, Precision) for x in point])
        return [point]
    elif Entity.dxftype in ("LWPOLYLINE",):
        Points = []
        for point in list(Entity.points):
            point = tuple([round(x, Precision) for x in point])
            Points.append(point + (0.0,))
        returnPoints = list(OrderedDict.fromkeys(Points))
        if CheckDirection == 'Yes':
            if not CheckCrossProduct(returnPoints[0], returnPoints[1], returnPoints[2]):
                return list(reversed(returnPoints))
        return returnPoints
    elif Entity.dxftype in ("SOLID", "3DFACE"):
        Points = []
        for point in list(Entity.points):
            point = tuple([round(x, Precision) for x in point])
            if len(point) == 2:
                Points.append(point + (0.0,))
            else:
                Points.append(point)
        returnPoints = list(OrderedDict.fromkeys(Points))
        if CheckDirection == 'Yes':
            if not CheckCrossProduct(returnPoints[0], returnPoints[1], returnPoints[2]):
                return list(reversed(returnPoints))
        return returnPoints
    elif Entity.dxftype in ("POLYLINE",):
        Points = []
        for point in Entity.vertices:
            point = tuple([round(x, Precision) for x in point.location])
            if len(point) == 2:
                Points.append(point + (0.0,))
            else:
                Points.append(point)
        returnPoints = list(OrderedDict.fromkeys(Points))
        if CheckDirection == 'Yes':
            if not CheckCrossProduct(returnPoints[0], returnPoints[1], returnPoints[2]):
                return list(reversed(returnPoints))
        return returnPoints
Example #19
def wolfram_search(args, resp):
    """Perform a WolframAlpha search, may require an API key in .cliqrc."""
    if resp is None:
        return open_url(args, 'http://www.wolframalpha.com')
    elif args['open']:
        return open_url(args, args['query'])

    try:
        # Filter unnecessary title fields
        titles = list(OrderedDict.fromkeys(
            resp.xpath("//pod[@title != '' and "
                       "@title != 'Number line' and "
                       "@title != 'Input' and "
                       "@title != 'Visual representation' and "
                       "@title != 'Image' and "
                       "@title != 'Manipulatives illustration' and "
                       "@title != 'Quotient and remainder']"
                       "/@title")))
    except AttributeError:
        raise AttributeError('Failed to retrieve data from lxml object!')

    entries = []
    if titles:
        for title in titles:
            if PY2:
                title = title.encode('ascii', 'ignore')
            entry_xpath = ("//pod[@title='{0}']/subpod/plaintext/text()"
                           .format(title))
            entry = resp.xpath(entry_xpath)
            if entry:
                entries.append(entry[0])

        entries = list(OrderedDict.fromkeys(entries))
        # Return False if results were empty
        if len(entries) == 1 and entries[0] == '{}':
            return False

        output_list = reformat_wolfram_entries(titles, entries)
        if not output_list:
            return False
        elif len(output_list) > 2:
            print('\n'.join(output_list[:2]))
            if utils.check_input(input(SEE_MORE), empty=True):
                print('\n'.join(output_list[2:]))
        else:
            print('\n'.join(output_list))
        return True
    else:
        return False
Example #20
def ordered_packing_list(P):
    # SHOW Packing DIALOG
    if isinstance(P, list):
        tempP = P.copy()
        P = {}
        for i in range(len(tempP)):
            P[str("Packing %d" % i)] = tempP[i]
    try:
        odict = OrderedDict.fromkeys(sorted(P, key=lambda x: float(x)))
    except(Exception):
        odict = OrderedDict.fromkeys(sorted(P))

    for key in odict:
        odict[key] = list(P[key])
    return odict
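A usage sketch: keys that parse as floats are ordered numerically rather than lexically.

P = {'10': [1, 1], '2': [2, 2]}
ordered_packing_list(P)
# -> OrderedDict([('2', [2, 2]), ('10', [1, 1])])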
Example #21
  def RunStep(self):
    self["full_revision_list"] = list(OrderedDict.fromkeys(
        self._options.revisions))
    port_revision_list = []
    for revision in self["full_revision_list"]:
      # Search for commits which matches the "Port XXX" pattern.
      git_hashes = self.GitLog(reverse=True, format="%H",
                               grep="Port %s" % revision,
                               branch=self.vc.RemoteMasterBranch())
      for git_hash in git_hashes.splitlines():
        revision_title = self.GitLog(n=1, format="%s", git_hash=git_hash)

        # Is this revision included in the original revision list?
        if git_hash in self["full_revision_list"]:
          print("Found port of %s -> %s (already included): %s"
                % (revision, git_hash, revision_title))
        else:
          print("Found port of %s -> %s: %s"
                % (revision, git_hash, revision_title))
          port_revision_list.append(git_hash)

    # Do we find any port?
    if len(port_revision_list) > 0:
      if self.Confirm("Automatically add corresponding ports (%s)?"
                      % ", ".join(port_revision_list)):
        #: 'y': Add ports to revision list.
        self["full_revision_list"].extend(port_revision_list)
Example #22
    def update(self, data):
        """Maps new values to integer identifiers.

        Parameters
        ----------
        data : iterable
            sequence of string values

        Raises
        ------
        TypeError
              If the value in data is not a string, unicode, bytes type
        """
        data = np.atleast_1d(np.array(data, dtype=object))

        # check if convertible to number:
        convertible = True
        for val in OrderedDict.fromkeys(data):
            # OrderedDict just iterates over unique values in data.
            if not isinstance(val, (str, bytes)):
                raise TypeError("{val!r} is not a string".format(val=val))
            if convertible:
                # this will only be called so long as convertible is True.
                convertible = self._str_is_convertible(val)
            if val not in self._mapping:
                self._mapping[val] = next(self._counter)
        if convertible:
            _log.info('Using categorical units to plot a list of strings '
                      'that are all parsable as floats or dates. If these '
                      'strings should be plotted as numbers, cast to the '
                      'appropriate data type before plotting.')
Example #23
def preview(recid):
    """Preview file for given record."""
    filename = request.args.get('filename', type=str)

    for f in itertools.chain(get_record_documents(recid, filename),
                             get_record_files(recid, filename)):
        if f.name + f.superformat == filename or filename is None:
            extension = os.path.splitext(f.name + f.superformat)[1]
            ordered = previewers.keys()
            if "CFG_PREVIEW_PREFERENCE" in cfg and \
               extension in cfg["CFG_PREVIEW_PREFERENCE"]:
                from collections import OrderedDict
                ordered = OrderedDict.fromkeys(
                    cfg["CFG_PREVIEW_PREFERENCE"][extension] +
                    ordered).keys()

            try:
                for plugin_id in ordered:
                    if previewers[plugin_id]['can_preview'](f):
                        return previewers[plugin_id]['preview'](f)
            except Exception:
                current_app.logger.exception(
                    "Preview plugin {0} failed "
                    "previewing {1} in record {2}".format(
                        plugin_id, filename, recid
                    )
                )
    return previewers['default']['preview'](None)
Example #24
def lemmatize(string, index, exceptions, rules):
    orig = string
    string = string.lower()
    forms = []
    oov_forms = []
    for old, new in rules:
        if string.endswith(old):
            form = string[: len(string) - len(old)] + new
            if not form:
                pass
            elif form in index or not form.isalpha():
                forms.append(form)
            else:
                oov_forms.append(form)
    # Remove duplicates but preserve the ordering of applied "rules"
    forms = list(OrderedDict.fromkeys(forms))
    # Put exceptions at the front of the list, so they get priority.
    # This is a dodgy heuristic -- but it's the best we can do until we get
    # frequencies on this. We can at least prune out problematic exceptions,
    # if they shadow more frequent analyses.
    for form in exceptions.get(string, []):
        if form not in forms:
            forms.insert(0, form)
    if not forms:
        forms.extend(oov_forms)
    if not forms:
        forms.append(orig)
    return forms
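A toy invocation with made-up lemmatization data (suffix rules as (old, new) pairs; the index is the set of known word forms):

rules = [('ies', 'y'), ('s', '')]
index = {'pony'}
lemmatize('Ponies', index, {}, rules)  # -> ['pony']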
Example #25
    def _find_compressible_variables(self):
        ''' Returns variable names that are not thought
        to be coordinate variables'''

        # It is quite difficult to properly identify the coordinate variables
        # assuming CF-Conventions (1.6) only. Therefore assume all 1-D variables
        # need not be compressed.
        # exclude proper coordinate variables (1-dimensional)
        exclude_coord = [varname for varname in self.variables if
                         len(self.variables[varname]['dimensions']) <= 1]
        # exclude auxiliary coordinates and cell-bounds
        exclude_aux_coords = []
        for atts in [v['attributes'] for v in self.variables.values()]:
            auxcoords = atts.get('coordinates') or ''
            auxcoords += ' ' + (atts.get('bounds') or '')
            exclude_aux_coords.extend(auxcoords.split())
        # for good measure exclude variable names from Dominik's list
        exclude_dom = ['lon', 'lat', 'slon', 'slat', 'slonu', 'slatu', 'slonv',
                       'slatv', 'time', 'time_bnds', 'rlon', 'rlat',
                       'level_bnds', 'level', 'levels']
        # also exclude variables of wrong datatype
        exclude_dtyp = []
        comp_dtyp = [np.dtype(x) for x in ['float64', 'float32',
                                           'uint32', 'uint16']]
        for vn, v in self.variables.iteritems():
            if v['dtype'] not in comp_dtyp:
                exclude_dtyp.append(vn)
        exclude_all = exclude_coord + exclude_aux_coords + \
                      exclude_dom + exclude_dtyp
        exclude_all = list(OrderedDict.fromkeys(exclude_all))  # make unique
        compressible = [v for v in self.variables if v not in exclude_all]
        return (compressible, exclude_all)
Example #26
    def call(self):
        """ Display the CSV formated table data.
        """

        qname = self._cw.form["qname"]
        timepoint = self._cw.form["timepoint"]
        labels = json.loads(self._cw.form["labels"])

        rql = ("Any ID, QT, OV Where S is Subject, S code_in_study ID, "
               "S subject_questionnaire_runs QR, QR questionnaire QU, "
               "QU name '{0}', QR open_answers O, O value OV, "
               "O in_assessment A, A timepoint '{1}', O question Q, "
               "Q text QT".format(qname, timepoint))
        rset = self._cw.execute(rql)

        table = defaultdict(lambda: OrderedDict.fromkeys(labels, ""))
        for item in rset:
            table[item[0]][item[1]] = item[2]
        for id, data in table.iteritems():
            table[id]["ID"] = id

        writer = self.csvwriter()
        writer.writerow(labels)
        for psc2, data in iter(sorted(table.iteritems())):
            writer.writerow(data.values())
Example #27
        def get_next_form(recno, recno_index, ):

            file_offset = 0
            recno_counter = 0

            for line in from_source:
                bits = line.split(delimiter)
                bits = map(lambda x: x.strip().decode('utf-8'), bits)
                bits = filter(lambda x: x, bits)
                if bits:
                    recno_counter += 1

                    bits = OrderedDict.fromkeys(bits).keys() # stable unique
                    bits_prefixed = PyDic.common_prefix(bits)

                    bits_str = (PyDic.INTERNAL_DELIMITER.join(bits_prefixed)).encode(
                        'utf-8') + '\n'

                    recno.write(bits_str)

                    if to_path is None:
                        recno_index.append(file_offset)
                    else:

                        recno_index.write(
                            struct.pack(PyDic.RECNO_INDEX_FMT, file_offset))

                    if verbose:
                        print >> sys.stderr, "[", recno_counter, "]", bits[0]

                    for bit in bits:
                        yield bit.lower(), (recno_counter, )
                    file_offset += len(bits_str)

Example #28
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
                             restart_functions=None):
    """Helper function to perform the restart_on_change function.

    This is provided for decorators to restart services if files described
    in the restart_map have changed after an invocation of lambda_f().

    @param lambda_f: function to call.
    @param restart_map: {file: [service, ...]}
    @param stopstart: whether to stop, start or restart a service
    @param restart_functions: nonstandard functions to use to restart services
                              {svc: func, ...}
    @returns result of lambda_f()
    """
    if restart_functions is None:
        restart_functions = {}
    checksums = {path: path_hash(path) for path in restart_map}
    r = lambda_f()
    # create a list of lists of the services to restart
    restarts = [restart_map[path]
                for path in restart_map
                if path_hash(path) != checksums[path]]
    # create a flat list of ordered services without duplicates from lists
    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
    if services_list:
        actions = ('stop', 'start') if stopstart else ('restart',)
        for service_name in services_list:
            if service_name in restart_functions:
                restart_functions[service_name](service_name)
            else:
                for action in actions:
                    service(action, service_name)
    return r
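A hedged usage sketch (write_config and the service name are hypothetical stand-ins): the helper calls the lambda, then restarts the mapped services only if a watched file's hash changed.

result = restart_on_change_helper(
    lambda: write_config('/etc/apache2/apache2.conf'),  # hypothetical renderer
    {'/etc/apache2/apache2.conf': ['apache2']},
)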
Example #29
 def set_recent_files(self, recent_files):
     """Set a list of files opened by the project."""
     for recent_file in recent_files[:]:
         if not os.path.isfile(recent_file):
             recent_files.remove(recent_file)
     self.CONF[WORKSPACE].set('main', 'recent_files',
                              list(OrderedDict.fromkeys(recent_files)))
Example #30
def select_fields_from_row(incoming_row_dict, field_names):
    """Return an OrderedDict with *field_names* as keys, filled from *incoming_row_dict* where matched."""
    result_row_dict = OrderedDict.fromkeys(field_names, NULL_CSV_MARKER)
    for k,v in incoming_row_dict.items():
        if k in field_names:
            result_row_dict[k] = v
    return result_row_dict
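A usage sketch (NULL_CSV_MARKER is the module's placeholder constant for missing fields, not shown here):

row = {'name': 'Ada', 'extra': 1}
select_fields_from_row(row, ['name', 'age'])
# -> OrderedDict([('name', 'Ada'), ('age', NULL_CSV_MARKER)])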
Example #31
def cxx_toolchain_path(version: str = None) -> Tuple[str, str]:
    """
    Validate, then activate C++ toolchain directory path.
    """
    if platform.system() != 'Windows':
        raise RuntimeError(
            'Functionality is currently only supported on Windows')
    if version is not None and not isinstance(version, str):
        raise TypeError('Format version number as a string')
    logger = get_logger()
    toolchain_root = ''
    if 'CMDSTAN_TOOLCHAIN' in os.environ:
        toolchain_root = os.environ['CMDSTAN_TOOLCHAIN']
        if os.path.exists(os.path.join(toolchain_root, 'mingw_64')):
            compiler_path = os.path.join(
                toolchain_root,
                'mingw_64' if (sys.maxsize > 2**32) else 'mingw_32',
                'bin',
            )
            if os.path.exists(compiler_path):
                tool_path = os.path.join(toolchain_root, 'bin')
                if not os.path.exists(tool_path):
                    tool_path = ''
                    compiler_path = ''
                    logger.warning(
                        'Found invalid installation for RTools35 on %s',
                        toolchain_root,
                    )
                    toolchain_root = ''
            else:
                compiler_path = ''
                logger.warning('Found invalid installation for RTools35 on %s',
                               toolchain_root)
                toolchain_root = ''
        elif os.path.exists(os.path.join(toolchain_root, 'mingw64')):
            compiler_path = os.path.join(
                toolchain_root,
                'mingw64' if (sys.maxsize > 2**32) else 'mingw32',
                'bin',
            )
            if os.path.exists(compiler_path):
                tool_path = os.path.join(toolchain_root, 'usr', 'bin')
                if not os.path.exists(tool_path):
                    tool_path = ''
                    compiler_path = ''
                    logger.warning(
                        'Found invalid installation for RTools40 on %s',
                        toolchain_root,
                    )
                    toolchain_root = ''
            else:
                compiler_path = ''
                logger.warning('Found invalid installation for RTools40 on %s',
                               toolchain_root)
                toolchain_root = ''
    else:
        rtools_dir = os.path.expanduser(
            os.path.join('~', '.cmdstanpy', 'RTools'))
        if not os.path.exists(rtools_dir):
            raise ValueError('no RTools installation found, '
                             'run command line script "install_cxx_toolchain"')
        compiler_path = ''
        tool_path = ''
        if version not in ('4', '40', '4.0') and os.path.exists(
                os.path.join(rtools_dir, 'RTools35')):
            toolchain_root = os.path.join(rtools_dir, 'RTools35')
            compiler_path = os.path.join(
                toolchain_root,
                'mingw_64' if (sys.maxsize > 2**32) else 'mingw_32',
                'bin',
            )
            if os.path.exists(compiler_path):
                tool_path = os.path.join(toolchain_root, 'bin')
                if not os.path.exists(tool_path):
                    tool_path = ''
                    compiler_path = ''
                    logger.warning(
                        'Found invalid installation for RTools35 on %s',
                        toolchain_root,
                    )
                    toolchain_root = ''
            else:
                compiler_path = ''
                logger.warning('Found invalid installation for RTools35 on %s',
                               toolchain_root)
                toolchain_root = ''
        if (not toolchain_root
                or version in ('4', '40', '4.0')) and os.path.exists(
                    os.path.join(rtools_dir, 'RTools40')):
            toolchain_root = os.path.join(rtools_dir, 'RTools40')
            compiler_path = os.path.join(
                toolchain_root,
                'mingw64' if (sys.maxsize > 2**32) else 'mingw32',
                'bin',
            )
            if os.path.exists(compiler_path):
                tool_path = os.path.join(toolchain_root, 'usr', 'bin')
                if not os.path.exists(tool_path):
                    tool_path = ''
                    compiler_path = ''
                    logger.warning(
                        'Found invalid installation for RTools40 on %s',
                        toolchain_root,
                    )
                    toolchain_root = ''
            else:
                compiler_path = ''
                logger.warning('Found invalid installation for RTools40 on %s',
                               toolchain_root)
                toolchain_root = ''
    if not toolchain_root:
        raise ValueError('no C++ toolchain installation found, '
                         'run command line script "install_cxx_toolchain"')
    logger.info('Adding C++ toolchain to $PATH: %s', toolchain_root)
    os.environ['PATH'] = ';'.join(
        list(
            OrderedDict.fromkeys([
                compiler_path,
                tool_path,
            ] + os.getenv('PATH', '').split(';'))))
    return compiler_path, tool_path
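The closing PATH update, distilled into a standalone helper for clarity (illustrative only, not part of the original module): prepend the new directories and let fromkeys drop later duplicates while preserving order, using the Windows ';' separator.

def prepend_to_path(new_dirs, path):
    # first occurrence of each directory wins; order is preserved
    return ';'.join(OrderedDict.fromkeys(new_dirs + path.split(';')))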
Example #32
    def get_all_accounts(self,
                         account_ids,
                         exclude_type=None,
                         only_type=None,
                         filter_report_type=None,
                         context=None):
        """Get all account passed in params with their childrens

        @param exclude_type: list of types to exclude (view, receivable,
                                                payable, consolidation, other)
        @param only_type: list of types to filter on (view, receivable,
                                                payable, consolidation, other)
        @param filter_report_type: list of report type to filter on
        """
        context = context or {}
        accounts = []
        if not isinstance(account_ids, list):
            account_ids = [account_ids]
        acc_obj = self.pool.get('account.account')
        for account_id in account_ids:
            accounts.append(account_id)
            children_acc_ids = acc_obj._get_children_and_consol(
                self.cursor, self.uid, account_id, context=context)
            if context.get('account_level'):
                domain = [('level', '<=', context['account_level']),
                          ('id', 'in', children_acc_ids)]
                accounts += self.pool['account.account'].search(
                    self.cursor, self.uid, domain)
            else:
                accounts += children_acc_ids
        # remove duplicate account IDs in accounts
        # We don't use list(set(accounts)) to keep the order
        # cf http://stackoverflow.com/questions/7961363/
        # removing-duplicates-in-lists
        res_ids = list(OrderedDict.fromkeys(accounts))
        res_ids = self.sort_accounts_with_structure(account_ids,
                                                    res_ids,
                                                    context=context)

        if exclude_type or only_type or filter_report_type:
            sql_filters = {'ids': tuple(res_ids)}
            sql_select = "SELECT a.id FROM account_account a"
            sql_join = ""
            sql_where = "WHERE a.id IN %(ids)s"
            if exclude_type:
                sql_where += " AND a.type not in %(exclude_type)s"
                sql_filters.update({'exclude_type': tuple(exclude_type)})
            if only_type:
                sql_where += " AND a.type IN %(only_type)s"
                sql_filters.update({'only_type': tuple(only_type)})
            if filter_report_type:
                sql_join += "INNER JOIN account_account_type t" \
                            " ON t.id = a.user_type"
                sql_join += " AND t.report_type IN %(report_type)s"
                sql_filters.update({'report_type': tuple(filter_report_type)})

            sql = ' '.join((sql_select, sql_join, sql_where))
            self.cursor.execute(sql, sql_filters)
            fetch_only_ids = self.cursor.fetchall()
            if not fetch_only_ids:
                return []
            only_ids = [only_id[0] for only_id in fetch_only_ids]
            # keep sorting but filter ids
            res_ids = [res_id for res_id in res_ids if res_id in only_ids]
        return res_ids
Example #33
# Remove Duplicates From List

lst = [7, 3, 3, 5, 6, 5]
# removes duplicates but does not preserve the list order
no_dups = list(set(lst))
print(no_dups)

# removes duplicates and preserves the list order
from collections import OrderedDict

no_dups = list(OrderedDict.fromkeys(lst).keys())
print(no_dups)

# Index of Min/Max Element


def min_index(lst):
    return min(range(len(lst)), key=lst.__getitem__)


def max_index(lst):
    return max(range(len(lst)), key=lst.__getitem__)


lst = [20, 40, 70, 10]
print("min index : {}".format(min_index(lst)))
Example #34
def remove_duplicate1():
    print("=== remove_duplicate1 ===")
    ids = [1, 3, 4, 1, 2, 3]
    print(list(OrderedDict.fromkeys(ids)))
Example #35
def remove_duplicates(items):
    return list(OrderedDict.fromkeys(items))
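A quick sanity check of the helper (first occurrence wins):

assert remove_duplicates([3, 1, 3, 2, 1]) == [3, 1, 2]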
Example #36
def plot_fitted(
    model: ModelBridge,
    metric: str,
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    rel: bool = True,
    custom_arm_order: Optional[List[str]] = None,
    custom_arm_order_name: str = "Custom",
    show_CI: bool = True,
) -> AxPlotConfig:
    """Plot fitted metrics.

    Args:
        model: model to use for predictions.
        metric: metric to plot predictions for.
        generator_runs_dict: a mapping from
            generator run name to generator run.
        rel: if True, use relative effects. Default is True.
        custom_arm_order: a list of arm names in the
            order corresponding to how they should be plotted on the x-axis.
            If not None, this is the default ordering.
        custom_arm_order_name: name for custom ordering to
            show in the ordering dropdown. Default is 'Custom'.
        show_CI: if True, render confidence intervals.

    """
    traces = _single_metric_traces(model,
                                   metric,
                                   generator_runs_dict,
                                   rel,
                                   show_CI=show_CI)

    # order arm names, sorting arm numbers within each batch
    names_by_arm = sorted(
        np.unique(np.concatenate([d["x"] for d in traces])),
        key=lambda x: arm_name_to_tuple(x),
    )

    # get arm names sorted by effect size
    names_by_effect = list(
        OrderedDict.fromkeys(
            np.concatenate([d["x"] for d in traces]).flatten().take(
                np.argsort(np.concatenate([d["y"]
                                           for d in traces]).flatten()))))

    # options for ordering arms (x-axis)
    xaxis_categoryorder = "array"
    xaxis_categoryarray = names_by_arm

    order_options = [
        {
            "args": [{
                "xaxis.categoryorder": "array",
                "xaxis.categoryarray": names_by_arm
            }],
            "label":
            "Name",
            "method":
            "relayout",
        },
        {
            "args": [{
                "xaxis.categoryorder": "array",
                "xaxis.categoryarray": names_by_effect
            }],
            "label":
            "Effect Size",
            "method":
            "relayout",
        },
    ]

    # if a custom order has been passed, default to that
    if custom_arm_order is not None:
        xaxis_categoryorder = "array"
        xaxis_categoryarray = custom_arm_order
        order_options = [{
            "args": [{
                "xaxis.categoryorder": "array",
                "xaxis.categoryarray": custom_arm_order,
            }],
            "label":
            custom_arm_order_name,
            "method":
            "relayout",
        }] + order_options

    layout = go.Layout(
        title="Predicted Outcomes",
        hovermode="closest",
        updatemenus=[{
            "x": 1.25,
            "y": 0.67,
            "buttons": list(order_options),
            "yanchor": "middle",
            "xanchor": "left",
        }],
        yaxis={
            "zerolinecolor": "red",
            "title": "{}{}".format(metric, " (%)" if rel else ""),
        },
        xaxis={
            "tickangle": 45,
            "categoryorder": xaxis_categoryorder,
            "categoryarray": xaxis_categoryarray,
        },
        annotations=[{
            "x": 1.18,
            "y": 0.72,
            "xref": "paper",
            "yref": "paper",
            "text": "Sort By",
            "showarrow": False,
            "yanchor": "middle",
        }],
        font={"size": 10},
    )

    fig = go.Figure(data=traces, layout=layout)
    return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
Example #37
def _remove_duplications(a_list):
    return list(OrderedDict.fromkeys(a_list))
Example #38
def get_unique_values_in_order(values):
    return list(OrderedDict.fromkeys(values))
Example #39
def _normalize_evaluators_and_evaluator_config_args(
    evaluators,
    evaluator_config,
):
    from mlflow.models.evaluation.evaluator_registry import _model_evaluation_registry

    def check_nesting_config_dict(_evaluator_name_list,
                                  _evaluator_name_to_conf_map):
        return isinstance(_evaluator_name_to_conf_map, dict) and all(
            k in _evaluator_name_list and isinstance(v, dict)
            for k, v in _evaluator_name_to_conf_map.items())

    if evaluators is None:
        evaluator_name_list = list(_model_evaluation_registry._registry.keys())
        if len(evaluator_name_list) > 1:
            _logger.warning(
                f"Multiple registered evaluators are found {evaluator_name_list} and "
                "they will all be used in evaluation if they support the specified model type. "
                "If you want to evaluate with one evaluator, specify the `evaluator` argument "
                "and optionally specify the `evaluator_config` argument.")
        if evaluator_config is not None:
            conf_dict_value_error = ValueError(
                "If `evaluators` argument is None, all available evaluators will be used. "
                "If only the default evaluator is available, the `evaluator_config` argument is "
                "interpreted as the config dictionary for the default evaluator. Otherwise, the "
                "`evaluator_config` argument must be a dictionary mapping each evaluator's name "
                "to its own evaluator config dictionary.")
            if evaluator_name_list == ["default"]:
                if not isinstance(evaluator_config, dict):
                    raise conf_dict_value_error
                elif "default" not in evaluator_config:
                    evaluator_name_to_conf_map = {"default": evaluator_config}
                else:
                    evaluator_name_to_conf_map = evaluator_config
            else:
                if not check_nesting_config_dict(evaluator_name_list,
                                                 evaluator_config):
                    raise conf_dict_value_error
                evaluator_name_to_conf_map = evaluator_config
        else:
            evaluator_name_to_conf_map = {}
    elif isinstance(evaluators, str):
        if not (evaluator_config is None
                or isinstance(evaluator_config, dict)):
            raise ValueError(
                "If `evaluators` argument is the name of an evaluator, evaluator_config must be "
                "None or a dict containing config items for the evaluator.")
        evaluator_name_list = [evaluators]
        evaluator_name_to_conf_map = {evaluators: evaluator_config}
    elif isinstance(evaluators, list):
        if evaluator_config is not None:
            if not check_nesting_config_dict(evaluators, evaluator_config):
                raise ValueError(
                    "If `evaluators` argument is an evaluator name list, evaluator_config "
                    "must be a dict contains mapping from evaluator name to individual "
                    "evaluator config dict.")
        # Use `OrderedDict.fromkeys` to deduplicate elements but keep elements order.
        evaluator_name_list = list(OrderedDict.fromkeys(evaluators))
        evaluator_name_to_conf_map = evaluator_config or {}
    else:
        raise ValueError(
            "`evaluators` argument must be None, an evaluator name string, or a list of "
            "evaluator names.")

    return evaluator_name_list, evaluator_name_to_conf_map
Example #40
    truncated_items = [
        p for p in sys.path if len([q for q in scripts_path if q in p]) == 0
    ]
    if len(truncated_items) < len(sys.path):
        warnings.warn(
            "Found hypermapper in PYTHONPATH. Usage is deprecated and might break things. "
            "Please remove all hypermapper references from PYTHONPATH. Trying to import"
            "without hypermapper in PYTHONPATH...")
        sys.path = truncated_items
    else:
        # this removes the 'scripts' path from sys.path, enabling imports from the hypermapper directory (name clash)
        # only necessary in 'scripts' directory, all imports from scripts have to be done above
        sys.path = sys.path[1:]

    sys.path.append(".")  # noqa
    sys.path = list(OrderedDict.fromkeys(sys.path))

    from hypermapper.evolution import main
    from hypermapper.utility_functions import (
        deal_with_relative_and_absolute_path,
        Logger,
        extend_with_default,
    )

if __name__ == "__main__":
    warnings.warn(
        "Using 'scripts/evolution' is deprecated and it will be removed in the future. Use 'hypermapper/evolution' instead.",
        DeprecationWarning,
        2,
    )
Example #41
b = 2
print(a == b)
# print(b == c)

# To remove duplicates from a list, we convert the list into a set
# using set(), which drops them. Then we use list() to turn it
# back into a list.

a = ["1", 1, "1", 2, 4, 4, 4]
a = list(set(a))
print(a)

# Ordered dictionaries are another data type; unlike regular
# dictionaries, they keep their entries in order.

from collections import OrderedDict
a = ["1", 1, "1", 2]
a = list(OrderedDict.fromkeys(a))
print(a)

# Another way to remove duplicates is with a for loop: we walk through
# every item of the list and keep only those not seen before. This
# algorithm can take a long time on large lists, so it is not recommended.
a = ['2', 2, '2', 3]
b = list()

for i in a:
    if i not in b:
        b.append(i)

print(b)
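Since Python 3.7 the built-in dict also preserves insertion order, so plain dict.fromkeys achieves the same order-preserving de-duplication without the import:

a = ['2', 2, '2', 3]
print(list(dict.fromkeys(a)))  # -> ['2', 2, 3]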
Example #42
 def __dir__(self):
     current = super(Function, self).__dir__()
     return list(OrderedDict.fromkeys(dir(self._data) + current))
Example #43
def train(model, test_set, save, valid_set, n_epochs):
    '''
    Main training function
    '''
    # Dataloaders

    test_loader = DataLoader(test_set,
                             batch_size=cfg.batch_size,
                             shuffle=False,
                             pin_memory=(torch.cuda.is_available()),
                             num_workers=cfg.num_workers)
    if valid_set is None:
        valid_loader = None
    else:
        valid_loader = DataLoader(valid_set,
                                  batch_size=cfg.batch_size,
                                  shuffle=False,
                                  pin_memory=(torch.cuda.is_available()),
                                  num_workers=cfg.num_workers)
    # Model on cuda
    model = to_device(model)

    # Wrap model for multi-GPUs, if necessary
    model_wrapper = model
    if torch.cuda.is_available() and torch.cuda.device_count() > 1:
        print('multi-gpus')
        if cfg.use_syncbn:
            print('Using sync-bn')
            model_wrapper = DataParallelWithCallback(model).cuda()
        else:
            model_wrapper = torch.nn.DataParallel(model).cuda()

    # Start logging
    logs = ['loss', 'acc', 'acc0', 'acc1']

    test_logs = ['test_' + log for log in logs] + ['test_auc', 'test_auc_pat']

    log_dict = OrderedDict.fromkeys(test_logs, 0)
    with open(os.path.join(save, 'logs.csv'), 'w') as f:
        f.write('epoch,')
        for key in log_dict.keys():
            f.write(key + ',')
        f.write('\n')
    with open(os.path.join(save, 'loss_logs.csv'), 'w') as f:
        f.write('iter,train_loss,\n')
    writer = SummaryWriter(log_dir=os.path.join(save, 'Tensorboard_Results'))

    # test the model (this snippet runs a single evaluation epoch)
    best_auc = 0
    global iteration
    iteration = 0
    for epoch in range(1):
        os.makedirs(os.path.join(cfg.save, 'epoch_{}'.format(epoch)))
        # test epoch
        test_meters = test_epoch(model=model_wrapper,
                                 loader=test_loader,
                                 epoch=epoch,
                                 is_test=True,
                                 writer=writer)

        # Log results
        for i, key in enumerate(test_logs):
            log_dict[key] = test_meters[i]
        log_results(save, epoch, log_dict, writer=writer)
        # save model checkpoint
        if log_dict['test_auc'] > best_auc:
            torch.save(model.state_dict(), os.path.join(save, 'model.dat'))
            best_auc = log_dict['test_auc']
            print('New best auc: %.4f' % log_dict['test_auc'])
        else:
            print('Current best auc: %.4f' % best_auc)
    # end
    writer.close()
    with open(os.path.join(save, 'logs.csv'), 'a') as f:
        f.write(',,,,best auc,%0.5f\n' % (best_auc))
    print('best auc: ', best_auc)
Example #44
0
 def _create_train_examples(self, questions_file, dict_explanations):
     """Creates examples for the training set."""
     df_q = pd.read_csv(questions_file, sep='\t')
     df_q['Answer_flag'] = None
     df_q['row_flag'] = None
     df_q['explanation_length'] = None
     df_q['Answer_number'] = df_q['question'].map(lambda x: len(x.split('(')) - 1)
     df_q['explanation_length'] = df_q['explanation'].map(lambda y: len(list(OrderedDict.fromkeys(str(y).split(' ')).keys())))
     examples = []
     i_flag = 0
     count_not_in_tables = []
     count_not_in_tables_questionid = []
     for _, row in df_q.iterrows():
         if 'SUCCESS' not in str(row['flags']).split(' '):
             continue
         if row['AnswerKey'] == 'A' or row['AnswerKey'] == "1":
             ac = 0
         elif row['AnswerKey'] == 'B' or row['AnswerKey'] == "2":
             ac = 1
         elif row['AnswerKey'] == 'C' or row['AnswerKey'] == "3":
             ac = 2
         else:
             ac = 3
         question_ac = row['question'].split('(')[0] + '[ANSWER]' + row['question'].split('(')[ac + 1].split(')')[1]
         question_ac = question_ac.replace("''", '" ').replace("``", '" ')
         text_a = question_ac
         explanations_id_list = []
         for single_row_id in list(OrderedDict.fromkeys(str(row['explanation']).split(' ')).keys()):
             explanations_id_list.append(single_row_id.split('|')[0])
         filtered_explanations_id_list = explanations_id_list.copy()
         for filter_single in explanations_id_list:
             if filter_single not in dict_explanations:
                 count_not_in_tables.append(filter_single)
                 count_not_in_tables_questionid.append(row['QuestionID'])
                 filtered_explanations_id_list.remove(filter_single)
         non_explanations_list = []
         for each_row in dict_explanations.keys():
             if each_row not in filtered_explanations_id_list:
                 non_explanations_list.append(each_row)
         non_explanations_list = random.sample(non_explanations_list, 100)
         final_rows_list = filtered_explanations_id_list + non_explanations_list
         random.shuffle(final_rows_list)
         for each_row_id in final_rows_list:
             if each_row_id in filtered_explanations_id_list:
                 i_flag += 1
                 each_row_true = dict_explanations[each_row_id]
                 each_row_true = each_row_true.replace("''", '" ').replace("``", '" ')
                 text_b = each_row_true
                 guid = i_flag
                 label = "1"
                 examples.append(
                     InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
             else:
                 i_flag += 1
                 each_row_false = dict_explanations[each_row_id]
                 each_row_false = each_row_false.replace("''", '" ').replace("``", '" ')
                 text_b = each_row_false
                 guid = i_flag
                 label = "0"
                 examples.append(
                     InputExample(guid=guid, text_a=text_a, text_b=text_b, label=label))
     print('examples length: ', len(examples))
     print('count_not_in_tables_questions: ', len(count_not_in_tables_questionid))
     print('count_not_in_tables_rows: ', len(set(count_not_in_tables)))
     return examples
Example #45
0
def tile_fitted(
    model: ModelBridge,
    generator_runs_dict: TNullableGeneratorRunsDict = None,
    rel: bool = True,
    show_arm_details_on_hover: bool = False,
    show_CI: bool = True,
    arm_noun: str = "arm",
    metrics: Optional[List[str]] = None,
    fixed_features: Optional[ObservationFeatures] = None,
) -> AxPlotConfig:
    """Tile version of fitted outcome plots.

    Args:
        model: model to use for predictions.
        generator_runs_dict: a mapping from
            generator run name to generator run.
        rel: if True, use relative effects. Default is True.
        show_arm_details_on_hover: if True, display
            parameterizations of arms on hover. Default is False.
        show_CI: if True, render confidence intervals.
        arm_noun: noun to use instead of "arm" (e.g. group)
        metrics: List of metric names to restrict to when plotting.
        fixed_features: Fixed features to use when making model predictions.

    """
    metrics = metrics or list(model.metric_names)
    nrows = int(np.ceil(len(metrics) / 2))
    ncols = min(len(metrics), 2)

    # make subplots (plot per row)
    fig = tools.make_subplots(
        rows=nrows,
        cols=ncols,
        print_grid=False,
        shared_xaxes=False,
        shared_yaxes=False,
        subplot_titles=tuple(metrics),
        horizontal_spacing=0.05,
        vertical_spacing=0.30 / nrows,
    )

    name_order_args: Dict[str, Any] = {}
    name_order_axes: Dict[str, Dict[str, Any]] = {}
    effect_order_args: Dict[str, Any] = {}

    for i, metric in enumerate(metrics):
        data = _single_metric_traces(
            model,
            metric,
            generator_runs_dict,
            rel,
            showlegend=i == 0,
            show_arm_details_on_hover=show_arm_details_on_hover,
            show_CI=show_CI,
            arm_noun=arm_noun,
            fixed_features=fixed_features,
        )

        # order arm names by sorting arm numbers within each batch
        names_by_arm = sorted(
            np.unique(np.concatenate([d["x"] for d in data])),
            key=lambda x: arm_name_to_tuple(x),
        )

        # get arm names sorted by effect size
        names_by_effect = list(
            OrderedDict.fromkeys(
                np.concatenate([d["x"] for d in data]).flatten().take(
                    np.argsort(
                        np.concatenate([d["y"] for d in data]).flatten()))))

        # options for ordering arms (x-axis)
        # Note that xaxes need to be references as xaxis, xaxis2, xaxis3, etc.
        # for the purposes of updatemenus argument (dropdown) in layout.
        # However, when setting the initial ordering layout, the keys should be
        # xaxis1, xaxis2, xaxis3, etc. Note the discrepancy for the initial
        # axis.
        label = "" if i == 0 else i + 1
        name_order_args["xaxis{}.categoryorder".format(label)] = "array"
        name_order_args["xaxis{}.categoryarray".format(label)] = names_by_arm
        effect_order_args["xaxis{}.categoryorder".format(label)] = "array"
        effect_order_args["xaxis{}.categoryarray".format(
            label)] = names_by_effect
        name_order_axes["xaxis{}".format(i + 1)] = {
            "categoryorder": "array",
            "categoryarray": names_by_arm,
            "type": "category",
        }
        name_order_axes["yaxis{}".format(i + 1)] = {
            "ticksuffix": "%" if rel else "",
            "zerolinecolor": "red",
        }
        for d in data:
            fig.append_trace(d, int(np.floor(i / ncols)) + 1, i % ncols + 1)

    order_options = [
        {
            "args": [name_order_args],
            "label": "Name",
            "method": "relayout"
        },
        {
            "args": [effect_order_args],
            "label": "Effect Size",
            "method": "relayout"
        },
    ]

    # if odd number of plots, need to manually remove the last blank subplot
    # generated by `tools.make_subplots`
    if len(metrics) % 2 == 1:
        del fig["layout"]["xaxis{}".format(nrows * ncols)]
        del fig["layout"]["yaxis{}".format(nrows * ncols)]

    # allocate 400 px per plot
    fig["layout"].update(
        margin={"t": 0},
        hovermode="closest",
        updatemenus=[{
            "x": 0.15,
            "y": 1 + 0.40 / nrows,
            "buttons": order_options,
            "xanchor": "left",
            "yanchor": "middle",
        }],
        font={"size": 10},
        width=650 if ncols == 1 else 950,
        height=300 * nrows,
        legend={
            "orientation": "h",
            "x": 0,
            "y": 1 + 0.20 / nrows,
            "xanchor": "left",
            "yanchor": "middle",
        },
        **name_order_axes,
    )

    # append dropdown annotations
    fig["layout"]["annotations"] += (
        {
            "x": 0.5,
            "y": 1 + 0.40 / nrows,
            "xref": "paper",
            "yref": "paper",
            "font": {
                "size": 14
            },
            "text": "Predicted Outcomes",
            "showarrow": False,
            "xanchor": "center",
            "yanchor": "middle",
        },
        {
            "x": 0.05,
            "y": 1 + 0.40 / nrows,
            "xref": "paper",
            "yref": "paper",
            "text": "Sort By",
            "showarrow": False,
            "xanchor": "left",
            "yanchor": "middle",
        },
    )

    fig = resize_subtitles(figure=fig, size=10)
    return AxPlotConfig(data=fig, plot_type=AxPlotTypes.GENERIC)
Example #46
0
def merge_the_tools(string, k):
    subsequents = [string[i:i + k] for i in range(0, len(string), k)]
    for subset in subsequents:
        print("{}".format("".join(OrderedDict.fromkeys(subset))))
Example #47
0
 def gpu_str2list(self, g_str: List[str]):
     if 'all' in g_str:
         gpus = list(range(torch.cuda.device_count()))
     else:
         gpus = list(OrderedDict.fromkeys([int(g) for g in re.split(r',\s*|\s+', ','.join(g_str))]))
     return gpus
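A minimal standalone sketch of the same parsing logic (an assumption: it reuses the method's regex split and dedup, skipping the torch-dependent 'all' branch):

import re
from collections import OrderedDict

def gpu_str2list_sketch(g_str):
    # join the fragments, split on commas and/or whitespace, cast to int, dedup in order
    return list(OrderedDict.fromkeys(int(g) for g in re.split(r',\s*|\s+', ','.join(g_str))))

print(gpu_str2list_sketch(['0,1', '1 3']))  # [0, 1, 3]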
Example #48
0
from collections import OrderedDict as od  # the 'od' alias is implied by the calls below

def merge_the_tools(string, k):
    t = len(string)//k
    for s_ in range(t):
        print("".join(od.fromkeys(string[:k])))
        string = string[k:]
Example #49
0
def _build_field_vocab(field, counter, **kwargs):
    specials = list(
        OrderedDict.fromkeys(tok for tok in [
            field.unk_token, field.pad_token, field.init_token, field.eos_token
        ] if tok is not None))
    field.vocab = field.vocab_cls(counter, specials=specials, **kwargs)
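The OrderedDict.fromkeys call above dedups the special tokens while preserving their order, which matters when two specials share the same string; a minimal illustration with made-up token values:

from collections import OrderedDict

# hypothetical field tokens; two specials happen to collide here
tokens = ['<unk>', '<pad>', '<pad>', '</s>', None]
specials = list(OrderedDict.fromkeys(tok for tok in tokens if tok is not None))
print(specials)  # ['<unk>', '<pad>', '</s>']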
Example #50
0
def getVariablesDict(code_lines, get_variables):

    if len(get_variables) == 0:
        return OrderedDict()

    return_var_dicts = OrderedDict.fromkeys(get_variables)  # fromkeys takes no keyword arguments; the default value is None

    implicit_defs = getImplicitDefs(code_lines)

    for line in code_lines:

        #
        # First, make use of all variable type declaration lines
        #
        
        is_var_decl, type_name, type_size = isVariableDecl(line, return_type=True)

        if is_var_decl:

            # Remove type name from beginning of line so that
            # only the list of variable names remain.
            full_type_name = type_name + '*' + str(type_size)

            line_list = line.split()
            i = 1
            while i <= len(line_list):
                if ''.join(line_list[:i]).lower() in full_type_name:
                    i += 1
                    continue
                else:
                    break
            var_seq = ''.join(line_list[i-1:])

            # Parse line to extract info on the different variables
            var_dicts = parseVariableSequence(var_seq)

            # Append type_name and type_size to var_dicts
            for var_name in var_dicts.keys():

                # - Add type name
                var_dicts[var_name]['type'] = type_name
                # - Use the maximum of the sizes specified in the type name and in the variable sequence
                #   (Normally one of these should be 1 by default.)
                var_dicts[var_name]['size'] = max(type_size,var_dicts[var_name]['size'])


                # Check for character array type:
                if (var_dicts[var_name]['type'] == 'character'): 
                    dim_str = var_dicts[var_name]['dimension']
                    size    = var_dicts[var_name]['size']
                    if (dim_str == '') and (size > 1):
                        var_dicts[var_name]['dimension'] = '1:%i' % size

            # For requested variables, append the variable dicts to return_var_dicts
            for var_name in var_dicts.keys():
                if var_name in get_variables:
                    return_var_dicts[var_name] = var_dicts[var_name]



        #
        # Then, check all the 'dimension' statements
        #

        is_dim_stmnt = isDimensionStatement(line)

        if is_dim_stmnt:

            # Remove whitespace and 'dimension' keyword
            line = line.replace(' ','')
            line = line.replace('dimension','',1)

            # Parse line to extract info on the different variables
            dim_var_dicts = parseVariableSequence(line)

            # For variables that already exist in return_var_dicts, simply
            # update the 'dimension'. For variables that don't exist in 
            # return_var_dicts, create a new entry based on implicit types.
            for var_name in dim_var_dicts.keys():

                if var_name in get_variables:

                    # If info on this variable has not yet been added to return_var_dicts,
                    # insert a complete dict
                    if return_var_dicts[var_name] is None:
                        # Get type from implicit types
                        first_char = var_name[0]
                        type_name, type_size = implicit_defs[first_char.lower()]

                        if type_name is None or type_size is None:
                            raise RuntimeError("No type declaration (neither explicit nor implicit) was found for variable '%s'." % var_name)

                        return_var_dicts[var_name] = { 
                                                       'type'     : type_name,
                                                       'dimension': dim_var_dicts[var_name]['dimension'],
                                                       'size'     : type_size
                                                     }

                    # If info on this variable already exists, simply update the 'dimension' entry in the
                    # correct dict
                    else:
                        return_var_dicts[var_name]['dimension'] = dim_var_dicts[var_name]['dimension']


    #
    # END: Loop over code lines
    #

    #
    # Finally, add any missing variables that have not appeared in explicit type
    # declarations or 'dimension' statements
    #

    for get_var_name in get_variables:

        if return_var_dicts[get_var_name] is None:

            # Get type from implicit types
            first_char = get_var_name[0]
            type_name, type_size = implicit_defs[first_char.lower()]

            if type_name is None or type_size is None:
                raise RuntimeError("No type declaration (neither explicit nor implicit) was found for variable '%s'." % get_var_name)

            return_var_dicts[get_var_name] = { 
                                              'type'     : type_name,
                                              'dimension': '',
                                              'size'     : type_size
                                             }

    return return_var_dicts
Example #51
0
def merge_the_tools(string, k):
    for i in range(0, len(string), k):
        print "".join(OrderedDict.fromkeys(string[i:i + k]))
Example #52
0
#!/usr/bin/env python
import sys
from collections import OrderedDict

# get directory history in reverse order
chosen_dir = int(sys.argv[-1])
dir_list = sys.argv[1].split(' ')
dir_list = dir_list[::-1]
dir_list = dir_list[1:]

dir_list = list(OrderedDict.fromkeys(dir_list))

print(dir_list[chosen_dir + 1])

#chosen_dir = int(sys.argv[-1])

# remove duplicates
#dir_hist_single = list(OrderedDict.fromkeys(dir_hist[2:-1]))
#print dir_hist_single[chosen_dir-1]
Example #53
0
    def run(self, thunk, log_dir=None, format_strs=LOG_FMTS, datestamp=False):
        """
        Run each variant in the grid with function 'thunk'.

        Note: 'thunk' must be either a callable function, or a string. If it is
        a string, it must be the name of a parameter whose values are all
        callable functions.

        Uses ``call_experiment`` to actually launch each experiment, and gives
        each variant a name using ``self.variant_name()``.

        Maintenance note: the args for ExperimentGrid.run should track closely
        to the args for call_experiment. However, ``seed`` is omitted because
        we presume the user may add it as a parameter in the grid.
        """

        # Print info about self.
        self.print()

        # Make the list of all variants.
        variants = self.variants()

        # Print variant names for the user.
        var_names = OrderedDict.fromkeys(map(self.variant_name, variants))
        var_names = list(var_names.keys())
        line = "=" * DIV_LINE_WIDTH
        preparing = "Preparing to run the following experiments..."
        joined_var_names = "\n".join(var_names)
        announcement = f"\n{preparing}\n\n{joined_var_names}\n\n{line}"
        print(announcement)

        # Run the variants.
        for var in variants:
            exp_name = self.variant_name(var)
            print("Running experiment:", exp_name)

            thunk_plus = create_experiment(
                exp_name,
                thunk,
                log_dir=log_dir,
                datestamp=datestamp,
                format_strs=format_strs.split(","),
                **var,
            )
            # Prepare to launch a script to run the experiment
            pickled_thunk = cloudpickle.dumps(thunk_plus)
            encoded_thunk = base64.b64encode(zlib.compress(pickled_thunk)).decode(
                "utf-8"
            )

            entrypoint = osp.join(
                osp.abspath(osp.dirname(__file__)), "run_entrypoint.py"
            )
            cmd = [
                sys.executable if sys.executable else "python",
                entrypoint,
                encoded_thunk,
            ]
            try:
                subprocess.check_call(cmd, env=os.environ)
            except CalledProcessError:
                err_msg = (
                    "\n" * 3
                    + "=" * DIV_LINE_WIDTH
                    + "\n"
                    + dedent(
                        """

                There appears to have been an error in your experiment.

                Check the traceback above to see what actually went wrong. The
                traceback below, included for completeness (but probably not useful
                for diagnosing the error), shows the stack leading up to the
                experiment launch.

                """
                    )
                    + "=" * DIV_LINE_WIDTH
                    + "\n" * 3
                )
                print(err_msg)
                raise
Example #54
0
    async def init_stored_taxa(self):
        """Load taxon history, starred, and frequently viewed items"""
        logger.info('Taxon: Loading stored taxa')
        (
            self.taxon_history_ids,
            self.starred_taxa_ids,
            self.frequent_taxa_ids,
            self.observed_taxa_ids,
        ) = get_app().settings_controller.stored_taxa

        # Refresh observed taxa, if expired
        if get_app().settings_controller.is_observed_taxa_expired():
            logger.info('Taxon: Observed taxa expired')
            self.refresh_observed_taxa()

        # Collect all the taxon IDs we need to load
        unique_history_ids = list(
            OrderedDict.fromkeys(
                self.taxon_history_ids[::-1]))[:MAX_DISPLAY_HISTORY]
        starred_taxa_ids = self.starred_taxa_ids[::-1]
        top_frequent_ids = list(
            self.frequent_taxa_ids.keys())[:MAX_DISPLAY_HISTORY]
        top_observed_ids = list(
            self.observed_taxa_ids.keys())[:MAX_DISPLAY_HISTORY]
        total_taxa = sum(
            map(
                len,
                (
                    unique_history_ids,
                    self.starred_taxa_ids,
                    top_frequent_ids,
                    top_observed_ids,
                ),
            ))

        # Start progress bar with a new batch loader
        loader = TaxonBatchLoader()
        self.start_progress(total_taxa, loader)

        # Add callback to index items after they have all been loaded
        def index_list_items(*args):
            for item in self.taxon_history_list.children:
                self.taxon_history_map[item.taxon.id] = item
            for item in self.starred_taxa_list.children:
                self.bind_star(item)

        loader.bind(on_complete=index_list_items)

        # Start loading batches of TaxonListItems
        logger.info(
            f'Taxon: Loading {len(unique_history_ids)} unique taxa from history'
            f' (from {len(self.taxon_history_ids)} total)')
        await loader.add_batch(unique_history_ids,
                               parent=self.taxon_history_list)
        # TODO: Temporary workaround while BatchLoader is broken
        # for taxon_id in unique_history_ids:
        #     widget = get_app().get_taxon_list_item(taxon_id)
        #     self.taxon_history_list.add_widget(widget)
        #     self.taxon_history_map[widget.taxon.id] = widget

        logger.info(f'Taxon: Loading {len(starred_taxa_ids)} starred taxa')
        await loader.add_batch(starred_taxa_ids,
                               parent=self.starred_taxa_list,
                               highlight_observed=False)
        logger.info(
            f'Taxon: Loading {len(top_frequent_ids)} frequently viewed taxa')
        await loader.add_batch(top_frequent_ids,
                               parent=self.frequent_taxa_list)
        logger.info(
            f'Taxon: Loading {len(top_observed_ids)} user-observed taxa'
            f' (from {len(self.observed_taxa_ids)} total)')
        await loader.add_batch(top_observed_ids,
                               parent=self.observed_taxa_list)

        await loader.start()
Example #55
0
def uniqueify(x, sort=False):
    res = list(OrderedDict.fromkeys(x).keys())
    if sort: res.sort()
    return res
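A couple of illustrative calls:

print(uniqueify([3, 1, 3, 2]))              # [3, 1, 2]  (first-seen order kept)
print(uniqueify([3, 1, 3, 2], sort=True))   # [1, 2, 3]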
Example #56
0
ttparse.drop(nan_row, inplace = True)
ttparse = ttparse[~ttparse.STAT.isin(["P", "R", "I"])]  # single mask avoids re-indexing warnings from chained boolean filters
ttparse[ttparse["DAYS/ H"].isnull()]["DAYS/ H"] = "TBA"
#ttparse.drop(columns = ["STAT"], inplace = True)
print (ttparse)

outdf = pd.DataFrame([], columns=["Slot", "CourseCap"])

ttslot = OrderedDict()
ttcap = OrderedDict()

for a in ttparse["COURSENO"].tolist():
    #print(a)
    temp = ttparse[ttparse["COURSENO"] == a][ttparse["STAT"] == 'L']["DAYS/ H"].tolist()
    #REMOVE DUPLICATES FROM TEMP
    temp = list(OrderedDict.fromkeys(temp))
    ttslot[a] = temp[0]
    cap = 0
    if a not in ttcap:
        for i in ttparse[ttparse["COURSENO"] == a]["CAPACITY"].tolist():
            if not math.isnan(i):
                cap = cap + int(i)
    
        ttcap[a] = cap

#Making the new format
ttcourse = OrderedDict()

for a in ttslot:
    ttcourse[ttslot[a]] = []
Example #57
0
from string import ascii_lowercase  # needed by the lowercase filter below

def getKeys(chiave):
    '''given a key as input, generate and return the unordered key and the sorted key'''
    lowercase = frozenset(ascii_lowercase)
    chiave = "".join(
        [c for c in reversed(OrderedDict.fromkeys(chiave)) if c in lowercase])
    return chiave, sorted(chiave)
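An illustrative call (the input word is made up): the function keeps only lowercase ASCII letters, removes duplicates, reverses the first-seen order, and also returns the sorted characters:

print(getKeys('secret!'))  # ('trces', ['c', 'e', 'r', 's', 't'])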
Example #58
0
 def __init__(self, iterable=None):
     self.dict = OrderedDict.fromkeys(iterable or ())
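This one-liner is the backbone of an ordered set: dict keys are unique, and OrderedDict remembers insertion order. A self-contained sketch of how such a class might round it out (the extra methods are assumptions, not from the source):

from collections import OrderedDict

class OrderedSetSketch:
    def __init__(self, iterable=None):
        self.dict = OrderedDict.fromkeys(iterable or ())

    def add(self, item):
        self.dict[item] = None  # the value is irrelevant; only key order matters

    def __contains__(self, item):
        return item in self.dict

    def __iter__(self):
        return iter(self.dict)

print(list(OrderedSetSketch('abracadabra')))  # ['a', 'b', 'r', 'c', 'd']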
Example #59
0
from collections import OrderedDict

x0 = input()  # read a string
x1 = input().split(" ")
x2 = input().split(" ")
x3 = input().split(" ")
x4 = input()
lis = "".join(OrderedDict.fromkeys(x0))
s_sp = list(x1)
while "" in s_sp:
    s_sp.remove("")
lis2 = list(set(s_sp))
lis2.sort(key=s_sp.index)
ress = len(lis) + len(lis2)
print("Total: " + str(ress))
while "" in x2:
    x2.remove("")
r3 = list(x2) + x3
st_split2 = list(x3)
liss2 = list(set(lis2 + list(lis)).difference(set(r3)))
l2 = list(set(r3))
l2.sort(key=r3.index)
lis4 = []
lis4 = list(set(l2) & set(x2))
lis41 = list(set(lis4) & set(x3))
lis5 = list(set(l2).difference(set(x2)))
lis6 = list(set(l2).difference(set(x3)))
lis7 = list(set(lis5).union(set(lis6)))

# print the results
print('Not in race: {0}, num: {1}'.format(sorted(liss2), str(len(liss2))))
Example #60
0
import numpy
from collections import OrderedDict
from operator import itemgetter

# NOTE: the opening of this snippet is missing in the source. Everything down to
# the first 'elif' below is a plausible reconstruction (hypothetical 'input.txt'
# with lines such as "x=495, y=2..7"), inferred from how 'first', 'second', 'x'
# and 'claypoints' are used later.
claypoints = []
with open('input.txt') as f:
    for line in f:
        first, second = line.strip().split(', ')
        if 'x' in first:
            x = int(first.split('=')[1])
        elif 'y' in first:
            y = int(first.split('=')[1])
        if 'x' in second:
            rag = second.split('=')[1]
            start = int(rag.split('..')[0])
            end = int(rag.split('..')[1])
            for i in range(start, end + 1):
                claypoints.append((i, y))
        elif 'y' in second:
            rag = second.split('=')[1]
            start = int(rag.split('..')[0])
            end = int(rag.split('..')[1])
            for i in range(start, end + 1):
                claypoints.append((x, i))

claypoints = list(OrderedDict.fromkeys(claypoints))
min_x = min(claypoints, key=itemgetter(0))[0]
max_x = max(claypoints, key=itemgetter(0))[0]
min_y = min(claypoints, key=itemgetter(1))[1]
max_y = max(claypoints, key=itemgetter(1))[1]

print(claypoints)
print(min_x, max_x, min_y, max_y)

a = numpy.full((max_y + 1, max_x + 1), 0)

for point in claypoints:
    a[point[1]][point[0]] = 1

a[0][500] = 2