Example #1
def test():
    goals = []
    num_batches_per_epoch = len(x) // batch_size
    for j in range(num_batches_per_epoch):
        idx = j * batch_size
        predict = sess.run(
            [model.predict],
            feed_dict={
                model.features: x[idx:(idx + batch_size)],
                model.event_size: x_times[idx:(idx + batch_size)],
                model.input_y: y[idx:(idx + batch_size)]
            })

        predict = predict[0]
        goal = [no2sku[item] for item in predict.tolist()]
        goals = operator.concat(goals, goal)

    idx = num_batches_per_epoch * batch_size
    predict = sess.run(
        [model.predict],
        feed_dict={
            model.features: x[idx:],
            model.event_size: x_times[idx:],
            model.input_y: y[idx:]
        })
    predict = predict[0]
    goal = [no2sku[item] for item in predict.tolist()]
    goals = operator.concat(goals, goal)

    goals = np.asarray(goals)
    goals.dump(data_path + 'goal.list')
Example #2
    def generate(self, n_noise_samples=1):
        """Generate noise samples.

        The type of the noise that will be generated, and the size of the noise array are defined by the argument given
        to the constructor.

        :param n_noise_samples: The number of noise samples to be generated.

        :return: an np.array with the specified noise
        """

        n = n_noise_samples * self.noise_size[0] * self.noise_size[1]
        s = concat([n_noise_samples], list(self.noise_size))
        if self.noise_type == 'simplistic':
            return np.random.uniform(0, 1, size=s)
        elif self.noise_type.lower() in {'gaussian', 'white', 'normal'}:
            return np.reshape(white(n), s)
        elif self.noise_type.lower() == 'pink':
            return np.reshape(pink(n), s)
        elif self.noise_type.lower() == 'blue':
            return np.reshape(blue(n), s)
        elif self.noise_type.lower() == 'brown':
            return np.reshape(brown(n), s)
        elif self.noise_type.lower() == 'violet':
            return np.reshape(violet(n), s)
        else:
            print("WARNING: noise type " + self.noise_type + " not defined. Returning 0")
            return np.reshape(np.zeros(n), s)
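A minimal usage sketch (hypothetical: NoiseGenerator stands in for whatever class hosts this method, with noise_type and a (rows, cols) noise_size stored by its constructor):

# NoiseGenerator is hypothetical; 'simplistic' maps to uniform noise in [0, 1)
gen = NoiseGenerator(noise_type='simplistic', noise_size=(64, 64))
samples = gen.generate(n_noise_samples=8)
print(samples.shape)   # (8, 64, 64)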
Example #3
def permuter(finalPerm, inputPerm):
    if len(inputPerm) == 0:
        print(finalPerm)
    else:
        for i in range(len(inputPerm)):
            recurPerm = operator.concat(finalPerm, inputPerm[i:i+1])
            rmainPerm = operator.concat(inputPerm[:i],inputPerm[i+1:])
            permuter(recurPerm,rmainPerm)
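For instance, seeding the recursion with an empty prefix prints every permutation of the input (assuming operator has been imported, as the function requires):

permuter([], [1, 2, 3])   # prints all 3! = 6 orderings, one list per line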
Example #4
    def preorderTraversal(self, root):  # 42 ms
        if root is None:
            return []

        left_result = self.preorderTraversal(root.left)
        right_result = self.preorderTraversal(root.right)

        return concat(concat([root.val], left_result), right_result)
Example #5
 def test_concat(self):
     self.assertRaises(TypeError, operator.concat)
     self.assertRaises(TypeError, operator.concat, None, None)
     self.assertTrue(operator.concat('py', 'thon') == 'python')
     self.assertTrue(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
     self.assertTrue(operator.concat(Seq1([5, 6]), Seq1([7])) == [5, 6, 7])
     self.assertTrue(operator.concat(Seq2([5, 6]), Seq2([7])) == [5, 6, 7])
     self.assertRaises(TypeError, operator.concat, 13, 29)
Example #7
    def inorderTraversal(self, root):

        if root is None:
            return []

        left_val = self.inorderTraversal(root.left)
        right_val = self.inorderTraversal(root.right)
        result = concat(concat(left_val, [root.val]), right_val)
        return result
Example #8
 def test_concat(self):
     self.failUnlessRaises(TypeError, operator.concat)
     self.failUnlessRaises(TypeError, operator.concat, None, None)
     self.failUnless(operator.concat('py', 'thon') == 'python')
     self.failUnless(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
     self.failUnless(operator.concat(Seq1([5, 6]), Seq1([7])) == [5, 6, 7])
     self.failUnless(operator.concat(Seq2([5, 6]), Seq2([7])) == [5, 6, 7])
     if not test_support.is_jython:
         # Jython concat is add
         self.failUnlessRaises(TypeError, operator.concat, 13, 29)
Example #10
def save2(master, mem, name):
    x = operator.concat(name, ".txt")
    var2 = mem + mem
    for i in range(len(mem)):
        var2[i * 2] = mem[i]
        var2[(i * 2) + 1] = '\n'
    with open(x, 'w') as f:
        f.writelines(var2)
    y = operator.concat(name, " list is saved\nClick on the Close button")
    Label(master, text=y, font=("Arial", 12, ("bold", "italic"))).pack()
Example #11
    def test_concat(self):
        class Seq1:
            def __init__(self, lst):
                self.lst = lst

            def __len__(self):
                return len(self.lst)

            def __getitem__(self, i):
                return self.lst[i]

            def __add__(self, other):
                return self.lst + other.lst

            def __mul__(self, other):
                return self.lst * other

            def __rmul__(self, other):
                return other * self.lst

        class Seq2(object):
            def __init__(self, lst):
                self.lst = lst

            def __len__(self):
                return len(self.lst)

            def __getitem__(self, i):
                return self.lst[i]

            def __add__(self, other):
                return self.lst + other.lst

            def __mul__(self, other):
                return self.lst * other

            def __rmul__(self, other):
                return other * self.lst

        import operator

        raises(TypeError, operator.concat)
        raises(TypeError, operator.concat, None, None)
        assert operator.concat('py', 'thon') == 'python'
        assert operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4]
        assert operator.concat(Seq1([5, 6]), Seq1([7])) == [5, 6, 7]
        assert operator.concat(Seq2([5, 6]), Seq2([7])) == [5, 6, 7]
        raises(TypeError, operator.concat, 13, 29)
Example #12
def run301_04():
    """
    sequence ops
    :return:
    """
    a = [1, 2, 3]
    b = ['a', 'b', 'c']

    print('a=', a)
    print('b=', b)

    print('Constructive:')
    print('concat(a,b): ', concat(a, b))

    print('\nSearching:')
    print('contains(a,1):', contains(a, 1))
    print('contains(b,"d"):', contains(b, 'd'))
    print('countOf(a,1):', countOf(a, 1))
    print('countOf(b,"d"):', countOf(b, 'd'))
    print('indexOf(a,1):', indexOf(a, 1))
    # print('indexOf(a,5):', indexOf(a, 5)) # ValueError

    print('\nAccess Items:')
    print('getitem(b,1):', getitem(b, 1))
    print('getitem(b,slice(1,3)):', getitem(b, slice(1, 3)))
    print('setitem(b,1,"d"):', setitem(b, 1, 'd'))
    print(b)
    print('setitem(a,slice(1,3),[4,5]):', setitem(a, slice(1, 3), [4, 5]))
    print(a)

    print('\nDestructive:')
    print('delitem(b,1)', delitem(b, 1))
    print(b)
    print('delitem(a,slice(1,3))', delitem(a, slice(1, 3)))
    print(a)
Example #13
def main():
    a = [1, 2, 3]
    b = ["a", "b", "c"]

    print("a =", a)
    print("b =", b)

    print("\nConstructive:")
    print("  concat(a, b)", operator.concat(a, b))

    print("\nSearching:")
    print("  contains(a, 1)  :", operator.contains(a, 1))
    print("  contains(b, 'd'):", operator.contains(b, "d"))
    print("  countOf(a, 1)   :", operator.countOf(a, 1))
    print("  countOf(b, 'd') :", operator.countOf(b, "d"))
    print("  indexOf(a, 1)   :", operator.indexOf(a, 1))

    print("\nAccess Items:")
    print("  getitem(b, 1)                  :", operator.getitem(b, 1))
    print("  getitem(b, slice(1, 3))        :",
          operator.getitem(b, slice(1, 3)))
    print("  setitem(b, 1, 'd')             :", end=" ")
    operator.setitem(b, 1, "d")
    print(b)
    print("  setitem(a, slice(1, 3), [4,5]):", end=" ")
    operator.setitem(a, slice(1, 3), [4, 5])
    print(a)

    print("\nDestructive:")
    print("  delitem(b, 1)          :", end=" ")
    operator.delitem(b, 1)
    print(b)
    print("  delitem(a, slice(1, 3)):", end=" ")
    operator.delitem(a, slice(1, 3))
    print(a)
Example #14
async def section_callback(session, logger, section_url: str):
    """Process a page and add links to the database"""
    logger.debug(f"START SECTION {section_url}")
    soup = None

    async with session.get(section_url) as response:
        soup = BeautifulSoup(await response.text(), 'html.parser')

    article_urls = [a.get('href') or '' for a in soup.find_all('a')]

    # if not http://, prepend domain name
    domain = '/'.join(section_url.split('/')[:3])
    article_urls = [
        url if '://' in url else operator.concat(domain, url)
        for url in article_urls
    ]
    article_urls = set([
        normalize_url(url) for url in article_urls
        if re.search(NYT_SECTION_PATTERN, url)
    ])

    for url in article_urls:
        try:
            # tortoise's get_or_create returns an (object, created) tuple
            article, created = await Article.get_or_create(url=url)
            if created:
                logger.debug(f"CREATE ARTICLE {url}")
            else:
                logger.debug(f"EXISTS {url}")
        except (tortoise.exceptions.IntegrityError, sqlite3.IntegrityError):
            pass
Example #15
def conj(head, tail):
    '''
    Prepend an element to a collection, returning a new copy
    Exact behaviour will differ depending on the collection
    '''
    tail_type = type(tail)
    return op.concat(tail_type([head]), tail)
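For example, with list and tuple tails (a str tail would not work here, since str([head]) stringifies the list instead of joining it):

print(conj(1, [2, 3]))   # [1, 2, 3]
print(conj(1, (2, 3)))   # (1, 2, 3)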
Example #16
def get_next_round_urls(
    input_links,
    max_per_page=config_pytomo.MAX_PER_PAGE,
    max_per_url=config_pytomo.MAX_PER_URL,
    max_round_duration=config_pytomo.MAX_ROUND_DURATION,
):
    """Return a tuple of the set of input urls and a set of related url of
    videos
    Arguments:
        * input_links: list of the urls
        * max_per_url and max_per_page options
        * out_file_name: if provided, list is dump in it
    """
    # keep only non-duplicated links and no links from input file
    start = time.time()
    if len(input_links) > CONTINUOUS_CRAWL_SIZE:
        related_links = []
        for url in input_links:
            time.sleep(config_pytomo.DELAY_BETWEEN_REQUESTS)
            related_links = concat(related_links, get_related_urls(url, max_per_page, max_per_url))
            if (time.time() - start) > max_round_duration:
                break
        related_links = set(related_links).difference(input_links)
    else:
        related_links = set(
            reduce(concat, (get_related_urls(url, max_per_page, max_per_url) for url in input_links), [])
        ).difference(input_links)
    config_pytomo.LOG.info("%d links collected by crawler" % len(related_links))
    config_pytomo.LOG.debug(related_links)
    return related_links
Example #17
def get_next_round_urls(input_links, max_per_page=config_pytomo.MAX_PER_PAGE,
                        max_per_url=config_pytomo.MAX_PER_URL,
                        max_round_duration=config_pytomo.MAX_ROUND_DURATION):
    """Return a tuple of the set of input urls and a set of related url of
    videos
    Arguments:
        * input_links: list of the urls
        * max_per_url and max_per_page options
        * out_file_name: if provided, list is dump in it
    """
    # keep only non-duplicated links and no links from input file
    start = time.time()
    if len(input_links) > CONTINUOUS_CRAWL_SIZE:
        related_links = []
        for url in input_links:
            time.sleep(config_pytomo.DELAY_BETWEEN_REQUESTS)
            related_links = concat(related_links,
                                   get_related_urls(url, max_per_page,
                                                   max_per_url))
            if (time.time() - start) > max_round_duration:
                break
        related_links = set(related_links).difference(input_links)
    else:
        related_links = set(reduce(concat, (get_related_urls(url, max_per_page,
                                                             max_per_url)
                                            for url in input_links), [])
                           ).difference(input_links)
    config_pytomo.LOG.info("%d links collected by crawler"
                            % len(related_links))
    config_pytomo.LOG.debug(related_links)
    return related_links
Example #18
def overload_concat_add(*args):
	if len(args) == 1:
		return op.pos(args[0])
	if all(hasattr(type(v),'__iter__') for v in (args)):
		return op.concat(*args)
	else:
		return op.add(*args)
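A quick check of the dispatch: iterables go through op.concat, plain numbers through op.add, and a single argument falls through to op.pos:

print(overload_concat_add([1, 2], [3]))   # [1, 2, 3] via op.concat
print(overload_concat_add(4, 2))          # 6 via op.add
print(overload_concat_add(-3))            # -3 via op.pos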
Example #19
    def get_posts_by_id(self, owner_id=None, count=100, offset=0):
        """ how many post will be returned - count
        owner_id - id from group or user
        offset - how many posts will skipped
        offset_100 - var for getting posts if count more then 100
        because api.get.wall() can return max 100 items
        result_list = list with result items
        """

        result_list = []
        get_posts = self.api.wall.get
        offset_100 = offset

        if owner_id is None and self.user_id is not None:
            owner_id = self.user_id

        # formatting for wall.get(). owner_id must start with '-'
        owner_id = concat('-', str(owner_id))

        while count - 100 > 0:
            result_list.extend(
                get_posts(owner_id=owner_id, count=100,
                          offset=offset_100).get("items"))
            count -= 100
            offset_100 += 100

        result_list.extend(
            get_posts(owner_id=owner_id, count=count,
                      offset=offset_100).get("items"))

        return result_list
Example #20
def parse_grants(grants):
    """
    Translate the EC2 GRANTS into our internal format.
    """
    return concat([grant.cidr_ip for grant in grants
                   if grant.cidr_ip is not None],
                  [grant.name for grant in grants
                   if grant.name is not None])
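A hypothetical check; Grant here is a stand-in for the EC2 grant objects the function expects, with only the two attributes it reads:

from collections import namedtuple

Grant = namedtuple('Grant', ['cidr_ip', 'name'])
grants = [Grant('10.0.0.0/8', None), Grant(None, 'web-sg')]
print(parse_grants(grants))   # ['10.0.0.0/8', 'web-sg']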
Example #21
 def _get_all_page(cls, url):
     soup = cls.soup(grab_url(url))
     urls = [url]
     urls += [
         concat(cls.page_prefix, link.get('href'))
         for link in soup.find_all('a', 'papernewstop')[:-1]
     ]
     return urls
Example #22
def ftfilter(ext):
    """\
	THEFUNCTIONTAKESASTRINGANDRETURNSNONE
	PRINTSALLFILESWITHARGEXTENSIONINCWD
	"""
    infolder = os.listdir(os.getcwd())
    extfilter = filter((lambda x: o.eq(x[-4:], "." + ext)), infolder)
    print "all ." + ext + " files:\n"
    map((lambda nline: sys.stdout.write(o.concat(nline, "\n"))), extfilter)
Example #23
 def feed_urls(cls):
     all_urls = []
     for feeder_url in cls.feeder_pages:
         domain = '/'.join(feeder_url.split('/')[:3])
         for page in cls._get_all_page(feeder_url):
             urls = [a.get('href') or '' for a in cls.soup(grab_url(page)).findAll('a')]
             urls = [url if '://' in url else concat(domain, url) for url in urls]
             all_urls += [url for url in urls if re.search(cls.feeder_pattern, url)]
     return all_urls
Example #24
  def _getFiles( self, path ):
    """\brief Performs the non-recursive matching operation to \em filelist

    Stores all matching filenames immediately within the starting path
    into \em filelist.  No recursion is performed.

    \param path The path from which to retrieve the .xml files
    """
    self.filelist = glob.glob(os.path.normpath(operator.concat(path, '/*.xml')))
Example #25
def ftfilter( ext ):
	"""\
	THEFUNCTIONTAKESASTRINGANDRETURNSNONE
	PRINTSALLFILESWITHARGEXTENSIONINCWD
	"""
	infolder = os.listdir( os.getcwd() )
	extfilter = filter( (lambda x: o.eq( x[-4:], "." + ext )), infolder )
	print "all ." + ext + " files:\n"
	map( (lambda nline: sys.stdout.write( o.concat( nline, "\n" ) ))
		, extfilter )
Example #26
def joincharli( listy ):
    '''\
    Takes a list of strings and returns a string.
    Used in the string pop function.
    '''
    ahandle = listy

    ahandle.insert( 0, "" )

    return reduce( (lambda a, d: o.concat( a, d )), ahandle )
Example #27
 def _get_all_page(cls, url):
     try:
         page = yield from grab_url(url)
         soup = cls.soup(page)
     except RuntimeError:
         log.error("Cannot get all pages for {}, defaulting to the first page".format(url))
         return [url]
     urls = [url]
     urls += [concat(cls.page_prefix, link.get('href')) for link in soup.find_all('a', 'papernewstop')[:-1]]
     return urls
Example #28
def joincharli(listy):
    '''\
    Takes a list of strings and returns a string.
    Used in the string pop function.
    '''
    ahandle = listy

    ahandle.insert(0, "")

    return reduce((lambda a, d: o.concat(a, d)), ahandle)
Example #29
    def setwsgienv(self):
        self.wsgienv["SERVER_PROTOCOL"] = "HTTP/1.1"
        self.wsgienv["SERVER_NAME"] = self.reqHandler.headers["HOST"].split(
            ":")[0]
        self.wsgienv["SERVER_PORT"] = self.reqHandler.headers["HOST"].split(
            ":")[1]
        self.wsgienv["REQUEST_METHOD"] = self.reqHandler.command
        self.wsgienv["QUERY_STRING"] = self.reqHandler.requestline
        self.wsgienv["CONTENT_TYPE"] = self.reqHandler.headers["Content-Type"]
        self.wsgienv["CONTENT_LENGTH"] = int(
            self.reqHandler.headers["Content-Length"])
        self.wsgienv["SCRIPT_NAME"] = self.reqHandler.headers["SCRIPT_NAME"]

        if self.wsgienv["REQUEST_METHOD"] in ["POST", "post"]:
            val = self.wsgienv["CONTENT_LENGTH"]
            if val > 0:
                post_data = self.reqHandler.rfile.read(val)
                try:
                    post_data = json.loads(post_data.decode('utf-8'))
                except json.decoder.JSONDecodeError:
                    post_data = post_data.decode("utf-8")
                finally:
                    logs["dynamic"](post_data)
                    self.wsgienv["wsgi.input"] = post_data
            else:
                logs["dynamic"]("content-length = %s", val)
                self.reqHandler.wfile.write(getmsgbycode(204))

        elif self.wsgienv["REQUEST_METHOD"] in ["GET", "get"]:
            lastindex = self.path.rfind("/", 0, len(self.path)) + 1
            for ctype in config.avctypes:
                # match request paths that end with ".<ctype>"
                regexp = r"\.{0}$".format(ctype)
                logs["dynamic"](regexp)
                if re.search(regexp, self.path) is not None:
                    rpath = operator.concat(config.root_path,
                                            operator.concat(ctype, "/"))
                    rpath = operator.concat(
                        rpath, self.path[lastindex:])
                    self.wsgienv["PATH_INFO"] = rpath
                    logs["dynamic"](rpath)
Example #30
def include(argv):
    src = str()
    dst = str()
    if len(argv) != 3:
        print("""Usage: python3.5 include.py moddir modname""")
        return
    moddir = argv[1]
    modname = argv[2]

    src = operator.concat(moddir, "/")
    src = operator.concat(src, modname)
    dst = operator.concat(dst, modname)
    try:
        os.symlink(src, dst)
    except FileExistsError as exc:
        print("change destination name because ", exc)
        exit(-1)
    finally:
        try:
            importlib.import_module(dst)
        except ImportError as exc:
            print(exc)
Example #31
    def test_concat(self):
        class Seq1:
            def __init__(self, lst):
                self.lst = lst
            def __len__(self):
                return len(self.lst)
            def __getitem__(self, i):
                return self.lst[i]
            def __add__(self, other):
                return self.lst + other.lst
            def __mul__(self, other):
                return self.lst * other
            def __rmul__(self, other):
                return other * self.lst

        class Seq2(object):
            def __init__(self, lst):
                self.lst = lst
            def __len__(self):
                return len(self.lst)
            def __getitem__(self, i):
                return self.lst[i]
            def __add__(self, other):
                return self.lst + other.lst
            def __mul__(self, other):
                return self.lst * other
            def __rmul__(self, other):
                return other * self.lst

        import operator

        raises(TypeError, operator.concat)
        raises(TypeError, operator.concat, None, None)
        assert operator.concat('py', 'thon') == 'python'
        assert operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4]
        assert operator.concat(Seq1([5, 6]), Seq1([7])) == [5, 6, 7]
        assert operator.concat(Seq2([5, 6]), Seq2([7])) == [5, 6, 7]
        raises(TypeError, operator.concat, 13, 29)
Example #32
 def _get_all_page(cls, url):
     try:
         page = yield from grab_url(url)
         soup = cls.soup(page)
     except RuntimeError:
         log.error(
             "Cannot get all pages for {}, defaulting to the first page".
             format(url))
         return [url]
     urls = [url]
     urls += [
         concat(cls.page_prefix, link.get('href'))
         for link in soup.find_all('a', 'papernewstop')[:-1]
     ]
     return urls
Example #33
def calculatePlacements(potentialLocations,distance,minSeparation=5):
    """
    Calculate the placements of billboards which will give the maximum possible
    revenue.
    """
    scores = [MileScore(0,[])]*(minSeparation+1)
    for x in xrange(1,distance+1):
        if x in potentialLocations:
            firstScore = scores[0]
            scores.append(max([scores[-1], 
                               MileScore(firstScore.score+potentialLocations.get(x),
                                          concat(firstScore.placements,[x]))]))
        else:
            scores.append(scores[-1])
        scores = scores[1:]
    return scores[-1]
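A hypothetical run; MileScore is assumed to be a (score, placements) namedtuple, which matches how the function reads its fields, and concat is operator.concat:

from collections import namedtuple
MileScore = namedtuple('MileScore', ['score', 'placements'])

# billboards allowed at miles 6 and 7, paying 5 and 8, on a 10-mile road;
# they are closer than minSeparation, so only the better one survives
print calculatePlacements({6: 5, 7: 8}, 10)   # MileScore(score=8, placements=[7])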
Example #34
    def urlreconstruction(self):
        """ URL reconstruction per the PEP 3333 WSGI specification """
        url = concat(self.env["wsgi.url_scheme"], "://")

        if self.env.get("HTTP_HOST"):
            url = concat(url, self.env["HTTP_HOST"])
        else:
            url = concat(url, self.env["SERVER_NAME"])
            # append the port unless it is the default for the scheme
            if self.env["wsgi.url_scheme"] == "https":
                if self.server_port != "443":
                    url = concat(concat(url, ":"), self.server_port)
            elif self.server_port != "80":
                url = concat(concat(url, ":"), self.server_port)

        url = concat(url, quote(self.script_name))
        url = concat(url, quote(self.path_info))

        if self.query_string:
            url = concat(url, "?")
            url = concat(url, self.query_string)

        return url
Example #35
 def feed_urls(cls):
     all_urls = []
     for feeder_url in cls.feeder_pages:
         domain = '/'.join(feeder_url.split('/')[:3])
         for page in cls._get_all_page(feeder_url):
             urls = [
                 a.get('href') or ''
                 for a in cls.soup(grab_url(page)).findAll('a')
             ]
             urls = [
                 url if '://' in url else concat(domain, url)
                 for url in urls
             ]
             all_urls += [
                 url for url in urls if re.search(cls.feeder_pattern, url)
             ]
     return all_urls
Example #36
File: funcs.py Project: TZTsai/Calc
def or_(x, y):
    if any_([x, y], is_list):
        dx, dy = depth(x), depth(y)
        if abs(dx - dy) <= 1:
            if max(dx, dy) == 2 and len(x) == len(y):  # matrix augmentation
                return tuple(or_(xi, yi) for xi, yi in zip(x, y))
            if dx < dy: x = x,
            if dx > dy: y = y,
            return concat(x, y)
        else:
            raise TypeError('dimension mismatch')
    elif all_([x, y], is_env):
        assert x.parent is y.parent, \
            'two objects do not have the same parent'
        e = Env(parent=x.parent, binds=x)
        e.update(y)
        return e
    else:
        return b_or(x, y)
Example #37
def operator_sequence_operation():
    """
    We run through some common sequence operations, as follows.
    ---CRUD---
    container :: in, append, extend, clear, remove, __new__, __eq__
    sized     :: len, index
    iterable  :: set, get
    """
    a = [1, 2, 3]
    b = list('abc')
    print('a =', a)
    print('b =', b)

    # concatenate
    print('\nConstructive:')
    print('  concat(a, b):', operator.concat(a, b))

    # search
    print('\nSearch:')
    print('  contains(a, 1)  :', operator.contains(a, 1))
    print('  contains(b, "d"):', operator.contains(b, "d"))
    print('  countOf(a, 1)   :', operator.countOf(a, 1))
    print('  countOf(b, "d") :', operator.countOf(b, "d"))
    print('  indexOf(a, 1)   :', operator.indexOf(a, 1))
    # access items
    print('\nAccess:')
    print('  getitem(b, 1)                   :', operator.getitem(b, 1))
    print('  getitem(b, slice(1, 3))         :',
          operator.getitem(b, slice(1, 3)))
    print('  setitem(b, 1, "d")              :', end=' ')
    operator.setitem(b, 1, "d")
    print(b)
    print('  setitem(a, slice(1, 3), [4, 5]) :', end=' ')
    operator.setitem(a, slice(1, 3), [4, 5])
    print(a)
    # remove items
    print('\nDestructive:')
    print('  delitem(b, 1)                   :', end=' ')
    operator.delitem(b, 1)
    print(b)
    print('  delitem(a, slice(1, 3))         :', end=' ')
    operator.delitem(a, slice(1, 3))
    print(a)
Example #38
def calculatePlacements(potentialLocations, distance, minSeparation=5):
    """
    Calculate the placements of billboards which will give the maximum possible
    revenue.
    """
    scores = [MileScore(0, [])] * (minSeparation + 1)
    for x in xrange(1, distance + 1):
        if x in potentialLocations:
            firstScore = scores[0]
            scores.append(
                max([
                    scores[-1],
                    MileScore(firstScore.score + potentialLocations.get(x),
                              concat(firstScore.placements, [x]))
                ]))
        else:
            scores.append(scores[-1])
        scores = scores[1:]
    return scores[-1]
Example #39
    def loadcsv(self):

        """load 'Date','Close', 'Volume' data from databse and return dataframe
        """

        self.loadfeatures()  # Load features data
        a = ['Date']
        a.extend(operator.concat(self.label, self.misc))

        seen = set()
        seen_add = seen.add
        a = [x for x in a if not (x in seen or seen_add(x))]

        self.def_features = a
        self.dataset = self.LoadData(self.filename).loc[:, a]

        self.dataset['Date'] = pd.to_datetime(self.dataset['Date'])
        self.dataset = self.dataset.fillna(self.dataset.mean())

        return self.dataset
Example #40
def get_sitefilenames(dir):
    """
    This function gets the names of all the files in the
    input directory.

    Inputs:
      dir     - list of directories in which to get the names of all the files
    
    Returns:
      files   - a list with the names of all the files
    
    """
    files = []
    for dir_ind in xrange(len(dir)):
        files = operator.concat(
            files,
            filter(os.path.isfile, [
                os.path.join(dir[dir_ind], f) for f in os.listdir(dir[dir_ind])
            ]))

    return files
Example #41
def concat(a, b):
    return operator.concat(a, b)
Example #42
	times=[curr_tag.text[:_alpha.search(curr_tag.text).start()] for curr_tag in span_rel]
	events=[curr_tag.text[_alpha.search(curr_tag.text).start():] for curr_tag in span_rel]
	return (times,events)

	

page_url='http://www.bbc.co.uk/sport/football/premier-league/results'
url_pre='http://www.bbc.co.uk/'
bbc_page = requests.get(page_url).text
bbc_page = BeautifulSoup(bbc_page)
spans = bbc_page.find_all('a')
spans = [span_curr for span_curr in spans if span_curr.get('class')]
urls = [span_curr.get('href') for span_curr in spans if span_curr.get('class')[0] == 'report']

urls = [operator.concat(url_pre, url) for url in urls]

for page_url in urls:
	date, team, time, event = getGameInfo(page_url)
	file_str = team[0] + '_' + team[1] + '_' + date[0] + '.txt'
	with open(file_str, 'w') as f:
		for time_curr in time:
			f.write(time_curr + '\t')
		f.write('\n')
		for event_curr in event:
			f.write(event_curr + '\t')


pdb.set_trace()
Example #43
			iconcat(iter1, iter2): append iter2 to the end of iter1
	Getting elements or attributes:
			attrgetter:
			itemgetter:
	Determining object types:
			isMappingType:
			isNumberType:
			isSequenceType:
"""

import operator

a = [1, 2, 3]
b = ['a', 'b', 'c']

print operator.concat(a,b)
print operator.repeat(b, 3)
print operator.contains(a, 2)
print operator.contains(b, 'hhhhhh')
print operator.countOf(operator.repeat(b, 4), 'a')
print operator.countOf(a, 8)
print operator.indexOf(a, 2)
# output
# [1, 2, 3, 'a', 'b', 'c']
# ['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']
# True
# False
# 4
# 0
# 1
Example #44
import operator
sequence = 1,2,4
print "add","==>", reduce(operator.add, sequence)
print "sub","==>", reduce(operator.sub, sequence)
print "mul","==>", reduce(operator.mul, sequence)
print "concat","==>", operator.concat("spam", "egg")
print "repeat","==>", operator.repeat("spam", 5)
print "reduce lambda" , "==>" ,reduce(lambda x, y: x+y, [1, 2, 3, 4, 5])
print "getitem","==>",operator.getitem(sequence, 2)
print "indexOf","==>",operator.indexOf(sequence, 4)
print "sequenceIncludes","==>", operator.sequenceIncludes(sequence, 3)



Example #45
File: t505.py Project: Afey/skulpt
print operator.pos(False)

print operator.pow(2, 2)
print operator.pow(5, 3)

print operator.rshift(5, 2)
print operator.rshift(-5, 3)

print operator.sub(4, 2)
print operator.sub(2, 4)
print operator.sub(-4, 2)

print operator.xor(4, 2)
print operator.xor(8, 5)

print operator.concat("he", "llo")
print operator.concat([1,2,3,4], [5,6,7])
print operator.concat((1,2), (3,4))

l = [1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9]
s = "hello world"
t = ("a", "b", "c")
d = {1:1, 2:2, 3:3, 4:4, 5:5}

print operator.contains(l, 2)
print operator.contains(l, 30)
print operator.contains(s, "ll")
print operator.contains(s, "z")
print operator.contains(t, "a")
print operator.contains(t, 2)
print operator.contains(d, 3)
Example #46
import functools, operator
m = functools.reduce(operator.add, [1, 2, 3, 4, 5])
n = functools.reduce(operator.add, range(1, 100))
print(m, n)
a = operator.concat("d", "c")
print(a)
Example #48
 def test_concat(self):
     self.failUnlessRaises(TypeError, operator.concat)
     self.failUnlessRaises(TypeError, operator.concat, None, None)
     self.failUnless(operator.concat('py', 'thon') == 'python')
     self.failUnless(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
Example #49
 def _get_all_page(cls, url):
     soup = cls.soup(grab_url(url))
     urls = [url]
     urls += [concat(cls.page_prefix, link.get('href')) for link in soup.find_all('a', 'papernewstop')[:-1]]
     return urls
Example #50
import string
import operator as oper
import sys

charset = oper.concat(string.uppercase, string.digits)
superSet = oper.concat(charset, string.whitespace)
print "input string of filename that you want to convert:"
ime = raw_input()

someFile = open(ime)

print "'print' or 'write'?"

ddd = filter((lambda x: x in superSet and x != " "), someFile.read().upper())

adsd = raw_input()

if adsd == 'print':
	print ddd
elif adsd == 'write':
	imeNoExt = ime[:-4]
	outputWriteUpper = open(oper.concat(imeNoExt.upper(), "UPPER.enm"), "w")
	outputWriteUpper.write(ddd)
	outputWriteUpper.close()
else: print "haven't decided?"

someFile.close()
Example #51
def getpath(path=[]):
    path = concat(path, sublime.load_settings("AllCompile.sublime-settings").get('path', []))
    path = concat(path, os.environ.get('PATH', '').split(os.pathsep))
    return os.pathsep.join(path)
Example #52
from haiku.types import *

__all__ = []

# ===----------------------------------------------------------------------===

_cat, = map(Symbol, "cat".split())

from operator import concat

builtinEnvironment[_cat] = Procedure(
    params=Tuple([(1, (SymbolCompatible, UnicodeCompatible)), (2, (SymbolCompatible, UnicodeCompatible))]),
    defaults=Tuple(),
    ellipsis=False,
    environment=builtinEnvironment,
    body=lambda eval_, env: concat(env[1], env[2]),
)

# ===----------------------------------------------------------------------===

_encode, _decode = map(Symbol, "encode   decode".split())

builtinEnvironment[_encode] = Procedure(
    params=Tuple([(1, UnicodeCompatible), ("encoding", SymbolCompatible)]),
    defaults=Tuple([("encoding", "utf-8")]),
    ellipsis=False,
    environment=builtinEnvironment,
    body=lambda eval_, env: env[1].encode(env["encoding"]),
)

builtinEnvironment[_decode] = Procedure(
Example #53
def append_str(s):
    '''returns a function that appends s'''
    return lambda x: concat(x,s)
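For example (with concat coming from the operator module, as the snippet implies):

add_bang = append_str('!')
print(add_bang('hello'))   # hello!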
Example #54
 def test_concat(self):
     self.failUnless(operator.concat("py", "thon") == "python")
     self.failUnless(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])
Example #55
import operator
Example #56
print operator.rshift(3,2)

# bitwise AND, i.e. a & b
print operator.and_(1,8)
print operator.and_(1,1)

# bitwise OR, i.e. a | b
print operator.or_(1,8)
print operator.or_(1,3)

# bitwise XOR, i.e. a ^ b
print operator.xor(1,8)
print operator.xor(1,3)

# concatenation, but only for sequences
print operator.concat([1,2],[3,4])
print operator.concat(("one",),("two",))

# membership test, also for sequences
print operator.contains([1,2,3],2)
print operator.contains([1,2,3],0)

# position of an element, also for sequences
print operator.indexOf([1,2,3],2)
# raises an exception if the element is not found
# print operator.indexOf([1,2,3],0)

# inclusion, same as the in operator
print operator.sequenceIncludes([1,2,3],1)
print operator.sequenceIncludes("123","1")
Example #57
 def test_concat(self):
     self.failUnless(operator.concat('py', 'thon') == 'python')
     self.failUnless(operator.concat([1, 2], [3, 4]) == [1, 2, 3, 4])