Example No. 1
def list(**type):
    """List all the segments defined in the database.

    Search type can be identified by providing a named argument.
    like = glob match
    regex = regular expression
    selector = segment selector
    index = particular index
    name = specific segment name
    predicate = function predicate
    """
    res = __builtin__.list(iterate(**type))

    maxindex = max(__builtin__.map(operator.attrgetter('index'), res) or [1])
    maxaddr = max(__builtin__.map(operator.attrgetter('endEA'), res) or [1])
    maxsize = max(__builtin__.map(operator.methodcaller('size'), res) or [1])
    maxname = max(__builtin__.map(utils.compose(idaapi.get_true_segm_name,len), res) or [1])
    cindex = math.ceil(math.log(maxindex)/math.log(10))
    caddr = math.ceil(math.log(maxaddr)/math.log(16))
    csize = math.ceil(math.log(maxsize)/math.log(16))

    for seg in res:
        comment = idaapi.get_segment_cmt(seg, 0) or idaapi.get_segment_cmt(seg, 1)
        print("[{:{:d}d}] {:0{:d}x}:{:0{:d}x} {:>{:d}s} {:<+#{:d}x} sel:{:04x} flags:{:02x}{:s}".format(seg.index, int(cindex), seg.startEA, int(caddr), seg.endEA, int(caddr), idaapi.get_true_segm_name(seg), maxname, seg.size(), int(csize), seg.sel, seg.flags, "// {:s}".format(comment) if comment else ''))
    return
Example No. 2
def list(**type):
    """List all the enumerations within the database.

    Search type can be identified by providing a named argument.
    like = glob match
    regex = regular expression
    index = particular index
    identifier = particular id number
    pred = function predicate
    """
    res = __builtin__.list(iterate(**type))

    maxindex = max(__builtin__.map(idaapi.get_enum_idx, res))
    maxname = max(
        __builtin__.map(utils.compose(idaapi.get_enum_name, len), res))
    maxsize = max(__builtin__.map(size, res))
    cindex = math.ceil(math.log(maxindex or 1) / math.log(10))
    cmask = max(
        __builtin__.map(
            utils.compose(mask, math.log,
                          functools.partial(operator.mul, 1.0 / math.log(16)),
                          math.ceil), res) or [database.config.bits() / 4.0])

    for n in res:
        print("[{:{:d}d}] {:>{:d}s} & {:#<{:d}x} ({:d} members){:s}".format(
            idaapi.get_enum_idx(n), int(cindex), idaapi.get_enum_name(n),
            maxname, mask(n), int(cmask), len(__builtin__.list(members(n))),
            " // {:s}".format(comment(n)) if comment(n) else ''))
    return
Example No. 3
    def allPickups (self, me, passengers, players):
            def distanceFromUs(p):
                toPassenger = len(simpleAStar.calculatePath(self.gameMap, me.limo.tilePosition, p.lobby.busStop))
                toDest = len(simpleAStar.calculatePath(self.gameMap, p.lobby.busStop, p.destination.busStop))
                return toPassenger + toDest
            def keyFunc(p):
                return (100*p.pointsDelivered)/distanceFromUs(p)
            
            pickup = [p for p in passengers if (not p in me.passengersDelivered and
                                                p != me.limo.passenger and
                                                p.car is None and
                                                p.lobby is not None and p.destination is not None)]
            tempPickup = filter(lambda x: len([y for y in x.enemies if y in x.destination.passengers]) ==0, pickup)
            if len(tempPickup) > 0:
                pickup = tempPickup
            """Not Sure about this Part Yet"""
#             for player in players:
#                 tempPickup = filter(lambda x: self.easierForYou(x, me, player), pickup)
#                 if len(tempPickup) > 0:
#                     pickup = tempPickup
            values = __builtin__.map(lambda x: (x, keyFunc(x)), pickup)
            values = sorted(values, key=lambda x: x[1], reverse=True)
            pickup = __builtin__.map(lambda x: x[0], values)
            print values                    
            return pickup
Example No. 4
def list(**type):
    """List all the structures within the database.

    Search type can be identified by providing a named argument.
    like = glob match
    regex = regular expression
    index = particular index
    identifier = particular id number
    pred = function predicate
    """
    res = __builtin__.list(iterate(**type))

    maxindex = max(
        __builtin__.map(
            utils.compose(operator.attrgetter('index'), "{:d}".format, len),
            res) or [1])
    maxname = max(
        __builtin__.map(utils.compose(operator.attrgetter('name'), len), res)
        or [1])
    maxsize = max(
        __builtin__.map(
            utils.compose(operator.attrgetter('size'), "{:x}".format, len),
            res) or [1])

    for st in res:
        print("[{:{:d}d}] {:>{:d}s} {:<+{:d}x} ({:d} members){:s}".format(
            idaapi.get_struc_idx(st.id), maxindex, st.name, maxname, st.size,
            maxsize, len(st.members),
            " // {:s}".format(st.comment) if st.comment else ''))
    return
Example No. 5
 def list(cls, enum):
     # FIXME: make this consistent with every other .list
     eid = by(enum)
     res = __builtin__.list(cls.iterate(eid))
     maxindex = max(__builtin__.map(utils.first, enumerate(res)) or [1])
     maxvalue = max(
         __builtin__.map(utils.compose(cls.value, "{:x}".format, len), res)
         or [1])
     for i, mid in enumerate(res):
         print("[{:d}] {:>0{:d}x} {:s}".format(i, cls.value(mid), maxvalue,
                                               cls.name(mid)))
     return
Example No. 6
def map(f, graph):
    ''' Maps function f over the nodes in graph.

        >>> map(str, { 1:[2,3] })
        {'1': ['2', '3']}
    '''
    return map_items(lambda k, v: (f(k), __builtin__.map(f, v)), graph)
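
The helper map_items is not shown in this snippet; it presumably applies a two-argument function over a dictionary's items and rebuilds the dict. A minimal sketch of such a helper, under that assumption (the real map_items used by the project may differ):

def map_items(f, d):
    # Hypothetical helper: apply f(key, value) to every item and rebuild the dict.
    return dict(f(k, v) for k, v in d.items())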
Example No. 7
    def parse_csv_file(self):

        with open(self.file_path, "rb", 1) as csv_file:
            dialect = csv.Sniffer().sniff(csv_file.read(1024))
            has_header = csv.Sniffer().has_header(csv_file.read(1024))
            csv_file.seek(0)

            if not has_header:
                raise Exception('Not a correct CSV file')

            csv_data = csv.reader(csv_file, dialect)

            company_names = next(csv_data)[2:]  # Extract the company names

            for name in company_names:
                self.output[name] = {
                    'price': 0,
                    'year': 'year',
                    'month': 'month'
                }
            for row in csv_data:
                year, month = row[:2]
                for name, price in zip(company_names, map(int, row[2:])):
                    if self.output[name]['price'] < price:
                        self.output[name] = {
                            'price': price,
                            'year': year,
                            'month': month
                        }

            self.result = '\nCompany name\tYear\tMonth\tMax Price\n\n'
            for company_name, analysis_dict in self.output.items():
                self.result += '%s\t%s\t%s\t%d\n' % (
                    company_name, analysis_dict['year'],
                    analysis_dict['month'], analysis_dict['price'])
Example No. 8
def map(f, *sequences):
    """
    Applies the function f elementwise across a number of sequences.
    The function f should have arity equal to the number of arguments.
    Each sequence must have the same length.
    """
    return __builtin__.map(f, *sequences)
Example No. 9
 def parse_csv_file(self):
     
     with open(self.file_path, "rb", 1) as csv_file:
         dialect = csv.Sniffer().sniff(csv_file.read(1024))
         has_header = csv.Sniffer().has_header(csv_file.read(1024))
         csv_file.seek(0)
         
         if not has_header:
             raise Exception('Not a correct CSV file')
         
         csv_data = csv.reader(csv_file, dialect)
         
         company_names = next(csv_data)[2:] # Extract the company names
         
         for name in company_names:
             self.output[name] = {'price': 0, 'year': 'year', 'month': 'month'}
         for row in csv_data:
             year, month = row[:2]
             for name, price in zip(company_names, map(int, row[2:])):
                 if self.output[name]['price'] < price:
                     self.output[name] = {'price':price, 'year': year, 'month': month}
         
         self.result = '\nCompany name\tYear\tMonth\tMax Price\n\n'
         for company_name , analysis_dict in self.output.items():
             self.result += '%s\t%s\t%s\t%d\n' % (company_name, analysis_dict['year'], analysis_dict['month'], analysis_dict['price'])
Example No. 10
    def _get_estimates(self, forecast=False):

        if forecast:
            pred_func = self.forecast
            labels = ['train forecast', 'val forecast', 'test forecast']
        else:
            pred_func = self.predict
            labels = ['train prediction', 'val prediction', 'test prediction']

        num_vars = len(self.variables)
        y = pd.concat([self.y_train, self.y_val, self.y_test], axis=0)
        sets = ['train', 'val', 'test']
        data_series = [self.x_train, self.x_val, self.x_test]
        predictions = []

        # true y
        temp = pd.DataFrame(y.values, index=y.index, columns=['true values'] * num_vars)
        predictions.append(np.split(temp, num_vars, axis=1))

        for set, series, label in zip(sets, data_series, labels):
            temp = pd.DataFrame(pred_func(set).T, index=series.index,
                                columns=[label] * num_vars)
            predictions.append(np.split(temp, num_vars, axis=1))

        predictions = __builtin__.map(list, zip(*predictions))

        for i in range(len(predictions)):
            predictions[i] = pd.concat(predictions[i], axis=1)

        return predictions
Example No. 11
 def test_mf_correct(self):
     """Should be identical output to map and filter without transduction."""
     self.assertEqual([
         a for a in __builtin__.map(msq,
                                    __builtin__.filter(fodd, range(10000)))
     ], transduce(compose(filter(fodd), map(msq)), append, [],
                  range(10000)))
Example No. 12
    def uimap(self, f, *args, **kwds):
        AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)

        def submit(*argz):
            """send a job to the server"""
            _pool = self._serve()
            #print "using", _pool.get_ncpus(), 'local workers'
            try:
                return _pool.submit(f, argz, globals=globals())
            except pp.DestroyedServerError:
                self._is_alive(None)

        def imap_unordered(it):
            """build a unordered map iterator"""
            while len(it):
                for i, job in enumerate(it):
                    if job.finished:
                        yield it.pop(i)()
                        break
                # yield it.pop(0).get()  # wait for the first element?
                # *subprocess*           # alternately, loop in a subprocess
            raise StopIteration

        # submit all jobs, then collect results as they become available
        return imap_unordered(__builtin__.map(submit, *args))
Example No. 13
 def normalisedCPUCapacity(self):
     """
     Returns the normalised CPU capacity of the VM type - in the range [0,1].
     @return: the normalised CPU capacity of the VM type - in the range [0,1].
     """
     normCPUCapacities = map(lambda measurement: measurement.normaliseCpuCapacity(), self.measruements)
     return float(sum(normCPUCapacities)) / float(len(normCPUCapacities)) if normCPUCapacities else -1
Example No. 14
def map(f, graph):
    ''' Maps function f over the nodes in graph.

        >>> map(str, { 1:[2,3] })
        {'1': ['2', '3']}
    '''
    return map_items(lambda k,v: (f(k), __builtin__.map(f,v)), graph)
Example No. 15
def findWrongItems(queries, trainingData=True, numSamples=50):
    # theano overrides map if imported with *
    #from __builtin__ import map
    ret = []
#     surface = set()
    for qu in queries.values():
        for ek, en in qu.items():
            if en['training'] != trainingData:
                continue
#             for e in en:
            if en['gold']:
                if len(ret) > numSamples:
                    return ret
                if True:#ek not in surface:
                    sv = sorted(en['vals'].items(), key=lambda x: x[1])
                    if sv[-1][0] not in en['gold'] and len(set(en['gold']) & set(en['vals'].keys())) != 0:
#                     if 'Slayer (Buffy the Vampire Slayer)' in en['gold']:
                        # got this wrong
                        ret.append({
                            'gold': en['gold'],
                            'ordered': [(s[0], s[1][0], [(' '.join(map(str, a)), len(a)) for a in s[1][1]]) for s in sv][::-1],
                            'text': ek,
                            'training': en['training'],
                        })

#                     m = max(en['vals'].values())
#                     g = en['vals'].get(en['gold'][0], 0)
#                     if g != m and g != 0:
#                         ret[ek] = en
    return ret
Example No. 16
def csv_reader(stream, size=None, url=None, params=None):
    import csv
    import __builtin__

    fieldnames = getattr(params, 'csv_fields', None)
    dialect = getattr(params, 'csv_dialect', 'excel')
    delimiter = getattr(params, 'delimiter', None)

    if delimiter:
        reader = csv.reader(stream, delimiter=delimiter)
    else:
        reader = csv.reader(stream, dialect=dialect)

    done = False
    while not done:
        try:
            line = reader.next()
            if not line:
                continue
            if not fieldnames:
                fieldnames = [str(x) for x in range(len(line))]
            parts = dict(__builtin__.map(None, fieldnames, line))
            if None in parts:
                # remove extra data values
                del parts[None]
            yield parts
        except StopIteration as e:
            done = True
        except Exception as ee:
            # just skip bad lines
            print 'csv line error: %s' % ee
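
The dict(__builtin__.map(None, fieldnames, line)) call relies on Python 2 behaviour: with None as the function, map zips the sequences and pads the shorter one with None, so any surplus values land under the key None and are then discarded. A rough Python 3 equivalent, as a sketch using the same names:

from itertools import zip_longest

# Pair each field name with its value, padding the shorter side with None,
# then drop any values that had no field name (they all share the key None).
parts = dict(zip_longest(fieldnames, line))
parts.pop(None, None)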
Example No. 17
    def map(self, work_function, required_iterable, *optional_iterables):
        ''' Parallel map().'''

        def submit(item):
            ''' Submit work_function(item) to pp as a separate task.'''

            return self.pp_server.submit(
                work_function, (item,),
                depfuncs = self.depfuncs,
                modules = self.modules,
                callback = self.callback,
                callbackargs = self.callbackargs,
                group = self.group,
                globals = self.globals)

        items = []
        items.extend(required_iterable)
        for optional_iterable in optional_iterables:
            items.extend(optional_iterable)

        if IS_PY2:
            tasks = __builtin__.map(submit, items)
        else:
            tasks = builtins.map(submit, items)
        results = [task_result() for task_result in tasks]

        if not MapPP.server_is_singleton:
            self.pp_server.destroy()

        return results
Example No. 18
def map(*args):
    """
    Apply function to every item of iterable and return a list of the results.
    If additional iterable arguments are passed, function must take that many
    arguments and is applied to the items from all iterables in parallel.
    If one iterable is shorter than another it is assumed to be extended with
    None items. If function is None, the identity function is assumed; if
    there are multiple arguments, map() returns a list consisting of tuples
    containing the corresponding items from all iterables (a kind of transpose
    operation). The iterable arguments may be a sequence or any iterable
    object. The result is always a list.

    :param args: items to apply function
    :return: list result
    """

    try:
        if IS_PYTHON3:
            import builtins
            return list(builtins.map(*args))
        else:
            import __builtin__
            return __builtin__.map(*args)
    except Exception as e:
        raise e
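
Note that the docstring describes Python 2 semantics: Python 3's builtins.map stops at the shortest iterable and rejects None as the function, so the padding and transpose behaviour is not reproduced on that branch. A quick illustration, as a sketch:

# Python 2: map(None, [1, 2, 3], ['a', 'b'])        -> [(1, 'a'), (2, 'b'), (3, None)]
# Python 3: list(map(None, [1, 2, 3], ['a', 'b']))  -> TypeError: 'NoneType' object is not callable
# A Python 3 stand-in for the padded transpose:
import itertools
list(itertools.zip_longest([1, 2, 3], ['a', 'b']))  # [(1, 'a'), (2, 'b'), (3, None)]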
Example No. 19
def test_pipeline_example():
    from functools import reduce
    import operator as op

    data = range(100)
    result1 = math.sqrt(
        reduce(
            op.add,
            builtins.map(
                lambda x: x ** 2.0,
                builtins.filter(
                    lambda x: x % 2 == 0,
                    data,
                )
            )
        )
    )

    from toolz.curried import filter, map, reduce
    from flowly.tz import chained

    transform = chained(
        filter(lambda x: x % 2 == 0),
        map(lambda x: x ** 2.0),
        reduce(op.add),
        math.sqrt,
    )

    result2 = transform(data)

    assert result1 == result2
Example No. 20
def map(f, *sequences):
    """
    Applies the function f elementwise across a number of sequences.
    The function f should have arity equal to the number of arguments.
    Each sequence must have the same length.
    """
    return __builtin__.map(f, *sequences)
Example No. 21
def test_pipeline_example():
    from functools import reduce
    import operator as op

    data = range(100)
    result1 = math.sqrt(
        reduce(
            op.add,
            builtins.map(lambda x: x**2.0,
                         builtins.filter(
                             lambda x: x % 2 == 0,
                             data,
                         ))))

    from toolz.curried import filter, map, reduce
    from flowly.tz import chained

    transform = chained(
        filter(lambda x: x % 2 == 0),
        map(lambda x: x**2.0),
        reduce(op.add),
        math.sqrt,
    )

    result2 = transform(data)

    assert result1 == result2
Example No. 22
 def ToMap(dict):
     mapManager = MapManager()
     mapManager.index = dict['index']
     mapManager.hero = Hero.ToHero(dict['hero'])
     mapManager.masterRoomVisitedNum = dict['masterRoomVisitedNum']
     mapManager.rooms = map(RoomInformation.ToRoomInformation,
                            dict['rooms'])
     return mapManager
Example No. 23
 def imap(self, f, *args, **kwds):
     AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
     def submit(*argz):
         """send a job to the server"""
        #print "using", __STATE['server'].get_ncpus(), 'local workers'
         return __STATE['server'].submit(f, argz, globals=globals())
     # submit all jobs, then collect results as they become available
     return (subproc() for subproc in __builtin__.map(submit, *args))
Example No. 24
def cogroup(sc):
    x = sc.parallelize([("a", 1), ("b", 4)])
    y = sc.parallelize([("a", 2)])
    lst = sorted(list(x.cogroup(y).collect()))
    import __builtin__
    ret = [(x, tuple(__builtin__.map(list, y))) for x, y in lst]
    if (ret[0][1][0] == [1]):
        print("cogroup_test_ok")
Example No. 25
    def enum_max_matchings(self, callback, n=-1):
        self.__callback = callback  # save callback function

        # find a maximum matching in M
        M = nx.bipartite.maximum_matching(self.__G)

        # M is a dictionary like: {'__r0': 'rdx', '__r1': 'rcx', '__r2': 'rax', 'rdx': '__r0',
        # 'rcx': '__r1', 'rax': '__r2'}. Each edge it appears both in forward and reverse
        # direction. So, we only keep edges in one direction (V1 -> V2)
        #
        # don't use .iteritems() (dictionary is modified on the fly)
        for key, val in M.items():
            if self.__opposite(
                    key):  # drop (host, virtual) (or (addr, var)) edges
                del M[key]

        M = M.items()  # cast dictionary to list (for convenience)

        # To get the number of virtual registers in the graph we can't use this:
        #   virt, _ = nx.bipartite.sets(self.__G)
        #
        # This is because bipartite.sets() algorithmically finds the sets. So, if a node has no
        # edges it will be classified in the 2nd set, even if it has attribute bipartite = 0. To
        # fix that we can either drop nodes with no edges, or use an alternative:
        virt = [
            u for u, b in nx.get_node_attributes(
                self.__G, 'bipartite').iteritems() if not b
        ]

        # check if matching cover all virtual registers (or variables)
        # if not an explicit size is given, extract size from bipartite sets
        if n > 0 and len(M) < n or n < 0 and len(M) < len(virt):
            dbg_arb(DBG_LVL_3, "There are no maximum matchings for",
                    self.__G.edges())
            return 0  # abort

        # TODO: M can be:
        #   [('__r0', 'r14'), ('__r1', 'r15')]
        #   [('foo', ('<BV64 0x7ffffffffff0020>',))]
        #
        # Because bitvectors are strings at this point, no exceptions are thrown

        # remove tuples from bitvector strings
        M = __builtin__.map(
            lambda x: (x[0], x[1][0]) if isinstance(x[1], tuple) else x, M)

        # print 'M IS ', M

        # M is the 1st maximum matching. Invoke callback
        # if self.__callback( sorted(M, key=lambda e: e[0]) ) < 0:
        if self.__callback(M) < 0:
            return -1  # if callback wants to stop, stop

        # OPTIONAL: As an optimization, we can trim unnecessary edges from D(G,M)

        # find all other maximum matchings
        return self.__matchings_iter(self.__G, M, self.__D(self.__G, M))
Example No. 26
 def ToDict(self):
     return {
         "index": self.index,
         "hero": Hero.ToDict(self.hero),
         "masterRoomVisitedNum": self.masterRoomVisitedNum,
         "rooms": map(RoomInformation.ToDict, self.rooms),
         "preRoomCode": self.preRoomCode,
         "buffState": self.room.buffList.ToDict(self.room.buffList)
     }
Example No. 27
def groupWith(sc):
    w = sc.parallelize([("a", 5), ("b", 6)])
    x = sc.parallelize([("a", 1), ("b", 4)])
    y = sc.parallelize([("a", 2)])
    z = sc.parallelize([("b", 42)])
    import __builtin__
    ret = [(x, tuple(__builtin__.map(list, y))) for x, y in sorted(list(w.groupWith(x, y, z).collect()))]
    if (len(ret) == 2 and len(ret[0][1]) == 4):
        print("groupWith_test_ok")
Example No. 28
 def inferNormalisedCPUCapacity(targetVMType, vmTypes):
     cpuCap = targetVMType.normalisedCPUCapacity()
     # If we could not define the CPU capacity based on the measurements
     # then we'll infer it based on the other vmTypes 
     if cpuCap <= 0:
         measuredTypes=filter(lambda t: t.normalisedCPUCapacity() > 0, vmTypes)
         scaledCapacities = map(lambda t: (t.normalisedCPUCapacity() * targetVMType.declaredCpuCapacity ) / t.declaredCpuCapacity , measuredTypes )
         cpuCap = float(sum(scaledCapacities)) / len(scaledCapacities)
     
     return cpuCap
Example No. 29
    def reInit(self, dict):
        # Clear the previous data

        # Re-initialize
        self.index = dict['index']
        self.hero.reInit(dict['hero'])
        self.masterRoomVisitedNum = dict['masterRoomVisitedNum']
        self.rooms = map(RoomInformation.ToRoomInformation, dict['rooms'])
        self.preRoomCode = dict['preRoomCode']
        self.room.buffList.reInit(self.room.buffList, dict['buffState'])
Example No. 30
 def normalisedCPUCapacity(self):
     """
     Returns the normalised CPU capacity of the VM type - in the range [0,1].
     @return: the normalised CPU capacity of the VM type - in the range [0,1].
     """
     normCPUCapacities = map(
         lambda measurement: measurement.normaliseCpuCapacity(),
         self.measruements)
     return float(sum(normCPUCapacities)) / float(
         len(normCPUCapacities)) if normCPUCapacities else -1
Example No. 31
    def list(self, **type):
        """List all the members within the structure.

        Search type can be identified by providing a named argument.
        like = glob match
        regex = regular expression
        index = particular index
        identifier = particular id number
        predicate = function predicate
        """
        res = __builtin__.list(self.iterate(**type))

        escape = repr
        maxindex = max(
            __builtin__.map(
                utils.compose(operator.attrgetter('index'), "{:d}".format,
                              len), res) or [1])
        maxoffset = max(
            __builtin__.map(
                utils.compose(operator.attrgetter('offset'), "{:x}".format,
                              len), res) or [1])
        maxsize = max(
            __builtin__.map(
                utils.compose(operator.attrgetter('size'), "{:x}".format, len),
                res) or [1])
        maxname = max(
            __builtin__.map(
                utils.compose(operator.attrgetter('name'), escape, len), res)
            or [1])
        maxtype = max(
            __builtin__.map(
                utils.compose(operator.attrgetter('type'), repr, len), res)
            or [1])

        for m in res:
            print "[{:{:d}d}] {:>{:d}x}:+{:<{:d}x} {:<{:d}s} {:{:d}s} (flag={:x},dt_type={:x}{:s}){:s}".format(
                m.index, maxindex, m.offset, int(maxoffset), m.size, maxsize,
                escape(m.name), int(maxname), m.type, int(maxtype), m.flag,
                m.dt_type,
                '' if m.typeid is None else ",typeid={:x}".format(m.typeid),
                " // {:s}".format(m.comment) if m.comment else '')
        return
Example No. 32
def iterate(**type):
    '''Iterate through each segment defined in the database.'''
    if not type: type = {'predicate':lambda n: True}
    def newsegment(index):
        res = idaapi.getnseg(index)
        res.index = index
        return res
    res = __builtin__.map(newsegment, xrange(idaapi.get_segm_qty()))
    for k,v in type.iteritems():
        res = __builtin__.list(__matcher__.match(k, v, res))
    for n in res: yield n
Example No. 33
   def find(self, query):
      """
      queries the document set and calculates realvance to query termss
      """

      query_vector= Vector(map(lambda term: query.tokens.count(term), self.terms))
      
      cosines= [query_vector.cosine(vector) for vector in self.frequency_matrix]

      for result in izip(cosines, self.items()):
         yield result
Example No. 34
    def imap(self, f, *args, **kwds):
        AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)

        def submit(*argz):
            """send a job to the server"""
            _pool = self._serve()
            #print "using", _pool.get_ncpus(), 'local workers'
            return _pool.submit(f, argz, globals=globals())

        # submit all jobs, then collect results as they become available
        return (subproc() for subproc in __builtin__.map(submit, *args))
Example No. 35
def dynamic_reader(stream, size=None, url=None, params=None):
    """ This reader uses parts of both json_reader and csv_reader...
        It will attempt to json_read the stream, and if that fails, it will try to csv_read it instead
        using the fieldnames/dialect/delimiter as used in the csv_reader.
        This is helpful when an inferno job has more than one source tag, where one
        may be json encoded while the other is delimited by some character.
    """
    import ujson
    import csv
    import __builtin__

    fieldnames = getattr(params, 'csv_fields', None)
    dialect = getattr(params, 'csv_dialect', 'excel')
    delimiter = getattr(params, 'delimiter', None)

    if delimiter:
        reader = csv.reader(stream, delimiter=delimiter)
    else:
        reader = csv.reader(stream, dialect=dialect)

    done = False
    for line in stream:
        if line.find('{') != -1:
            try:
                parts = ujson.loads(line.rstrip())
                assert isinstance(parts, dict)
            except:
                # just skip bad lines
                print 'json line error: %r' % line
            else:
                yield parts
        else:
            # We couldn't find '{' in the line so it is not json encoded... use csv reader!
            while not done:
                try:
                    if not line:
                        line = reader.next()
                        continue
                    if not fieldnames:
                        fieldnames = [str(x) for x in range(len(line))]
                    parts = dict(__builtin__.map(None, fieldnames, line))
                    if None in parts:
                        # remove extra data values
                        del parts[None]
                    yield parts
                except StopIteration as e:
                    done = True
                except Exception as ee:
                    # just skip bad lines
                    print 'csv line error: %s' % ee
                line = reader.next()
        if done:
            break
Example No. 36
 def prev(cls, ea=None, count=1):
     isStop = lambda ea: _instruction.feature(
         ea) & idaapi.CF_STOP == idaapi.CF_STOP
     ea = ui.current.address() if ea is None else ea
     refs = xref.up(ea)
     if len(refs) > 1 and isStop(address.prev(ea)):
         logging.fatal(
             "%x: Unable to determine previous address due to multiple xrefs being available : %s"
             % (ea, ', '.join(__builtin__.map(hex, refs))))
         return None
     res = refs[0] if isStop(address.prev(ea)) else address.prev(ea)
     return cls.prev(res, count - 1) if count > 1 else res
Example No. 37
 def imap(self, f, *args, **kwds):
     AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
     def submit(*argz):
         """send a job to the server"""
         _pool = self._serve()
        #print "using", _pool.get_ncpus(), 'local workers'
         try:
             return _pool.submit(f, argz, globals=globals())
         except pp.DestroyedServerError:
             self._is_alive(None)
     # submit all jobs, then collect results as they become available
     return (subproc() for subproc in __builtin__.map(submit, *args))
Example No. 38
File: tools.py Project: xNUTs/PTVS
def map(func, *rangeIn):
    """Excel equivalent to the built-in map().

    ColumnVector ranges as well as Python iterables are accepted.
    The result list is written back to Excel as a column. A ColumnVector
    representing the stored results is returned"""

    import __builtin__
    xs = (_to_value(r) for r in rangeIn)
    name = getattr(func, '__name__', "<callable>")
    y = __builtin__.map(func, *xs)
    r = _dest_for_source_ranges(rangeIn)
    return view(y, name, to=r)
Example No. 39
def map(func, *rangeIn):
    """Excel equivalent to the built-in map().

    ColumnVector ranges as well as Python iterables are accepted.
    The result list is written back to Excel as a column. A ColumnVector
    representing the stored results is returned"""

    import __builtin__    
    xs = (_to_value(r) for r in rangeIn) 
    name = getattr(func, '__name__', "<callable>")
    y = __builtin__.map(func, *xs)
    r = _dest_for_source_ranges(rangeIn)
    return view(y, name, to=r)
Example No. 40
    def inferNormalisedCPUCapacity(targetVMType, vmTypes):
        cpuCap = targetVMType.normalisedCPUCapacity()
        # If we could not define the CPU capacity based on the measurements
        # then we'll infer it based on the other vmTypes
        if cpuCap <= 0:
            measuredTypes = filter(lambda t: t.normalisedCPUCapacity() > 0,
                                   vmTypes)
            scaledCapacities = map(
                lambda t: (t.normalisedCPUCapacity() * targetVMType.
                           declaredCpuCapacity) / t.declaredCpuCapacity,
                measuredTypes)
            cpuCap = float(sum(scaledCapacities)) / len(scaledCapacities)

        return cpuCap
Example No. 41
def by(**type):
    """Search through all the segments within the database for a particular result.

    Search type can be identified by providing a named argument.
    like = glob match
    regex = regular expression
    selector = segment selector
    index = particular index
    name = specific segment name
    predicate = function predicate
    """
    searchstring = ', '.join("{:s}={!r}".format(k,v) for k,v in type.iteritems())

    res = __builtin__.list(iterate(**type))
    if len(res) > 1:
        maxaddr = max(__builtin__.map(operator.attrgetter('endEA'), res) or [1])
        caddr = math.ceil(math.log(maxaddr)/math.log(16))
        __builtin__.map(logging.info, (("[{:d}] {:0{:d}x}:{:0{:d}x} {:s} {:+#x} sel:{:04x} flags:{:02x}".format(seg.index, seg.startEA, int(caddr), seg.endEA, int(caddr), idaapi.get_true_segm_name(seg), seg.size(), seg.sel, seg.flags)) for seg in res))
        logging.warn("{:s}.by({:s}) : Found {:d} matching results, returning the first one. : [{:d}] {:0{:d}x}:{:0{:d}x} {:s} {:+#x}".format(__name__, searchstring, len(res), res[0].index, res[0].startEA, int(caddr), res[0].endEA, int(caddr), idaapi.get_true_segm_name(res[0]), res[0].size()))

    res = next(iter(res), None)
    if res is None:
        raise LookupError("{:s}.by({:s}) : Found 0 matching results.".format(__name__, searchstring))
    return res
Example No. 42
def findItms(queries, key):
    ret = []
    from __builtin__ import map
    for qu in queries.values():
        for en in qu.values():
            if en['training'] == True:
                continue
            ad = False
            for k in en['vals'].keys():
                if key in k:
                    ad = True
            if ad:
                for k, v in en['vals'].iteritems():
                    ret.append((k, [(' '.join(map(str, a)), len(a)) for a in v[1]]))
#             return ret
    print len(ret)
    return ret
Example No. 43
 def __init__(self, map=None, source=None):
     if isinstance(map, basestring):
         _map = {}
         for line in map.splitlines():
             if re.search("^\s*#", line) or re.search("^\s*$", line):
                 continue
             keys = line.split()
             canonical_key = keys.pop(0)
             keys = __builtin__.map(lambda x: x.replace("%", canonical_key), keys)
             _map[canonical_key] = tuple(keys)
         map = _map
     if map and not len(map):
         map = None
     self._map = map
     self._source = source
     self.reset()
     if self._source is not None:
         self.parse()
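
For illustration (the enclosing class is not shown, so the attribute spelling below is only assumed from the snippet), a keyword-map string is parsed into canonical keys mapped to alias tuples, with "%" expanded to the canonical key:

# Given map = "colour color %s\n# ignored comment\ngrey gray", the loop builds:
#   self._map == {'colour': ('color', 'colours'), 'grey': ('gray',)}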
Example No. 44
 def uimap(self, f, *args, **kwds):
     AbstractWorkerPool._AbstractWorkerPool__imap(self, f, *args, **kwds)
     def submit(*argz):
         """send a job to the server"""
         _pool = self._serve()
        #print "using", _pool.get_ncpus(), 'local workers'
         try:
             return _pool.submit(f, argz, globals=globals())
         except pp.DestroyedServerError:
             self._is_alive(None)
     def imap_unordered(it):
         """build a unordered map iterator"""
         while len(it):
             for i,job in enumerate(it):
                 if job.finished:
                     yield it.pop(i)()
                     break
             # yield it.pop(0).get()  # wait for the first element?
             # *subprocess*           # alternately, loop in a subprocess
         raise StopIteration
     # submit all jobs, then collect results as they become available
     return imap_unordered(__builtin__.map(submit, *args))
Example No. 45
def ppmap(processes, function, sequence, *sequences):
    """Split the work of 'function' across the given number of
    processes.  Set 'processes' to None to let Parallel Python
    autodetect the number of children to use.

    Although the calling semantics should be identical to
    __builtin__.map (even using __builtin__.map to process
    arguments), it differs in that it returns a generator instead of a
    list.  This enables lazy evaluation of the results so that other
    work can be done while the subprocesses are still running.

    >>> def rangetotal(n): return n, sum(range(n))
    >>> list(map(rangetotal, range(1, 6)))
    [(1, 0), (2, 1), (3, 3), (4, 6), (5, 10)]
    >>> list(ppmap(1, rangetotal, range(1, 6)))
    [(1, 0), (2, 1), (3, 3), (4, 6), (5, 10)]
    """

    # Create a new server if one isn't already initialized
    if not __STATE['server']:
        __STATE['server'] = pp.Server()
    
    def submit(*args):
        """Send a job to the server"""
        return __STATE['server'].submit(function, args, globals=globals())

    # Merge all the passed-in argument lists together.  This is done
    # this way because, as with the map() function, at least one list
    # is required but the rest are optional.
    a = [sequence]
    a.extend(sequences)
    available_processes = sum( __STATE['server'].get_active_nodes().values())
    # Set the requested level of multi-processing
    if available_processes < processes:
        __STATE['server'].set_ncpus(processes-available_processes or 'autodetect')

    # First, submit all the jobs.  Then harvest the results as they
    # come available.
    return (subproc() for subproc in __builtin__.map(submit, *a))
Example No. 46
 def fetchData(self, inputMomentum, injectVariance = False):
     """
     Fetches measurement data from the server in the form of a VMMeasurement instance.
     @param inputMomentum: The measurement momentum. If None - no momentum will be considered.
      @param injectVariance: Whether to "inject" a change in the workload pattern. Used for experimental purposes.
     @return: measurement data from the server in the form of a VMMeasurement instance. May return None if no data is available.
     """
     data = filterEmptyStrings( map(extractVal, self.fetchRawData().split(";")) )
     if(data and not AppServer._isHeader(data) and len(data) == 10):
         assert len(data) == 10, "Measurement data %s does not have proper size" % (data)
         log.debug("Measurement %s: %s", self.readableName, data)
         
         varianceActiveMem = data[6] + convertMem(1, fromCode="GB", toCode="KB") + data[9] * convertMem(1, fromCode="MB", toCode="KB")
         varianceIdlePerc = data[4] * 0.9
         
         measurement = VMMeasurement(readableName = self._measName(),
                          vmAddress=self.address,
                          serverTime = data[0],
                          cpuCapacityMhz = data[1], 
                          cpuIOWaitPerc = data[2], 
                          cpuStealPerc = data[3], 
                          cpuIdlePerc = data[4] if not injectVariance else varianceIdlePerc, 
                          ramInKb = data[5], 
                          activeMemInKb = data[6] if not injectVariance else varianceActiveMem, 
                          diskUtilPerc = data[7], 
                          nicUtilPerc = data[8], 
                          numUsers = data[9])
         
         measurement.considerMomentum(self.lastMeasurement, inputMomentum)
         self.lastMeasurement = measurement
         self.vmType.addMeasurement(measurement)
         self.htm.train(measurement)
         
         self._line(measurement)
         return measurement
     else:
         return None
Example No. 47
def all(f, x):
    return __builtin__.all(__builtin__.map(f, x))
Example No. 48
def any(f, x):
    return __builtin__.any(__builtin__.map(f, x))
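
Both wrappers simply compose the corresponding built-in with map, turning a predicate plus an iterable into a single truth test. A small usage sketch against the definitions above:

all(lambda n: n % 2 == 0, [2, 4, 6])   # True
any(lambda n: n > 5, [1, 3, 7])        # True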
Example No. 49
def map(function, *sequence):
    """map(function, sequence[, sequence, ...]) -> list

    Like the builtin map() function, but splits the workload across a
    pool of processes whenever possible.
    

    >>> map(None, [1,2,3])
    [1, 2, 3]
    >>> map(None, [1,2,3], [5,6])
    [(1, 5), (2, 6), (3, None)]
    """
    # IPC stuff
    structformat = "H"
    structlen = struct.calcsize(structformat)

    def sendmessage(myend, message):
        """Send a pickled message across a pipe"""
        outobj = cPickle.dumps(message)
        os.write(myend, struct.pack(structformat, len(outobj)) + outobj)

    def recvmessage(myend):
        """Receive a pickled message from a pipe"""
        length = struct.unpack(structformat, (os.read(myend, structlen)))[0]
        return cPickle.loads(os.read(myend, length))

    try:
        maxchildren = function.parallel_maxchildren
    except AttributeError:
        return __builtin__.map(function, *sequence)

    # Handle map()'s multi-sequence semantics
    if len(sequence) == 1:
        if function is None:
            return list(sequence[0])
        arglist = zip(sequence[0])
    else:
        arglist = __builtin__.map(None, *sequence)
    if function is None:
        return arglist

    argindex = 0
    finished = 0
    outlist = [None] * len(arglist)

    # Spawn the worker children.  Don't create more than the number of
    # values we'll be processing.
    fromchild, toparent = os.pipe()
    children = []
    for childnum in range(min(maxchildren, len(arglist))):
        fromparent, tochild = os.pipe()
        pid = os.fork()
        # Parent?
        if pid:
            # Do some housekeeping and give the child its first assignment
            children.append({"pid": pid, "fromparent": fromparent, "tochild": tochild})
            sendmessage(tochild, (argindex, arglist[argindex]))
            argindex += 1
        # Child?
        else:
            # Since children can't really tell when they've been
            # orphaned, set a timeout so that they die if they don't
            # hear from the parent in a timely manner.
            def timeouthandler(signum, frame):
                """Get out cleanly"""
                sys.exit()

            oldsignal = signal.signal(signal.SIGALRM, timeouthandler)

            # Keep processing values until the parent kills you
            while True:
                try:
                    # Wait one second before quitting.  Children
                    # should generally hear from their parent almost
                    # instantly.
                    signal.alarm(1)
                    message = recvmessage(fromparent)
                    signal.alarm(0)
                    if message is None:
                        sys.exit()
                    index, value = message
                    sendmessage(toparent, (childnum, index, function(*value)))
                except Exception, excvalue:
                    sendmessage(toparent, (childnum, index, excvalue))
                finally:
                    signal.signal(signal.SIGALRM, oldsignal)
Example No. 50
def vector_what(map, coord, distance = 0.0):
    """!Query vector map at given locations
    
    To query one vector map at one location
    @code
    print grass.vector_what(map = 'archsites', coord = (595743, 4925281), distance = 250)

    [{'Category': 8, 'Map': 'archsites', 'Layer': 1, 'Key_column': 'cat',
      'Database': '/home/martin/grassdata/spearfish60/PERMANENT/dbf/',
      'Mapset': 'PERMANENT', 'Driver': 'dbf',
      'Attributes': {'str1': 'No_Name', 'cat': '8'},
      'Table': 'archsites', 'Type': 'Point', 'Id': 8}]
    @endcode

    To query one vector map with multiple layers (no additional parameters required)
    @code
    for q in grass.vector_what(map = 'some_map', coord = (596532.357143,4920486.21429), distance = 100.0):
        print q['Map'], q['Layer'], q['Attributes']

    new_bug_sites 1 {'str1': 'Beetle_site', 'GRASSRGB': '', 'cat': '80'}
    new_bug_sites 2 {'cat': '80'}
    @endcode

    To query more vector maps at one location
    @code
    for q in grass.vector_what(map = ('archsites', 'roads'), coord = (595743, 4925281),
                               distance = 250):
        print q['Map'], q['Attributes']
                            
    archsites {'str1': 'No_Name', 'cat': '8'}
    roads {'label': 'interstate', 'cat': '1'}
    @endcode

    To query one vector map at more locations
    @code
    for q in grass.vector_what(map = 'archsites', coord = [(595743, 4925281), (597950, 4918898)],
                               distance = 250):
        print q['Map'], q['Attributes']

    archsites {'str1': 'No_Name', 'cat': '8'}
    archsites {'str1': 'Bob_Miller', 'cat': '22'}
    @endcode

    @param map vector map(s) to query given as string or list/tuple
    @param coord coordinates of query given as tuple (easting, northing) or list of tuples
    @param distance query threshold distance (in map units)

    @return parsed list
    """
    if "LC_ALL" in os.environ:
        locale = os.environ["LC_ALL"]
        os.environ["LC_ALL"] = "C"

    if type(map) in (types.StringType, types.UnicodeType):
        map_list = [map]
    else:
        map_list = map
    
    coord_list = list()
    if type(coord) is types.TupleType:
        coord_list.append('%f,%f' % (coord[0], coord[1]))
    else:
        for e, n in coord:
            coord_list.append('%f,%f' % (e, n))
    
    ret = read_command('v.what',
                       quiet      = True,
                       flags      = 'ag',
                       map        = ','.join(map_list),
                       east_north = ','.join(coord_list),
                       distance   = float(distance))
    
    if "LC_ALL" in os.environ:
        os.environ["LC_ALL"] = locale
        
    data = list()
    if not ret:
        return data
    
    dict_attrb = None
    dict_map = None
    dict_layer = None
    attr_pseudo_key = 'Attributes'
    for item in ret.splitlines():
        try:
            key, value = __builtin__.map(lambda x: x.strip(), item.split('=', 1))
        except ValueError:
            continue
        if key in ('East', 'North'):
            continue
        
        if key == 'Map':
            # attach the last one from the previous map
            if dict_layer is not None:
                dict_main = copy.copy(dict_map)
                dict_main.update(dict_layer)
                data.append(dict_main)
            dict_map  = { key : value }
            dict_layer = None
            dict_attrb = None
        elif key == 'Layer':
            # attach the last the previous Layer
            if dict_layer is not None:
                dict_main = copy.copy(dict_map)
                dict_main.update(dict_layer)
                data.append(dict_main)
            dict_layer = { key: int(value) }
            dict_attrb = None
        elif key == 'Key_column':
            dict_layer[key] = value
            dict_attrb = dict()
            dict_layer[attr_pseudo_key] = dict_attrb
        elif dict_attrb is not None:
            dict_attrb[key] = value
        elif dict_layer is not None:
            if key == 'Category':
                dict_layer[key] = int(value)
            else:
                dict_layer[key] = value
        else:
            dict_map[key] = value
            # TODO: there are some keys which have non-string values
            # examples: Sq_Meters, Hectares, Acres, Sq_Miles
    
    # attach the last one
    if dict_layer is not None:
        dict_main = copy.copy(dict_map)
        dict_main.update(dict_layer)
        data.append(dict_main)
    
    return data
Example No. 51
def zipwith(f, xs, ys):
    return __builtin__.map(f, xs, ys)
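
Under Python 2, map with two sequences applies f pairwise (padding the shorter sequence with None), so this behaves like a classic zipWith. A small sketch:

import operator
zipwith(operator.add, [1, 2, 3], [10, 20, 30])   # [11, 22, 33]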
Example No. 52
def map(fn, sequence):
  return __builtin__.map(fn, sequence)
Example No. 53
def _jug_map(mapper, es):
    import __builtin__
    return __builtin__.map(mapper, es)
Example No. 54
def map(f, xs):
    return __builtin__.map(f, xs)
Example No. 55
def _jug_map_reduce(reducer, mapper, inputs):
    import __builtin__
    reducer = _get_function(reducer)
    mapper = _get_function(mapper)
    return __builtin__.reduce(reducer, __builtin__.map(mapper, inputs))
Example No. 56
def generate_and_push_new_documentation_page(
    temporary_documentation_folder,
    distribution_bundle_file,
    has_api_documentation,
    temporary_documentation_node_modules_directory
):
# #
    '''
        Renders a new index.html file and copies new assets to generate a new \
        documentation homepage.
    '''
    global BUILD_DOCUMENTATION_PAGE_COMMAND
    __logger__.info('Update documentation design.')
    if distribution_bundle_file:
        new_distribution_bundle_file = FileHandler(location='%s%s%s' % (
            temporary_documentation_folder.path, DOCUMENTATION_BUILD_PATH,
            DISTRIBUTION_BUNDLE_FILE_PATH))
        new_distribution_bundle_file.directory.make_directories()
        distribution_bundle_file.path = new_distribution_bundle_file
        new_distribution_bundle_directory = FileHandler(location='%s%s%s' % (
            temporary_documentation_folder.path, DOCUMENTATION_BUILD_PATH,
            DISTRIBUTION_BUNDLE_DIRECTORY_PATH))
        new_distribution_bundle_directory.make_directories()
        zipfile.ZipFile(distribution_bundle_file.path).extractall(
            new_distribution_bundle_directory.path)
    favicon = FileHandler(location='favicon.png')
    if favicon:
        favicon.copy(target='%s/source/image/favicon.ico' %
            temporary_documentation_folder.path)
    parameter = builtins.dict(builtins.map(lambda item: (
        String(item[0]).camel_case_to_delimited.content.upper(), item[1]
    ), SCOPE.get('documentationWebsite', {}).items()))
    if 'TAGLINE' not in parameter and 'description' in SCOPE:
        parameter['TAGLINE'] = SCOPE['description']
    if 'NAME' not in parameter and 'name' in SCOPE:
        parameter['NAME'] = SCOPE['name']
    __logger__.debug('Found parameter "%s".', json.dumps(parameter))
    api_documentation_path = None
    if has_api_documentation:
        api_documentation_path = '%s%s' % (
            API_DOCUMENTATION_PATH[1], API_DOCUMENTATION_PATH_SUFFIX)
        if not FileHandler(location='%s%s' % (
            FileHandler().path, api_documentation_path
        )).is_directory():
            api_documentation_path = API_DOCUMENTATION_PATH[1]
    parameter.update({
        'CONTENT': CONTENT,
        'CONTENT_FILE_PATH': None,
        'RENDER_CONTENT': False,
        'API_DOCUMENTATION_PATH': api_documentation_path,
        'DISTRIBUTION_BUNDLE_FILE_PATH': DISTRIBUTION_BUNDLE_FILE_PATH if (
            distribution_bundle_file and
            distribution_bundle_file.is_file()
        ) else None
    })
# # python3.5
# #     parameter = Dictionary(parameter).convert(
# #         value_wrapper=lambda key, value: value.replace(
# #             '!', '#%%%#'
# #         ) if builtins.isinstance(value, builtins.str) else value
# #     ).content
    parameter = Dictionary(parameter).convert(
        value_wrapper=lambda key, value: value.replace(
            '!', '#%%%#'
        ) if builtins.isinstance(value, builtins.unicode) else value
    ).content
# #
    if __logger__.isEnabledFor(logging.DEBUG):
        BUILD_DOCUMENTATION_PAGE_COMMAND = \
            BUILD_DOCUMENTATION_PAGE_COMMAND[:-1] + [
                '-debug'
            ] + BUILD_DOCUMENTATION_PAGE_COMMAND[-1:]
    serialized_parameter = json.dumps(parameter)
    parameter_file = FileHandler(location=make_secure_temporary_file('.json')[
        1])
    parameter_file.content = \
        BUILD_DOCUMENTATION_PAGE_PARAMETER_TEMPLATE.format(
            serializedParameter=serialized_parameter, **SCOPE)
    for index, command in builtins.enumerate(BUILD_DOCUMENTATION_PAGE_COMMAND):
        BUILD_DOCUMENTATION_PAGE_COMMAND[index] = \
            BUILD_DOCUMENTATION_PAGE_COMMAND[index].format(
                serializedParameter=serialized_parameter,
                parameterFilePath=parameter_file._path,
                **SCOPE)
    __logger__.debug('Use parameter "%s".', serialized_parameter)
    __logger__.info('Run "%s".', ' '.join(BUILD_DOCUMENTATION_PAGE_COMMAND))
    current_working_directory_backup = FileHandler()
    temporary_documentation_folder.change_working_directory()
    Platform.run(
        command=BUILD_DOCUMENTATION_PAGE_COMMAND[0],
        command_arguments=BUILD_DOCUMENTATION_PAGE_COMMAND[1:], error=False,
        log=True)
    current_working_directory_backup.change_working_directory()
    parameter_file.remove_file()
    for file in FileHandler():
        if not (file in (temporary_documentation_folder, FileHandler(
            location='.%s' % API_DOCUMENTATION_PATH[1]
        )) or is_file_ignored(file)):
            file.remove_deep()
    documentation_build_folder = FileHandler(location='%s%s' % (
        temporary_documentation_folder.path, DOCUMENTATION_BUILD_PATH
    ), must_exist=True)
    documentation_build_folder.iterate_directory(
        function=copy_repository_file, recursive=True,
        source=documentation_build_folder, target=FileHandler())
    if (Platform.run(
        "/usr/bin/env sudo umount '%s'" %
            temporary_documentation_node_modules_directory.path,
        native_shell=True, error=False, log=True
    )['return_code'] == 0):
        temporary_documentation_folder.remove_deep()
    Platform.run(
        (
            '/usr/bin/env git add --all',
            '/usr/bin/env git commit --message "%s" --all' %
                PROJECT_PAGE_COMMIT_MESSAGE,
            '/usr/bin/env git push',
            '/usr/bin/env git checkout master'
        ),
        native_shell=True,
        error=False,
        log=True
    )