Example #1
 def get_build_data(self, depth=1):
     build_url = "/".join([JENKINS["url"], 'job',
                           self.name,
                           str(self.number),
                           'api/json?depth={depth}'.format(depth=depth)])
     logger.debug("Request build data from {}".format(build_url))
     return json.load(urllib2.urlopen(build_url))
Example #2
    async def rd_zlexcount_cmd(self):
        """
        When all the elements in a sorted set are inserted
          with the same score, in order to force lexicographical
          ordering, this command returns the number of elements
          in the sorted set at key with a value between min and max.
          The min and max arguments have the same meaning as
          described for ZRANGEBYLEX.
          Return value:
          - the number of elements in the specified score range.

        :return: None
        """
        key1 = 'key1'
        values1 = ('TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5')
        scores1 = (1, 1, 1, 1, 1)
        pairs1 = list(chain(*zip(scores1, values1)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs1)
            res1 = await conn.zlexcount(key1, min=b'-', max=b'+')
            res2 = await conn.zlexcount(key1,
                                        min=b'TEST3',
                                        max=b'TEST5',
                                        include_min=True,
                                        include_max=True)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZLENCOUNT': KEY- {0}, " \
              "RES_ALL - {1}, RES_INCLUDE - {2}\n"
        logger.debug(frm.format(key1, res1, res2))
Example #3
    def message(self, msg):
        if msg['type'] not in ('chat', 'normal'):
            logger.debug('Strange message type: %(type)s' % msg)
            return
        #logger.info('Message from %(from)s: %(body)s' % msg)
        
        msg_text = msg['body'].strip()
        # msg['from'] is a JID object
        # http://sleekxmpp.com/api/xmlstream/jid.html
        from_user = msg['from'].bare 
        logger.info('FROM:' + from_user)
        logger.info('MSG:' + msg_text)

        try:
            if (from_user in settings.accept_command_from) and msg_text.startswith("$"):
                resp = commands.execute(msg_text[1:])
                msg.reply('\n'+resp).send()
            else:
                msg.reply(msg_text).send()
                #self.send_message(  mto=msg['from'],
                #                    mtype='chat',
                #                    mbody=msg_text,
                #                    mhtml='''<a href="http://www.google.co.jp">%s</a>'''% (msg_text))
        except Exception:
            exc = traceback.format_exc()
            msg.reply(exc).send()
Example #4
    async def rd_pttl_cmd(self):
        """
        Like TTL this command returns the remaining time to live of a key
          that has an expire set, with the sole difference that TTL returns
          the amount of remaining time in seconds while PTTL returns it
          in milliseconds.
          Return value in case of error changed:
            -2 - if the key does not exist.
            -1 - if the key exists but has no associated expire.

        :return: None
        """
        key1 = 'key_1'
        value1 = 'TEST1'
        pttl = 10000
        with await self.rd1 as conn:
            res_1 = await conn.pttl(key1)
            await conn.set(key1, value1)
            res_2 = await conn.pttl(key1)
            await conn.pexpire(key1, pttl)
            await asyncio.sleep(1)
            res_3 = await conn.pttl(key1)
            await conn.delete(key1)
        frm = "GENERIC_CMD - 'PTTL': KEY - %s, PTTL_NO_KEY - %s, PTTL_NO_EXP - %s, PTTL - %s msec\n"
        logger.debug(frm, key1, res_1, res_2, res_3)
Example #5
    async def rd_expire_cmd(self):
        """
        Set a timeout on key. After the timeout has expired, the key
          will automatically be deleted. A key with an associated
          timeout is often said to be volatile in Redis terminology.
          The timeout will only be cleared by commands that delete or
          overwrite the contents of the key, including DEL, SET, GETSET
          and all the *STORE commands. This means that all the operations
          that conceptually alter the value stored at the key without
          replacing it with a new one will leave the timeout untouched.
          For instance, incrementing the value of a key with INCR, pushing
          a new value into a list with LPUSH, or altering the field value
          of a hash with HSET are all operations that will leave the
          timeout untouched.

        :return: None
        """
        key = 'key'
        value = 'TEST'
        time_of_ex = 10
        with await self.rd1 as conn:
            await conn.set(key, value)
            await conn.expire(key, time_of_ex)
            await asyncio.sleep(2)
            ttl1 = await conn.ttl(key)
            await conn.set(key, value)
            ttl2 = await conn.ttl(key)
            await conn.delete(key)
        frm = "GENERIC_CMD - 'EXPIRE': KEY- {0}, BEFORE_EX - ({1} sec), AFTER_EX - ({2} sec)\n"
        logger.debug(frm.format(key, ttl1, ttl2))
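
The docstring above also notes that operations which alter a value in place (INCR, LPUSH, HSET, ...) leave the timeout untouched, which the example does not exercise. A minimal sketch, assuming the same aioredis v1 style pool (`rd1`) used throughout these examples:

async def rd_expire_untouched_by_incr(rd1):
    key = 'counter'
    with await rd1 as conn:
        await conn.set(key, 1)
        await conn.expire(key, 100)
        await conn.incr(key)                  # alters the value in place
        ttl_after_incr = await conn.ttl(key)  # timeout still counting down
        await conn.set(key, 1)                # replaces the value
        ttl_after_set = await conn.ttl(key)   # -1, timeout cleared
        await conn.delete(key)
    return ttl_after_incr, ttl_after_set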
Example #6
File: reporter.py Project: ehles/trep
    def get_check_create_test_run(self, plan, cases):
        plan = self.project.plans.get(plan.id)
        suite_cases = self.suite.cases()
        run_name = self.get_run_name()
        runs = plan.runs.find_all(name=run_name)
        run = self.check_need_create_run(plan,
                                         runs,
                                         suite_cases)

        if run is None:
            logger.info('Run not found in plan "{}", create: "{}"'.format(
                plan.name, run_name))

            # Create new test run with cases from test suite
            suite_cases = self.get_suite_cases()

            if not suite_cases:
                logger.error('Empty test cases set.')
                return None

            # suite_cases = self.suite.cases.find(type_id=type_ids[0])
            run = Run(name=run_name,
                      description=self.run_description,
                      suite_id=self.suite.id,
                      milestone_id=self.milestone.id,
                      config_ids=[],
                      case_ids=[x.id for x in suite_cases]
                      )
            plan.add_run(run)
            logger.debug('Run created "{}"'.format(run_name))
        return run
Example #7
    async def rd_zrange_cmd(self):
        """
        Returns the specified range of elements in the
          sorted set stored at key. The elements are
          considered to be ordered from the lowest to
          the highest score. Lexicographical order is
          used for elements with equal score.
          Return value:
          - list of elements in the specified range
            (optionally with their scores, in case
            the WITHSCORES option is given).

        :return: None
        """
        key1, key2 = 'key1', 'key2'
        values = ('TEST1', 'TEST1', 'TEST2', 'TEST3')
        scores = (1, 2, 2, 1)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrange(key1, 0, -1, withscores=True)
            res2 = await conn.zrange(key2, 0, -1, withscores=True)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZRANGE': KEY- {0}, RES_EXIST_LEN - {1}, RES_NOT_EXIST_LEN - {2}\n"
        logger.debug(frm.format(key1, res1, res2))
Example #8
File: deamon.py Project: lkolacz/ewsods
    def stop(self):
        """Stop the daemon with checking is daemon already run"""
        # Get the pid from the pidfile
        logger.debug( "EWSoDS daemon is going to stop." )
        try:
            pf = file(self.pidfile,'r')
            pid = int(pf.read().strip())
            pf.close()
        except IOError:
            pid = None

        if not pid:
            logger.debug( "EWSoDS daemon check pid. [pidfile] : %s does not exist. Daemon not running?" % self.pidfile )
            return 

        try:
            while 1:
                os.kill(pid, SIGTERM)
                time.sleep(0.1)
        except OSError, err:
            err = str(err)
            if err.find("No such process") > 0:
                if os.path.exists(self.pidfile):
                    os.remove(self.pidfile)
            else:
                print str(err)
                sys.exit(1)
Example #9
    async def rd_type_cmd(self):
        """
        Returns the string representation of the type of the value
          stored at key. The different types that can be returned are:
            - string,
            - list,
            - set,
            - zset,
            - hash.

        :return: None
        """
        key1, key2, key3 = 'key_1', 'key_2', 'key_3'
        value1, value2, value3 = 'str', 'list', 'set'
        with await self.rd1 as conn:
            await conn.set(key1, value1)
            await conn.lpush(key2, value2)
            await conn.sadd(key3, value3)
            res1 = await conn.type(key1)
            res2 = await conn.type(key2)
            res3 = await conn.type(key3)
            await conn.delete(key1, key2, key3)
        frm = "GENERIC_CMD - 'TYPE': KEY- {0}, TYPE_STR - {1}," \
              " TYPE_LIST - {2}, TYPE_SET - {3}\n"
        logger.debug(frm.format([key1, key2, key3], res1, res2, res3))
Example #10
    def batch_encode_packets(
            self,
            flows: Union[pd.DataFrame, np.ndarray],
            target_class: Optional[str] = None,
            add_special_tokens: bool = True,
            return_tensors: Optional[Union[str, TensorType]] = TensorType.PYTORCH,
            return_attention_mask: Optional[bool] = True,
    ) -> BatchEncoding:

        if isinstance(flows, pd.DataFrame):
            flows = flows.values

        if flows.shape[1] // 2 != self.max_model_input_sizes:
            logger.debug(f'input number of features ({flows.shape[1] // 2}) does not match '
                         f'max_model_input_sizes ({self.max_model_input_sizes})')
        clusters = self.packet_quantizer.transform(flows)

        if add_special_tokens:
            first_token = self.convert_tokens_to_ids(target_class) if target_class is not None else self.bos_token_id
            expander = partial(self._expand_with_special_tokens, first_token=first_token)
            clusters = np.apply_along_axis(expander, axis=1, arr=clusters)
        else:
            clusters = np.apply_along_axis(self._pad_flow, axis=1, arr=clusters)

        result = {'input_ids': clusters.astype(np.int64)}

        if return_attention_mask:
            token_mask = (clusters != self.pad_token_id).astype(np.int64)
            result.update({'attention_mask': token_mask})

        return BatchEncoding(result, tensor_type=TensorType(return_tensors), prepend_batch_axis=False)
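
A hypothetical call site for the method above (the tokenizer construction and the file name are assumptions; only batch_encode_packets itself comes from the source):

# tokenizer = <instance of the tokenizer class shown above>
# flows = pd.read_csv('flows.csv')
# batch = tokenizer.batch_encode_packets(flows, add_special_tokens=True)
# batch['input_ids']       -> quantized packet clusters as int64 ids
# batch['attention_mask']  -> 1 where the token is not pad_token_id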
Example #11
    async def rd_iscan_cmd(self):
        """
        Incrementally iterate the keys space using async for.

        SCAN is a cursor based iterator. This means that at
          every call of the command, the server returns an
          updated cursor that the user needs to use as the
          cursor argument in the next call. An iteration
          starts when the cursor is set to 0, and terminates
          when the cursor returned by the server is 0.

        :return: None
        """
        values = ['test_%s', 'match_%s', 'scan_%s', 'sort_%s']
        key_tmp = ['key_%s', 'test_%s', 'scan_%s']
        match = b'test*'
        matched_keys = []
        with await self.rd1 as conn:
            for i in range(1, 20):
                await conn.set(choice(key_tmp) % i, choice(values) % i)

            async for key in conn.iscan(match=match):
                matched_keys.append(key)

            await conn.flushdb()
        frm = "GENERIC_CMD - 'ISCAN': KEY_TMP- {0}, MATCH_STR - {1}, MATCHED_KEYS - {2}\n"
        logger.debug(frm.format(key_tmp, match, len(matched_keys)))
Example #12
    async def rd_migrate_cmd(self):
        """
        Atomically transfer a key from a source Redis instance to a destination
          Redis instance. On success the key is deleted from the original instance
          and is guaranteed to exist in the target instance.
        The command is atomic and blocks the two instances for the time required
          to transfer the key, at any given time the key will appear to exist in a
          given instance or in the other instance, unless a timeout error occurs.
          In 3.2 and above, multiple keys can be pipelined in a single call to MIGRATE
          by passing the empty string ("") as key and adding the KEYS clause.

        :return: None
        """
        key1, key2, key3, key4 = 'one', 'two', 'three', 'four'
        value1, value2, value3, value4 = 1, 2, 3, 4
        pattern = '*o*'
        with await self.rd1 as conn:
            await conn.mset(key1, value1, key2, value2, key3, value3, key4,
                            value4)
            db1_res = await conn.keys(pattern)
            res = await conn.migrate(self.rd_conf['host'],
                                     self.rd_conf['port'], '*', 2, 5000)
            await conn.delete(key1, key2, key3, key4)

        with await self.rd2 as conn:
            db2_res = await conn.keys(pattern)
            await conn.delete(key1, key2, key3, key4)
        frm = "GENERIC_CMD - 'MIGRATE': KEY- {0}, MIGRATE_KEY - {1}, DB1_RES - {2}, DB2_RES - {3}\n"
        logger.debug(
            frm.format([key1, key2, key3, key4], pattern, db1_res, db2_res))
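
The pipelined form mentioned in the docstring (Redis 3.2+) is not exercised above: the key argument is the empty string and the keys follow a KEYS clause. A sketch of the raw command (the `execute` passthrough on the connection is an assumption about the client; the command syntax itself is standard Redis):

# MIGRATE <host> <port> "" <dest-db> <timeout> KEYS one two three
# e.g.:
# await conn.execute('MIGRATE', host, port, '', 2, 5000,
#                    'KEYS', 'one', 'two', 'three')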
Example #13
    async def rd_zrevrank_cmd(self):
        """
        Returns the rank of member in the sorted
          set stored at key, with the scores ordered
          from high to low. The rank (or index) is
          0-based, which means that the member with
          the highest score has rank 0.
          Return value:
          - If member exists in the sorted set -
            the rank of member.
          - If member does not exist in the sorted
            set or key does not exist, - nil.

        :return: None
        """
        key1 = 'key1'
        values = ('a', 'b', 'c', 'd', 'f')
        scores = (1, 2, 3, 4, 5, 6)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrevrank(key1, 'b')
            res2 = await conn.zrevrank(key1, 'g')
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZREVRANK': KEY- {0}, " \
              "EXIST_VALUE - {1}, NOT_EXIST_VALUE - {2}\n"
        logger.debug(frm.format(key1, res1, res2))
Example #14
    async def rd_zscan_cmd(self):
        """
        SCAN is a cursor based iterator. This means that at
          every call of the command, the server returns an
          updated cursor that the user needs to use as the
          cursor argument in the next call. An iteration
          starts when the cursor is set to 0, and terminates
          when the cursor returned by the server is 0.

        :return: None
        """
        key1 = 'key1'
        values_tmp = ('TEST%s', 'test%s', 't%s')
        values = (choice(values_tmp) % i for i in range(1, 5))
        scores = (randint(1, 10) for _ in range(1, 5))
        pairs = list(chain(*zip(scores, values)))
        matched_keys = []
        match, cur = b'test*', b'0'
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            while cur:
                cur, keys = await conn.zscan(key1, cur, match=match)
                matched_keys.extend(keys)
            await conn.flushdb()
        frm = "SET_CMD - 'ZSCAN': KEY_TMP- {0}, MATCH_STR - {1}, MATCHED_KEYS - {2}\n"
        logger.debug(frm.format(key1, match, len(matched_keys)))
Example #15
    async def rd_rename_cmd(self):
        """
        Renames key to newkey. It returns an error when key does not exist.
          If newkey already exists it is overwritten, when this happens
          RENAME executes an implicit DEL operation, so if the deleted
          key contains a very big value it may cause high latency even
          if RENAME itself is usually a constant-time operation.

        :return: None
        """
        key1, key2 = 'key_1', 'new_key'
        value1 = 'TEST1'
        res1, res_rename1 = None, None
        try:
            with await self.rd1 as conn:
                await conn.set(key1, value1)
                res_rename1 = await conn.rename(key1, key2)
                res1 = await conn.get(key2)
                res_rename2 = await conn.rename(key1, key2)
        except aioredis.errors.ReplyError as e:
            await conn.delete(key1, key2)
            res_rename2 = e
        frm = "GENERIC_CMD - 'RENAME': KEY- %s, RENAME - %s," \
              " EXIST_KEY - %s, NOT_EXIST_KEY - %s\n"
        logger.debug(frm, [key1, key2], res1, res_rename1, res_rename2)
Example #16
def get_tests_groups_from_jenkins(runner_name, build_number, distros):
    runner_build = Build(runner_name, build_number)
    res = {}
    for b in runner_build.build_data['subBuilds']:

        if b['result'] is None:
            logger.debug("Skipping '{0}' job (build #{1}) because it's still "
                         "running...".format(b['jobName'], b['buildNumber'],))
            continue

        # Get the test group from the console of the job
        z = Build(b['jobName'], b['buildNumber'])
        console = z.get_job_console()
        groups = [keyword.split('=')[1]
                  for line in console
                  for keyword in line.split()
                  if 'run_tests.py' in line and '--group=' in keyword]
        if not groups:
            logger.error("No test group found in console of the job {0}/{1}"
                         .format(b['jobName'], b['buildNumber']))
            continue
        # Use the last group (there can be several groups in upgrade jobs)
        test_group = groups[-1]

        # Get the job suffix
        job_name = b['jobName']
        for distro in distros:
            if distro in job_name:
                sep = '.' + distro + '.'
                job_suffix = job_name.split(sep)[-1]
                break
        else:
            job_suffix = job_name.split('.')[-1]
        res[job_suffix] = test_group
    return res
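
A hypothetical illustration of the job-suffix extraction above:

# distros = ['ubuntu']
# '9.0.system_test.ubuntu.bonding_ha'.split('.ubuntu.')[-1]  -> 'bonding_ha'
# '9.0.smoke_test'.split('.')[-1]                            -> 'smoke_test'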
Example #17
    def get(self, *args, **kwargs):
        node = self._get_node(kwargs.get('path', ''))
        interface = self.get_argument("if")
        status, error_message = 200, None

        if not node or not interface:
            status = BAD_REQUEST
            error_message = "No node or interface specified"
            logger.exception(error_message)
        else:
            try:
                response = ControllerOperations().get_acls(node, interface)
                if response:
                    logger.debug(response)

                    self.set_header("Content-Type", "application/json")
                    self.write(json.dumps(response))
                    # self.finish()
                else:
                    status = NOT_FOUND
                    error_message = "No data found"
            except ValueError as e:
                status = INTERNAL_SERVER_ERROR
                error_message = str(e)

        if error_message:
            logger.exception(error_message)
            if DEBUG:
                self.set_status(status, error_message)
            else:
                self.set_status(status)
        else:
            self.set_status(status)
Example #18
    async def rd_renamenx_cmd(self):
        """
        Renames key to newkey if newkey does not yet exist.
          It returns an error when key does not exist.
          Return value:
            True if key was renamed to newkey.
            False if newkey already exists.

        :return: None
        """
        key1, key2, key3 = 'key_1', 'new_key', 'exist_key'
        value1, value3 = 'TEST1', 'TEST3'
        res1, res_rename1, res_rename2, res_rename3 = None, None, None, None
        try:
            with await self.rd1 as conn:
                await conn.mset(key1, value1, key3, value3)
                res_rename1 = await conn.renamenx(key1, key2)
                res1 = await conn.mget(key2)
                res_rename2 = await conn.renamenx(key2, key3)
                res_rename3 = await conn.renamenx(key1, key2)
        except aioredis.errors.ReplyError as e:
            await conn.delete(key1, key2, key3)
            res_rename3 = e
        frm = "GENERIC_CMD - 'RENAMENX': RENAME - %s," \
              " NOT_EXIST_NEW_KEY - %s, EXIST_KEY_NEW - %s, NOT_EXIST_KEY - %s\n"
        logger.debug(frm, res1, res_rename1, res_rename2, res_rename3)
Example #19
    async def rd_restore_cmd(self):
        """
        Create a key associated with a value that is obtained by
          deserializing the provided serialized value (obtained via DUMP).
          If ttl is 0 the key is created without any expire, otherwise
          the specified expire time (in milliseconds) is set.
          RESTORE will return a "Target key name is busy" error when key
          already exists unless you use the REPLACE modifier (Redis 3.0 or greater).
          RESTORE checks the RDB version and data checksum. If they don't
          match an error is returned.

        :return: None
        """
        key1 = 'key_1'
        value1 = 'TEST1'
        value_restore = "\n\x17\x17\x00\x00\x00\x12\x00\x00\x00\x03\x00\
                        x00\xc0\x01\x00\x04\xc0\x02\x00\x04\xc0\x03\x00\
                        xff\x04\x00u#<\xc0;.\xe9\xdd"

        ttl = 0
        res_restore, res_type, res = None, None, None
        try:
            with await self.rd1 as conn:
                await conn.set(key1, value1)
                await conn.delete(key1)
                res_restore = await conn.restore(key1, ttl, value_restore)
                res_type = await conn.type(key1)
                res = await conn.lrange(key1, 0, -1)
        except aioredis.errors.ReplyError as e:
            res_restore = e
            await conn.delete(key1)
        frm = "GENERIC_CMD - 'RESTORE': RESTORE_RES - %s, KEY_TYPE - %s, RESTORE_VALUE - %s\n"
        logger.debug(frm, res_restore, res_type, res)
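
The serialized payload above is hard-coded; in practice it comes from DUMP, as the docstring says. A minimal round-trip sketch under the same aioredis v1 assumptions (conn.dump/conn.restore are the client's generic commands):

async def rd_dump_restore_roundtrip(rd1):
    with await rd1 as conn:
        await conn.rpush('src_key', 1, 2, 3)
        payload = await conn.dump('src_key')       # serialized value
        await conn.restore('dst_key', 0, payload)  # ttl=0 -> no expire
        restored = await conn.lrange('dst_key', 0, -1)
        await conn.delete('src_key', 'dst_key')
    return restored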
Example #20
def scrape_well():
    logger.debug("Scraping www.well.ca for deals on diapers...")
    root_url = settings.WELL['root_url']
    diaper_category_url = settings.WELL['diaper_category_url']

    # Parse main page
    response = requests.get(root_url + diaper_category_url)
    soup = BeautifulSoup(response.text, "lxml")
    diaper_size_links = [a.attrs.get('href') for a in soup.select(settings.WELL['size_category_css_selector'])]
    diaper_size_list = [p.text for p in soup.select(settings.WELL['size_category_css_selector'])]
    assert(len(diaper_size_links) == len(diaper_size_list))

    # Create product list
    product_list = []
    x = 0

    # For each size category parse for products
    for i in range(len(diaper_size_links)):
        size = diaper_size_list[i].strip()
        r = requests.get(diaper_size_links[i])
        s = BeautifulSoup(r.text, "lxml")
        product_category_links = [a.attrs.get('href') for a in s.select(settings.WELL['product_category_css_selector'])]
        for link in product_category_links:
            r = requests.get(link)
            s = BeautifulSoup(r.text, "lxml")
            product_name = [p.text.strip() for p in s.select(settings.WELL['product_name_css_selector'])]
            assert(len(product_name) == 1)
            product_list.append(WellProduct(product_name[0].strip(), link, 'well.ca', size=size))
            product_list[x].parse_data()

            if DEBUG:
                product_list[x].print_properties()
            else:
                product_list[x].save_to_db()
            x += 1
Example #21
    async def rd_ttl_cmd(self):
        """
        Returns the remaining time to live of a key that
          has a timeout. This introspection capability
          allows a Redis client to check how many seconds
          a given key will continue to be part of the dataset.
          Return value in case of error changed:
            - '-2' - if the key does not exist.
            - '-1' - if the key exists but has no associated expire.

        :return: None
        """
        key1, key2, key3 = 'key_1', 'key_2', 'key_3'
        value1, value2 = 'test_ttl', 'test_ttl_not_ex'
        ttl = 10
        with await self.rd1 as conn:
            await conn.setex(key1, ttl, value1)
            await conn.set(key2, value2)
            await asyncio.sleep(2)
            res1 = await conn.ttl(key1)
            res2 = await conn.ttl(key2)
            res3 = await conn.ttl(key3)
            await conn.delete(key1, key2, key3)
        frm = "GENERIC_CMD - 'TTL': KEY- {0}, TTL_OK - {1}," \
              " TTL_NOT_EXPIRE - {2}, TTL_NOT_EXIST - {3}\n"
        logger.debug(frm.format([key1, key2, key3], res1, res2, res3))
Example #22
def save_as_csv(dfs, suffix='_scaled.csv'):
    for df in dfs:
        file_path_prefix, _ = os.path.splitext(df.fullpath)
        csv_path = file_path_prefix + suffix
        df.to_csv(csv_path)
        logger.debug("CSV File saved to %s" % csv_path)
    return dfs
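
Note that `fullpath` is not a standard pandas.DataFrame attribute, so the caller must attach it before passing the frames in. A minimal sketch of such a caller (everything except the attribute name is an assumption):

import os
import pandas as pd

def load_frames(paths):
    dfs = []
    for path in paths:
        df = pd.read_csv(path)
        df.fullpath = os.path.abspath(path)  # plain instance attribute, not a column (pandas may warn)
        dfs.append(df)
    return dfs

# save_as_csv(load_frames(['measurements.csv']))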
Example #23
File: empty.py Project: gtalarico/foldify
def apply_transactions(transactions, auto=False):
    ''' Apply renaming transactions.
    apply_transactions(transactions)
    transactions = [(old_path, new_path),(old_path),(new_path),...]
    Manual review of transactions is required.
    '''
    if auto:
        logger.warning('Auto is On. No confirmation required.')
    print('='*30)
    if not transactions:
        logger.debug('NO TRANSACTIONS')
        sys.exit('No Transactions to apply.')

    for t in transactions:
        print('[{}] > [{}]'.format(t[0].name, t[1].name))
    print('{} Transactions to apply. Renaming...'.format(len(transactions)))
    count = 0
    if auto or input('EXECUTE ? [y]\n>') == 'y':
        for src, dst in transactions:
            try:
                src.rename(dst)
            except Exception:
                logger.error(sys.exc_info()[0].__name__)
                logger.error('Could not rename: [{}]>[{}]'.format(src, dst))
            else:
                logger.debug('[{}] renamed to [{}]'.format(src, dst))
                count += 1

        print('{} folders renamed.'.format(count))
Example #24
    async def rd_zscore_cmd(self):
        """
        Returns the score of member in the
          sorted set at key. If member does
          not exist in the sorted set, or key
          does not exist, nil is returned.
          Return value:
          - the score of member (a double
            precision floating point number),
            represented as string.

        :return: None
        """
        key1, key2 = 'key1', 'key2'
        values = ('a', 'b', 'c', 'd', 'f')
        scores = (1, 2, 3, 4, 5, 6)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zscore(key1, 'b')
            res2 = await conn.zscore(key1, 'g')
            res3 = await conn.zscore(key2, 'b')
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZSCORE': KEYS - {0}, EXIST_VALUE - {1}," \
              " NOT_EXIST_VALUE - {2}, NOT_EXIST_KEY - {3}\n"
        logger.debug(frm.format((key1, key2), res1, res2, res3))
Example #25
async def send_binary_message(sid):
    """
    Custom event handler with event_name and
    Socket.IO namespace for the event. This handler send
    image file in base64 gzip.
    :param sid: Session ID of the client
    :return: emit file base64 gzip
    """
    content_b64 = b''
    hash_sum = ''
    try:
        async with aiofiles.open('static/test.png', mode='rb') as image_file:
            content = await image_file.read()
            gzip_file = gzip.compress(content)
            content_b64 = base64.b64encode(gzip_file)
            hash_sum = hashlib.md5(content_b64).hexdigest()
    except OSError as e:
        logger.error('Handle ERROR: %s' % e)
    await sio.emit('file response', {
        'data': content_b64.decode('utf-8'),
        'hash_sum': hash_sum
    },
                   room=sid,
                   namespace='/chat',
                   callback=call_back_from_client)
    logger.debug('My EVENT(FILE) (%s): %s' % (sid, content_b64[:20]))
    del content_b64
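
On the receiving side the payload can be verified and unpacked by reversing the encoding above (a sketch of a hypothetical client handler, not part of the source):

# def on_file_response(data):
#     assert hashlib.md5(data['data'].encode('utf-8')).hexdigest() == data['hash_sum']
#     image_bytes = gzip.decompress(base64.b64decode(data['data']))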
Example #26
async def close(sid, message):
    await sio.emit('my response',
                   {'data': 'Room %s is closing' % message['room']},
                   room=message['room'],
                   namespace='/chat')
    await sio.close_room(message['room'], namespace='/chat')
    logger.debug('CLOSE ROOM (%s): %s' % (sid, message))
Example #27
    async def rd_zrevrange_cmd(self):
        """
        Returns the specified range of elements in the
          sorted set stored at key. The elements are
          considered to be ordered from the highest to
          the lowest score. Descending lexicographical
          order is used for elements with equal score.
          Apart from the reversed ordering, ZREVRANGE
          is similar to ZRANGE.
          Return value:
          - list of elements in the specified range
            (optionally with their scores).

        :return: None
        """
        key1 = 'key1'
        values = ('a', 'b', 'c', 'd', 'f')
        scores = (1, 2, 3, 4, 5)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrevrange(key1, 0, -1, withscores=True)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZREVRANGE': KEY- {0}, REVRANGE - {1}\n"
        logger.debug(frm.format(key1, res1))
Example #28
    async def rd_zrevrangebyscore_cmd(self):
        """
        Returns all the elements in the sorted set at
          key with a score between max and min (including
          elements with score equal to max or min). In
          contrary to the default ordering of sorted sets,
          for this command the elements are considered to
          be ordered from high to low scores.
          The elements having the same score are returned
          in reverse lexicographical order.
          Return value:
          - list of elements in the specified score range
            (optionally with their scores).

        :return: None
        """
        key1 = 'key1'
        values = ('a', 'b', 'c', 'd', 'f')
        scores = (1, 2, 3, 4, 5)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrevrangebyscore(key1, withscores=True)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZREVRANGEBYSCORE': KEY- {0}, REVRANGEBYSCORE - {1}\n"
        logger.debug(frm.format(key1, res1))
Example #29
    async def rd_zremrangebyrank_cmd(self):
        """
        Removes all elements in the sorted set stored
          at key with rank between start and stop.
          Both start and stop are 0 -based indexes
          with 0 being the element with the lowest score.
          These indexes can be negative numbers, where
          they indicate offsets starting at the element
          with the highest score. For example: -1 is the
          element with the highest score, -2 the element
          with the second highest score and so forth.
          Return value:
          - the number of elements removed.

        :return: None
        """
        key1 = 'key1'
        values = ('a', 'b', 'c', 'd', 'f')
        scores = (1, 2, 3, 4, 5)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrange(key1, 0, -1)
            res2 = await conn.zremrangebyrank(key1, 0, 3)
            res3 = await conn.zrange(key1, 0, -1)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZREMRANGEBYRANK': KEY- {0}, " \
              "BEFORE - {1}, REM_NUM - {2}, AFTER - {3}\n"
        logger.debug(frm.format(key1, res1, res2, res3))
Example #30
    async def rd_zadd_cmd(self):
        """
        Adds all the specified members with the specified
          scores to the sorted set stored at key. It is
          possible to specify multiple score / member pairs.
          If a specified member is already a member of the
          sorted set, the score is updated and the element
          reinserted at the right position to ensure the
          correct ordering.
          Return value:
          - The number of elements added to the sorted sets,
            not including elements already existing for which
            the score was updated.
          - If the INCR option is specified, the return value
            will be Bulk string reply: the new score of member
            (a double precision floating point number),
            represented as string.

        :return: None
        """
        key1 = 'key1'
        values = ('TEST1', 'TEST1', 'TEST2', 'TEST3')
        scores = (1, 2, 2, 1)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrange(key1, 0, -1, withscores=True)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZADD': KEY- {0}, RES_VALUE - {1}\n"
        logger.debug(frm.format(key1, res1))
Example #31
    async def rd_zrem_cmd(self):
        """
        Removes the specified members from the sorted
          set stored at key. Non existing members are ignored.
          An error is returned when key exists and does not
          hold a sorted set.
          Return value:
          - The number of members removed from the sorted set,
            not including non existing members.

        :return: None
        """
        key1 = 'key1'
        values = ('TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5', 'TEST6')
        scores = (1.3, 1.2, 1.1, 1.5, 2.0, 2.5)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrem(key1, *values[:4])
            res2 = await conn.zrange(key1, 0, -1, withscores=True)
            res3 = await conn.zrem(key1, 'TEST7')
            res4 = await conn.zrange(key1, 0, -1, withscores=True)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZREM': KEY- {0}, " \
              "REM_OK({1}) - {2}, RES_SKIP({3}) - {4}\n"
        logger.debug(frm.format(key1, res1, res2, res3, res4))
Example #32
    async def rd_zremrangebylex_cmd(self):
        """
        When all the elements in a sorted set are inserted
          with the same score, in order to force lexicographical
          ordering, this command removes all elements in the
          sorted set stored at key between the lexicographical
          range specified by min and max.
          Return value:
          - the number of elements removed.

        :return: None
        """
        key1 = 'key1'
        values = ('a', 'b', 'c', 'd', 'f')
        scores = (0, 0, 0, 0, 0)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrange(key1, 0, -1)
            res2 = await conn.zremrangebylex(key1,
                                             min=b'a',
                                             max=b'd',
                                             include_min=True,
                                             include_max=True)
            res3 = await conn.zrange(key1, 0, -1)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZREMRANGEBYLEX': KEY- {0}, " \
              "BEFORE - {1}, REM_NUM - {2}, AFTER - {3}\n"
        logger.debug(frm.format(key1, res1, res2, res3))
Example #33
    async def rd_zrangebyscore_cmd(self):
        """
        Returns all the elements in the sorted set at key
          with a score between min and max (including
          elements with score equal to min or max).
          The elements are considered to be ordered from
          low to high scores.
          Return value:
          - list of elements in the specified score range
          (optionally with their scores).

        :return: None
        """
        key1, key2 = 'key1', 'key2'
        values = ('TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5', 'TEST6')
        scores = (1.3, 1.2, 1.1, 1.5, 2.0, 2.5)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrangebyscore(key1,
                                            min=1.0,
                                            max=1.1,
                                            withscores=True)
            res2 = await conn.zrangebyscore(key1,
                                            min=1.0,
                                            max=2.0,
                                            withscores=True,
                                            offset=1,
                                            count=2)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZRANGEBYSCORE': KEY- {0}, " \
              "RES1 - {1}, RES_NOT_OFFSET - {2}\n"
        logger.debug(frm.format(key1, res1, res2))
Example #34
 def run(self):
     logger.debug('sync consumer...')
     while True:
         try:
             logger.info('start consuming...')
             self.channel.basic_consume(self.on_message,
                                        self._conf.get('queue'))
             self.channel.start_consuming()
         except pika.exceptions.ConnectionClosed:
             logger.error('lost connection...')
             logger.error('reconnect 5 seconds later...')
             self._channel = None
             self._connection = None
             time.sleep(5)
         except KeyboardInterrupt:
             logger.error('stop consuming...')
             self.channel.stop_consuming()
             self.channel.close()
             self.connection.close()
             break
         except Exception as e:
             logger.error(str(e))
             time.sleep(1)
         finally:
             pass
Example #35
def get_downstream_builds_from_html(url):
    """Return list of downstream jobs builds from specified job
    """
    url = "/".join([url, 'downstreambuildview/'])
    logger.debug("Request downstream builds data from {}".format(url))
    req = urllib2.Request(url)
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    s = opener.open(req).read()
    opener.close()
    jobs = []
    raw_downstream_builds = re.findall(
        r'.*downstream-buildview.*href="(/job/\S+/[0-9]+/).*', s)
    for raw_build in raw_downstream_builds:
        sub_job_name = raw_build.split('/')[2]
        sub_job_build = raw_build.split('/')[3]
        build = Build(name=sub_job_name, number=sub_job_build)
        jobs.append(
            {
                'name': build.name,
                'number': build.number,
                'result': build.build_data['result']
            }
        )

    return jobs
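
For a hypothetical matched href the name/number extraction above works like this:

# raw_build = '/job/deploy_cluster/42/'
# raw_build.split('/')     -> ['', 'job', 'deploy_cluster', '42', '']
# raw_build.split('/')[2]  -> 'deploy_cluster'   (sub_job_name)
# raw_build.split('/')[3]  -> '42'               (sub_job_build)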
Example #36
    async def rd_zrangebylex_cmd(self):
        """
       When all the elements in a sorted set are
         inserted with the same score, in order to
         force lexicographical ordering, this command
         returns all the elements in the sorted set
         at key with a value between min and max.
         If the elements in the sorted set have different
         scores, the returned elements are unspecified.
          Return value:
          - list of elements in the specified score range.

        :return: None
        """
        key1, key2 = 'key1', 'key2'
        values = ('TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5', 'TEST6')
        scores = (1, 1, 1, 1, 1, 1)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrangebylex(key1,
                                          b'TEST2',
                                          b'TEST5',
                                          include_min=True,
                                          include_max=True)
            res2 = await conn.zrangebylex(key1,
                                          b'TEST2',
                                          b'TEST5',
                                          include_min=False,
                                          include_max=False)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZRANGEBYLEX': KEY- {0}, " \
              "RES_INCLUDE - {1}, RES_NOT_INCLUDE - {2}\n"
        logger.debug(frm.format(key1, res1, res2))
Example #37
    async def rd_zunionstore_cmd(self):
        """
        Computes the union of the given sorted sets and
          stores the result in the destination key.
          Optional weights multiply the score of every
          element of the corresponding input set, and the
          aggregate option (SUM, MIN or MAX) controls how
          scores of elements present in several sets are
          combined.
          Return value:
          - the number of elements in the resulting sorted
            set at the destination key.

        :return: None
        """
        key1, key2, dest_key = 'key1', 'key2', 'key3'
        values = ('a', 'b', 'c', 'd', 'f')
        scores1, scores2 = (1, 2, 3, 4, 5, 6), (6, 5, 4, 3, 2, 1)
        pairs1 = list(chain(*zip(scores1, values)))
        pairs2 = list(chain(*zip(scores2, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs1)
            await conn.zadd(key2, *pairs2)
            await conn.zunionstore(dest_key, (key1, 1), (key2, 2),
                                   with_weights=True,
                                   aggregate='ZSET_AGGREGATE_MAX')
            res1 = await conn.zrange(dest_key, 0, -1, withscores=True)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZUNIONSTORE': KEYS - {0}, UNION_RES - {1}\n"
        logger.debug(frm.format((key1, key2), res1))
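
A worked example of the union above for member 'a' (weights 1 and 2, aggregate MAX):

# score in key1: 1, weighted: 1 * 1 = 1
# score in key2: 6, weighted: 6 * 2 = 12
# aggregated (MAX): 12  -> score of 'a' in the destination set key3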
Example #38
    async def rd_zcount_cmd(self):
        """
        Returns the number of elements in the sorted
          set at key with a score between min and max.
          The command has a complexity of just O(log(N))
          because it uses elements ranks (see ZRANK) to
          get an idea of the range. Because of this there
          is no need to do a work proportional to the size
          of the range.

          Return value:
          - Integer reply: the number of elements in the
            specified score range.
        :return: None
        """
        key1, key2 = 'key1', 'key2'
        values = ('TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5')
        scores = (1, 2, 2, 1, 2)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zcount(key1, 2, 2)
            res2 = await conn.zcount(key2)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZCOUNT': KEY- {0}, ZCOUNT_EXIST_SET - {1}," \
              " ZCOUNT_NOT_EXIST_SET - {2}\n"
        logger.debug(frm.format(key1, res1, res2))
Example #39
def upload_tests_descriptions(testrail_project, section_id, tests, check_all_sections):
    tests_suite = testrail_project.get_suite_by_name(TestRailSettings.tests_suite)
    check_section = None if check_all_sections else section_id
    existing_cases = [
        case["custom_test_group"]
        for case in testrail_project.get_cases(suite_id=tests_suite["id"], section_id=check_section)
    ]
    for test_case in tests:
        if test_case["custom_test_group"] in existing_cases:
            logger.debug(
                'Skipping uploading "{0}" test case because it '
                'already exists in "{1}" tests section.'.format(
                    test_case["custom_test_group"], TestRailSettings.tests_suite
                )
            )
            continue

        logger.debug(
            'Uploading test "{0}" to TestRail project "{1}", '
            'suite "{2}", section "{3}"'.format(
                test_case["custom_test_group"],
                TestRailSettings.project,
                TestRailSettings.tests_suite,
                TestRailSettings.tests_section,
            )
        )
        testrail_project.add_case(section_id=section_id, case=test_case)
Example #40
    async def rd_zrevrangebylex_cmd(self):
        """
        When all the elements in a sorted set are inserted
          with the same score, in order to force lexicographical
          ordering, this command returns all the elements
          in the sorted set at key with a value between max
          and min.
          Return value:
          - list of elements in the specified score range.

        :return: None
        """
        key1 = 'key1'
        values = ('a', 'b', 'c', 'd', 'f')
        scores = (1, 2, 2, 3, 3)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrevrangebylex(key1,
                                             min=b'a',
                                             max=b'd',
                                             include_min=True,
                                             include_max=True)
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZREVRANGEBYLEX': KEY- {0}, REVRANGEBYLEX - {1}\n"
        logger.debug(frm.format(key1, res1))
Example #41
    async def rd_zrank_cmd(self):
        """
        Returns the rank of member in the sorted set
          stored at key, with the scores ordered from
          low to high. The rank (or index) is 0-based,
          which means that the member with the lowest
          score has rank 0.
          Return value:
          - If member exists in the sorted set, Integer
            reply: the rank of member.
          - If member does not exist in the sorted set
            or key does not exist, Bulk string reply: nil.

        :return: None
        """
        key1, key2 = 'key1', 'key2'
        values = ('TEST1', 'TEST2', 'TEST3', 'TEST4', 'TEST5', 'TEST6')
        scores = (1.3, 1.2, 1.1, 1.5, 2.0, 2.5)
        pairs = list(chain(*zip(scores, values)))
        with await self.rd1 as conn:
            await conn.zadd(key1, *pairs)
            res1 = await conn.zrank(key1, values[-1])
            res2 = await conn.zrank(key2, values[-1])
            await conn.delete(key1)
        frm = "SORTED_SET_CMD - 'ZRANK': KEYS- {0}, " \
              "RES_EXIST - {1}, RES_NOT_EXIST - {2}\n"
        logger.debug(frm.format((key1, key2), res1, res2))
Example #42
def get_tests_descriptions(milestone_id, tests_include, tests_exclude, groups):
    import_tests()

    tests = []

    for jenkins_suffix in groups:
        group = groups[jenkins_suffix]
        for case in TestProgram(groups=[group]).cases:
            if not case.entry.info.enabled:
                continue
            if tests_include:
                if tests_include not in case.entry.home.func_name:
                    logger.debug(
                        "Skipping '{0}' test because it doesn't "
                        "contain '{1}' in method name".format(case.entry.home.func_name, tests_include)
                    )
                    continue
            if tests_exclude:
                if tests_exclude in case.entry.home.func_name:
                    logger.debug(
                        "Skipping '{0}' test because it contains"
                        " '{1}' in method name".format(case.entry.home.func_name, tests_exclude)
                    )
                    continue

            docstring = case.entry.home.func_doc or ""
            docstring = "\n".join([s.strip() for s in docstring.split("\n")])

            steps = [{"content": s, "expected": "pass"} for s in docstring.split("\n") if s and s[0].isdigit()]

            test_duration = re.search(r"Duration\s+(\d+[s,m])\b", docstring)
            title = docstring.split("\n")[0] or case.entry.home.func_name
            test_group = case.entry.home.func_name

            if case.entry.home.func_name in GROUPS_TO_EXPAND:
                """Expand specified test names with the group names that are
                   used in jenkins jobs where this test is started.
                """
                title = " - ".join([title, jenkins_suffix])
                test_group = "_".join([case.entry.home.func_name, jenkins_suffix])

            test_case = {
                "title": title,
                "type_id": 1,
                "milestone_id": milestone_id,
                "priority_id": 5,
                "estimate": test_duration.group(1) if test_duration else "3m",
                "refs": "",
                "custom_test_group": test_group,
                "custom_test_case_description": docstring or " ",
                "custom_test_case_steps": steps,
            }

            if not any([x["custom_test_group"] == test_group for x in tests]):
                tests.append(test_case)
            else:
                logger.warning("Testcase '{0}' run in multiple Jenkins jobs!".format(test_group))
    return tests
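
A hypothetical test docstring illustrates the parsing above:

# docstring = """Deploy a cluster
#
# Scenario:
#     1. Create cluster
#     2. Run network verification
#
# Duration 30m
# """
# After per-line strip(), lines whose first character is a digit become steps
# ({"content": "1. Create cluster", "expected": "pass"}, ...), the Duration
# regex yields estimate "30m", and the first docstring line becomes the title.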
Example #43
 def decorated_function(*args, **kwargs):
     if 'survey_key' not in kwargs:
         logger.debug("with_survey: No survey key given!")
         abort(404)
     request.survey = Survey.collection.find_one({'key':kwargs['survey_key']})
     if not request.survey:
         logger.debug("with_survey: Survey not found!")
         abort(404)
     return f(*args, **kwargs)
Example #44
 def decorated_function(*args, **kwargs):
     if not hasattr(request,'survey') or not hasattr(request,'user'):
         logger.debug("with_admin: survey or user not loaded!")
         abort(404)
     print(request.user.document_id, str(request.survey['user']))
     if request.survey['user'] != request.user:
         logger.debug("with_admin: not an admin!")
         abort(403)
     return f(*args, **kwargs)
Example #45
def get_build_artifact(url, artifact):
    """Return content of job build artifact
    """
    url = "/".join([url, 'artifact', artifact])
    logger.debug("Request artifact content from {}".format(url))
    req = urllib2.Request(url)
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    s = opener.open(req).read()
    opener.close()
    return s
Example #46
File: builds.py Project: SergK/fuel-qa
    def get_test_data(self, url, result_path=None):
        if result_path:
            test_url = "/".join(
                [url.rstrip("/"), 'testReport'] + result_path + ['api/json'])
        else:
            test_url = "/".join([url.rstrip("/"), 'testReport', 'api/json'])

        logger.debug("Request test data from {}".format(test_url))
        response = urllib2.urlopen(test_url)
        return json.load(response)
Example #47
File: database.py Project: dyan0123/Utils
	def __connect(self):
		conn_cnt = 0
		logger.info('trying to connect to sqlserver on %s:%s' % (s.get('host'), s.get('port')))
		while conn_cnt < s.get('reconnect_cnt', 3):
			try:
				conn = pymssql.connect(host=s.get('host'), port=s.get('port'), user=s.get('user'),\
					password=s.get('password'), database=s.get('database'), charset=s.get('charset'))
				return conn
			except Exception as e:  # add a specified exception
				conn_cnt += 1
				logger.debug('connecting failed, times to reconnect: %d' % conn_cnt)
Example #48
File: reporter.py Project: ehles/trep
 def get_or_create_plan(self):
     """Get exists or create new TestRail Plan"""
     plan_name = self.get_plan_name()
     plan = self.project.plans.find(name=plan_name)
     if plan is None:
         plan = self.project.plans.add(name=plan_name,
                                       description=self.plan_description,
                                       milestone_id=self.milestone.id)
         logger.debug('Plan created"{}"'.format(plan_name))
     else:
         logger.debug('Plan found "{}"'.format(plan_name))
     return plan
Example #49
    def bugs_statistics(self):
        if self._bugs_statistics != {}:
            return self._bugs_statistics
        logger.info('Collecting stats for TestRun "{0}" on "{1}"...'.format(
            self.run['name'], self.run['config'] or 'default config'))

        for test in self.tests:
            logger.debug('Checking "{0}" test...'.format(
                test['title'].encode('utf8')))
            test_results = sorted(
                self.project.get_results_for_test(test['id'], self.results),
                key=lambda x: x['id'], reverse=True)

            linked_bugs = []
            is_blocked = False

            for result in test_results:
                if result['status_id'] in self.blocked_statuses:
                    if self.check_blocked:
                        new_bug_link = self.handle_blocked(test, result)
                        if new_bug_link:
                            linked_bugs.append(new_bug_link)
                            is_blocked = True
                            break
                    if result['custom_launchpad_bug']:
                        linked_bugs.append(result['custom_launchpad_bug'])
                        is_blocked = True
                        break
                if result['status_id'] in self.failed_statuses \
                        and result['custom_launchpad_bug']:
                    linked_bugs.append(result['custom_launchpad_bug'])

            bug_ids = set([re.search(r'.*bug/(\d+)/?', link).group(1)
                           for link in linked_bugs
                           if re.search(r'.*bug/(\d+)/?', link)])

            for bug_id in bug_ids:
                if bug_id in self._bugs_statistics:
                    self._bugs_statistics[bug_id][test['id']] = {
                        'group': test['custom_test_group'] or 'manual',
                        'config': self.run['config'] or 'default',
                        'blocked': is_blocked
                    }

                else:
                    self._bugs_statistics[bug_id] = {
                        test['id']: {
                            'group': test['custom_test_group'] or 'manual',
                            'config': self.run['config'] or 'default',
                            'blocked': is_blocked
                        }
                    }
        return self._bugs_statistics
Example #50
def get_jobs_for_view(view):
    """Return list of jobs from specified view
    """
    view_url = "/".join([JENKINS["url"], 'view', view, 'api/json'])
    logger.debug("Request view data from {}".format(view_url))
    req = urllib2.Request(view_url)
    opener = urllib2.build_opener(urllib2.HTTPHandler)
    s = opener.open(req).read()
    opener.close()
    view_data = json.loads(s)
    jobs = [job["name"] for job in view_data["jobs"]]
    return jobs
Example #51
    def delete(self, *args, **kwargs):
        logger.debug('Delete acl payload: {}'.format(self.request.body))
        result, msg = ControllerOperations().delete_acl(self.request.body)
        response = {
            'ret_code': result,
            'message': msg
        }

        if result is True:
            self.set_header('Content-Type', 'application/json')
            self.write(response)
        else:
            self.set_status(INTERNAL_SERVER_ERROR, msg)
Example #52
def publish_results(project, milestone_id, test_plan, suite_id, config_id, results):
    test_run_ids = [
        run["id"]
        for entry in test_plan["entries"]
        for run in entry["runs"]
        if suite_id == run["suite_id"] and config_id in run["config_ids"]
    ]
    logger.debug(
        'Looking for previous tests runs on "{0}" using tests suite '
        '"{1}"...'.format(project.get_config(config_id)["name"], project.get_suite(suite_id)["name"])
    )
    previous_tests_runs = project.get_previous_runs(milestone_id=milestone_id, suite_id=suite_id, config_id=config_id)
    cases = project.get_cases(suite_id=suite_id)
    tests = project.get_tests(run_id=test_run_ids[0])
    results_to_publish = []

    for result in results:
        test = project.get_test_by_group(run_id=test_run_ids[0], group=result.group, tests=tests)
        if not test:
            logger.error("Test for '{0}' group not found: {1}".format(result.group, result.url))
            continue
        existing_results_versions = [r["version"] for r in project.get_results_for_test(test["id"])]
        case_id = project.get_case_by_group(suite_id=suite_id, group=result.group, cases=cases)["id"]
        if result.version in existing_results_versions or not result.status:
            if result.status != "passed":
                previous_results = project.get_all_results_for_case(run_ids=[test_run_ids[0]], case_id=case_id)
                lp_bug = get_existing_bug_link(previous_results)
                if lp_bug:
                    result.launchpad_bug = lp_bug["bug_link"]
                    result.launchpad_bug_status = lp_bug["status"]
                    result.launchpad_bug_importance = lp_bug["importance"]
                    result.launchpad_bug_title = lp_bug["title"]
            continue
        if result.status != "passed":
            run_ids = [run["id"] for run in previous_tests_runs[0 : int(TestRailSettings.previous_results_depth)]]
            previous_results = project.get_all_results_for_case(run_ids=run_ids, case_id=case_id)
            lp_bug = get_existing_bug_link(previous_results)
            if lp_bug:
                result.launchpad_bug = lp_bug["bug_link"]
                result.launchpad_bug_status = lp_bug["status"]
                result.launchpad_bug_importance = lp_bug["importance"]
                result.launchpad_bug_title = lp_bug["title"]
        results_to_publish.append(result)

    try:
        if len(results_to_publish) > 0:
            project.add_results_for_cases(run_id=test_run_ids[0], suite_id=suite_id, tests_results=results_to_publish)
    except Exception:
        logger.error("Failed to add new results for tests: {0}".format([r.group for r in results_to_publish]))
        raise
    return results_to_publish
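get_existing_bug_link() is defined elsewhere in the source. Purely as an assumption for illustration, a minimal version consistent with how its return value is used above would scan previous results from newest to oldest and return the first one that carries a Launchpad bug; the input field names are guesses, only the returned keys mirror the code above:

def get_existing_bug_link_sketch(previous_results):
    # Hypothetical helper, not the original implementation: pick the newest
    # previous result that has a bug attached and return its bug fields.
    for result in sorted(previous_results, key=lambda r: r["id"], reverse=True):
        if result.get("custom_launchpad_bug"):
            return {
                "bug_link": result["custom_launchpad_bug"],
                "status": result.get("custom_launchpad_bug_status"),
                "importance": result.get("custom_launchpad_bug_importance"),
                "title": result.get("custom_launchpad_bug_title"),
            }
    return None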
Example #53
0
    def handle_blocked(self, test, result):
        if result['custom_launchpad_bug']:
            return False
        m = re.search(r'Blocked by "(\S+)" test.', result['comment'])
        if m:
            blocked_test_group = m.group(1)
        else:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have upstream test name in its '
                         'comments!'.format(result['id'],
                                            test['custom_test_group']))
            return False

        if not result['version']:
            logger.debug('Blocked result #{0} for test {1} does '
                         'not have version, can\'t find upstream '
                         'test case!'.format(result['id'],
                                             test['custom_test_group']))
            return False

        bug_link = None
        blocked_test = self.get_test_by_group(blocked_test_group,
                                              result['version'])
        if not blocked_test:
            return False
        logger.debug('Test {0} was blocked by failed test {1}'.format(
            test['custom_test_group'], blocked_test_group))

        blocked_results = self.project.get_results_for_test(
            blocked_test['id'])

        # Since we manually add results to failed tests with statuses
        # ProdFailed, TestFailed, etc. and attach bugs links to them,
        # we could skip original version copying. So look for test
        # results with target version, but allow to copy links to bugs
        # from other results of the same test (newer are checked first)
        if not any(br['version'] == result['version'] and
                   br['status_id'] in self.failed_statuses
                   for br in blocked_results):
            logger.debug('Did not find result for test {0} with version '
                         '{1}!'.format(blocked_test_group, result['version']))
            return False

        for blocked_result in sorted(blocked_results,
                                     key=lambda x: x['id'],
                                     reverse=True):
            if blocked_result['status_id'] not in self.failed_statuses:
                continue

            if blocked_result['custom_launchpad_bug']:
                bug_link = blocked_result['custom_launchpad_bug']
                break

        if bug_link is not None:
            result['custom_launchpad_bug'] = bug_link
            self.project.add_raw_results_for_test(test['id'], result)
            logger.info('Added bug {0} to blocked result of {1} test.'.format(
                bug_link, test['custom_test_group']))
            return bug_link
        return False
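The selection rule described in the comment above (newest results first, failed statuses only, only results that carry a bug link) can be shown in isolation; the status ids and result dicts below are made up for the sketch:

FAILED_STATUSES = {5, 8}   # assumption: numeric TestRail status ids treated as "failed"

blocked_results = [
    {'id': 101, 'status_id': 5, 'custom_launchpad_bug': None},
    {'id': 102, 'status_id': 1, 'custom_launchpad_bug': None},
    {'id': 103, 'status_id': 8,
     'custom_launchpad_bug': 'https://bugs.launchpad.net/bugs/1549347'},
]

bug_link = next(
    (r['custom_launchpad_bug']
     for r in sorted(blocked_results, key=lambda r: r['id'], reverse=True)
     if r['status_id'] in FAILED_STATUSES and r['custom_launchpad_bug']),
    None)
print(bug_link)   # the link attached to result 103, the newest failed result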
Example #54
0
def create_dataframes(filenames, headers=['X', 'Y', 'E', 'DX']):
    '''
    Parses the CSV files into pandas DataFrames.
    @param filenames: list of CSV file paths to parse
    @param headers: list of column headers present in the files
    @return: list of dataframes
    '''
    dfs = []
    for filename in filenames:
        df = pd.read_csv(filename, skiprows=[0, 1], names=headers)
        df.filename = filename
        df.fullpath = os.path.abspath(filename)
        logger.debug("File: %s; Shape: %s; Header: %s" % (df.fullpath, df.shape, df.columns.values))
        dfs.append(df)
    return dfs
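A self-contained usage sketch for create_dataframes(); the file content is made up, and logger/pd are assumed to be set up as in the snippet above:

import os
import tempfile

tmp = tempfile.NamedTemporaryFile(mode='w', suffix='.csv', delete=False)
tmp.write("# header line 1\n# header line 2\n"
          "0.1,10.0,0.5,0.01\n0.2,12.0,0.6,0.01\n0.3,11.5,0.4,0.01\n")
tmp.close()

dfs = create_dataframes([tmp.name])
print(dfs[0].shape)        # (3, 4): three data rows, default X/Y/E/DX columns
os.unlink(tmp.name)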
Example #55
0
def discard_points(dfs, begin, end):
    '''
    Removes the first and last n points from each dataframe in the list.
    @param dfs: list of dataframes to trim in place
    @param begin: number of points to remove at the beginning
    @param end: number of points to remove at the end
    @return: the same list of dataframes, trimmed
    '''
    for df in dfs:
        if begin > 0:
            df.drop(df.index[0:begin], inplace=True)
        if end > 0:
            df.drop(df.index[-end:], inplace=True)
        logger.debug("File: %s; New Shape: %s." % (df.filename, df.shape))
    return dfs
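And a matching sketch for discard_points(), using a throwaway DataFrame instead of a parsed file; the filename attribute is set by hand only because the debug log above expects it:

import pandas as pd

df = pd.DataFrame({'X': range(6), 'Y': range(6), 'E': [0.1] * 6, 'DX': [0.0] * 6})
df.filename = 'fake.csv'   # hypothetical name, only used for the debug log

trimmed = discard_points([df], begin=2, end=1)
print(trimmed[0].shape)    # (3, 4): two points dropped at the start, one at the end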
Example #56
0
def publish_results(project, milestone_id, test_plan,
                    suite_id, config_id, results):
    test_run_ids = [run['id'] for entry in test_plan['entries']
                    for run in entry['runs'] if suite_id == run['suite_id']
                    and config_id in run['config_ids']]
    logger.debug('Looking for previous tests runs on "{0}" using tests suite '
                 '"{1}"...'.format(project.get_config(config_id)['name'],
                                   project.get_suite(suite_id)['name']))
    previous_tests_runs = project.get_previous_runs(milestone_id=milestone_id,
                                                    suite_id=suite_id,
                                                    config_id=config_id)
    cases = project.get_cases(suite_id=suite_id)
    tests = project.get_tests(run_id=test_run_ids[0])
    results_to_publish = []

    for result in results:
        test = project.get_test_by_group(run_id=test_run_ids[0],
                                         group=result.group,
                                         tests=tests)
        if not test:
            logger.error("Test for '{0}' group not found: {1}".format(
                result.group, result.url))
            continue
        existing_results_versions = [r['version'] for r in
                                     project.get_results_for_test(test['id'])]
        if result.version in existing_results_versions:
            continue
        if result.status != 'passed':
            depth = int(TestRailSettings.previous_results_depth)
            run_ids = [run['id'] for run in previous_tests_runs[:depth]]
            case_id = project.get_case_by_group(suite_id=suite_id,
                                                group=result.group,
                                                cases=cases)['id']
            previous_results = project.get_all_results_for_case(
                run_ids=run_ids,
                case_id=case_id)
            result.launchpad_bug = get_existing_bug_link(previous_results)
        results_to_publish.append(result)
    try:
        if len(results_to_publish) > 0:
            project.add_results_for_cases(run_id=test_run_ids[0],
                                          suite_id=suite_id,
                                          tests_results=results_to_publish)
    except Exception:
        logger.error('Failed to add new results for tests: {0}'.format(
            [r.group for r in results_to_publish]
        ))
        raise
    return results_to_publish
Example #57
0
    def post(self, *args, **kwargs):
        logger.debug('Apply acl payload: {}'.format(self.request.body))
        result, msg = ControllerOperations().apply_acls(self.request.body)
        response = {
            'ret_code': result,
            'message': msg
        }

        if result is True:
            logger.debug('Result: {}.\n Message: {}'.format('ok', msg))

            self.set_header('Content-Type', 'application/json')
            self.write(response)
        else:
            self.set_status(INTERNAL_SERVER_ERROR, json.dumps(msg))
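A rough client-side sketch of exercising a handler like the one above, assuming it is mounted at a hypothetical /acl route on port 8888; neither the route, the port, nor the payload shape are part of the original snippet, and urllib2 is used because the surrounding examples target Python 2:

import json
import urllib2

payload = json.dumps({'acl_name': 'allow-web', 'rules': []})   # hypothetical payload
req = urllib2.Request('http://localhost:8888/acl', data=payload,
                      headers={'Content-Type': 'application/json'})
resp = urllib2.urlopen(req)            # urllib2 sends a POST when data is given
print(json.loads(resp.read()))         # {'ret_code': True, 'message': ...} on success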
Example #58
0
File: reporter.py Project: ehles/trep
    def get_or_create_test_run(self, plan, cases):
        """Get an existing TestRail run or create a new one."""
        run_name = self.get_run_name()
        run = plan.runs.find(name=run_name)
        if run is None:
            run = Run(name=run_name,
                      description=self.run_description,
                      suite_id=self.suite.id,
                      milestone_id=self.milestone.id,
                      config_ids=[],
                      case_ids=[x.id for x in cases])
            plan.add_run(run)
            logger.debug('Run created "{}"'.format(run_name))
        else:
            logger.debug('Run found "{}"'.format(run_name))
        return run
Example #59
0
    def create(self):
        if not self.id:
            fields = list(self.db_fields.keys())
            columns = ", ".join(fields)
            placeholders = ", ".join(["%s"] * len(fields))
            ins_stat = "INSERT INTO {0} ({1}) VALUES ({2}) RETURNING id".format(
                self.table, columns, placeholders)
            stmt = self.self_cursor.mogrify(
                ins_stat, [self.db_fields[field] for field in fields])
            try:
                self.self_cursor.execute(stmt)
            except Exception as e:
                logger.error("Cannot create record: {0}".format(e))
                return
            logger.debug("Created new object with statement: {0}".format(stmt))
            self.id = self.self_cursor.fetchone()[0]
Example #60
0
File: deamon.py Project: lkolacz/ewsods
    def start(self):
        """Start the daemon, checking whether it is already running."""
        try:
            with open(self.pidfile, 'r') as pf:
                pid = pf.read().strip()
        except IOError:
            pid = None

        if pid is not None and os.path.exists("/proc/" + pid):
            message = "pidfile %s already exists. Daemon already running!\n" % self.pidfile
            logger.error(message)
            sys.exit(1)

        logger.debug("EWSoDS daemon is going to start.")
        self.daemonize()
        self.run()
        logger.debug("EWSoDS daemon has been started.")