Example #1
    def _createIndex(self, indexName, body, mappings, settings):
        logging.info('Create index %s ...', indexName)
        body = self._createIndexConfig(body, mappings, settings)
        logging.debug(json.dumps(body, ensure_ascii=False, indent=4))

        response = self.esConn.indices.create(index=indexName, body=body)
        return self._isOk(response)
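
The _createIndexConfig and _isOk helpers are elided here. A minimal sketch of _isOk, assuming the usual dict-shaped create-index response of the Elasticsearch Python client ({"acknowledged": true, ...}); this is an assumption, not the source's implementation:

    def _isOk(self, response):
        # hypothetical sketch: treat index creation as successful when the
        # response body acknowledges it
        return bool(response.get('acknowledged'))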
Example #2
def create_batch_archives_and_send_to_s3(s3_buckets):
    elasticsearch_docs = []
    with TemporaryDirectory() as temporary_directory:
        buckets_to_sync = set()

        # this step only creates the archives and deletes the original files;
        # archive creation and the s3 sync are decoupled to avoid race
        # conditions on S3_SYNC_BUCKETS_FOLDER
        for bucket in s3_buckets:
            bucket_directory = os.path.join(S3_BATCH_BUCKETS_FOLDER, bucket)
            for directory, subdirectories, files in os.walk(bucket_directory):
                if files:
                    logging.debug(directory)
                    logging.debug(files)
                    bucket_elasticsearch_docs = create_tar_archives(bucket, directory, files, temporary_directory)
                    if bucket_elasticsearch_docs:
                        elasticsearch_docs += bucket_elasticsearch_docs
                        buckets_to_sync.add(bucket)

        # this step syncs buckets to s3 using s3_client.upload_file for performance
        for bucket in buckets_to_sync:
            try:
                sync_bucket_folder_and_delete_files(bucket, temporary_directory)
            except Exception as e:
                logging.exception(e)
    return elasticsearch_docs
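
sync_bucket_folder_and_delete_files is not shown. A minimal sketch of what it could look like, assuming boto3 and a <temporary_directory>/<bucket>/<key> layout on disk (both the layout and the boto3 client are assumptions, not from the source):

import os

import boto3

s3_client = boto3.client('s3')

def sync_bucket_folder_and_delete_files(bucket, temporary_directory):
    # upload every file under the bucket folder, deleting each local copy
    # once its upload succeeds
    bucket_directory = os.path.join(temporary_directory, bucket)
    for directory, _, files in os.walk(bucket_directory):
        for file_name in files:
            local_path = os.path.join(directory, file_name)
            s3_key = os.path.relpath(local_path, bucket_directory)
            s3_client.upload_file(local_path, bucket, s3_key)
            os.remove(local_path)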
Example #3
 def commit_all(self, msg):
     try:
         self.repo.git.add('.')
         self.repo.git.commit('-m', msg)
     except GitCommandError as err:
         # nothing to commit
         log.debug(err)
Example #4
def httpResponse(url, method='POST', data=None, headers=None):
    response = None
    if headers is None:
        headers = {
            "Origin": "https://www.baidu.com",
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) " +\
                "AppleWebKit/537.36 (KHTML, like Gecko) " +\
                "Chrome/34.0.1847.116 Safari/537.36",
            "Content-Type": "application/x-www-form-urlencoded",
            "Referer": "https://www.baidu.com/login?forward=http://localhost",
            "Accept-Encoding": "gzip,deflate,sdch",
            "Accept-Language": "zh-CN,zh;q=0.8,en;q=0.6,zh-TW;q=0.4",
            "Cookie": "sessionid=ubwzabfvvyy0ft4y4nk5qlduv7nswrim",
        }
    try:
        request = urllib2.Request(url,
                                  data,
                                  headers=headers,
                                  origin_req_host=None,
                                  unverifiable=False)
        request.get_method = lambda: method
        try:
            response = urllib2.urlopen(request)  #python <= 2.6
        except AttributeError, e:
            logging.debug(u'URLopen AttributeError: {msg}'.format(msg=e))
            response = urllib2.urlopen(
                request, context=ssl._create_unverified_context())  #python 2.7
    except urllib2.HTTPError, e:
        logging.error(u'HTTP server ({url}) could not complete the request. '
                      u'Error code: {code}'.format(url=url, code=e.code),
                      exc_info=True)
    return response
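
The snippet above is Python 2 only (urllib2 and comma-style except clauses). A rough Python 3 equivalent using urllib.request, as a sketch with the same unverified-SSL behaviour (the function name is hypothetical):

import ssl
import logging
import urllib.error
import urllib.request

def http_response_py3(url, method='POST', data=None, headers=None):
    request = urllib.request.Request(url, data=data, headers=headers or {},
                                     method=method)
    try:
        return urllib.request.urlopen(
            request, context=ssl._create_unverified_context())
    except urllib.error.HTTPError as e:
        logging.error('HTTP server (%s) could not complete the request. '
                      'Error code: %s', url, e.code, exc_info=True)
        return None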
Example #5
def _download_resource_type_templates(project_name, resource_kind,
                                      working_dir):
    try:
        resource_names = openshift_client.get_resource_type_names(
            project_name, resource_kind)
    except ApiException as err:
        log.warning('Unable to list {0} resources in project {1}'.format(
            resource_kind, project_name))
        log.debug(err)
        return

    for name in resource_names:
        try:
            resource_file_name = '{0}/{1}/{2}.yaml'.format(
                working_dir, resource_kind, name)

            utils.create_dir('{0}/{1}'.format(working_dir, resource_kind))
            single_resource = openshift_client.get_single_resource_yaml(
                project_name, resource_kind, name)

            with open(resource_file_name, 'w') as resource_file:
                yaml.dump(single_resource.to_dict(),
                          resource_file,
                          default_flow_style=False)
        except ApiException as err:
            log.warning('Unable to back up {0}'.format(resource_file_name))
            log.debug(err)
Example #6
  def get_videolinks(self, episode_ids: Dict[int, str]) -> Dict[int, str]:
    # get video source html page
    source_info_path = NineAnime.EPISODE_INFO
    url_decoder = VideoURLDecoder()
    logging.debug('headers:\n%s', self.request.headers)
    videolinks = {}
    for current_ep, episode_hash in episode_ids.items():
      if not episode_hash:
        logging.info(f'Hash not found for episode {current_ep}! Skipping.')
        continue

      logging.debug("Episode %s data-id=%s", current_ep, episode_hash)
      # sensitive code
      content = self.request.get(source_info_path, {
          'id': episode_hash})
      try:
        source_html_url = url_decoder.get(json.loads(content)['url'])
        logging.info(f'Link for episode {current_ep}')
      except Exception:
        logging.exception(f'source_info_url response:\n{content}')
        return
      videolinks[current_ep] = source_html_url
      # to avoid being blocked by spamming
      duration = random.uniform(0.2, 1)
      time.sleep(duration)

    return videolinks
Example #7
 def fetch(self, download_link, save_loc, mode):
   logging.debug("Download link: %s", download_link)
   wget_command = WgetCommand(download_link, save_loc, [self.browser_req, self.referer])
   curl_command = CurlCommand(download_link, save_loc, [self.browser_req, self.referer])
   returncode = wget_command.run(mode)
   if returncode > 0:
     returncode = curl_command.run(mode)
   return returncode
Example #8
def main():
    logging.basicConfig(
        format='%(funcName)s:%(lineno)d %(levelname)s %(message)s',
        level=logging.DEBUG)
    drive_id = '14BzAsfL5ZOC8oH2pWgjOjilEapUWeDqH'
    uploader = GdriveUploader(4242, drive_id)
    response = uploader.upload('/Users/anupamghosh/Movies/Anohana/ano.mp4')
    logging.debug(f'response={response}')
Example #9
    def balance_home_wins(self, df):
        _, counts = np.unique(df["outcome"].values, return_counts=True)
        frac = np.around(1 - ((counts[0] + counts[2]) / 2) / counts[2], 2)
        logging.debug(f"Flip games with fraction: {frac}")

        flip_df = self.get_subset_of_home_wins(df, frac=frac)
        flip_df = self.switch_home_and_away(flip_df)
        df.update(flip_df)
        return df
Example #10
    def get_dataset(self):
        df = self.load_dataset()
        df["outcome"] = np.sign(df["FTHG"] - df["FTAG"])

        X = self.get_feature_matrix(df)
        X = X.dropna()
        y = df.loc[X.index, self.label]
        logging.debug(f"Dataset size: {X.shape}")
        return X, y
Example #11
 def _func(*args, **kwargs):
     begin_time = time.time()
     result = func(*args, **kwargs)
     end_time = time.time()
     logging.debug("%s.%s COST %.2f ms", 
                   func.__module__, func.__name__, 
                   1000 * (end_time - begin_time))
     
     return result
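
This is only the inner wrapper of a timing decorator; the enclosing function is elided. A minimal sketch of the full shape (the name profiled and the functools.wraps call are assumptions):

import time
import logging
import functools

def profiled(func):
    @functools.wraps(func)  # keep the wrapped function's name and docstring
    def _func(*args, **kwargs):
        begin_time = time.time()
        result = func(*args, **kwargs)
        end_time = time.time()
        logging.debug("%s.%s COST %.2f ms",
                      func.__module__, func.__name__,
                      1000 * (end_time - begin_time))
        return result
    return _func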
Example #12
def read_file(fname):
    def read_it(encoding):
        with open(fname, 'r', encoding=encoding) as f:
            return f.read().splitlines()

    try:
        logging.info('Reading {}'.format(fname))
        return read_it('utf-8')
    except UnicodeDecodeError:
        logging.debug('(error while reading file, trying latin-1)')
        return read_it('latin-1')
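
A possible usage sketch (the file name is hypothetical); UTF-8 is tried first, and latin-1, which can decode any byte sequence, is the fallback:

for line in read_file('wordlist.txt'):
    print(line)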
Example #14
 def download(self, target_url, save_loc, mode):
   logging.debug("Source html url: %s", target_url)
   source_html = self.get_source_html(target_url)
   html_name = PurePath(save_loc).name
   self.store_source_html(source_html, html_name, target_url)
   link = self.parse_link(source_html)
   path = f'{save_loc}.mp4'
   returncode = self.fetch(link, path, mode)
   
   if returncode == 0:
     self.notify_download(path)
   return returncode
Example #15
def get_opened_files(log_errors=True):
    for pid in psutil.pids():
        try:
            yield (file[0] for file in psutil.Process(pid).open_files())
        except psutil.AccessDenied as e:
            if log_errors:
                logging.error("Access denied while getting process opened files")
                logging.exception(e)
        except psutil.NoSuchProcess:
            logging.debug("Process no longer exists")
        except Exception as e:
            if log_errors:
                logging.exception(e)
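
The generator yields one inner generator per process; a possible way to consume it (sketch), flattening everything into a single set of open file paths:

open_paths = {path
              for process_files in get_opened_files(log_errors=False)
              for path in process_files}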
Example #16
    def get_by_rfc(self, rfc):
        stmt = ("SELECT "
                "   id, "
                "   rfc, "
                "   ws_user, "
                "   ws_password "
                "FROM payment_company "
                "WHERE rfc = %(rfc)s ")
        payment_companies = self.db_manager.select(stmt, {'rfc': rfc})
        for payment_company in payment_companies:
            logging.debug(payment_company)
            # only the first matching company is returned
            return payment_company
Example #17
 def read_meta(self):
     currencies = UNITS.keys()  # ['EUR', 'USD']
     usecols = ','.join([self._XLSX_CONF['columns'][c] for c in currencies])
     logging.debug('reading names')
     names = pd.read_excel(self.excel_file,
                           skiprows=self._XLSX_CONF['rows']['names'],
                           usecols=usecols,
                           names=currencies,
                           nrows=1,
                           header=None)
     logging.debug('reading quantities')
     quantities = pd.read_excel(
         self.excel_file,
         skiprows=self._XLSX_CONF['rows']['quantity'],
         usecols=usecols,
         names=currencies,
         nrows=1,
         header=None)
     logging.debug('reading codes')
     codes = pd.read_excel(self.excel_file,
                           skiprows=self._XLSX_CONF['rows']['code'],
                           usecols=usecols,
                           names=currencies,
                           nrows=1,
                           header=None)
     for c in currencies:
         currency_exists = self.db_session.query(
             exists().where(RefCurrency.code == codes[c][0])).scalar()
         if not currency_exists:
             rc = RefCurrency(name=names[c][0],
                              quantity=quantities[c][0],
                              code=codes[c][0])
             self.db_session.add(rc)
     logging.debug('committing changes')
     self.db_session.commit()
Example #18
    async def connect(self, payload: dict):
        reader, writer = await asyncio.open_connection(self.ip, self.port)

        logging.debug(f'Sending message {payload}')
        writer.write(json.dumps(payload).encode())
        await writer.drain()

        response = await reader.read(100)
        logging.debug(f'Received: {response.decode()!r}')

        # Close the connection
        writer.close()
        await writer.wait_closed()
        return response
Example #19
    def resize_image(self,
                     image_path,
                     image_width,
                     image_height,
                     transparency):
        """Resize image.

        :param image_path: Path to the source icon.
        :param image_width: Width of the icon.
        :param image_height: Height of the icon.
        :param transparency: Whether to add transparency or not.

        :returns:
            Path to the resized image.
        """

        resized_file = tempfile.NamedTemporaryFile(
                    prefix='resized_',
                    suffix='.' + os.path.splitext(image_path)[1][1:],
                    delete=False)
        resized_path = resized_file.name

        # Adding transparency to make a square image
        if transparency:
            args_string = "%s %s -gravity center -background transparent " \
                            "-extent %dx%d %s" % (self.converttool,
                                        image_path,
                                        image_width,
                                        image_height,
                                        resized_path)
        # Resizing square image to the closest supported size
        else:
            args_string = "%s %s -resize %dx%d %s" % (
                                        self.converttool,
                                        image_path,
                                        image_width,
                                        image_height,
                                        resized_path)

        args = args_string.split()

        logging.debug('Conversion call arguments: %r' % (args))

        try:
            subprocess.check_output(args,
                                    stderr = subprocess.STDOUT)
        except subprocess.CalledProcessError, e:
            raise ConversionError('Failed to resize image %s' % (e.output))
Example #20
  def get_episodes_html(self):
    path_matched = re.search(r".*\.(\w+)\/(\w+)", self.base_path)
    servers_id = path_matched.group(1)
    episode_id = path_matched.group(2)
    episode_url = NineAnime.EPISODES_URL
    content = self.request.get(episode_url, {'id': servers_id, 'episode': episode_id})

    try:
      html_episodes = content
      path = self.anime_html_filepath()
      with open(path, 'w') as html_text:
        html_text.write(html_episodes)
    except Exception:
      logging.debug("content:\n%s", content)
      raise
Example #21
    async def run_background(self):
        cmd = str(self)
        process = await asyncio.create_subprocess_shell(
            cmd,
            stdout=asyncio.subprocess.PIPE,
            stderr=asyncio.subprocess.PIPE)

        # stdout/stderr are asyncio.StreamReader objects; read them directly
        for out in [process.stderr, process.stdout]:
            while not out.at_eof():
                line = await out.readline()
                logging.debug(line)

        await process.communicate()
        return process.returncode
Example #22
def http_request(url):
    """ Sends an HTTP GET request and returns the received response.
    """
    response = False
    conn = httplib.HTTPConnection(settings.DOMAIN, settings.PORT)
    conn.putrequest("GET", url)

    try:
        conn.endheaders()
    except Exception:
        logging.debug('Failed connecting to server.')
        logging.debug(sys.exc_info())
    else:
        response = conn.getresponse()
        conn.close()
    
    return response
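
httplib and the bare except mark this as Python 2. A rough Python 3 equivalent with http.client (sketch; the function name is hypothetical, settings is the same assumed configuration object as above, and request() replaces the putrequest/endheaders pair):

import sys
import logging
import http.client

def http_request_py3(url):
    """Send an HTTP GET request and return the response, or False on failure."""
    response = False
    conn = http.client.HTTPConnection(settings.DOMAIN, settings.PORT)
    try:
        conn.request('GET', url)
    except OSError:
        logging.debug('Failed connecting to server.')
        logging.debug(sys.exc_info())
    else:
        response = conn.getresponse()
        conn.close()
    return response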
Example #23
def move_ready_files_to_temp_dir_and_sync(s3_buckets):
    with TemporaryDirectory() as temporary_directory:
        buckets_to_sync = set()  # a set avoids syncing the same bucket twice

        # this step moves files to temporary directory
        for bucket in s3_buckets:
            bucket_directory = os.path.join(S3_SYNC_BUCKETS_FOLDER, bucket)
            for directory, subdirectories, files in os.walk(bucket_directory):
                if files:
                    logging.debug(directory)
                    logging.debug(files)
                    move_ready_files_to_temp_dir(bucket, directory, files, temporary_directory)
                    buckets_to_sync.add(bucket)

        # this step syncs buckets to s3. aws cli is used for convenience
        for bucket in buckets_to_sync:
            sync_bucket_folder_and_delete_files(bucket, temporary_directory)
Example #24
def restore_from_dir(openshift_client, directory, resources):
    for resource_kind in os.listdir(directory):
        log.debug(resources)
        if resource_kind in resources:
            resource_kind_dir = directory + '/' + resource_kind
            for single_resource in os.listdir(resource_kind_dir):
                full_path = '{0}/{1}'.format(resource_kind, single_resource)
                log.info('Restoring {}'.format(full_path))
                try:
                    with open(resource_kind_dir + '/' + single_resource,
                              'r') as f:
                        resource_yaml = yaml.safe_load(f)
                    openshift_client.create_resource(resource_kind,
                                                     resource_yaml,
                                                     args.restore_project_name)
                except ApiException as err:
                    log.error('Unable to restore {0}'.format(full_path))
                    log.debug(err)
Example #25
    def convert(self, 
                image_list, 
                target_format, 
                target_path):
        """Convert a list of image files to an ico/icns file.

        :param image_list:
            List of image files to convert (either local paths or URLs).
        :param target_format:
            Target format. Must be one of ``FORMAT_ICO`` and ``FORMAT_ICNS``.
        :param target_path:
            Target path of the conversion.
        """

        # Validate the input arguments.
        if target_format not in Converter.SUPPORTED_TARGET_FORMATS:
            raise ConversionError('invalid target format identifier: %s' % (
                target_format))

        if len(image_list) == 0:
            raise ValueError('image input list cannot be empty')

        # Make sure that all input files are stored locally and as PNGs.
        # image_list can contain either a local path or an http url
        local_image_list = []
        for image_location in image_list:
            if ((image_location.startswith("http:")) or
                (image_location.startswith("https:"))):

                # Skip invalid/corrupt URLs
                try:
                    image_location = self.fetch_image(image_location)
                except requests.exceptions.HTTPError, e:
                    err = 'Could not retrieve image: %s' % str(e)
                    self.notices.append(err)
                    logging.debug(err)
                    continue
                except ImageError, e:
                    err = 'Could not save image: %s' % str(e)
                    self.notices.append(err)
                    logging.debug(err)
                    continue
Example #26
 def parse_market(self):
     offset = 0
     try:
         logging.info("parsing market pairs")
         while True:
             # pagination: fetch 500 pairs per call; the loop ends when the
             # API rejects the out-of-range offset with a "Bad Request"
             pairs = self.api_instance.get_pairs(limit=500, offset=offset)
             for pair in pairs:
                 self.pairs.append(pair)
                 if pair["quote_asset_symbol"] == self.BNB:
                     self.bnb_pairs[pair["base_asset_symbol"]] = pair["lot_size"]
                 elif pair["quote_asset_symbol"] == self.BUSD:
                     self.busd_pairs[pair["base_asset_symbol"]] = pair["lot_size"]
             offset += 500
             time.sleep(1)
     except ApiException as e:
         if e.reason == "Bad Request":
             logging.info("parsing finished, %s market pairs" % len(self.pairs))
             logging.debug("bnb pairs: %s" % self.bnb_pairs)
             logging.debug("busd pairs: %s" % self.busd_pairs)
         else:
             logging.info("Exception when calling DefaultApi->getPairs: %s\n" % e)
Example #27
  def make_request(self, url, params):
    params = params or {}
    reg_match = re.match(r"https:\/\/([^\/]*)([^?]*)\??(.*)", url)
    domain = reg_match.group(1)
    path = reg_match.group(2)
    temp_params = reg_match.group(3)
    if len(temp_params) > 1:
      temp_params = re.findall(r"([^=]+)=([^&]+)&", temp_params + '&')
      params.update({key: val for key, val in temp_params})
    if len(params):
      path += '?' + urllib.parse.urlencode(params)

    con = HTTPSConnection(domain)
    logging.debug("Requesting url: https://%s%s", domain, path)
    headers = self.headers.copy()
    headers['cookie'] = '; '.join(self.cookies)
    con.request('GET', path, None, headers)
    res = con.getresponse()
    self._res_text = res.read().decode('utf-8')
    con.close()
    return res
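
The regex-based URL splitting and query merging above can also be expressed with the standard library. A sketch with urllib.parse (split_url is a hypothetical helper name):

from urllib.parse import parse_qsl, urlencode, urlsplit

def split_url(url, params=None):
    # returns (domain, path-with-query), mirroring the regex code above
    parts = urlsplit(url)
    merged = dict(parse_qsl(parts.query))
    merged.update(params or {})
    path = parts.path
    if merged:
        path += '?' + urlencode(merged)
    return parts.netloc, path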
Example #28
    def __init__(self, remote, working_dir, private_key=None):
        log.info('Cloning git repo: {0} into: {1}'.format(remote, working_dir))
        self.remote = remote
        self.working_dir = working_dir
        self.repo = Repo.init(working_dir)

        if private_key is not None:
            ssh_cmd = 'ssh -i {0}'.format(private_key)
            log.debug('ssh_cmd: {}'.format(ssh_cmd))
            self.repo.git.update_environment(GIT_SSH_COMMAND=ssh_cmd)

        self.origin = self.repo.create_remote('origin', self.remote)
        self.origin.fetch()
        self.repo.create_head('master',
                              self.origin.refs.master).set_tracking_branch(
                                  self.origin.refs.master).checkout()

        with self.repo.config_writer() as cw:
            cw.add_section('user')
            cw.set('user', 'email', '*****@*****.**')
            cw.set('user', 'name', 'OpenShift Backup-Script')
Example #29
    def convert_to_png32(self,
                         source_path,
                         target_path):
        """Convert a source image to a 32 bit PNG image.

        :param source_path: Path of the source image.
        :param target_path: Path of the target image.
        :raises ConversionError: if conversion fails.
        """

        logging.debug('Converting input image to 32-bit PNG: %s -> %s' % (
            source_path, target_path))
        # Perform the conversion.
        try:
            subprocess.check_output([
                    self.converttool,
                    source_path,
                    'png32:%s' % (target_path)
                ], stderr = subprocess.STDOUT)
        except subprocess.CalledProcessError, e:
            raise ConversionError('Failed to convert input file to 32-bit PNG: %s' % (
                e.output))
Example #30
def create_tar_archives(bucket, directory, files, temporary_directory):
    logging.debug(bucket)
    logging.debug(directory)
    remaining_files_to_archive = files
    s3_file_prefix = get_s3_file_prefix(bucket, directory, S3_BATCH_BUCKETS_FOLDER)

    elasticsearch_docs = []
    while remaining_files_to_archive:
        archive_files = get_batch_files(directory, remaining_files_to_archive)
        remaining_files_to_archive = remaining_files_to_archive[len(archive_files):]
        archive_files = filter_opened_files(archive_files, S3_BATCH_BUCKETS_FOLDER)

        if archive_files:
            s3_object_key = create_batch_s3_key(s3_file_prefix)
            tar_info = create_archive(bucket, s3_object_key, archive_files, temporary_directory)
            elasticsearch_docs += get_batch_elasticsearch_docs(bucket, s3_object_key, tar_info)

            for archive_file in archive_files:
                os.remove(archive_file)

            logging.info(f"Archive created: {os.path.basename(tar_info['name'])}, {get_file_size_mb(tar_info['name'])}MB, {len(archive_files)} files")
    return elasticsearch_docs
Example #31
    def read_data(self):
        columns = self._XLSX_CONF['columns'].keys()  # ['date', 'EUR', 'USD']
        logging.debug('reading data')
        data = pd.read_excel(self.excel_file,
                             skiprows=self._XLSX_CONF['rows']['values'],
                             usecols=','.join(
                                 self._XLSX_CONF['columns'].values()),
                             header=None,
                             names=columns)
        logging.debug('starting data insertion')

        eur_id = self.db_session.query(RefCurrency).filter(
            RefCurrency.code == 'EUR').one().id
        usd_id = self.db_session.query(RefCurrency).filter(
            RefCurrency.code == 'USD').one().id

        for _, row in data.iterrows():
            if not pd.isna(row['EUR']):
                rate_exists = self.db_session.query(
                    exists().where(Rate.currency_id == eur_id).where(
                        Rate.date == row['date'])).scalar()
                if not rate_exists:
                    rate_eur = Rate(currency_id=eur_id,
                                    rate=row['EUR'],
                                    date=row['date'].to_pydatetime())
                    self.db_session.add(rate_eur)
            if not pd.isna(row['USD']):
                rate_exists = self.db_session.query(
                    exists().where(Rate.currency_id == usd_id).where(
                        Rate.date == row['date'])).scalar()
                if not rate_exists:
                    rate_usd = Rate(currency_id=usd_id,
                                    rate=row['USD'],
                                    date=row['date'].to_pydatetime())
                    self.db_session.add(rate_usd)

        logging.debug('committing changes')
        self.db_session.commit()
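
The EUR and USD blocks differ only in the currency id and column name; a possible refactor into one helper (sketch; _insert_rate is a hypothetical name, the Rate model and session are the same as above):

    def _insert_rate(self, currency_id, row, column):
        if pd.isna(row[column]):
            return
        # skip rates that are already stored for this currency and date
        rate_exists = self.db_session.query(
            exists().where(Rate.currency_id == currency_id).where(
                Rate.date == row['date'])).scalar()
        if not rate_exists:
            self.db_session.add(Rate(currency_id=currency_id,
                                     rate=row[column],
                                     date=row['date'].to_pydatetime()))

The loop body then reduces to self._insert_rate(eur_id, row, 'EUR') and self._insert_rate(usd_id, row, 'USD').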
Example #32
    def _read(self,
              directory,
              relative_path="",
              recursive=True,
              root_dir=False):
        # Avoid unexpected results
        if not os.path.isdir(directory):
            logging.debug("Nothing to do")

            return

        for i_directory in os.listdir(directory):
            logging.debug(i_directory)

            if "recibos" == i_directory and root_dir is False:
                #print("Ya llegué a los recibos")
                logging.debug("All is done in this path")

                continue

            if "recibos" == i_directory and root_dir is True:
                logging.debug(
                    "Is on root directory, let's try to check if has archives")
                self.directories[i_directory] = []

                continue

            if ".DS_Store" == i_directory:

                logging.debug("There's no directory here")

                continue

            if "txt" in i_directory:
                #print("Ya llegue a los archivos")
                logging.debug("There's in the final folder")
                break

            if "pdf" in i_directory:
                logging.debug("There's in the final folder")
                break

            if "xml" in i_directory:
                logging.debug("There's in the final folder")
                break

            if root_dir is True:
                logging.info("Root directory")
                self.directories[i_directory] = []

            if root_dir is False:
                logging.info("Relative path: %s" % relative_path)
                self.directories[relative_path].append(i_directory)

            if recursive is True:
                i_dir = directory + "/" + i_directory
                i_dir = i_dir.replace("//", "/")
                self._read(i_dir, root_dir=False, relative_path=i_directory)
Example #33
for fname in sorted(glob('{}/*.txt'.format(PWD_LISTS))):
    n = 0
    # we use a dict to get O(1) membership tests, instead of O(n) with a list
    seen = {}

    for line in read_file(fname):
        clear_text_password = line.strip()
        if len(clear_text_password) > 0 and clear_text_password not in seen:
            # yes, files have duplicates and we don't want them
            cur.execute('execute p_select(%(clear_text_password)s)',
                        {'clear_text_password': clear_text_password})
            if cur.rowcount == 0:
                seen[clear_text_password] = True
                n += 1
                if n % 10000 == 0:
                    logging.debug('Queued {} passwords'.format(n))
    logging.debug('Queued {} passwords'.format(n))

    stream = StringIO()
    writer = csv.writer(stream,
                        delimiter='\t',
                        escapechar='\\',
                        quotechar=None,
                        doublequote=False)
    for clear_text_password in seen:
        t = (clear_text_password,)
        writer.writerow(t)
    stream.seek(0)

    if args.keep:
        dname = 'DEBUG_{}.csv'.format(os.path.basename(fname))
Example #34
                    type=str,
                    dest='hash_value',
                    required=True)

args = parser.parse_args()

if args.hash_type == 'auto':
    args.hash_type = {
        32: 'md5',
        40: 'sha1',
        56: 'sha224',
        64: 'sha256',
        96: 'sha384',
        128: 'sha512',
    }[len(args.hash_value)]
    logging.debug('Assuming {} hash'.format(args.hash_type))


conn = psycopg2.connect('dbname=rainbows user=rainbows')
cur = conn.cursor()


cur.execute('select clear_text_password'
            ' from rainbows'
            ' where digest(clear_text_password, %(h_type)s) = %(h_value)s',
            {
                'h_type': args.hash_type,
                'h_value': '\\x{}'.format(args.hash_value),
            })

if cur.rowcount == 0:
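
The hash comparison is delegated to Postgres' digest() here; the same check can be done locally with hashlib (sketch; digest_matches is a hypothetical helper):

import hashlib

def digest_matches(clear_text_password, hash_type, hash_value):
    # hash_type is one of md5/sha1/sha224/sha256/sha384/sha512,
    # matching the length-based inference above
    digest = hashlib.new(hash_type, clear_text_password.encode('utf-8'))
    return digest.hexdigest() == hash_value.lower()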
Example #35
 def __exit__(self, _type, value, traceback):
     end_time = time.time()
     logging.debug("%s COST %.2f ms", self.profiler_name, 1000 * (end_time - self.begin_time))
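
This __exit__ is presumably one half of a small timing context manager; a minimal sketch of the whole class (every name besides the __exit__ body is an assumption):

import time
import logging

class Profiler:
    def __init__(self, profiler_name):
        self.profiler_name = profiler_name

    def __enter__(self):
        self.begin_time = time.time()
        return self

    def __exit__(self, _type, value, traceback):
        end_time = time.time()
        logging.debug("%s COST %.2f ms", self.profiler_name,
                      1000 * (end_time - self.begin_time))

Usage would be: with Profiler('load_dataset'): run the code to be timed inside the block.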