Example #1
def _create_test_rings(path):
    testgz = os.path.join(path, 'object.ring.gz')
    intended_replica2part2dev_id = [
        [0, 1, 2, 3, 4, 5, 6],
        [1, 2, 3, 0, 5, 6, 4],
        [2, 3, 0, 1, 6, 4, 5],
    ]
    intended_devs = [
        {'id': 0, 'device': 'sda', 'zone': 0, 'ip': '127.0.0.0', 'port': 6000},
        {'id': 1, 'device': 'sda', 'zone': 1, 'ip': '127.0.0.1', 'port': 6000},
        {'id': 2, 'device': 'sda', 'zone': 2, 'ip': '127.0.0.2', 'port': 6000},
        {'id': 3, 'device': 'sda', 'zone': 4, 'ip': '127.0.0.3', 'port': 6000},
        {'id': 4, 'device': 'sda', 'zone': 5, 'ip': '127.0.0.4', 'port': 6000},
        {'id': 5, 'device': 'sda', 'zone': 6,
         'ip': 'fe80::202:b3ff:fe1e:8329', 'port': 6000},
        {'id': 6, 'device': 'sda', 'zone': 7,
         'ip': '2001:0db8:85a3:0000:0000:8a2e:0370:7334', 'port': 6000},
    ]
    intended_part_shift = 30
    with closing(GzipFile(testgz, 'wb')) as f:
        pickle.dump(
            ring.RingData(intended_replica2part2dev_id,
                          intended_devs, intended_part_shift),
            f)

    testgz = os.path.join(path, 'object-1.ring.gz')
    with closing(GzipFile(testgz, 'wb')) as f:
        pickle.dump(
            ring.RingData(intended_replica2part2dev_id,
                          intended_devs, intended_part_shift),
            f)
    return
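A small read-back sketch that simply mirrors the pickle/GzipFile pair used above; `path` is the same temporary directory the helper writes into:

def _load_test_ring(path):
    with closing(GzipFile(os.path.join(path, 'object.ring.gz'), 'rb')) as f:
        return pickle.load(f)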
Example #2
    def iter_hashes(self, name='RECORD'):
        '''Iterate over the files and hashes of a RECORD file.

        The RECORD file with the given name will be iterated over
        yielding a three tuple with each iteration: filename (relative
        to the package), computed hash (just calculated), and expected
        hash (from RECORD file).
        '''
        hashless = [posixpath.join(self.dist_info, name + ext)
                    for ext in ['', '.jws', '.p7s']]
        path = posixpath.join(self.dist_info, name)
        with closing(self.open(path)) as record_file:
            for row in csv.reader(record_file):
                filename, hashspec = row[:2]
                if not hashspec:
                    if filename not in hashless:
                        yield filename, None, None
                    continue
                algo, expected_hash = hashspec.split('=', 1)
                hash = hashlib.new(algo)
                with closing(self.open(filename, 'rb')) as file:
                    while True:
                        data = file.read(4096)
                        if not data:
                            break
                        hash.update(data)
                hash = base64.urlsafe_b64encode(hash.digest()).rstrip('=')
                yield filename, hash, expected_hash
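A hedged usage sketch for the generator above: collect the entries whose freshly computed hash disagrees with the RECORD entry. The `wheel_file` variable is only a placeholder for whatever object exposes iter_hashes():

def find_mismatches(wheel_file):
    # (filename, computed, expected) triples; both hashes are None for unhashed entries
    return [filename
            for filename, computed, expected in wheel_file.iter_hashes()
            if expected is not None and computed != expected]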
Example #3
File: test_wsgi.py Project: Taejun/swift
def _fake_rings(tmpdir):
    account_ring_path = os.path.join(tmpdir, 'account.ring.gz')
    with closing(GzipFile(account_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': 6012},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': 6022}], 30),
                    f)
    container_ring_path = os.path.join(tmpdir, 'container.ring.gz')
    with closing(GzipFile(container_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': 6011},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': 6021}], 30),
                    f)
    object_ring_path = os.path.join(tmpdir, 'object.ring.gz')
    with closing(GzipFile(object_ring_path, 'wb')) as f:
        pickle.dump(ring.RingData([[0, 1, 0, 1], [1, 0, 1, 0]],
                    [{'id': 0, 'zone': 0, 'device': 'sda1', 'ip': '127.0.0.1',
                      'port': 6010},
                     {'id': 1, 'zone': 1, 'device': 'sdb1', 'ip': '127.0.0.1',
                      'port': 6020}], 30),
                    f)
Example #4
def test_ascii():
    """ Test ascii/unicode bar """
    # Test ascii autodetection
    with closing(StringIO()) as our_file:
        with tqdm(total=10, file=our_file, ascii=None) as t:
            assert t.ascii  # TODO: this may fail in the future

    # Test ascii bar
    with closing(StringIO()) as our_file:
        for _ in tqdm(_range(3), total=15, file=our_file, miniters=1,
                      mininterval=0, ascii=True):
            pass
        our_file.seek(0)
        res = our_file.read().strip("\r").split("\r")
    assert '7%|6' in res[1]
    assert '13%|#3' in res[2]
    assert '20%|##' in res[3]

    # Test unicode bar
    with closing(UnicodeIO()) as our_file:
        with tqdm(total=15, file=our_file, ascii=False, mininterval=0) as t:
            for _ in _range(3):
                t.update()
        our_file.seek(0)
        res = our_file.read().strip("\r").split("\r")
    assert "7%|\u258b" in res[1]
    assert "13%|\u2588\u258e" in res[2]
    assert "20%|\u2588\u2588" in res[3]
Example #5
def test_list_inconsistent_obj(ctx, manager, osd_remote, pg, acting, osd_id,
                               obj_name, obj_path):
    mon = manager.controller
    pool = 'rbd'
    omap_key = 'key'
    omap_val = 'val'
    manager.do_rados(mon, ['-p', pool, 'setomapval', obj_name,
                           omap_key, omap_val])
    messup = MessUp(manager, osd_remote, pool, osd_id, obj_name, obj_path,
                    omap_key, omap_val)
    for test in [messup.rm_omap, messup.add_omap, messup.change_omap,
                 messup.append, messup.truncate, messup.change_obj,
                 messup.remove]:
        with test() as checks:
            deep_scrub(manager, pg)
            cmd = 'rados list-inconsistent-pg {pool} ' \
                  '--format=json'.format(pool=pool)
            with contextlib.closing(StringIO()) as out:
                mon.run(args=cmd.split(), stdout=out)
                pgs = json.loads(out.getvalue())
            assert pgs == [pg]

            cmd = 'rados list-inconsistent-obj {pg} ' \
                  '--format=json'.format(pg=pg)
            with contextlib.closing(StringIO()) as out:
                mon.run(args=cmd.split(), stdout=out)
                objs = json.loads(out.getvalue())
            assert len(objs['inconsistents']) == 1

            checker = InconsistentObjChecker(osd_id, acting, obj_name)
            inc_obj = objs['inconsistents'][0]
            log.info('inc = %r', inc_obj)
            checker.basic_checks(inc_obj)
            for check in checks:
                checker.run(check, inc_obj)
Example #6
File: util.py Project: bchess/mrjob
def unarchive(archive_path, dest):
    """Extract the contents of a tar or zip file at *archive_path* into the
    directory *dest*.

    :type archive_path: str
    :param archive_path: path to archive file
    :type dest: str
    :param dest: path to directory where archive will be extracted

    *dest* will be created if it doesn't already exist.

    tar files can be gzip compressed, bzip2 compressed, or uncompressed. Files
    within zip files can be deflated or stored.
    """
    if tarfile.is_tarfile(archive_path):
        with contextlib.closing(tarfile.open(archive_path, 'r')) as archive:
            archive.extractall(dest)
    elif zipfile.is_zipfile(archive_path):
        with contextlib.closing(zipfile.ZipFile(archive_path, 'r')) as archive:
            for name in archive.namelist():
                # the zip spec specifies that front slashes are always
                # used as directory separators
                dest_path = os.path.join(dest, *name.split('/'))

                # now, split out any dirname and filename and create
                # one and/or the other
                dirname, filename = os.path.split(dest_path)
                if dirname and not os.path.exists(dirname):
                    os.makedirs(dirname)
                if filename:
                    with open(dest_path, 'wb') as dest_file:
                        dest_file.write(archive.read(name))
    else:
        raise IOError('Unknown archive type: %s' % (archive_path,))
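A minimal usage sketch with placeholder paths; tarfile.open(..., 'r') auto-detects gzip/bzip2 compression, so both calls go through the branch logic shown above:

unarchive('/tmp/job.tar.gz', '/tmp/job')   # extracted via tarfile
unarchive('/tmp/job.zip', '/tmp/job')      # members written one by one via zipfile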
Example #7
def database(timestamp, channel, epg, out_file):

    rec_date = datetime.datetime.fromtimestamp(
        int(timestamp)).strftime('%Y.%m.%d')
    rec_time = datetime.datetime.fromtimestamp(
        int(timestamp)).strftime('%H:%M')
    pubdate = formatdate(time.time(), True)
    title = epg[1]
    if epg[2] == '':
        description = epg[3]
    else:
        description = epg[2] + '\n\n' + epg[3]
    audiofile = os.path.basename(out_file)
    length, length_bytes = audiolength(out_file)

    with closing(MySQLdb.connect(
            login.DB_HOST, login.DB_USER,
            login.DB_PASSWORD, login.DB_DATABASE)) as connection:
        with closing(connection.cursor()) as cursor:

            cursor.execute('INSERT INTO recordings \
            (date, time, channel, title, description, audiofile, timestamp, \
            length, bytes, pubdate) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)',
                    (rec_date, rec_time, channel, title, description,
                     audiofile, timestamp, length, length_bytes, pubdate))

            connection.commit()
Example #8
def name_to_config(template):
    """Read template file into a dictionary to use as base for all samples.

    Handles well-known template names, pulled from GitHub repository and local
    files.
    """
    if objectstore.is_remote(template):
        with objectstore.open(template) as in_handle:
            config = yaml.load(in_handle)
        with objectstore.open(template) as in_handle:
            txt_config = in_handle.read()
    elif os.path.isfile(template):
        if template.endswith(".csv"):
            raise ValueError("Expected YAML file for template and found CSV, are arguments switched? %s" % template)
        with open(template) as in_handle:
            txt_config = in_handle.read()
        with open(template) as in_handle:
            config = yaml.load(in_handle)
    else:
        base_url = "https://raw.github.com/chapmanb/bcbio-nextgen/master/config/templates/%s.yaml"
        try:
            with contextlib.closing(urllib2.urlopen(base_url % template)) as in_handle:
                txt_config = in_handle.read()
            with contextlib.closing(urllib2.urlopen(base_url % template)) as in_handle:
                config = yaml.load(in_handle)
        except (urllib2.HTTPError, urllib2.URLError):
            raise ValueError("Could not find template '%s' locally or in standard templates on GitHub"
                             % template)
    return config, txt_config
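A hedged usage sketch; "freebayes-variant" is only an illustrative template name, not something this excerpt guarantees to exist:

config, txt_config = name_to_config("freebayes-variant")
print(sorted(config.keys()))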
Example #9
def api_request_native(url, data=None, method=None):
    request = urllib2.Request(url)
    if method:
        request.get_method = lambda: method
    try:
        request.add_header('Authorization', 'token ' + token_auth_string())
    except MissingTokenException:
        request.add_header('Authorization', 'Basic ' + base64.urlsafe_b64encode(basic_auth_string()))
    request.add_header('Accept', 'application/json')
    request.add_header('Content-Type', 'application/json')

    if data is not None:
        request.add_data(data)

    if settings.get('https_proxy'):
        opener = urllib2.build_opener(urllib2.HTTPHandler(), urllib2.HTTPSHandler(),
                                      urllib2.ProxyHandler({'https': settings.get('https_proxy')}))

        urllib2.install_opener(opener)

    try:
        with contextlib.closing(urllib2.urlopen(request)) as response:
            if response.code == 204: # No Content
                return None
            else:
                return json.loads(response.read())
    except urllib2.HTTPError as err:
        with contextlib.closing(err):
            raise SimpleHTTPError(err.code, err.read())
Example #10
def handle_ds_client(reader, writer, key):
    with closing(writer):
        loop = reader._loop
        bind_address = yield from get_host_name(writer)
        # Connect to client
        connection = yield from get_connection(key, loop)
        if not connection:
            return
        ds_reader, ds_writer = connection
        # Loop over reply/requests
        with closing(ds_writer):
            while not reader.at_eof() and not ds_reader.at_eof():
                # Read request
                request = yield from forward_giop_frame(
                    reader, ds_writer, bind_address)
                if not request:
                    break
                # Choose patch
                if ZMQ_SUBSCRIPTION_CHANGE in request:
                    patch = Patch.ZMQ
                else:
                    patch = Patch.NONE
                # Read reply_header
                reply = yield from forward_giop_frame(
                    ds_reader, writer, bind_address, patch=patch)
Example #11
 def setUp(self):
     self.TOO_LONG_RAW = ''.join(random.choice(string.ascii_uppercase)
                                 for _ in range(2*Util.MAX_BYTES))
     with closing(StringIO()) as buf:
         with closing(GzipFile('', 'wb', 9, buf)) as f:
             f.write(self.TOO_LONG_RAW)
         self.TOO_LONG_GZIP = base64.b64encode(buf.getvalue())
Example #12
def get_quote_info(ctx, pline, userdata):
    args = pline.trailing.split()
    args.pop(0)
    if args:
        try:
            quote_id = int(args.pop(0))
        except ValueError:
            ctx.command('/say Invalid argument.')
            return
    else:
        ctx.command('/say You must provide a quote id.')
        return

    init_quotes_db()
    with closing(sqlite3.connect(quote_db_filename)) as quote_db:
        with closing(quote_db.cursor()) as cursor:
            cursor.execute('''SELECT "id", "server", "channel", "date" FROM "quotes" WHERE "id"=?''', (quote_id,))
            row = cursor.fetchone()

    if row is None:
        ctx.command('/say That quote does not exist.')
        return

    quote_id, quote_server, quote_channel, quote_date = row
    date, time = quote_date.split()
    year, month, day = map(int, date.split('-'))
    hour, minute, second = map(int, time.split(':'))
    dt_utc_naive = datetime(year, month, day, hour, minute, second)
    dt = pytz.timezone('Europe/Berlin').fromutc(dt_utc_naive)
    ctx.command(u'/say \x02#{} infos:\x02 {} \x02@\x02 {} \x02at\x02 {}'.format(
        quote_id, quote_channel, quote_server, dt.strftime(u'%Y-%m-%d %H:%M')
    ).encode('utf-8'))
Example #13
def handle_db_client(reader, writer, key):
    with closing(writer):
        loop = reader._loop
        bind_address = yield from get_host_name(writer)
        # Connect to client
        connection = yield from get_connection(key, loop)
        if not connection:
            return
        db_reader, db_writer = connection
        # Loop over reply/requests
        with closing(db_writer):
            while not reader.at_eof() and not db_reader.at_eof():
                # Read request
                request = yield from forward_giop_frame(
                    reader, db_writer, bind_address)
                if not request:
                    break
                # Choose patch
                if IMPORT_DEVICE in request:
                    patch = Patch.IOR
                elif GET_CSDB_SERVER in request:
                    patch = Patch.CSD
                else:
                    patch = Patch.NONE
                # Read reply_header
                reply = yield from forward_giop_frame(
                    db_reader, writer, bind_address, patch=patch)
Example #14
def test_get_job(conn):
    job_source_name = "dummy-job-src"
    path = "/data/kpi_2.csv"
    job_type = 'dummy'
    filesize = 1060
    description = {"uri": path}
    description_json = json.dumps(description)

    with closing(conn.cursor()) as cursor:
        job_source_id = add_job_source(cursor, job_source_name, "dummy", '{}')

    enqueue_job(conn, job_type, description_json, filesize, job_source_id)

    conn.commit()

    with closing(conn.cursor()) as cursor:
        job = get_job(cursor)

    conn.commit()

    _, job_type, description, _, _ = job

    eq_(path, json.loads(description)["uri"])

    with closing(conn.cursor()) as cursor:
        job = get_job(cursor)

        eq_(job, None)
Example #15
def add_quote(ctx, pline, userdata):
    trailing = pline.trailing.split(None, 1)
    if len(trailing) <= 1:
        ctx.command('/say No quote given.')
        return
    _, quote = trailing

    quote = try_decode(quote)
    server = try_decode(ctx.server)
    channel = try_decode(ctx.channel)

    init_quotes_db()
    with closing(sqlite3.connect(quote_db_filename)) as quote_db:
        with closing(quote_db.cursor()) as cursor:
            cursor.execute(u'''
              INSERT INTO quotes
                ("line", "server", "channel", "date")
              VALUES
                (?, ?, ?, datetime('now', 'utc'))
              ''', (quote, server, channel)
            )
            insert_id = cursor.lastrowid
        quote_db.commit()

    ctx.command('/say Quote inserted (#{})'.format(insert_id))
Example #16
 def add_packages(self, connection, repository, packages):
     basedir = utils.get_path_from_url(repository.url)
     index_file = utils.get_path_from_url(
         self._get_url_of_metafile(repository, "Packages")
     )
     utils.ensure_dir_exist(os.path.dirname(index_file))
     index_gz = index_file + ".gz"
     count = 0
     # load existing packages
     self.get_packages(connection, repository, packages.add)
     with open(index_file, "wb") as fd1:
         with closing(gzip.open(index_gz, "wb")) as fd2:
             writer = utils.composite_writer(fd1, fd2)
             for pkg in packages:
                 filename = os.path.join(basedir, pkg.filename)
                 with closing(debfile.DebFile(filename)) as deb:
                     debcontrol = deb.debcontrol()
                 debcontrol.setdefault("Origin", repository.origin)
                 debcontrol["Size"] = str(pkg.filesize)
                 debcontrol["Filename"] = pkg.filename
                 for k, v in six.moves.zip(_CHECKSUM_METHODS, pkg.checksum):
                     debcontrol[k] = v
                 writer(debcontrol.dump())
                 writer("\n")
                 count += 1
     self.logger.info("saved %d packages in %s", count, repository)
     self._update_suite_index(repository)
Example #17
def test_waiting_locks(conn):
    job_source_name = "dummy-job-src"
    path = "/data/kpi_2.csv"
    job_type = 'dummy'
    filesize = 1060
    description_json = '{{"uri": "{}"}}'.format(path)

    with closing(conn.cursor()) as cursor:
        datasource = name_to_datasource(cursor, "dummy-src")

        job_source_id = get_job_source(cursor, job_source_name)

        if not job_source_id:
            job_source_id = add_job_source(cursor, job_source_name, "dummy", '{}')

    enqueue_job(conn, job_type, description_json, filesize, job_source_id)

    conn.commit()

    with closing(conn.cursor()) as cursor:
        job = get_job(cursor)

    conn.commit()

    with closing(conn.cursor()) as cursor:
        cursor.execute("select relation::regclass, * FROM pg_locks WHERE NOT GRANTED")

        rows = cursor.fetchall()

    job_id, job_type, description, size, parser_config = job

    eq_(path, json.loads(description)["uri"])
Example #18
 def recommend(self, uid, comment, title=None, t=None, ck=None):
     """recommend a uid with some comment. ck is optional, if
     not provided, we will try to fetch a ck."""
     
     t = t or 'W'
     if ck is None:
     ## get recommend ck
         url = "http://www.douban.com/j/recommend?type=%s&uid=%s&rec=" % (t,uid)
         with contextlib.closing(httplib.HTTPConnection("music.douban.com")) as conn:
             cookie =  'dbcl2="%s"; bid="%s"; ' % (self.dbcl2, self.bid)
             conn.request('GET', url, None, {'Cookie': cookie})
             result = conn.getresponse().read()
             ck = self.__parse_ck(result)
             
     if ck:
         post = {'ck':ck, 'comment':comment, 'novote':1, 'type':t, 'uid':uid}
         if title:
             post['title'] = title
         
         ## convert unicode chars to bytes
         data = urllib.urlencode(post)
         ## ck ?
         cookie = 'dbcl2="%s"; bid="%s"; ck=%s' % (self.dbcl2, self.bid, ck)
         accept = 'application/json'
         content_type= 'application/x-www-form-urlencoded; charset=UTF-8'
         header = {"Cookie": cookie, "Accept": accept,
                 "Content-Type":content_type, }
                 
         with contextlib.closing(httplib.HTTPConnection("www.douban.com")) as conn:
             conn.request('POST', "/j/recommend", data, header)
             conn.getresponse().read()
Example #19
	def start(self, *args, **kws):
		with\
				closing(socket.socket( socket.AF_INET,
					socket.SOCK_RAW, socket.getprotobyname('icmp') )) as self.ipv4,\
				closing(socket.socket( socket.AF_INET6,
					socket.SOCK_RAW, socket.getprotobyname('ipv6-icmp') )) as self.ipv6:
			return self._start(*args, **kws)
Example #20
def test_uncompress():
    # Create dummy file
    fd, temp = mkstemp()
    os.close(fd)
    # Create a zipfile
    dtemp = mkdtemp()
    ztemp = os.path.join(dtemp, 'test.zip')
    with contextlib.closing(zipfile.ZipFile(ztemp, 'w')) as testzip:
        testzip.write(temp)
    datasets.utils._uncompress_file(ztemp, verbose=0)
    assert(os.path.exists(os.path.join(dtemp, temp)))
    shutil.rmtree(dtemp)

    dtemp = mkdtemp()
    ztemp = os.path.join(dtemp, 'test.tar')
    with contextlib.closing(tarfile.open(ztemp, 'w')) as tar:
        tar.add(temp)
    datasets.utils._uncompress_file(ztemp, verbose=0)
    assert(os.path.exists(os.path.join(dtemp, temp)))
    shutil.rmtree(dtemp)

    dtemp = mkdtemp()
    ztemp = os.path.join(dtemp, 'test.gz')
    f = gzip.open(ztemp, 'wb')
    f.close()
    datasets.utils._uncompress_file(ztemp, verbose=0)
    assert(os.path.exists(os.path.join(dtemp, temp)))
    shutil.rmtree(dtemp)

    os.remove(temp)
Example #21
File: imghist.py Project: j6k4m8/ndstore
  def getHist(self):

    with closing (ocpcaproj.OCPCAProjectsDB()) as projdb:
      proj = projdb.loadToken(self.token)
    
    with closing (ocpcadb.OCPCADB(proj)) as db:
      ch = proj.getChannelObj(self.channel)

      # Get the source database sizes
      [[ximagesz, yimagesz, zimagesz], timerange] = proj.datasetcfg.imageSize(self.res)
      [xcubedim, ycubedim, zcubedim] = cubedim = proj.datasetcfg.getCubeDims()[self.res]
      [xoffset, yoffset, zoffset] = proj.datasetcfg.getOffset()[self.res]

      # Set the limits for iteration on the number of cubes in each dimension
      xlimit = (ximagesz-1) / xcubedim + 1
      ylimit = (yimagesz-1) / ycubedim + 1
      zlimit = (zimagesz-1) / zcubedim + 1

      hist_sum = np.zeros(self.numbins, dtype=np.uint32) 
      
      # sum the histograms 
      for z in range(zlimit):
        for y in range(ylimit):
          for x in range(xlimit):

            # cutout the data for the cube 
            data = db.cutout(ch, [ x*xcubedim, y*ycubedim, z*zcubedim], cubedim, self.res ).data
            
            # compute the histogram and store it 
            (hist, bins) = np.histogram(data[data > 0], bins=self.numbins, range=(0,self.numbins))
            hist_sum = np.add( hist_sum, hist )
            print "Processed cube {} {} {}".format(x,y,z)
        
      return (hist_sum, bins)
Example #22
    def _cache(fn, *args, **kwargs):
        if cache.disabled:
            return fn(*args, **kwargs)

        # A bit obscure, but simplest way to generate unique key for
        # functions and methods in python 2 and 3:
        key = "{}.{}".format(fn.__module__, repr(fn).split("at")[0])

        etag = ".".join(_get_mtime(name) for name in depends_on)
        cache_path = _get_cache_path()

        try:
            with closing(shelve.open(cache_path)) as db:
                if db.get(key, {}).get("etag") == etag:
                    return db[key]["value"]
                else:
                    value = fn(*args, **kwargs)
                    db[key] = {"etag": etag, "value": value}
                    return value
        except shelve_open_errors:
            # Caused when going from Python 2 to Python 3 and vice-versa
            warn("Removing possibly out-dated cache")
            os.remove(cache_path)

            with closing(shelve.open(cache_path)) as db:
                value = fn(*args, **kwargs)
                db[key] = {"etag": etag, "value": value}
                return value
Example #23
File: package.py Project: LLNL/conduit
    def write_easy_install_pth(self, exts):
        paths = []
        for ext in sorted(exts.values()):
            ext_site_packages = os.path.join(ext.prefix, self.site_packages_dir)
            easy_pth = "%s/easy-install.pth" % ext_site_packages

            if not os.path.isfile(easy_pth):
                continue

            with closing(open(easy_pth)) as f:
                for line in f:
                    line = line.rstrip()

                    # Skip lines matching these criteria
                    if not line: continue
                    if re.search(r'^(import|#)', line): continue
                    if (ext.name != 'py-setuptools' and
                        re.search(r'setuptools.*egg$', line)): continue

                    paths.append(line)

        site_packages = os.path.join(self.prefix, self.site_packages_dir)
        main_pth = "%s/easy-install.pth" % site_packages

        if not paths:
            if os.path.isfile(main_pth):
                os.remove(main_pth)

        else:
            with closing(open(main_pth, 'w')) as f:
                f.write("import sys; sys.__plen = len(sys.path)\n")
                for path in paths:
                    f.write("%s\n" % path)
                f.write("import sys; new=sys.path[sys.__plen:]; del sys.path[sys.__plen:]; "
                        "p=getattr(sys,'__egginsert',0); sys.path[p:p]=new; sys.__egginsert = p+len(new)\n")
Example #24
File: utils.py Project: guma44/GEOparse
def download_from_url(url, destination_path, force=False, aspera=False):
    """Download file from remote server

    :param url: path to the file on remote server (including file name)
    :param destination_path: path to the file on local machine (including file name)
    :param force: bool - if the file exists, force overwriting it; defaults to False
    """
    try:
        is_already_downloaded = os.path.isfile(destination_path)
        if is_already_downloaded:
            if force:
                with closing(urlopen(url)) as r:
                    with open(destination_path, mode='wb') as f:
                        stderr.write("Downloading %s to %s\n" % (url, destination_path))
                        copyfileobj(r, f)
            else:
                stderr.write("File already exist. Use force=True if you would like to overwrite it.\n")
        else:
            if aspera:
                download_aspera(url, destination_path)
            else:
                with closing(urlopen(url)) as r:
                    with open(destination_path, mode='wb') as f:
                        stderr.write("Downloading %s to %s\n" % (url, destination_path))
                        copyfileobj(r, f)
    except URLError:
        stderr.write("Cannot find file %s" % url)
Example #25
    def _play_proxy(self, media, player_name, args):
        """
        Load data with python requests and pipe data to a media player.

        We need this function for url that use redirection and cookies.
        This function is used if the non-standard,
        non-API compliant '_play_proxy' attribute of the 'media' object is defined and is True.
        """
        if args is None:
            for (binary, stdin_args) in PLAYERS:
                if binary == player_name:
                    args = stdin_args

        assert args is not None

        print(':: Play_proxy streaming from %s' % media.url)
        print(':: to %s %s' % (player_name, args))
        print(player_name + ' ' + args)
        proc = Popen(player_name + ' ' + args, stdin=PIPE, shell=True)

        # Handle cookies (and redirection 302...)
        session = requests.sessions.Session()

        with closing(proc.stdin):
            with closing(session.get(media.url, stream=True)) as response:
                for buffer in response.iter_content(8192):
                    try:
                        proc.stdin.write(buffer)
                    except:
                        print("play_proxy broken pipe. Can't write anymore.")
                        break
Example #26
 def test_entrypoint(self):
     # Test that the entrypoint fetching also works (even with dialects)
     # using the same configuration we used in setUp() but not using
     # the impl_sqlalchemy SQLAlchemyBackend class directly...
     with contextlib.closing(backends.fetch(self.db_conf)) as backend:
         with contextlib.closing(backend.get_connection()):
             pass
Example #27
File: utils.py Project: sproutman/thefuck
    def _cache(fn, *args, **kwargs):
        if cache.disabled:
            return fn(*args, **kwargs)

        # A bit obscure, but simplest way to generate unique key for
        # functions and methods in python 2 and 3:
        key = '{}.{}'.format(fn.__module__, repr(fn).split('at')[0])

        etag = '.'.join(_get_mtime(name) for name in depends_on)
        cache_path = _get_cache_path()

        try:
            with closing(shelve.open(cache_path)) as db:
                if db.get(key, {}).get('etag') == etag:
                    return db[key]['value']
                else:
                    value = fn(*args, **kwargs)
                    db[key] = {'etag': etag, 'value': value}
                    return value
        except dbm.error:
            # Caused when going from Python 2 to Python 3
            warn("Removing possibly out-dated cache")
            os.remove(cache_path)

            with closing(shelve.open(cache_path)) as db:
                value = fn(*args, **kwargs)
                db[key] = {'etag': etag, 'value': value}
                return value
Example #28
    def generate_index(self):
        """Generated the repository index file (`index.gz`)

        index.gz is the compressed sqlite index containing all of the succeeded
        packages in the repository pool.

        Return the number of packages added to the repository index.
        """
        assert exists(self.repository.path)
        idx_path = xjoin(self.repository.path, 'index')
        idx_gz_path = idx_path + '.gz'
        
        sh.rm(idx_path)
        db = RepoPackageDatabase(idx_path, touch=True)
        
        # Tag BE packages; so client may use it to determine if a package is
        # available only to BE customers or not.
        # See also: RepoPackage.requires_be_license property
        pkgtags = 'be' if self.repository.name == 'be' else ''

        with closing(db):
            LOG.debug('finding packages in %s', self.repository.path)
            packages = self.repository.find_packages()

            LOG.debug('processing %d packages', len(packages))
            rpkg_list = [
                RepoPackage.create_from(
                    BinaryPackage(**self._read_info_json(pkgfile)),
                    relpath=relpath(pkgfile, self.repository.path),
                    tags=pkgtags)
                for pkgfile in textui.ProgressBar.iterate(packages, note="Package")
            ]
            
            # Optimize index size by removing the "description" field.
            # PyPI's descriptions are typically very long - see
            # http://pypi.python.org/pypi/zc.buildout for example - hence we
            # must remove them from the index.
            for rpkg in rpkg_list:
                rpkg.description = ''

            # keep only the latest pkg_version in index
            LOG.debug("pruning older pkg_version's")
            rpkg_list = _prune_older_binary_releases(rpkg_list)
            LOG.debug('.. resulting in %d packages', len(rpkg_list))

            LOG.info('  writing index (please wait) ...')
            with db.transaction() as session:
                session.add_all(rpkg_list)
                session.commit()
                session.close()

        LOG.info('  compressing index: ...%s%s',
                 os.path.basename(idx_gz_path),
                 (' (%d)' % len(rpkg_list)) if rpkg_list else '')
        sh.rm(idx_gz_path)
        with closing(gzip.open(idx_gz_path, 'wb')) as f:
            f.write(open(idx_path, 'rb').read())
        sh.rm(idx_path)

        return len(rpkg_list)
Example #29
File: Events.py Project: alimg/insight
    def post(self):
        userid = SessionUtil.get_user_id(request.form['session'])
        if not userid:
            return {'status': ServerConstants.STATUS_INVALID_SESSION}

        eventid = request.form['eventid']

        with closing(ServerConstants.mysql_pool.get_connection()) as db:
            with closing(db.cursor()) as cursor:
                sql = "SELECT filename, type FROM `events` WHERE id='{}'".format(eventid)
                cursor.execute(sql)
                rows = cursor.fetchall()
                if rows:
                    filename = ServerConstants.STORAGE_DIR+rows[0][0]
                    event_type = rows[0][1]
                    if event_type == 'jpeg':
                        #output = StringIO()
                        #img = Image.open(filename)
                        #img.save(output, 'JPEG')
                        #output.seek(0)
                        #return send_file(output, mimetype='image/jpeg')
                        return send_file(filename, mimetype='image/jpeg')
                    elif event_type == 'ogg':
                        return send_file(filename, mimetype='audio/ogg')
                    elif event_type == 'h264':
                        return send_file(filename, mimetype='video/h264')
Example #30
File: utils.py Project: Googulator/thefuck
    def _cache(fn, *args, **kwargs):
        if cache.disabled:
            return fn(*args, **kwargs)

        # A bit obscure, but simplest way to generate unique key for
        # functions and methods in python 2 and 3:
        key = '{}.{}'.format(fn.__module__, repr(fn).split('at')[0])

        etag = '.'.join(_get_mtime(name) for name in depends_on)
        cache_dir = get_cache_dir()
        cache_path = Path(cache_dir).joinpath('thefuck').as_posix()

        try:
            with closing(shelve.open(cache_path)) as db:
                if db.get(key, {}).get('etag') == etag:
                    return db[key]['value']
                else:
                    value = fn(*args, **kwargs)
                    db[key] = {'etag': etag, 'value': value}
                    return value
        except (shelve_open_error, ImportError):
            # Caused when switching between Python versions
            warn("Removing possibly out-dated cache")
            os.remove(cache_path)

            with closing(shelve.open(cache_path)) as db:
                value = fn(*args, **kwargs)
                db[key] = {'etag': etag, 'value': value}
                return value
Example #31
 def establish_host_connectivity():
     """Continually try to recreate the SSH Tunnels to all hosts for 2 minutes
     """
     return closing(
         TunnelCollection(ssh_user, ssh_key_path, host_list_w_port))
Example #32
File: db.py Project: ctvera/swift-deb
    def initialize(self, put_timestamp=None, storage_policy_index=None):
        """
        Create the DB

        The storage_policy_index is passed through to the subclass's
        ``_initialize`` method.  It is ignored by ``AccountBroker``.

        :param put_timestamp: internalized timestamp of initial PUT request
        :param storage_policy_index: only required for containers
        """
        if self.db_file == ':memory:':
            tmp_db_file = None
            conn = get_db_connection(self.db_file, self.timeout)
        else:
            mkdirs(self.db_dir)
            fd, tmp_db_file = mkstemp(suffix='.tmp', dir=self.db_dir)
            os.close(fd)
            conn = sqlite3.connect(tmp_db_file, check_same_thread=False,
                                   factory=GreenDBConnection, timeout=0)
        # creating dbs implicitly does a lot of transactions, so we
        # pick fast, unsafe options here and do a big fsync at the end.
        with closing(conn.cursor()) as cur:
            cur.execute('PRAGMA synchronous = OFF')
            cur.execute('PRAGMA temp_store = MEMORY')
            cur.execute('PRAGMA journal_mode = MEMORY')
        conn.create_function('chexor', 3, chexor)
        conn.row_factory = sqlite3.Row
        conn.text_factory = str
        conn.executescript("""
            CREATE TABLE outgoing_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TABLE incoming_sync (
                remote_id TEXT UNIQUE,
                sync_point INTEGER,
                updated_at TEXT DEFAULT 0
            );
            CREATE TRIGGER outgoing_sync_insert AFTER INSERT ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER outgoing_sync_update AFTER UPDATE ON outgoing_sync
            BEGIN
                UPDATE outgoing_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_insert AFTER INSERT ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
            CREATE TRIGGER incoming_sync_update AFTER UPDATE ON incoming_sync
            BEGIN
                UPDATE incoming_sync
                SET updated_at = STRFTIME('%s', 'NOW')
                WHERE ROWID = new.ROWID;
            END;
        """)
        if not put_timestamp:
            put_timestamp = Timestamp(0).internal
        self._initialize(conn, put_timestamp,
                         storage_policy_index=storage_policy_index)
        conn.commit()
        if tmp_db_file:
            conn.close()
            with open(tmp_db_file, 'r+b') as fp:
                os.fsync(fp.fileno())
            with lock_parent_directory(self.db_file, self.pending_timeout):
                if os.path.exists(self.db_file):
                    # It's as if there was a "condition" where different parts
                    # of the system were "racing" each other.
                    raise DatabaseAlreadyExists(self.db_file)
                renamer(tmp_db_file, self.db_file)
            self.conn = get_db_connection(self.db_file, self.timeout)
        else:
            self.conn = conn
Example #33
def download_segment(segment_url, dash_folder):
    """ Module to download the segment """
    #parse_url = urlparse.urlparse(segment_url)
    #connection = HTTPConnectionPool(parse_url.netloc)
    #chunk_dl_rates = []
    parsed_uri = urlparse.urlparse(segment_url)
    segment_path = '{uri.path}'.format(uri=parsed_uri)
    while segment_path.startswith('/'):
        segment_path = segment_path[1:]
    segment_filename = os.path.join(dash_folder,
                                    os.path.basename(segment_path))
    make_sure_path_exists(os.path.dirname(segment_filename))
    #segment_file_handle = open(segment_filename, 'wb')
    chunk_dl_rates = []
    segment_size = 0
    try:
        #print segment_url
        total_data_dl_time = 0

        chunk_number = 0
        chunk_start_time = timeit.default_timer()
        with closing(connection.get(segment_url, stream=True)) as seg_conn:
            with open(segment_filename, 'wb') as segment_file_handle:
                for segment_data in seg_conn.iter_content(DOWNLOAD_CHUNK):
                    if segment_data is None:
                        break
                    segment_file_handle.write(segment_data)
                    segment_size += len(segment_data)
                    if len(segment_data) < DOWNLOAD_CHUNK:
                        timenow = timeit.default_timer()
                        chunk_dl_time = timenow - chunk_start_time
                        chunk_number += 1
                        total_data_dl_time += chunk_dl_time
                        current_chunk_dl_rate = segment_size * 8 / total_data_dl_time
                        chunk_dl_rates.append(current_chunk_dl_rate)
                        with open(
                                '/mnt/QUIClientServer0/chunk_rate_iter_squad_requests_HTTP1.txt',
                                'a') as chk:
                            chk.write("%s" % segment_url)
                            for item in chunk_dl_rates:
                                chk.write(",%s" % item)
                            chk.write("\n")

                        #print chunk_dl_rates
                        #print "-----------------"
                        #print segment_w_chunks
                        #print "!!!!!!!!!!!!!!!!!"
                        segment_w_chunks.append(chunk_dl_rates)
                        #print segment_w_chunks
                        #print "##################"
                        break
                    timenow = timeit.default_timer()
                    chunk_dl_time = timenow - chunk_start_time
                    total_data_dl_time += chunk_dl_time
                    current_chunk_dl_rate = segment_size * 8 / total_data_dl_time
                    chunk_start_time = timenow
                    chunk_number += 1
                    chunk_dl_rates.append(current_chunk_dl_rate)

    except urllib2.HTTPError, error:
        config_dash.LOG.error(
            "Unable to download DASH Segment {} HTTP Error:{} ".format(
                segment_url, str(error.code)))
        return None
Example #34
# importing libraries
import wave, math, contextlib
import speech_recognition as sr
from moviepy.editor import AudioFileClip

#extracting the audio from the video file. hard coded the file names for now
transcribed_audio_file_name = "transcribed_speech.wav"
zoom_video_file_name = "Fork_tutorial.mp4"
audioclip = AudioFileClip(zoom_video_file_name)
audioclip.write_audiofile(transcribed_audio_file_name)

#finding out the total duration of the audio clip
with contextlib.closing(wave.open(transcribed_audio_file_name,'r')) as f:
    frames = f.getnframes()
    rate = f.getframerate()
    duration = frames / float(rate)
total_duration = math.ceil(duration / 60)

# transcribing the audio into text in chunks of 10 MB (limitation of the api call)
r = sr.Recognizer()
for i in range(0, total_duration):
    with sr.AudioFile(transcribed_audio_file_name) as source:
        audio = r.record(source, offset=i*60, duration=60)
    f = open("transcription.txt", "a")
    f.write(r.recognize_google(audio))
    f.write(" ")
f.close()
Example #35
        link = "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
        default_video = True
if not default_video:
    chrome_options.add_argument("--mute-audio")
amount = 3
try:
    amount = int(input("Amount of reloads to search: "))
except ValueError:
    amount = 3
errors = 1
try:
    errors = int(input("Amount of errors to be displayed: "))
except ValueError:
    errors = 1

with closing(Chrome(chrome_options=chrome_options)) as driver:
    wait = WebDriverWait(driver, 50)

    if sys.platform == "win32":
        os.system("cls")
    else:
        os.system("clear")

    req = Request(link, headers={'User-Agent': 'Mozilla/5.0'})
    webpage = urlopen(req).read()

    # Creating a BeautifulSoup object of the html page for easy extraction of data.

    soup = BeautifulSoup(webpage, 'html.parser')
    html = soup.prettify('utf-8')
    if not default_video:
Example #36
def init_db():
	with closing(connect_db()) as db:
		with app.open_resource('schema.sql', mode='r') as f:
			db.cursor().executescript(f.read())

		db.commit()
Example #37
File: fetch.py Project: sirex/databot-bots
def main():
    """

    Install system packages:

        postgresql postgis osm2pgsql

    """
    logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)

    assert shutil.which('osm2pgsql'), 'install osm2pgsql'

    dbname = 'lietuva'
    database_created = False
    with contextlib.closing(psycopg2.connect('postgresql:///postgres')) as conn:
        conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
        with contextlib.closing(conn.cursor()) as cur:
            cur.execute("SELECT EXISTS(SELECT * FROM pg_database WHERE datname=%s)", (dbname,))
            exists, = cur.fetchone()
            if not exists:
                logger.info("create database %s", dbname)
                cur.execute('CREATE DATABASE ' + dbname)
                database_created = True

    if database_created:
        with contextlib.closing(psycopg2.connect('postgresql:///' + dbname)) as conn:
            conn.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
            with contextlib.closing(conn.cursor()) as cur:
                logger.info("create extension postgis")
                cur.execute('CREATE EXTENSION postgis;')

    code = pathlib.Path('bots/osm')
    data = pathlib.Path('data/osm')
    output = data / 'LT.tar.gz2'
    source = 'http://download.gisgraphy.com/openstreetmap/pbf/LT.tar.bz2'

    logger.info('downloading %s', source)
    http_code = subprocess.run(funcy.flatten([
        'curl', source,
        ['--time-cond', str(output)] if output.exists() else [],
        '--output', str(output),
        '--location',
        '--silent',
        '--write-out', '%{http_code}',
    ]), check=True, stdout=subprocess.PIPE).stdout

    http_code = http_code.decode()
    logger.info('http code: %s', http_code)

    if http_code == '200':
        logger.info('extracting %s', output)
        subprocess.run(['tar', '--directory', str(output.parent), '-xjf', str(output)], check=True)

        logger.info('importing %s', data / 'LT')
        # https://github.com/openstreetmap/osm2pgsql#usage
        subprocess.run([
            'osm2pgsql',
            '--create',
            '--database', dbname,
            '--style', str(code / 'lietuva.style'),
            '--input-reader', 'pbf',
            str(data / 'LT'),
        ], check=True)

    logger.info('done')
Example #38
def get_config():
    with closing(open("configuration.json")) as f:
        data = f.read()
        config = json.loads(data)
        return config
Example #39
 def __contains__(self, item):
     with locking("%s.lock" % self.metadata_file, LOCK_SH, timeout=TIMEOUT):
         with closing(shelve.open(self.metadata_file)) as database:
             result = item in database  # pylint: disable=E1135
     return result
Example #40
def main():
    options = check_environment()

    host_list = None
    vpc = None  # Set if the test owns the VPC

    if options.host_list is None:
        log.info('CCM_VPC_HOSTS not provided, requesting new VPC from CCM...')
        vpc = make_vpc(use_bare_os=options.test_install_prereqs)
        host_list = vpc.hosts()
    else:
        host_list = options.host_list

    assert os.path.exists(
        'ssh_key'), 'Valid SSH key for hosts must be in working dir!'
    # key must be chmod 600 for test_runner to use
    os.chmod('ssh_key', stat.S_IREAD | stat.S_IWRITE)

    # Create custom SSH Runnner to help orchestrate the test
    ssh_user = '******'
    ssh_key_path = 'ssh_key'
    remote_dir = '/home/centos'

    if options.use_api:
        installer = test_util.installer_api_test.DcosApiInstaller()
        if not options.test_install_prereqs:
            # If we dont want to test the prereq install, use offline mode to avoid it
            installer.offline_mode = True
    else:
        installer = test_util.installer_api_test.DcosCliInstaller()

    host_list_w_port = [i + ':22' for i in host_list]

    @retry(stop_max_delay=120000)
    def establish_host_connectivity():
        """Continually try to recreate the SSH Tunnels to all hosts for 2 minutes
        """
        return closing(
            TunnelCollection(ssh_user, ssh_key_path, host_list_w_port))

    log.info('Checking that hosts are accessible')
    with establish_host_connectivity() as tunnels:
        local_ip = {}
        for tunnel in tunnels.tunnels:
            local_ip[tunnel.host] = get_local_address(tunnel, remote_dir)
            if options.do_setup:
                # Make the default user privileged to use docker
                tunnel.remote_cmd(
                    ['sudo', 'usermod', '-aG', 'docker', ssh_user])

    # use first node as bootstrap node, second node as master, all others as agents
    test_host = host_list[0]
    test_host_local = local_ip[host_list[0]]
    master_list = [local_ip[host_list[1]]]
    agent1 = local_ip[host_list[2]]
    agent2 = local_ip[host_list[3]]
    agent_list = [agent1, agent2]
    public_agent_list = [local_ip[host_list[4]]]
    log.info('Test host public/private IP: ' + test_host + '/' +
             test_host_local)

    with closing(SSHTunnel(ssh_user, ssh_key_path,
                           test_host)) as test_host_tunnel:
        log.info('Setting up installer on test host')

        installer.setup_remote(tunnel=test_host_tunnel,
                               installer_path=remote_dir +
                               '/dcos_generate_config.sh',
                               download_url=options.installer_url)
        if options.do_setup:
            # only do on setup so you can rerun this test against a living installer
            log.info('Verifying installer password hashing')
            test_pass = '******'
            hash_passwd = installer.get_hashed_password(test_pass)
            assert passlib.hash.sha512_crypt.verify(
                test_pass, hash_passwd), 'Hash does not match password'
            if options.use_api:
                installer.start_web_server()

        with open(pkg_resources.resource_filename(
                "gen", "ip-detect/aws.sh")) as ip_detect_fh:
            ip_detect_script = ip_detect_fh.read()
        with open('ssh_key', 'r') as key_fh:
            ssh_key = key_fh.read()
        # Using static exhibitor is the only option in the GUI installer
        if options.use_api:
            log.info(
                'Installer API is selected, so configure for static backend')
            zk_host = None  # causes genconf to use static exhibitor backend
        else:
            log.info('Installer CLI is selected, so configure for ZK backend')
            zk_host = test_host_local + ':2181'
            zk_cmd = [
                'sudo', 'docker', 'run', '-d', '-p', '2181:2181', '-p',
                '2888:2888', '-p', '3888:3888', 'jplock/zookeeper'
            ]
            test_host_tunnel.remote_cmd(zk_cmd)

        log.info("Configuring install...")
        installer.genconf(zk_host=zk_host,
                          master_list=master_list,
                          agent_list=agent_list,
                          public_agent_list=public_agent_list,
                          ip_detect_script=ip_detect_script,
                          ssh_user=ssh_user,
                          ssh_key=ssh_key,
                          add_config_path=options.add_config_path,
                          rexray_config_preset='aws')

        log.info("Running Preflight...")
        if options.test_install_prereqs:
            # Runs preflight in --web or --install-prereqs for CLI
            # This may take up 15 minutes...
            installer.install_prereqs()
            if options.test_install_prereqs_only:
                if vpc:
                    vpc.delete()
                sys.exit(0)
        else:
            # Will not fix errors detected in preflight
            installer.preflight()

        log.info("Running Deploy...")
        installer.deploy()

        log.info("Running Postflight")
        installer.postflight()

    with closing(SSHTunnel(ssh_user, ssh_key_path,
                           host_list[1])) as master_tunnel:
        # Runs dcos-image integration tests inside the cluster
        result = test_util.test_runner.integration_test(
            tunnel=master_tunnel,
            test_dir=remote_dir,
            region=vpc.get_region() if vpc else DEFAULT_AWS_REGION,
            dcos_dns=master_list[0],
            master_list=master_list,
            agent_list=agent_list,
            public_agent_list=public_agent_list,
            provider='onprem',
            # Setting dns_search: mesos not currently supported in API
            test_dns_search=not options.use_api,
            aws_access_key_id=options.aws_access_key_id,
            aws_secret_access_key=options.aws_secret_access_key,
            add_env=options.add_env,
            pytest_dir=options.pytest_dir,
            pytest_cmd=options.pytest_cmd)

    if result == 0:
        log.info("Test successsful! Deleting VPC if provided in this run...")
        if vpc is not None:
            vpc.delete()
    else:
        log.info(
            "Test failed! VPC will remain for debugging 1 hour from instantiation"
        )
    if options.ci_flags:
        result = 0  # Wipe the return code so that tests can be muted in CI
    sys.exit(result)
Example #41
 def __delitem__(self, key):
     with locking("%s.lock" % self.metadata_file, LOCK_EX, timeout=TIMEOUT):
         with closing(shelve.open(self.metadata_file)) as database:
             del database[key]  # pylint: disable=E1138
Example #42
 def items(self):
     with locking("%s.lock" % self.metadata_file, LOCK_SH, timeout=TIMEOUT):
         with closing(shelve.open(self.metadata_file)) as database:
             for k in database.keys():  # pylint: disable=E1101
                 yield (k, database[k])  # pylint: disable=E1136
Example #43
def tcp_socket():
    import socket
    from contextlib import closing

    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        yield sock
Example #44
 def pop(self, *args):
     with locking("%s.lock" % self.metadata_file, LOCK_EX, timeout=TIMEOUT):
         with closing(shelve.open(self.metadata_file)) as database:
             result = database.pop(*args)  # pylint: disable=E1101
     return result
Example #45
def unused_tcp_port() -> int:
    """Return an unused port."""
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as sock:
        sock.bind(("", 0))
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        return cast(int, sock.getsockname()[1])
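A small usage sketch. Note the caveat common to this pattern: the port is only free at the moment the probe socket is closed, so another process can in principle grab it before the caller binds; that is usually acceptable in tests.

port = unused_tcp_port()
print("starting throwaway test server on 127.0.0.1:%d" % port)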
Example #46
 def __getitem__(self, key):
     with locking("%s.lock" % self.metadata_file, LOCK_SH, timeout=TIMEOUT):
         with closing(shelve.open(self.metadata_file)) as database:
             result = database[key]  # pylint: disable=E1136
     return result
Example #47
 def delete(self, key):
     with self.lock:
         with closing(shelve.open(self.filename)) as cache:
             if key in cache:
                 del cache[key]
Example #48
def find_free_port():
    with closing(socket.socket(socket.AF_INET, socket.SOCK_STREAM)) as s:
        s.bind(("localhost", 0))
        # s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        port = s.getsockname()[1]
        return port
Example #49
 def get(self, key):
     with closing(shelve.open(self.filename)) as cache:
         if key in cache:
             return cache.get(key)
Example #50
 def clear(self):
     with self.lock:
         with closing(shelve.open(self.filename)) as cache:
             cache.clear()
Example #51
def get_free_port(host='127.0.0.1'):
    """Return an unused TCP port."""
    with contextlib.closing(socket.socket()) as sock:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        sock.bind((host, 0))
        return sock.getsockname()[1]
Example #52
 def set(self, key, value):
     with self.lock:
         with closing(shelve.open(self.filename)) as cache:
             cache.setdefault(key, value)
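The get/set/delete/clear methods in the surrounding examples appear to belong to one small shelve-backed cache. A self-contained sketch of such a class, assuming threading.Lock for self.lock (an assumption, not shown in the excerpts):

import shelve
import threading
from contextlib import closing

class ShelfCache:
    # Toy shelve-backed cache; an illustration of how the excerpted methods fit together.
    def __init__(self, filename):
        self.filename = filename
        self.lock = threading.Lock()  # assumption: a plain thread lock

    def get(self, key):
        with closing(shelve.open(self.filename)) as cache:
            return cache.get(key)

    def set(self, key, value):
        with self.lock:
            with closing(shelve.open(self.filename)) as cache:
                cache.setdefault(key, value)

    def delete(self, key):
        with self.lock:
            with closing(shelve.open(self.filename)) as cache:
                if key in cache:
                    del cache[key]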
Example #53
    def copy_to_cache(self, ident):
        ident = unquote(ident)

        #get source image and write to temporary file
        (source_url, options) = self._web_request_url(ident)
        assert source_url is not None

        cache_dir = self.cache_dir_path(ident)
        mkdir_p(cache_dir)

        with closing(requests.get(source_url, stream=True, **options)) as response:
            if not response.ok:
                logger.warn(
                    "Source image not found at %s for identifier: %s. "
                    "Status code returned: %s.",
                    source_url, ident, response.status_code
                )
                raise ResolverException(
                    "Source image not found for identifier: %s. "
                    "Status code returned: %s." % (ident, response.status_code)
                )

            extension = self.cache_file_extension(ident, response)
            local_fp = join(cache_dir, "loris_cache." + extension)

            with tempfile.NamedTemporaryFile(dir=cache_dir, delete=False) as tmp_file:
                for chunk in response.iter_content(2048):
                    tmp_file.write(chunk)

        # Now rename the temp file to the desired file name if it still
        # doesn't exist (another process could have created it).
        #
        # Note: This is purely an optimisation; if the file springs into
        # existence between the existence check and the copy, it will be
        # overridden.
        if exists(local_fp):
            logger.info('Another process downloaded src image %s', local_fp)
            remove(tmp_file.name)
        else:
            safe_rename(tmp_file.name, local_fp)
            logger.info("Copied %s to %s", source_url, local_fp)

        # Check for rules file associated with image file
        # These files are < 2k in size, so fetch in one go.
        # Assumes that the rules will be next to the image
        # cache_dir is image specific, so this is easy

        bits = split(source_url)
        fn = bits[1].rsplit('.', 1)[0] + "." + self.auth_rules_ext
        rules_url = bits[0] + '/' + fn
        try:
            resp = requests.get(rules_url)
            if resp.status_code == 200:
                local_rules_fp = join(cache_dir, "loris_cache." + self.auth_rules_ext)
                if not exists(local_rules_fp):
                    with open(local_rules_fp, 'w') as fh:
                        fh.write(resp.text)
        except requests.exceptions.RequestException:
            # No connection available
            pass

        return local_fp
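mkdir_p and safe_rename are helpers from the surrounding project and are not shown above. A minimal sketch of what an atomic-rename helper could look like, assuming os.replace semantics (atomic when source and destination are on the same filesystem, with a copy-then-replace fallback otherwise); this is an illustration, not the project's actual implementation:

import os
import shutil


def safe_rename(src, dst):
    # os.replace is atomic when src and dst live on the same filesystem.
    try:
        os.replace(src, dst)
    except OSError:
        # Cross-device move: copy to a temp name next to dst, then
        # atomically replace and remove the source.
        tmp = dst + ".part"
        shutil.copyfile(src, tmp)
        os.replace(tmp, dst)
        os.remove(src)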
예제 #54
0
            print("Downloading blender from " + url)
            file_tmp = urlretrieve(url, None, DownloadProgressBar())[0]
        except ImportError:
            print("Progressbar for downloading, can only be shown, "
                  "when the python package \"progressbar\" is installed")
            file_tmp = urlretrieve(url, None)[0]


        if platform == "linux" or platform == "linux2":

            if version_info.major == 3:
                with tarfile.open(file_tmp) as tar:
                    tar.extractall(blender_install_path)
            else:
                with contextlib.closing(lzma.LZMAFile(file_tmp)) as xz:
                    with tarfile.open(fileobj=xz) as f:
                        f.extractall(blender_install_path)
        elif platform == "darwin":
            if not os.path.exists(blender_install_path):
                os.makedirs(blender_install_path)
            os.rename(file_tmp, os.path.join(blender_install_path, blender_version + ".dmg"))
            # installing the blender app by mounting it and extracting the information
            subprocess.Popen(["hdiutil attach {}".format(os.path.join(blender_install_path, blender_version + ".dmg"))],
                             shell=True).wait()
            subprocess.Popen(
                ["cp -r {} {}".format(os.path.join("/", "Volumes", "Blender", "Blender.app"), blender_install_path)],
                shell=True).wait()
            subprocess.Popen(["diskutil unmount {}".format(os.path.join("/", "Volumes", "Blender"))], shell=True)
            # removing the downloaded image again
            subprocess.Popen(["rm {}".format(os.path.join(blender_install_path, blender_version + ".dmg"))], shell=True).wait()
예제 #55
0
def get_duration(fname):
    with contextlib.closing(wave.open(fname, 'r')) as f:
        frames = f.getnframes()
        rate = f.getframerate()
        duration = frames / float(rate)
        return duration
예제 #56
0
def check_connection_ntuple(conn):
    """Check validity of a connection namedtuple."""
    # check ntuple
    assert len(conn) in (6, 7), conn
    has_pid = len(conn) == 7
    has_fd = getattr(conn, 'fd', -1) != -1
    assert conn[0] == conn.fd
    assert conn[1] == conn.family
    assert conn[2] == conn.type
    assert conn[3] == conn.laddr
    assert conn[4] == conn.raddr
    assert conn[5] == conn.status
    if has_pid:
        assert conn[6] == conn.pid

    # check fd
    if has_fd:
        assert conn.fd >= 0, conn
        if hasattr(socket, 'fromfd') and not WINDOWS:
            try:
                dupsock = socket.fromfd(conn.fd, conn.family, conn.type)
            except (socket.error, OSError) as err:
                if err.args[0] != errno.EBADF:
                    raise
            else:
                with contextlib.closing(dupsock):
                    assert dupsock.family == conn.family
                    assert dupsock.type == conn.type

    # check family
    assert conn.family in (AF_INET, AF_INET6, AF_UNIX), repr(conn.family)
    if conn.family in (AF_INET, AF_INET6):
        # actually try to bind the local socket; ignore IPv6
        # sockets as their address might be represented as
        # an IPv4-mapped-address (e.g. "::127.0.0.1")
        # and that's rejected by bind()
        if conn.family == AF_INET:
            s = socket.socket(conn.family, conn.type)
            with contextlib.closing(s):
                try:
                    s.bind((conn.laddr[0], 0))
                except socket.error as err:
                    if err.errno != errno.EADDRNOTAVAIL:
                        raise
    elif conn.family == AF_UNIX:
        assert conn.status == psutil.CONN_NONE, conn.status

    # check type (SOCK_SEQPACKET may happen in case of AF_UNIX socks)
    assert conn.type in (SOCK_STREAM, SOCK_DGRAM, SOCK_SEQPACKET), \
        repr(conn.type)
    if conn.type == SOCK_DGRAM:
        assert conn.status == psutil.CONN_NONE, conn.status

    # check laddr (IP address and port sanity)
    for addr in (conn.laddr, conn.raddr):
        if conn.family in (AF_INET, AF_INET6):
            assert isinstance(addr, tuple), addr
            if not addr:
                continue
            assert isinstance(addr.port, int), addr.port
            assert 0 <= addr.port <= 65535, addr.port
            check_net_address(addr.ip, conn.family)
        elif conn.family == AF_UNIX:
            assert isinstance(addr, str), addr

    # check status
    assert isinstance(conn.status, str), conn
    valids = [getattr(psutil, x) for x in dir(psutil) if x.startswith('CONN_')]
    assert conn.status in valids, conn
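check_connection_ntuple relies on module-level names from psutil's own test helpers (WINDOWS, AF_UNIX, SOCK_SEQPACKET, check_net_address, and so on). A hedged usage sketch, running the validator over the current process's connections:

import psutil

# Validate every inet connection of the current process using the
# helper above (its module-level constants come from psutil's test
# suite, as in the example).
proc = psutil.Process()
for conn in proc.connections(kind='inet'):
    check_connection_ntuple(conn)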
예제 #57
0
 def was_spam(self, searchkey):
     with closing(bsddb.btopen(self.spamdb, 'c')) as db:
         return searchkey in db
예제 #58
0
 def wrapper(self, *args, **kwargs):
     with contextlib.closing(self.mysql_connect()) as connection:
         with contextlib.closing(connection.cursor()) as cursor:
             return func(self, cursor, *args, **kwargs)
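This snippet is only the inner wrapper of a decorator; the enclosing decorator is not shown. A minimal sketch of the likely surrounding definition, assuming self.mysql_connect() returns a DB-API connection (the decorator name is an assumption):

import contextlib
import functools


def with_cursor(func):
    """Open a connection and cursor, pass the cursor to the wrapped
    method, and close both afterwards."""
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        with contextlib.closing(self.mysql_connect()) as connection:
            with contextlib.closing(connection.cursor()) as cursor:
                return func(self, cursor, *args, **kwargs)
    return wrapper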
예제 #59
0
 def mark_spam(self, searchkey):
     with closing(bsddb.btopen(self.spamdb, 'c')) as db:
         db[searchkey] = ''
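The was_spam/mark_spam methods above use bsddb, which only exists in Python 2. A hedged sketch of the surrounding class ported to the standard-library dbm module instead; the class name and constructor are assumptions:

import dbm
from contextlib import closing


class SpamDB:
    """Sketch of the spam-tracking class, using dbm as a stand-in for
    the Python-2-only bsddb.btopen."""

    def __init__(self, spamdb):
        self.spamdb = spamdb

    def mark_spam(self, searchkey):
        with closing(dbm.open(self.spamdb, 'c')) as db:
            db[searchkey] = ''

    def was_spam(self, searchkey):
        with closing(dbm.open(self.spamdb, 'c')) as db:
            return searchkey in db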
예제 #60
0
    async def handle_report(self, data):
        with closing(self.db.cursor()) as cursor:
            cursor.execute(
                '''
                -- Find tasks with a matching outhash (that is, tasks that
                -- are equivalent)
                SELECT taskhash, method, unihash FROM tasks_v2 WHERE method=:method AND outhash=:outhash

                -- If there is an exact match on the taskhash, return it.
                -- Otherwise return the oldest matching outhash of any
                -- taskhash
                ORDER BY CASE WHEN taskhash=:taskhash THEN 1 ELSE 2 END,
                    created ASC

                -- Only return one row
                LIMIT 1
                ''', {k: data[k]
                      for k in ('method', 'outhash', 'taskhash')})

            row = cursor.fetchone()

            # If no matching outhash was found, or one *was* found but it
            # wasn't an exact match on the taskhash, a new entry for this
            # taskhash should be added
            if row is None or row['taskhash'] != data['taskhash']:
                # If a row matching the outhash was found, the unihash for
                # the new taskhash should be the same as that one.
                # Otherwise the caller provided unihash is used.
                unihash = data['unihash']
                if row is not None:
                    unihash = row['unihash']

                insert_data = {
                    'method': data['method'],
                    'outhash': data['outhash'],
                    'taskhash': data['taskhash'],
                    'unihash': unihash,
                    'created': datetime.now()
                }

                for k in ('owner', 'PN', 'PV', 'PR', 'task',
                          'outhash_siginfo'):
                    if k in data:
                        insert_data[k] = data[k]

                cursor.execute(
                    '''INSERT INTO tasks_v2 (%s) VALUES (%s)''' %
                    (', '.join(sorted(insert_data.keys())), ', '.join(
                        ':' + k for k in sorted(insert_data.keys()))),
                    insert_data)

                self.db.commit()

                logger.info('Adding taskhash %s with unihash %s',
                            data['taskhash'], unihash)

                d = {
                    'taskhash': data['taskhash'],
                    'method': data['method'],
                    'unihash': unihash
                }
            else:
                d = {k: row[k] for k in ('taskhash', 'method', 'unihash')}

        self.write_message(d)
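The SELECT and INSERT in handle_report imply a tasks_v2 table keyed by method/outhash/taskhash with a unihash, a creation timestamp, and several optional reporting columns. A hedged sketch of a table shape that would satisfy those queries; the real hash-server schema may differ (indexes, uniqueness constraints, column types):

import sqlite3

db = sqlite3.connect(":memory:")
db.row_factory = sqlite3.Row   # handle_report reads rows by column name
db.execute('''
    CREATE TABLE tasks_v2 (
        id INTEGER PRIMARY KEY AUTOINCREMENT,
        method TEXT NOT NULL,
        outhash TEXT NOT NULL,
        taskhash TEXT NOT NULL,
        unihash TEXT NOT NULL,
        created DATETIME,
        -- optional reporting fields
        owner TEXT,
        PN TEXT,
        PV TEXT,
        PR TEXT,
        task TEXT,
        outhash_siginfo TEXT
    )
''')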