Example #1
def RunSshCmdWithStdin(cluster_name,
                       node,
                       basecmd,
                       port,
                       data,
                       debug=False,
                       verbose=False,
                       use_cluster_key=False,
                       ask_key=False,
                       strict_host_check=False,
                       ensure_version=False):
    """Runs a command on a remote machine via SSH and provides input in stdin.

  @type cluster_name: string
  @param cluster_name: Cluster name
  @type node: string
  @param node: Node name
  @type basecmd: string
  @param basecmd: Base command (path on the remote machine)
  @type port: int
  @param port: The SSH port of the remote machine or None for the default
  @param data: JSON-serializable input data for script (passed to stdin)
  @type debug: bool
  @param debug: Enable debug output
  @type verbose: bool
  @param verbose: Enable verbose output
  @type use_cluster_key: bool
  @param use_cluster_key: See L{ssh.SshRunner.BuildCmd}
  @type ask_key: bool
  @param ask_key: See L{ssh.SshRunner.BuildCmd}
  @type strict_host_check: bool
  @param strict_host_check: See L{ssh.SshRunner.BuildCmd}

  """
    cmd = [basecmd]

    # Pass --debug/--verbose to the external script if set on our invocation
    if debug:
        cmd.append("--debug")

    if verbose:
        cmd.append("--verbose")

    if ensure_version:
        all_cmds = _EnsureCorrectGanetiVersion(cmd)
    else:
        all_cmds = [cmd]

    if port is None:
        port = netutils.GetDaemonPort(constants.SSH)

    srun = SshRunner(cluster_name)
    scmd = srun.BuildCmd(node,
                         constants.SSH_LOGIN_USER,
                         utils.ShellQuoteArgs(
                             utils.ShellCombineCommands(all_cmds)),
                         batch=False,
                         ask_key=ask_key,
                         quiet=False,
                         strict_host_check=strict_host_check,
                         use_cluster_key=use_cluster_key,
                         port=port)

    tempfh = tempfile.TemporaryFile()
    try:
        tempfh.write(serializer.DumpJson(data))
        tempfh.seek(0)

        result = utils.RunCmd(scmd, interactive=True, input_fd=tempfh)
    finally:
        tempfh.close()

    if result.failed:
        raise errors.OpExecError("Command '%s' failed: %s" %
                                 (result.cmd, result.fail_reason))
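
The snippet above funnels JSON through an anonymous temporary file so the remote command can read it on stdin. Below is a minimal sketch of that pattern outside of Ganeti; the command and payload are placeholders, not part of the original code.

import json
import subprocess
import tempfile


def run_with_stdin(cmd, data):
    """Serialize *data* to JSON and feed it to *cmd* on stdin via a temp file."""
    with tempfile.TemporaryFile(mode="w+b") as tmp:
        tmp.write(json.dumps(data).encode("utf-8"))
        tmp.seek(0)  # rewind so the child process reads from the beginning
        return subprocess.run(cmd, stdin=tmp, check=True)


# Hypothetical usage: stream a JSON document into `cat`.
# run_with_stdin(["cat"], {"hello": "world"})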
Example #2
version = '2.3.0'

# Read README for PyPI, fallback to short description if it fails.
desc = 'Powerful, efficient trajectory analysis in scientific Python.'
try:
    readme_file = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                               'README.rst')
    with open(readme_file) as f:
        readme = f.read()
except OSError:
    readme = desc

# Using a temporary file as a buffer to hold stderr output allows us
# to capture error messages from the underlying compiler and scan them
# for known errors.
tfile = tempfile.TemporaryFile(mode='w+b')

try:
    with stderr_manager(tfile):
        setup(
            name='freud-analysis',
            version=version,
            packages=['freud'],
            description=desc,
            long_description=readme,
            long_description_content_type='text/x-rst',
            keywords=('simulation analysis molecular dynamics soft matter '
                      'particle system computational physics'),
            classifiers=[
                "Development Status :: 5 - Production/Stable",
                "Intended Audience :: Science/Research",
Example #3
def plot_map(assets, regions, colors={}, font_size=15):
    """Plots the map of regions. Currently only NUTS.
    
    Args:
        assets (list): List of ISO-3 countries to cover.
        regions (list): List of regexes matching NUTS codes.
        colors (dict): Mapping of ISO-2 to tuple of 3 (rgb fractions).
        font_size (float): Font size.
    """

    # set the visualization
    plt.rcParams["figure.figsize"] = (7, 9)
    plt.rcParams.update({'font.size': font_size})

    def get_color(s):
        try:
            return colors[s]
        except KeyError:
            return COLOR_DEFAULT

    # download and extract zip
    zip_bits = requests.get(URL).content
    tmp = tempfile.TemporaryFile()
    tmp.write(zip_bits)
    tmp.seek(0)
    # load as zip
    zpf = zipfile.ZipFile(tmp)

    # load NUTS geojsons
    geojson_files = [
        f for f in zpf.namelist() if f.split('.')[-1] == 'geojson'
    ]
    geojsons = {f: geopandas.read_file(zpf.open(f)) for f in geojson_files}
    # load nuts mapper
    with io.TextIOWrapper(zpf.open('NUTS_RG_BN_10M_2016.csv', 'r'),
                          encoding="utf-8") as fp:
        rdr = csv.reader(fp)
        _ = next(rdr)  # drop header
        csv_file = list(rdr)
        nuts, code = [i[0] for i in csv_file], [int(i[1]) for i in csv_file]
        nutsmap = pd.DataFrame({'NUTS_BN_ID': code, 'id': nuts})

    # filters
    match_region = lambda s: s is not None and any(
        re.match(r, s) for r in regions)

    def keep_level_bn(lvl):
        def _drop_out_level(s):
            r = "^[A-Z]{2}[0-9]{%d}$" % lvl
            return s is not None and bool(re.match(r, s))

        return _drop_out_level

    # parse relevant files
    boundaries, centers, shapes = {}, {}, {}
    for lvl in range(4):
        # corresponding file per level
        bn_key = f'NUTS_BN_{str(SCALE).zfill(2)}M_2016_4326_LEVL_{lvl}.geojson'
        lb_key = f'NUTS_LB_2016_4326_LEVL_{lvl}.geojson'
        rg_key = f'NUTS_RG_{str(SCALE).zfill(2)}M_2016_4326_LEVL_{lvl}.geojson'
        # get boundaries
        boundaries[lvl] = geojsons[bn_key]
        centers[lvl] = geojsons[lb_key]
        shapes[lvl] = geojsons[rg_key]
        # map boundary ID onto NUTS ID
        boundaries[lvl] = boundaries[lvl].merge(
            nutsmap, how='inner', on='NUTS_BN_ID')
        # filter boundaries from different level
        boundaries[lvl] = boundaries[lvl][boundaries[lvl].id.apply(
            keep_level_bn(lvl))]
        centers[lvl] = centers[lvl][centers[lvl].id.apply(keep_level_bn(lvl))]
        # keep only input
        centers[lvl] = centers[lvl][centers[lvl].id.apply(match_region)]
        shapes[lvl] = shapes[lvl][shapes[lvl].id.apply(match_region)]
        boundaries[lvl] = boundaries[lvl][boundaries[lvl].id.apply(
            match_region)]

    # concat the levels
    boundaries = pd.concat(boundaries.values(), ignore_index=True)
    centers = pd.concat(centers.values(), ignore_index=True)
    shapes = pd.concat(shapes.values(), ignore_index=True)

    # keep only longest centers
    to_remove = []
    for i in range(int(centers.id.apply(len).max()), 2, -1):
        cs = centers[centers.id.apply(len) == i].id
        cs_ = cs.apply(lambda s: s[:-1]).unique()
        for code in cs_:
            if not centers[centers.id == code].empty:
                to_remove.append(code)
    centers = centers[~centers.id.isin(to_remove)]

    # load library data
    borders = geopandas.read_file(
        geopandas.datasets.get_path('naturalearth_lowres'))
    borders = borders[~borders.iso_a3.isin(assets)]

    # ===== plot map =====
    # aspect ratio
    minx, miny, maxx, maxy = shapes.total_bounds
    w = 1 / math.cos(math.radians((maxy + miny) / 2 - 5))
    # prepare plot
    fig, ax = plt.subplots(figsize=(PLOT_SIZE, PLOT_SIZE / w),
                           facecolor=(1, 1, 1))
    ax.axis('off')  # remove axis

    # plot continent shades
    borders.plot(ax=ax, color=(.95, .95, .95), linewidth=.5)
    # plot shapes
    for name, group in shapes.groupby('CNTR_CODE'):
        group.plot(ax=ax, color=get_color(name))
    # add boundaries
    boundaries.plot(ax=ax, color="black", linewidth=.8, aspect=w)
    # add labels
    centers.apply(
        lambda s: ax.annotate(text=s.id, xy=s.geometry.coords[0], ha='center'),
        axis=1)

    # set crop
    x_over, y_over = min(1, 20 / (maxx - minx)), min(1, 20 / (maxy - miny))
    ax.set_xlim(minx - x_over, maxx + x_over)
    ax.set_ylim(miny - y_over, maxy + y_over)
    plt.show()
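
Aside from the plotting, the temporary-file trick above is simply: download a zip archive into an anonymous file so zipfile can seek inside it without writing a named file to disk. Here is a minimal sketch of just that step; the URL is a placeholder.

import tempfile
import zipfile

import requests


def open_remote_zip(url):
    """Download *url* into an anonymous temporary file and return a ZipFile."""
    tmp = tempfile.TemporaryFile()
    tmp.write(requests.get(url).content)
    tmp.seek(0)  # rewind before handing the file object to ZipFile
    return zipfile.ZipFile(tmp)


# Hypothetical usage:
# zpf = open_remote_zip("https://example.com/archive.zip")
# print(zpf.namelist())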
Example #4
    def _initializeMolObj(self,
                          mol,
                          force_reading,
                          ignore_errors,
                          verbose=True):
        """
        Reads the input and tries to convert it into an rdkit.Chem.rdchem.Mol object

        Parameters
        ----------
        mol: str or rdkit.Chem.rdchem.Mol or moleculekit.smallmol.smallmol.SmallMol
            i) rdkit.Chem.rdchem.Mol ii) The path to the pdb/mol2 to load iii) The smile string iv) SmallMol object
            v) moleculekit.molecule.Molecule
        force_reading: bool
            If the mol provided cannot be read directly, it will first be converted into sdf and reloaded

        Returns
        -------
        _mol: rdkit.Chem.rdchem.Mol
            The rdkit molecule
        smallMol: moleculekit.smallmol.smallmol.SmallMol
            The smallMol object if SmallMol was passed
        """
        from moleculekit.molecule import Molecule

        message = None

        # If we are converting a Molecule object to a SmallMol object
        remove = False
        natoms = None
        if isinstance(mol, Molecule):
            natoms = mol.numAtoms
            mol = self._fromMolecule(mol)  # Returns a temp filename
            remove = True

        _mol = None
        if isinstance(mol, Chem.Mol):
            _mol = mol
        elif isinstance(mol, str):
            if os.path.isfile(mol):
                name_suffix = os.path.splitext(mol)[-1]

                with tempfile.TemporaryFile(mode='w+') as stderr:
                    # Redirect stderr to a file
                    # temp_fileno = os.dup(sys.stderr.fileno())
                    # os.dup2(stderr.fileno(), sys.stderr.fileno()) # Change process stderr
                    # sys.stderr = stderr # Change Python stderr

                    # load mol2 file
                    if name_suffix == ".mol2":
                        _mol = Chem.MolFromMol2File(mol, removeHs=False)
                    # load pdb file
                    elif name_suffix == ".pdb":
                        _mol = Chem.MolFromPDBFile(mol, removeHs=False)
                    # if the file failed to load and 'force_reading' is True, convert the file and then load it again
                    if _mol is None and force_reading:
                        logger.warning(
                            'Reading {} with force_reading procedure'.format(
                                mol))
                        sdf = openbabelConvert(mol, name_suffix, 'pdb')
                        #_mol = Chem.SDMolSupplier(sdf, removeHs=False)[0]

                        # fallback format is pdb now because of the broken valence error, see 1olu
                        _mol = Chem.MolFromPDBFile(sdf, removeHs=False)
                        os.remove(sdf)

                    # Reset stderr
                    # os.dup2(temp_fileno, sys.__stderr__.fileno())
                    # os.close(temp_fileno)
                    # sys.stderr = sys.__stderr__

                    # # Read RDKit warnings
                    # stderr.flush()
                    # stderr.seek(0)
                    # message = stderr.read()

                if verbose:
                    logger.warning(message)
            else:
                # assuming it is a smile
                psmile = Chem.SmilesParserParams()
                psmile.removeHs = False
                _mol = Chem.MolFromSmiles(mol, psmile)

        if remove:  # Remove temp file
            os.remove(mol)

        if _mol is None and not ignore_errors:
            if message is not None and not verbose:
                # Print it anyway if there was an error
                logger.warning(message)
            if isinstance(mol, str):
                frerr = ' Try by setting the force_reading option as True.' if not force_reading else ''
                raise ValueError(
                    f'Failed to read file {mol}.{frerr}, {message}')
            elif isinstance(mol, Molecule):
                raise ValueError('Failed converting Molecule to SmallMol')
            else:
                raise RuntimeError(f"Failed reading molecule {mol}.")

        if natoms is not None and natoms != _mol.GetNumAtoms():
            raise RuntimeError(
                "Number of atoms changed while converting to rdkit molecule")

        return _mol
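
The commented-out block above hints at the underlying trick: point file descriptor 2 at a temporary file so that warnings emitted below the Python level (e.g. by C extensions such as RDKit) can be read back afterwards. A minimal, self-contained sketch of that trick, assuming POSIX-style duplicable file descriptors:

import os
import tempfile

# Capture anything written to the C-level stderr (fd 2) in a temp file.
with tempfile.TemporaryFile() as tmp:
    saved_fd = os.dup(2)                        # remember the real stderr
    os.dup2(tmp.fileno(), 2)                    # point fd 2 at the temp file
    try:
        os.write(2, b"a low-level warning\n")   # stand-in for C library output
    finally:
        os.dup2(saved_fd, 2)                    # restore the real stderr
        os.close(saved_fd)
    tmp.seek(0)
    captured = tmp.read().decode()

print(captured)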
Example #5
    def test_to_hdf5(self):
        """Test to_hdf5."""
        with tempfile.TemporaryFile() as tmp_file:
            with h5py.File(tmp_file, "w") as file:
                self.prop.to_hdf5(file)
Example #6
def __main__():
    # FIXME: getopt & --help.
    print " ".join(sys.argv)
    branches = sys.argv[-1]
    makedistopts = sys.argv[1:-1]

    # Output from makedist.py goes here.
    outputroot = tempfile.mkdtemp()
    repodir = tempfile.mkdtemp()

    print "makedist output under: %s\ncombined repo: %s\n" % (outputroot,
                                                              repodir)
    sys.stdout.flush()
    # Add more dist/version/architecture tuples as they're supported.
    dists = (
        ("ubuntu", "10.10"),
        ("ubuntu", "10.4"),
        ("ubuntu", "9.10"),
        ("ubuntu", "9.4"),
        #("ubuntu", "8.10"),
        ("debian", "5.0"),
        ("centos", "5.4"),
        #("fedora", "12"),
        ("fedora", "13"),
        ("fedora", "14"))
    arches = ("x86", "x86_64")
    #    mongos = branches.split(',')
    # Run a makedist for each distro/version/architecture tuple above.
    winners = []
    losers = []
    winfh = tempfile.TemporaryFile()
    losefh = tempfile.TemporaryFile()
    procs = []
    count = 0
    for ((distro, distro_version), arch,
         spec) in gen([dists, arches, [branches]]):
        # FIXME: no x86 fedoras on RackSpace circa 04/10.
        if distro == "fedora" and arch == "x86":
            continue
        count += 1
        opts = makedistopts
        if distro in ["debian", "ubuntu"]:
            outputdir = "%s/deb/%s" % (outputroot, distro)
        elif distro in ["centos", "fedora", "redhat"]:
            outputdir = "%s/rpm/%s/%s/os" % (outputroot, distro,
                                             distro_version)
        else:
            raise Exception("unsupported distro %s" % distro)
            #opts += ["--subdirs"]

        procs.append(spawn(distro, distro_version, arch, spec, outputdir,
                           opts))

        if len(procs) == 8:
            wait(procs, winfh, losefh, winners, losers)

    while procs:
        wait(procs, winfh, losefh, winners, losers)

    winfh.seek(0)
    losefh.seek(0)
    nwinners = len(winners)
    nlosers = len(losers)
    print "%d winners; %d losers" % (nwinners, nlosers)
    cat(winfh, sys.stdout)
    cat(losefh, sys.stdout)
    print "%d winners; %d losers" % (nwinners, nlosers)
    if count == nwinners + nlosers:
        print "All jobs accounted for"


#        return 0
    else:
        print "Lost some jobs...?"
        return 1

    sys.stdout.flush()
    sys.stderr.flush()

    # this is sort of ridiculous, but the outputs from rpmbuild look
    # like RPM/<arch>, but the repo wants to look like
    # <arch>/RPM.
    for dist in os.listdir(outputroot + '/rpm'):
        if dist in ["centos", "fedora", "redhat"]:
            distdir = "%s/rpm/%s" % (outputroot, dist)
            rpmdirs = subprocess.Popen(
                ["find", distdir, "-type", "d", "-a", "-name", "RPMS"],
                stdout=subprocess.PIPE).communicate()[0].split('\n')[:-1]
            for rpmdir in rpmdirs:
                for arch in os.listdir(rpmdir):
                    archdir = "%s/../%s" % (rpmdir, arch)
                    os.mkdir(archdir)
                    os.rename("%s/%s" % (rpmdir, arch),
                              "%s/RPMS" % (archdir, ))
                os.rmdir(rpmdir)

    for flavor in os.listdir(outputroot):
        argv = [
            "python", "mergerepositories.py", flavor,
            "%s/%s" % (outputroot, flavor), repodir
        ]
        print "running %s" % argv
        print " ".join(argv)
        r = subprocess.Popen(argv).wait()
        if r != 0:
            raise Exception("mergerepositories.py exited %d" % r)
        print repodir
    #pushrepo(repodir)
    #shutil.rmtree(outputroot)
    #shutil.rmtree(repodir)

    return 0
Example #7
    def test_seek_too_far_real_file(self):
        # StringIO doesn't raise IOError if you seek past the start of the file.
        flo = tempfile.TemporaryFile()
        content = '1234567890'
        flo.write(content)
        self.assertEqual((content, 0), utils.last_bytes(flo, 1000))
Example #8
def run(options):
    dim_mm = None
    scale = None
    size = None
    bbox = None
    rotate = not options.norotate

    if (options.ozi and options.projection.lower() != 'epsg:3857'
            and options.projection != EPSG_3857):
        raise Exception('Ozi map file output is only supported for Web Mercator (EPSG:3857). ' +
                        'Please remove --projection.')

    if options.url:
        parse_url(options.url, options)

    # format should not be empty
    if options.fmt:
        fmt = options.fmt.lower()
    elif '.' in options.output:
        fmt = options.output.split('.')[-1].lower()
    else:
        fmt = 'png256'

    need_cairo = fmt in ['svg', 'pdf']

    # output projection
    if options.projection.isdigit():
        proj_target = mapnik.Projection('+init=epsg:{}'.format(options.projection))
    else:
        proj_target = mapnik.Projection(options.projection)
    transform = mapnik.ProjTransform(proj_lonlat, proj_target)

    # get image size in millimeters
    if options.paper:
        portrait = False
        if options.paper[0] == '-':
            portrait = True
            rotate = False
            options.paper = options.paper[1:]
        elif options.paper[0] == '+':
            rotate = False
            options.paper = options.paper[1:]
        else:
            rotate = True
        dim_mm = get_paper_size(options.paper.lower())
        if not dim_mm:
            raise Exception('Incorrect paper format: ' + options.paper)
        if portrait:
            dim_mm = [dim_mm[1], dim_mm[0]]
    elif options.size:
        dim_mm = options.size
    if dim_mm and options.margin:
        dim_mm[0] = max(0, dim_mm[0] - options.margin * 2)
        dim_mm[1] = max(0, dim_mm[1] - options.margin * 2)

    # ppi and scale factor are the same thing
    if options.ppi:
        ppmm = options.ppi / 25.4
        scale_factor = options.ppi / 90.7
    else:
        scale_factor = options.factor
        ppmm = 90.7 / 25.4 * scale_factor

    # svg / pdf can be scaled only in cairo mode
    if scale_factor != 1 and need_cairo and not HAS_CAIRO:
        logging.error('Warning: install pycairo to use --factor or --ppi')
        scale_factor = 1
        ppmm = 90.7 / 25.4

    # convert physical size to pixels
    if options.size_px:
        size = options.size_px
    elif dim_mm:
        size = [int(round(dim_mm[0] * ppmm)), int(round(dim_mm[1] * ppmm))]

    if size and size[0] + size[1] <= 0:
        raise Exception('Both dimensions are less or equal to zero')

    if options.bbox:
        bbox = options.bbox

    # scale can be specified with zoom or with 1:NNN scale
    fix_scale = False
    if options.zoom:
        scale = 2 * 3.14159 * 6378137 / 2 ** (options.zoom + 8) / scale_factor
    elif options.scale:
        scale = options.scale * 0.00028 / scale_factor
        # Now we have to divide by cos(lat), but we might not know latitude at this point
        # TODO: division should only happen for EPSG:3857 or not at all
        if options.center:
            scale = scale / math.cos(math.radians(options.center[1]))
        elif options.bbox:
            scale = scale / math.cos(math.radians((options.bbox[3] + options.bbox[1]) / 2))
        else:
            fix_scale = True

    # all calculations are in EPSG:3857 projection (it's easier)
    if bbox:
        bbox = transform.forward(mapnik.Box2d(*bbox))
        bbox_web_merc = transform_lonlat_webmerc.forward(mapnik.Box2d(*(options.bbox)))
        if scale:
            scale = correct_scale(bbox, scale, bbox_web_merc, bbox)

    # calculate bbox through center, zoom and target size
    if not bbox and options.center and size and size[0] > 0 and size[1] > 0 and scale:
        # We don't know over which latitude range the bounding box spans, so we
        # first do everything in Web Mercator.
        center = transform_lonlat_webmerc.forward(mapnik.Coord(*options.center))
        w = size[0] * scale / 2
        h = size[1] * scale / 2
        bbox_web_merc = mapnik.Box2d(center.x-w, center.y-h, center.x+w, center.y+h)
        bbox = transform_lonlat_webmerc.backward(bbox_web_merc)
        bbox = transform.forward(bbox)
        # now correct the scale
        scale = correct_scale(bbox, scale, bbox_web_merc, bbox)
        center = transform.forward(mapnik.Coord(*options.center))
        w = size[0] * scale / 2
        h = size[1] * scale / 2
        bbox = mapnik.Box2d(center.x-w, center.y-h, center.x+w, center.y+h)

    # reading style xml into memory for preprocessing
    if options.style == '-':
        style_xml = sys.stdin.read()
        style_path = ''
    else:
        with codecs.open(options.style, 'r', 'utf-8') as style_file:
            style_xml = style_file.read()
        style_path = os.path.dirname(options.style)
    if options.base:
        style_path = options.base
    if options.vars:
        style_xml = xml_vars(style_xml, options.vars)
    if options.layers or options.add_layers:
        style_xml = reenable_layers(
            style_xml, parse_layers_string(options.layers) +
            parse_layers_string(options.add_layers))

    # for layer processing we need to create the Map object
    m = mapnik.Map(100, 100)  # temporary size, will be changed before output
    mapnik.load_map_from_string(m, style_xml.encode("utf-8"), False, style_path)
    m.srs = proj_target.params()

    # register non-standard fonts
    if options.fonts:
        for f in options.fonts:
            add_fonts(f)

    # get bbox from layer extents
    if options.fit:
        bbox = layer_bbox(m, options.fit.split(','), proj_target, bbox)
        # here's where we can fix scale, no new bboxes below
        if bbox and fix_scale:
            scale = scale / math.cos(math.radians(transform.backward(bbox.center()).y))
        bbox_web_merc = transform_lonlat_webmerc.forward(transform.backward(bbox))
        if scale:
            scale = correct_scale(bbox, scale, bbox_web_merc, bbox)
        # expand bbox with padding in mm
        if bbox and options.padding and (scale or size):
            if scale:
                tscale = scale
            else:
                tscale = min((bbox.maxx - bbox.minx) / max(size[0], 0.01),
                             (bbox.maxy - bbox.miny) / max(size[1], 0.01))
            bbox.pad(options.padding * ppmm * tscale)

    # bbox should be specified by this point
    if not bbox:
        raise Exception('Bounding box was not specified in any way')

    # rotate image to fit bbox better
    if rotate and size:
        portrait = bbox.maxy - bbox.miny > bbox.maxx - bbox.minx
        # take into consideration zero values, which mean they should be calculated from bbox
        if (size[0] == 0 or size[0] > size[1]) and portrait:
            size = [size[1], size[0]]

    # calculate pixel size from bbox and scale
    if not size:
        if scale:
            size = [int(round(abs(bbox.maxx - bbox.minx) / scale)),
                    int(round(abs(bbox.maxy - bbox.miny) / scale))]
        else:
            raise Exception('Image dimensions or scale were not specified in any way')
    elif size[0] == 0:
        size[0] = int(round(size[1] * (bbox.maxx - bbox.minx) / (bbox.maxy - bbox.miny)))
    elif size[1] == 0:
        size[1] = int(round(size[0] / (bbox.maxx - bbox.minx) * (bbox.maxy - bbox.miny)))

    if options.output == '-' or (need_cairo and (options.tiles_x > 1 or options.tiles_y > 1)):
        options.tiles_x = 1
        options.tiles_y = 1
    max_img_size = max(size[0] / options.tiles_x, size[1] / options.tiles_y)
    if max_img_size > 16384:
        larger_part = 'a larger value for ' if options.tiles_x > 1 or options.tiles_y > 1 else ''
        raise Exception('Image size exceeds mapnik limit ({} > {}), use {}--tiles'.format(
           max_img_size, 16384, larger_part))

    # add / remove some layers
    if options.layers:
        filter_layers(m, parse_layers_string(options.layers))
    if options.add_layers or options.hide_layers:
        select_layers(m, parse_layers_string(options.add_layers),
                      parse_layers_string(options.hide_layers))

    logging.debug('scale=%s', scale)
    logging.debug('scale_factor=%s', scale_factor)
    logging.debug('size=%s,%s', size[0], size[1])
    logging.debug('bbox=%s', bbox)
    logging.debug('bbox_wgs84=%s', transform.backward(bbox) if bbox else None)
    logging.debug('layers=%s', ','.join([l.name for l in m.layers if l.active]))

    # export image
    m.aspect_fix_mode = mapnik.aspect_fix_mode.GROW_BBOX
    m.resize(size[0], size[1])
    m.zoom_to_box(bbox)
    logging.debug('m.envelope(): {}'.format(m.envelope()))

    outfile = options.output
    if options.output == '-':
        outfile = tempfile.TemporaryFile(mode='w+b')

    if need_cairo:
        if HAS_CAIRO:
            if fmt == 'svg':
                surface = cairo.SVGSurface(outfile, size[0], size[1])
            else:
                surface = cairo.PDFSurface(outfile, size[0], size[1])
            mapnik.render(m, surface, scale_factor, 0, 0)
            surface.finish()
        else:
            mapnik.render_to_file(m, outfile, fmt)
        write_metadata(m.envelope(), size[0], size[1], transform, options.output,
                       options.wld, options.ozi)
    else:
        if options.tiles_x == options.tiles_y == 1:
            im = mapnik.Image(size[0], size[1])
            mapnik.render(m, im, scale_factor)
            im.save(outfile, fmt)
            write_metadata(m.envelope(), size[0], size[1], transform, options.output,
                           options.wld, options.ozi)
        else:
            # we cannot make mapnik calculate the scale for us, so fix the aspect ratio ourselves
            rdiff = (bbox.maxx-bbox.minx) / (bbox.maxy-bbox.miny) - size[0] / size[1]
            if rdiff > 0:
                bbox.height((bbox.maxx - bbox.minx) * size[1] / size[0])
            elif rdiff < 0:
                bbox.width((bbox.maxy - bbox.miny) * size[0] / size[1])
            scale = (bbox.maxx - bbox.minx) / size[0]
            width = max(32, int(math.ceil(1.0 * size[0] / options.tiles_x)))
            height = max(32, int(math.ceil(1.0 * size[1] / options.tiles_y)))
            m.resize(width, height)
            m.buffer_size = TILE_BUFFER
            tile_cnt = [int(math.ceil(1.0 * size[0] / width)),
                        int(math.ceil(1.0 * size[1] / height))]
            logging.debug('tile_count=%s %s', tile_cnt[0], tile_cnt[1])
            logging.debug('tile_size=%s,%s', width, height)
            tmp_tile = '{:02d}_{:02d}_{}'
            tile_files = []
            for row in range(0, tile_cnt[1]):
                for column in range(0, tile_cnt[0]):
                    logging.debug('tile=%s,%s', row, column)
                    tile_bbox = mapnik.Box2d(
                        bbox.minx + 1.0 * width * scale * column,
                        bbox.maxy - 1.0 * height * scale * row,
                        bbox.minx + 1.0 * width * scale * (column + 1),
                        bbox.maxy - 1.0 * height * scale * (row + 1))
                    tile_size = [
                        width if column < tile_cnt[0] - 1 else size[0] - width * (tile_cnt[0] - 1),
                        height if row < tile_cnt[1] - 1 else size[1] - height * (tile_cnt[1] - 1)]
                    m.zoom_to_box(tile_bbox)
                    im = mapnik.Image(tile_size[0], tile_size[1])
                    mapnik.render(m, im, scale_factor)
                    tile_name = tmp_tile.format(row, column, options.output)
                    im.save(tile_name, fmt)
                    if options.just_tiles:
                        # write ozi/wld for a tile if needed
                        if '.' not in tile_name:
                            tile_basename = tile_name + '.'
                        else:
                            tile_basename = tile_name[0:tile_name.rindex('.')+1]
                        if options.ozi:
                            with open(tile_basename + 'ozi', 'w') as f:
                                f.write(prepare_ozi(tile_bbox, tile_size[0], tile_size[1],
                                                    tile_basename + '.ozi', transform))
                        if options.wld:
                            with open(tile_basename + 'wld', 'w') as f:
                                f.write(prepare_wld(tile_bbox, tile_size[0], tile_size[1]))
                    else:
                        tile_files.append(tile_name)
            if not options.just_tiles:
                # join tiles and remove them if joining succeeded
                import subprocess
                result = subprocess.call([
                    IM_MONTAGE, '-geometry', '+0+0', '-tile',
                    '{}x{}'.format(tile_cnt[0], tile_cnt[1])] +
                    tile_files + [options.output])
                if result == 0:
                    for tile in tile_files:
                        os.remove(tile)
                    write_metadata(bbox, size[0], size[1], transform, options.output,
                                   options.wld, options.ozi)

    if options.output == '-':
        if sys.platform == "win32":
            # fix binary output on windows
            import msvcrt
            msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)

        outfile.seek(0)
        sys.stdout.write(outfile.read())
        outfile.close()
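
The '-' output path above illustrates one more use of TemporaryFile: render into an anonymous file first, then stream the finished bytes to stdout. A stripped-down sketch of that final step; the payload is a placeholder for the rendered image.

import sys
import tempfile

with tempfile.TemporaryFile(mode="w+b") as outfile:
    outfile.write(b"...rendered image bytes...")  # stands in for mapnik output
    outfile.seek(0)
    # On Python 3, raw bytes go to sys.stdout.buffer rather than sys.stdout.
    sys.stdout.buffer.write(outfile.read())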
Example #9
        'position': 'left',
        'separator': ' | ',
        'delimiters': '()'
    }, {
        'position': 'right'
    }]
    for cust in cust_list:
        out_list = []
        for i in progressinfo(string, style='timer', custom=cust):
            time.sleep(0.02)
            out_list.append(i)
        if inp_list != out_list:
            raise Exception('Something wrong with progressinfo...')

    # write random file
    with tempfile.TemporaryFile(mode='r+') as fl:
        for i in range(1000):
            fl.write(str(i) + '\n')
        fl.flush()
        # rewind
        fl.seek(0)
        lines = []
        for line in progressinfo(fl, 1000):
            lines.append(int(line))
            time.sleep(0.01)
        if lines != list(range(1000)):
            raise Exception('Something wrong with progressinfo...')

    # test iterate on items
    with tempfile.TemporaryFile(mode='r+') as fl:
        for i in range(10):
Example #10
    def handle(self, *args, **options):  # pylint: disable=too-many-locals
        hashes = settings.REPORT_DEVICES  # PurpleRobotPayload.objects.order_by().values('user_id').distinct()

        #        start = datetime.datetime.now() - datetime.timedelta(days=120)
        start_ts = datetime.datetime(2015,
                                     11,
                                     10,
                                     0,
                                     0,
                                     0,
                                     0,
                                     tzinfo=pytz.timezone('US/Central'))
        end_ts = start_ts + datetime.timedelta(days=1)

        labels = PurpleRobotReading.objects.exclude(
            probe__startswith='edu.northwestern').values('probe').distinct()

        for user_hash in hashes:
            for label in labels:
                slug_label = slugify(label['probe'])
                payloads = PurpleRobotReading.objects.filter(
                    user_id=user_hash,
                    probe=label['probe'],
                    logged__gte=start_ts,
                    logged__lt=end_ts).order_by('logged')
                count = payloads.count()

                if count > 0:
                    temp_file = tempfile.TemporaryFile()
                    gzf = gzip.GzipFile(mode='wb', fileobj=temp_file)

                    gzf.write('User ID\tTimestamp\tValue\n')

                    index = 0

                    while index < count:
                        end = index + 100

                        if end > count:
                            end = count

                        for payload in payloads[index:end]:
                            reading_json = json.loads(payload.payload)

                            gzf.write(user_hash + '\t' +
                                      str(reading_json['TIMESTAMP']) + '\t' +
                                      reading_json['FEATURE_VALUE'] + '\n')

                        index += 100

                    gzf.flush()
                    gzf.close()

                    temp_file.seek(0)

                    report = PurpleRobotReport(generated=timezone.now(),
                                               mime_type='application/x-gzip',
                                               probe=slug_label,
                                               user_id=user_hash)
                    report.save()
                    report.report_file.save(
                        user_hash + '-' + slug_label + '.txt.gz',
                        File(temp_file))
                    report.save()
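
The report generation above follows a common pattern: compress rows into an unnamed temporary file through gzip.GzipFile(fileobj=...), rewind, and hand the file object to whatever stores it. A minimal sketch with made-up rows:

import gzip
import tempfile

temp_file = tempfile.TemporaryFile()
with gzip.GzipFile(mode='wb', fileobj=temp_file) as gzf:
    gzf.write(b'User ID\tTimestamp\tValue\n')   # header row
    gzf.write(b'abc123\t1447113600\t0.5\n')     # example data row
# Closing the GzipFile finalizes the stream but leaves temp_file open.
temp_file.seek(0)
compressed_bytes = temp_file.read()             # ready to upload or save
temp_file.close()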
Example #11
    def _get_exporter(self, **kwargs):
        self.output = tempfile.TemporaryFile()
        return MarshalItemExporter(self.output, **kwargs)
Example #12
    def _run_cmd(self, cmd):
        starttime = time.time()

        if len(self.test.extra_paths) > 0:
            self.env['PATH'] = os.pathsep.join(self.test.extra_paths + ['']) + self.env['PATH']
            if substring_is_in_list('wine', cmd):
                wine_paths = ['Z:' + p for p in self.test.extra_paths]
                wine_path = ';'.join(wine_paths)
                # Don't accidentally end with an `;` because that will add the
                # current directory and might cause unexpected behaviour
                if 'WINEPATH' in self.env:
                    self.env['WINEPATH'] = wine_path + ';' + self.env['WINEPATH']
                else:
                    self.env['WINEPATH'] = wine_path

        # If MALLOC_PERTURB_ is not set, or if it is set to an empty value,
        # (i.e., the test or the environment don't explicitly set it), set
        # it ourselves. We do this unconditionally for regular tests
        # because it is extremely useful to have.
        # Setting MALLOC_PERTURB_="0" will completely disable this feature.
        if ('MALLOC_PERTURB_' not in self.env or not self.env['MALLOC_PERTURB_']) and not self.options.benchmark:
            self.env['MALLOC_PERTURB_'] = str(random.randint(1, 255))

        stdout = None
        stderr = None
        if not self.options.verbose:
            stdout = tempfile.TemporaryFile("wb+")
            stderr = tempfile.TemporaryFile("wb+") if self.options.split else stdout
        if self.test.protocol == 'tap' and stderr is stdout:
            stdout = tempfile.TemporaryFile("wb+")

        # Let gdb handle ^C instead of us
        if self.options.gdb:
            previous_sigint_handler = signal.getsignal(signal.SIGINT)
            # Make the meson executable ignore SIGINT while gdb is running.
            signal.signal(signal.SIGINT, signal.SIG_IGN)

        def preexec_fn():
            if self.options.gdb:
                # Restore the SIGINT handler for the child process to
                # ensure it can handle it.
                signal.signal(signal.SIGINT, signal.SIG_DFL)
            else:
                # We don't want setsid() in gdb because gdb needs the
                # terminal in order to handle ^C; otherwise tcsetpgrp()
                # errors would make the terminal unusable.
                os.setsid()

        p = subprocess.Popen(cmd,
                             stdout=stdout,
                             stderr=stderr,
                             env=self.env,
                             cwd=self.test.workdir,
                             preexec_fn=preexec_fn if not is_windows() else None)
        timed_out = False
        kill_test = False
        if self.test.timeout is None:
            timeout = None
        elif self.options.timeout_multiplier is not None:
            timeout = self.test.timeout * self.options.timeout_multiplier
        else:
            timeout = self.test.timeout
        try:
            p.communicate(timeout=timeout)
        except subprocess.TimeoutExpired:
            if self.options.verbose:
                print('%s timed out (after %d seconds)' % (self.test.name, timeout))
            timed_out = True
        except KeyboardInterrupt:
            mlog.warning('CTRL-C detected while running %s' % (self.test.name))
            kill_test = True
        finally:
            if self.options.gdb:
                # Let us accept ^C again
                signal.signal(signal.SIGINT, previous_sigint_handler)

        additional_error = None

        if kill_test or timed_out:
            # Python does not provide multiplatform support for
            # killing a process and all its children so we need
            # to roll our own.
            if is_windows():
                subprocess.call(['taskkill', '/F', '/T', '/PID', str(p.pid)])
            else:
                try:
                    # Kill the process group that setsid() created.
                    os.killpg(p.pid, signal.SIGKILL)
                except ProcessLookupError:
                    # Sometimes (e.g. with Wine) this happens.
                    # There's nothing we can do (maybe the process
                    # already died) so carry on.
                    pass
            try:
                p.communicate(timeout=1)
            except subprocess.TimeoutExpired:
                # An earlier kill attempt has not worked for whatever reason.
                # Try to kill it one last time with a direct call.
                # If the process has spawned children, they will remain around.
                p.kill()
                try:
                    p.communicate(timeout=1)
                except subprocess.TimeoutExpired:
                    additional_error = b'Test process could not be killed.'
            except ValueError:
                additional_error = b'Could not read output. Maybe the process has redirected its stdout/stderr?'
        endtime = time.time()
        duration = endtime - starttime
        if additional_error is None:
            if stdout is None:
                stdo = ''
            else:
                stdout.seek(0)
                stdo = decode(stdout.read())
            if stderr is None or stderr is stdout:
                stde = ''
            else:
                stderr.seek(0)
                stde = decode(stderr.read())
        else:
            stdo = ""
            stde = additional_error
        if timed_out:
            return TestRun(self.test, TestResult.TIMEOUT, p.returncode, duration, stdo, stde, cmd)
        else:
            if self.test.protocol == 'exitcode':
                return TestRun.make_exitcode(self.test, p.returncode, duration, stdo, stde, cmd)
            else:
                if self.options.verbose:
                    print(stdo, end='')
                return TestRun.make_tap(self.test, p.returncode, duration, stdo, stde, cmd)
Example #13
    def test_transmit(self):
        import binascii
        try:
            bytes = os.urandom(1024 * 1024)
        except AttributeError:
            # must be py2.3...
            bytes = ''.join([chr(random.randint(0, 255)) for _ in range(5)])
        val = binascii.hexlify(bytes)
        val_length = len(val)
        f = tempfile.TemporaryFile()
        f.write(val)

        def runner():
            s1 = socket.socket()
            self.addr = ('localhost', random.randint(10000, 64000))
            s1.bind(self.addr)
            s1.listen(1)
            cli, addr = s1.accept()
            buf = 1
            self.request = []
            while buf:
                buf = cli.recv(1024 * 100)
                self.request.append(buf)

        th = threading.Thread(target=runner)
        th.start()
        time.sleep(0.5)
        s2 = socket.socket()
        s2.connect(self.addr)

        length = 0
        aaa = str2bytes("[AAA]")
        bbb = str2bytes("[BBB]")
        ccc = str2bytes("[CCC]")
        ddd = str2bytes("[DDD]")
        empty = str2bytes("")
        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()),
                               val_length, 0, ol, 0)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)

        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()),
                               val_length, 0, ol, 0, aaa, bbb)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)

        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()),
                               val_length, 0, ol, 0, empty, empty)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)

        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()),
                               val_length, 0, ol, 0, None, ccc)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)

        ol = pywintypes.OVERLAPPED()
        f.seek(0)
        win32file.TransmitFile(s2, win32file._get_osfhandle(f.fileno()),
                               val_length, 0, ol, 0, ddd)
        length += win32file.GetOverlappedResult(s2.fileno(), ol, 1)

        s2.close()
        th.join()
        buf = str2bytes('').join(self.request)
        self.assertEqual(length, len(buf))
        expected = val + aaa + val + bbb + val + val + ccc + ddd + val
        self.assertEqual(type(expected), type(buf))
        self.assert_(expected == buf)
Example #14
def convert_pcap_to_dataframe(input_file):
    """
    Convert a pcap file to a Pandas dataframe
    :param input_file: The path to the file, e.g. /home/user/example.pcap
    :return: The Pandas dataframe
    """
    if not os.path.exists(input_file):
        raise IOError("File " + input_file + " does not exist")

    tshark_fields = "-e frame.time_epoch " \
                    "-e _ws.col.Source " \
                    "-e _ws.col.Destination " \
                    "-e _ws.col.Protocol " \
                    "-e frame.len " \
                    "-e ip.ttl " \
                    "-e ip.flags.mf " \
                    "-e ip.frag_offset " \
                    "-e icmp.type " \
                    "-e tcp.srcport " \
                    "-e tcp.dstport " \
                    "-e udp.srcport " \
                    "-e udp.dstport " \
                    "-e dns.qry.name " \
                    "-e dns.qry.type " \
                    "-e http.request " \
                    "-e http.response " \
                    "-e http.user_agent " \
                    "-e tcp.flags.str " \
                    "-e ssl.app_data " \
                    "-e ntp.priv.reqcode "

    temporary_file = tempfile.TemporaryFile("r+b")

    # print(shutil.which(command))

    p = subprocess.Popen([
        settings.TSHARK + " -n -r \"" + input_file +
        "\" -E separator='\x03'  -E header=y -T fields " + tshark_fields
    ],
                         shell=True,
                         stdout=temporary_file)  #\x03 is ETX
    p.communicate()
    p.wait()

    # Reset file pointer to start of file
    temporary_file.seek(0)

    df = pd.read_csv(temporary_file,
                     sep="\x03",
                     low_memory=False,
                     error_bad_lines=False)

    temporary_file.close()

    if ('tcp.srcport' in df.columns) and ('udp.srcport' in df.columns) and ('tcp.dstport' in df.columns) and \
            ('udp.dstport' in df.columns):
        # Combine source and destination ports from tcp and udp
        df['srcport'] = df['tcp.srcport'].fillna(df['udp.srcport'])
        df['dstport'] = df['tcp.dstport'].fillna(df['udp.dstport'])

        df['srcport'] = df['srcport'].apply(
            lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)
        df['dstport'] = df['dstport'].apply(
            lambda x: int(x) if str(x).replace('.', '', 1).isdigit() else 0)

    # Remove columns: 'tcp.srcport', 'udp.srcport','tcp.dstport', 'udp.dstport'
    df.drop(['tcp.srcport', 'udp.srcport', 'tcp.dstport', 'udp.dstport'],
            axis=1,
            inplace=True)

    # Drop all empty columns (for making the analysis more efficient! less memory.)
    df.dropna(axis=1, how='all', inplace=True)
    df = df.fillna(0)

    if 'icmp.type' in df.columns:
        df['icmp.type'] = df['icmp.type'].astype(str)

    if 'ip.frag_offset' in df.columns:
        df['ip.frag_offset'] = df['ip.frag_offset'].astype(str)

    if 'ip.flags.mf' in df.columns:
        df['ip.flags.mf'] = df['ip.flags.mf'].astype(str)

    if ('ip.flags.mf' in df.columns) and ('ip.frag_offset' in df.columns):
        # Analyse fragmented packets
        df['fragmentation'] = (df['ip.flags.mf']
                               == '1') | (df['ip.frag_offset'] != '0')
        df.drop(['ip.flags.mf', 'ip.frag_offset'], axis=1, inplace=True)

    if 'tcp.flags.str' in df.columns:
        df['tcp.flags.str'] = df['tcp.flags.str'].str.encode("utf-8")

    df['ip.ttl'] = df['ip.ttl']
    df['tcp.flags.str'] = df['tcp.flags.str'].str.decode("utf-8")

    return df
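
The temporary file here is just a seekable buffer between a subprocess and pandas. A minimal sketch of that plumbing, with a harmless command standing in for tshark:

import subprocess
import sys
import tempfile

import pandas as pd

with tempfile.TemporaryFile("r+b") as buf:
    # Any command that prints delimited text works; Python itself stands in for tshark.
    subprocess.run([sys.executable, "-c", "print('a,b'); print('1,2')"],
                   stdout=buf, check=True)
    buf.seek(0)               # rewind so pandas reads from the start
    df = pd.read_csv(buf)

print(df)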
Example #15
def main():
    import argparse
    parser = argparse.ArgumentParser(description='X12 Validation')
    parser.add_argument('--verbose', '-v', action='count')
    parser.add_argument('--quiet', '-q', action='store_true')
    parser.add_argument('--debug', '-d', action='store_true')
    parser.add_argument('--eol',
                        '-e',
                        action='store_true',
                        help="Add eol to each segment line")
    parser.add_argument('--inplace',
                        '-i',
                        action='store_true',
                        help="Make changes to files in place")
    parser.add_argument('--fixcounting',
                        '-f',
                        action='store_true',
                        help="Try to fix counting errors")
    #parser.add_argument('--fixwhitespace', '-w', action='store_true', help="Try to fix extra whitespace errors.")
    parser.add_argument('--output',
                        '-o',
                        action='store',
                        dest="outputfile",
                        default=None,
                        help="Output filename.  Defaults to stdout")
    parser.add_argument('--version',
                        action='version',
                        version='{prog} {version}'.format(prog=parser.prog,
                                                          version=__version__))
    parser.add_argument('input_files', nargs='*')
    args = parser.parse_args()

    logger = logging.getLogger()
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    stdout_hdlr = logging.StreamHandler()
    stdout_hdlr.setFormatter(formatter)
    logger.addHandler(stdout_hdlr)
    logger.setLevel(logging.INFO)

    eol = '\n' if args.eol else ''
    for file_in in args.input_files:
        if not os.path.isfile(file_in):
            logger.error('Could not open file "%s"' % (file_in))

        # Text mode so formatted segment strings can be written directly
        fd_out = tempfile.TemporaryFile(mode='w+')
        src = pyx12.x12file.X12Reader(file_in)
        for seg_data in src:
            if args.fixcounting:
                err_codes = [(x[1]) for x in src.pop_errors()]
                if seg_data.get_seg_id() == 'IEA' and '021' in err_codes:
                    seg_data.set('IEA01', '%i' % (src.gs_count))
                elif seg_data.get_seg_id() == 'GE' and '5' in err_codes:
                    seg_data.set('GE01', '%i' % (src.st_count))
                elif seg_data.get_seg_id() == 'SE' and '4' in err_codes:
                    seg_data.set('SE01', '%i' % (src.seg_count + 1))
                elif seg_data.get_seg_id() == 'HL' and 'HL1' in err_codes:
                    seg_data.set('HL01', '%i' % (src.hl_count))
            #if args.fixwhitespace:
            #    err_codes = [(x[1]) for x in src.pop_errors()]
            #    if 'SEG1' in err_codes:
            fd_out.write(seg_data.format() + eol)
        if eol == '':
            fd_out.write('\n')

        fd_out.seek(0)
        if args.outputfile:
            with codecs.open(args.outputfile, mode='w',
                             encoding='ascii') as fd_dest:
                fd_dest.write(fd_out.read())
        else:
            if args.inplace:
                with codecs.open(file_in, mode='w',
                                 encoding='ascii') as fd_orig:
                    fd_orig.write(fd_out.read())
            else:
                sys.stdout.write(fd_out.read())
    return True
Example #16
def module_funcs(stdscr):
    "Test module-level functions"

    for func in [
            curses.baudrate, curses.beep, curses.can_change_color,
            curses.cbreak, curses.def_prog_mode, curses.doupdate,
            curses.filter, curses.flash, curses.flushinp, curses.has_colors,
            curses.has_ic, curses.has_il, curses.isendwin, curses.killchar,
            curses.longname, curses.nocbreak, curses.noecho, curses.nonl,
            curses.noqiflush, curses.noraw, curses.reset_prog_mode,
            curses.termattrs, curses.termname, curses.erasechar, curses.getsyx
    ]:
        func()

    # Functions that actually need arguments
    if curses.tigetstr("cnorm"):
        curses.curs_set(1)
    curses.delay_output(1)
    curses.echo()
    curses.echo(1)

    f = tempfile.TemporaryFile()
    stdscr.putwin(f)
    f.seek(0)
    curses.getwin(f)
    f.close()

    curses.halfdelay(1)
    curses.intrflush(1)
    curses.meta(1)
    curses.napms(100)
    curses.newpad(50, 50)
    win = curses.newwin(5, 5)
    win = curses.newwin(5, 5, 1, 1)
    curses.nl()
    curses.nl(1)
    curses.putp('abc')
    curses.qiflush()
    curses.raw()
    curses.raw(1)
    curses.setsyx(5, 5)
    curses.tigetflag('hc')
    curses.tigetnum('co')
    curses.tigetstr('cr')
    curses.tparm('cr')
    curses.typeahead(sys.__stdin__.fileno())
    curses.unctrl('a')
    curses.ungetch('a')
    curses.use_env(1)

    # Functions only available on a few platforms
    if curses.has_colors():
        curses.start_color()
        curses.init_pair(2, 1, 1)
        curses.color_content(1)
        curses.color_pair(2)
        curses.pair_content(curses.COLOR_PAIRS - 1)
        curses.pair_number(0)

        if hasattr(curses, 'use_default_colors'):
            curses.use_default_colors()

    if hasattr(curses, 'keyname'):
        curses.keyname(13)

    if hasattr(curses, 'has_key'):
        curses.has_key(13)

    if hasattr(curses, 'getmouse'):
        (availmask, oldmask) = curses.mousemask(curses.BUTTON1_PRESSED)
        # availmask == 0 indicates that mouse support is not available.
        if availmask != 0:
            curses.mouseinterval(10)
            # just verify these don't cause errors
            m = curses.getmouse()
            curses.ungetmouse(*m)

    if hasattr(curses, 'is_term_resized'):
        curses.is_term_resized(*stdscr.getmaxyx())
    if hasattr(curses, 'resizeterm'):
        curses.resizeterm(*stdscr.getmaxyx())
    if hasattr(curses, 'resize_term'):
        curses.resize_term(*stdscr.getmaxyx())
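
The putwin()/getwin() calls above show that curses windows can be serialized through any seekable file object, including an anonymous temporary file. A minimal sketch, assuming an interactive terminal is available:

import curses
import tempfile


def roundtrip(stdscr):
    """Serialize a window to a temp file and read it back with getwin()."""
    stdscr.addstr(0, 0, "saved")
    with tempfile.TemporaryFile() as f:
        stdscr.putwin(f)
        f.seek(0)
        restored = curses.getwin(f)
    return restored


# Hypothetical usage (requires a real terminal):
# curses.wrapper(roundtrip)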
Example #17
def rule_add(rule, filename, lineno, force_all_family_option, filename_path):
    '''
    Adds a rule
    '''
    # TODO Check if a rule is added correctly.
    ret = warning = error = unit_tests = 0

    if not table_list or not chain_list:
        reason = "Missing table or chain to add rule."
        print_error(reason, filename, lineno)
        return [-1, warning, error, unit_tests]

    if rule[1].strip() == "ok":
        payload_expected = None
        try:
            payload_log = open("%s.payload" % filename_path)
            payload_expected = payload_find_expected(payload_log, rule[0])
        except:
            payload_log = None

        if enable_json_option:
            try:
                json_log = open("%s.json" % filename_path)
                json_input = json_find_expected(json_log, rule[0])
            except:
                json_input = None

            if not json_input:
                print_error("did not find JSON equivalent for rule '%s'" %
                            rule[0])
            else:
                try:
                    json_input = json_dump_normalize(json_input)
                except ValueError:
                    reason = "Invalid JSON syntax in rule: %s" % json_input
                    print_error(reason)
                    return [-1, warning, error, unit_tests]

            try:
                json_log = open("%s.json.output" % filename_path)
                json_expected = json_find_expected(json_log, rule[0])
            except:
                # will use json_input for comparison
                json_expected = None

            if json_expected:
                try:
                    json_expected = json_dump_normalize(json_expected)
                except ValueError:
                    reason = "Invalid JSON syntax in expected output: %s" % json_expected
                    print_error(reason)
                    return [-1, warning, error, unit_tests]

    for table in table_list:
        if rule[1].strip() == "ok":
            table_payload_expected = None
            try:
                payload_log = open("%s.payload.%s" %
                                   (filename_path, table.family))
                table_payload_expected = payload_find_expected(
                    payload_log, rule[0])
            except:
                if not payload_log:
                    print_error("did not find any payload information",
                                filename_path)
                elif not payload_expected:
                    print_error(
                        "did not find payload information for "
                        "rule '%s'" % rule[0], payload_log.name, 1)
            if not table_payload_expected:
                table_payload_expected = payload_expected

        for table_chain in table.chains:
            chain = chain_get_by_name(table_chain)
            unit_tests += 1
            table_flush(table, filename, lineno)

            payload_log = tempfile.TemporaryFile(mode="w+")

            # Add rule and check return code
            cmd = "add rule %s %s %s" % (table, chain, rule[0])
            ret = execute_cmd(cmd,
                              filename,
                              lineno,
                              payload_log,
                              debug="netlink")

            state = rule[1].rstrip()
            if (ret in [0, 134] and state == "fail") or (ret != 0
                                                         and state == "ok"):
                if state == "fail":
                    test_state = "This rule should have failed."
                else:
                    test_state = "This rule should not have failed."
                reason = cmd + ": " + test_state
                print_error(reason, filename, lineno)
                ret = -1
                error += 1
                if not force_all_family_option:
                    return [ret, warning, error, unit_tests]

            if state == "fail" and ret != 0:
                ret = 0
                continue

            if ret != 0:
                continue

            # Check for matching payload
            if state == "ok" and not payload_check(table_payload_expected,
                                                   payload_log, cmd):
                error += 1
                gotf = open("%s.payload.got" % filename_path, 'a')
                payload_log.seek(0, 0)
                gotf.write("# %s\n" % rule[0])
                while True:
                    line = payload_log.readline()
                    if line == "":
                        break
                    gotf.write(line)
                gotf.close()
                print_warning("Wrote payload for rule %s" % rule[0], gotf.name,
                              1)

            # Check for matching ruleset listing
            numeric_proto_old = nftables.set_numeric_proto_output(True)
            stateless_old = nftables.set_stateless_output(True)
            list_cmd = 'list table %s' % table
            rc, pre_output, err = nftables.cmd(list_cmd)
            nftables.set_numeric_proto_output(numeric_proto_old)
            nftables.set_stateless_output(stateless_old)

            output = pre_output.split(";")
            if len(output) < 2:
                reason = cmd + ": Listing is broken."
                print_error(reason, filename, lineno)
                ret = -1
                error += 1
                if not force_all_family_option:
                    return [ret, warning, error, unit_tests]
                continue

            rule_output = output_clean(pre_output, chain)
            retest_output = False
            if len(rule) == 3:
                teoric_exit = rule[2]
                retest_output = True
            else:
                teoric_exit = rule[0]

            if rule_output.rstrip() != teoric_exit.rstrip():
                if rule[0].find("{") != -1:  # anonymous sets
                    if not set_check_element(teoric_exit.rstrip(),
                                             rule_output.rstrip()):
                        warning += 1
                        retest_output = True
                        print_differences_warning(filename, lineno,
                                                  teoric_exit.rstrip(),
                                                  rule_output, cmd)
                        if not force_all_family_option:
                            return [ret, warning, error, unit_tests]
                else:
                    if len(rule_output) <= 0:
                        error += 1
                        print_differences_error(filename, lineno, cmd)
                        if not force_all_family_option:
                            return [ret, warning, error, unit_tests]

                    warning += 1
                    retest_output = True
                    print_differences_warning(filename, lineno,
                                              teoric_exit.rstrip(),
                                              rule_output, cmd)

                    if not force_all_family_option:
                        return [ret, warning, error, unit_tests]

            if retest_output:
                table_flush(table, filename, lineno)

                # Add rule and check return code
                cmd = "add rule %s %s %s" % (table, chain,
                                             rule_output.rstrip())
                ret = execute_cmd(cmd,
                                  filename,
                                  lineno,
                                  payload_log,
                                  debug="netlink")

                if ret != 0:
                    test_state = "Replaying rule failed."
                    reason = cmd + ": " + test_state
                    print_warning(reason, filename, lineno)
                    ret = -1
                    error += 1
                    if not force_all_family_option:
                        return [ret, warning, error, unit_tests]
                # Check for matching payload
                elif not payload_check(table_payload_expected, payload_log,
                                       cmd):
                    error += 1

            if not enable_json_option:
                continue

            # Generate JSON equivalent for rule if not found
            if not json_input:
                json_old = nftables.set_json_output(True)
                rc, json_output, err = nftables.cmd(list_cmd)
                nftables.set_json_output(json_old)

                json_output = json.loads(json_output)
                for item in json_output["nftables"]:
                    if "rule" in item:
                        del (item["rule"]["handle"])
                        json_output = item["rule"]
                        break
                json_input = json.dumps(json_output["expr"], sort_keys=True)

                gotf = open("%s.json.got" % filename_path, 'a')
                jdump = json_dump_normalize(json_input, True)
                gotf.write("# %s\n%s\n\n" % (rule[0], jdump))
                gotf.close()
                print_warning("Wrote JSON equivalent for rule %s" % rule[0],
                              gotf.name, 1)

            table_flush(table, filename, lineno)
            payload_log = tempfile.TemporaryFile(mode="w+")

            # Add rule in JSON format
            cmd = json.dumps({
                "nftables": [{
                    "add": {
                        "rule": {
                            "family": table.family,
                            "table": table.name,
                            "chain": chain.name,
                            "expr": json.loads(json_input),
                        }
                    }
                }]
            })

            if enable_json_schema:
                json_validate(cmd)

            json_old = nftables.set_json_output(True)
            ret = execute_cmd(cmd,
                              filename,
                              lineno,
                              payload_log,
                              debug="netlink")
            nftables.set_json_output(json_old)

            if ret != 0:
                reason = "Failed to add JSON equivalent rule"
                print_error(reason, filename, lineno)
                continue

            # Check for matching payload
            if not payload_check(table_payload_expected, payload_log, cmd):
                error += 1
                gotf = open("%s.json.payload.got" % filename_path, 'a')
                payload_log.seek(0, 0)
                gotf.write("# %s\n" % rule[0])
                while True:
                    line = payload_log.readline()
                    if line == "":
                        break
                    gotf.write(line)
                gotf.close()
                print_warning("Wrote JSON payload for rule %s" % rule[0],
                              gotf.name, 1)

            # Check for matching ruleset listing
            numeric_proto_old = nftables.set_numeric_proto_output(True)
            stateless_old = nftables.set_stateless_output(True)
            json_old = nftables.set_json_output(True)
            rc, json_output, err = nftables.cmd(list_cmd)
            nftables.set_json_output(json_old)
            nftables.set_numeric_proto_output(numeric_proto_old)
            nftables.set_stateless_output(stateless_old)

            if enable_json_schema:
                json_validate(json_output)

            json_output = json.loads(json_output)
            for item in json_output["nftables"]:
                if "rule" in item:
                    del (item["rule"]["handle"])
                    json_output = item["rule"]
                    break
            json_output = json.dumps(json_output["expr"], sort_keys=True)

            if not json_expected and json_output != json_input:
                print_differences_warning(filename, lineno, json_input,
                                          json_output, cmd)
                error += 1
                gotf = open("%s.json.output.got" % filename_path, 'a')
                jdump = json_dump_normalize(json_output, True)
                gotf.write("# %s\n%s\n\n" % (rule[0], jdump))
                gotf.close()
                print_warning("Wrote JSON output for rule %s" % rule[0],
                              gotf.name, 1)
                # prevent further warnings and .got file updates
                json_expected = json_output
            elif json_expected and json_output != json_expected:
                print_differences_warning(filename, lineno, json_expected,
                                          json_output, cmd)
                error += 1

    return [ret, warning, error, unit_tests]
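
The JSON round trip above only treats two rules as equal after both sides have been re-serialized with sort_keys=True. A minimal standalone sketch of that normalization step (the normalize() helper below is hypothetical, not part of the test suite):

import json

def normalize(obj_or_text):
    # Canonical JSON string so that key order does not affect the comparison.
    if isinstance(obj_or_text, str):
        obj_or_text = json.loads(obj_or_text)
    return json.dumps(obj_or_text, sort_keys=True)

assert normalize('{"b": 1, "a": 2}') == normalize({"a": 2, "b": 1})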
Example #18
0
 def setUp(self):
     self._pickle_here = tempfile.NamedTemporaryFile(mode='a+b')
     self._out_here = tempfile.TemporaryFile(mode='a+b')
     _write_pickles_native_objs(self._pickle_here)
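
This fixture mixes NamedTemporaryFile (which exposes a real path via .name while it is open) with the anonymous TemporaryFile. A short sketch of the practical difference, assuming only documented standard-library behaviour (on POSIX a TemporaryFile may have no directory entry at all):

import tempfile

with tempfile.NamedTemporaryFile(mode='a+b') as named:
    print(named.name)      # usable filesystem path while the file is open

with tempfile.TemporaryFile(mode='a+b') as anon:
    anon.write(b'data')    # fully usable as a file object...
    anon.seek(0)
    print(anon.read())     # ...but there may be no path to hand to other tools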
Example #19
0
    def _launch(self, nodes):
        self.logger.debug("WorkerTree._launch on %s (fanout=%d)", nodes,
                          self.task.info("fanout"))

        # Prepare copy params if source is defined
        destdir = None
        if self.source:
            if self.reverse:
                self.logger.debug("rcopy source=%s, dest=%s", self.source,
                                  self.dest)
                # dest is a directory
                destdir = self.dest
            else:
                self.logger.debug("copy source=%s, dest=%s", self.source,
                                  self.dest)
                # Special processing to determine best arcname and destdir for
                # tar. The only case that we don't support is when source is a
                # file and dest is a dir without a finishing / (in that case we
                # cannot determine remotely whether it is a file or a
                # directory).
                if isfile(self.source):
                    # dest is not normalized here
                    arcname = basename(self.dest) or \
                              basename(normpath(self.source))
                    destdir = dirname(self.dest)
                else:
                    # source is a directory: if dest has a trailing slash
                    # like in /tmp/ then arcname is basename(source)
                    # but if dest is /tmp/newname (without trailing slash) then
                    # arcname becomes newname.
                    if self.dest[-1] == '/':
                        arcname = basename(self.source)
                    else:
                        arcname = basename(self.dest)
                    # dirname() does not behave the same way when a trailing
                    # slash is present, and we rely on that.
                    destdir = dirname(self.dest)
                self.logger.debug("copy arcname=%s destdir=%s", arcname,
                                  destdir)

        # And launch stuffs
        next_hops = self._distribute(self.task.info("fanout"), nodes.copy())
        self.logger.debug("next_hops=%s" % [(str(n), str(v))
                                            for n, v in next_hops])
        for gw, targets in next_hops:
            if gw == targets:
                self.logger.debug(
                    'task.shell cmd=%s source=%s nodes=%s '
                    'timeout=%s remote=%s', self.command, self.source, nodes,
                    self.timeout, self.remote)
                self._child_count += 1
                self._target_count += len(targets)
                if self.remote:
                    if self.source:
                        # Note: specific case where targets are not in topology
                        # as self.source is never used on remote gateways
                        # so we try a direct copy/rcopy:
                        self.logger.debug(
                            '_launch copy r=%s source=%s dest=%s',
                            self.reverse, self.source, self.dest)
                        worker = self.task.copy(self.source,
                                                self.dest,
                                                targets,
                                                handler=self.metahandler,
                                                stderr=self.stderr,
                                                timeout=self.timeout,
                                                preserve=self.preserve,
                                                reverse=self.reverse,
                                                tree=False)
                    else:
                        worker = self.task.shell(self.command,
                                                 nodes=targets,
                                                 timeout=self.timeout,
                                                 handler=self.metahandler,
                                                 stderr=self.stderr,
                                                 tree=False)
                else:
                    assert self.source is None
                    worker = ExecWorker(nodes=targets,
                                        command=self.command,
                                        handler=self.metahandler,
                                        timeout=self.timeout,
                                        stderr=self.stderr)
                    self.task.schedule(worker)

                self.workers.append(worker)
                self.logger.debug("added child worker %s count=%d", worker,
                                  len(self.workers))
            else:
                self.logger.debug("trying gateway %s to reach %s", gw, targets)
                if self.source:
                    self._copy_remote(self.source, destdir, targets, gw,
                                      self.timeout, self.reverse)
                else:
                    self._execute_remote(self.command, targets, gw,
                                         self.timeout)

        # Copy mode: send tar data after above workers have been initialized
        if self.source and not self.reverse:
            try:
                # create temporary tar file with all source files
                tmptar = tempfile.TemporaryFile()
                tar = tarfile.open(fileobj=tmptar, mode='w:')
                tar.add(self.source, arcname=arcname)
                tar.close()
                tmptar.flush()
                # read generated tar file
                tmptar.seek(0)
                rbuf = tmptar.read(32768)
                # send tar data to remote targets only
                while len(rbuf) > 0:
                    self._write_remote(rbuf)
                    rbuf = tmptar.read(32768)
            except OSError as exc:
                raise WorkerError(exc)
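
The copy branch above builds the tar archive inside a TemporaryFile and then streams it out in 32768-byte chunks. A reduced sketch of that idiom, where send() is a hypothetical stand-in for self._write_remote():

import tarfile
import tempfile

def stream_tar(source_path, arcname, send, bufsize=32768):
    with tempfile.TemporaryFile() as tmptar:
        with tarfile.open(fileobj=tmptar, mode='w:') as tar:
            tar.add(source_path, arcname=arcname)
        tmptar.flush()
        tmptar.seek(0)           # rewind before reading the archive back
        chunk = tmptar.read(bufsize)
        while chunk:
            send(chunk)
            chunk = tmptar.read(bufsize)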
Example #20
0
    def _mountShare(self, sMountPoint, sType, sServer, sShare, sUser,
                    sPassword, sMountOpt, sWhat):
        """
        Mounts the specified share if needed.
        Raises exception on failure.
        """
        # Only mount if the type is specified.
        if sType is None:
            return True

        # Test if already mounted.
        sTestFile = os.path.join(sMountPoint + os.path.sep,
                                 sShare + '-new.txt')
        if os.path.isfile(sTestFile):
            return True

        #
        # Platform specific mount code.
        #
        sHostOs = utils.getHostOs()
        if sHostOs in ('darwin', 'freebsd'):
            if sMountOpt != '':
                sMountOpt = ',' + sMountOpt
            utils.sudoProcessCall(['/sbin/umount', sMountPoint])
            utils.sudoProcessCall(['/bin/mkdir', '-p', sMountPoint])
            utils.sudoProcessCall(
                ['/usr/sbin/chown',
                 str(os.getuid()), sMountPoint])
            # pylint: disable=E1101
            if sType == 'cifs':
                # Note! no smb://server/share stuff here, 10.6.8 didn't like it.
                utils.processOutputChecked([
                    '/sbin/mount_smbfs', '-o',
                    'automounted,nostreams,soft,noowners,noatime,rdonly' +
                    sMountOpt, '-f', '0555', '-d', '0555',
                    '//%s:%s@%s/%s' % (sUser, sPassword, sServer, sShare),
                    sMountPoint
                ])
            else:
                raise TestBoxScriptException('Unsupported server type %s.' %
                                             (sType, ))

        elif sHostOs == 'linux':
            if sMountOpt != '':
                sMountOpt = ',' + sMountOpt
            utils.sudoProcessCall(['/bin/umount', sMountPoint])
            utils.sudoProcessCall(['/bin/mkdir', '-p', sMountPoint])
            if sType == 'cifs':
                utils.sudoProcessOutputChecked([
                    '/bin/mount',
                    '-t',
                    'cifs',
                    '-o',
                    'user=' + sUser + ',password=' + sPassword
                    + ',sec=ntlmv2' + ',uid=' + str(os.getuid())  # pylint: disable=E1101
                    + ',gid=' + str(os.getgid())  # pylint: disable=E1101
                    + ',nounix,file_mode=0555,dir_mode=0555,soft,ro' +
                    sMountOpt,
                    '//%s/%s' % (sServer, sShare),
                    sMountPoint
                ])
            elif sType == 'nfs':
                utils.sudoProcessOutputChecked([
                    '/bin/mount', '-t', 'nfs', '-o', 'soft,ro' + sMountOpt,
                    '%s:%s' % (sServer, sShare if sShare.find('/') >= 0 else
                               ('/export/' + sShare)), sMountPoint
                ])

            else:
                raise TestBoxScriptException('Unsupported server type %s.' %
                                             (sType, ))

        elif sHostOs == 'solaris':
            if sMountOpt != '':
                sMountOpt = ',' + sMountOpt
            utils.sudoProcessCall(['/sbin/umount', sMountPoint])
            utils.sudoProcessCall(['/bin/mkdir', '-p', sMountPoint])
            if sType == 'cifs':
                ## @todo This stuff doesn't work on wei01-x4600b.de.oracle.com running 11.1. FIXME!
                oPasswdFile = tempfile.TemporaryFile()
                oPasswdFile.write(sPassword + '\n')
                oPasswdFile.flush()
                oPasswdFile.seek(0)  # rewind so the mount command reads the password from the start
                utils.sudoProcessOutputChecked(
                    [
                        '/sbin/mount',
                        '-F',
                        'smbfs',
                        '-o',
                        'user=' + sUser
                        + ',uid=' + str(os.getuid())  # pylint: disable=E1101
                        + ',gid=' + str(os.getgid())  # pylint: disable=E1101
                        + ',fileperms=0555,dirperms=0555,noxattr,ro' +
                        sMountOpt,
                        '//%s/%s' % (sServer, sShare),
                        sMountPoint
                    ],
                    stdin=oPasswdFile)
                oPasswdFile.close()
            elif sType == 'nfs':
                utils.sudoProcessOutputChecked([
                    '/sbin/mount', '-F', 'nfs', '-o', 'noxattr,ro' + sMountOpt,
                    '%s:%s' % (sServer, sShare if sShare.find('/') >= 0 else
                               ('/export/' + sShare)), sMountPoint
                ])

            else:
                raise TestBoxScriptException('Unsupported server type %s.' %
                                             (sType, ))

        elif sHostOs == 'win':
            if sType != 'cifs':
                raise TestBoxScriptException(
                    'Only CIFS mounts are supported on Windows.')
            utils.processCall(['net', 'use', sMountPoint, '/d'])
            utils.processOutputChecked([
                'net',
                'use',
                sMountPoint,
                '\\\\' + sServer + '\\' + sShare,
                sPassword,
                '/USER:' + sUser,
            ])
        else:
            raise TestBoxScriptException('Unsupported host %s' % (sHostOs, ))

        #
        # Re-test.
        #
        if not os.path.isfile(sTestFile):
            raise TestBoxScriptException(
                'Failed to mount %s (%s[%s]) at %s: %s not found' %
                (sWhat, sServer, sShare, sMountPoint, sTestFile))

        return True
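
The Solaris CIFS branch feeds the password to mount through a temporary file bound to the child's stdin. A minimal sketch of that pattern with a placeholder command ('cat' stands in for the real mount invocation):

import subprocess
import tempfile

with tempfile.TemporaryFile() as secret:
    secret.write(b'hunter2\n')
    secret.flush()
    secret.seek(0)                       # the child reads from the beginning
    out = subprocess.check_output(['cat'], stdin=secret)
    print(out)                           # b'hunter2\n'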
Example #21
0
 def test_dataset_download(self):
     with tempfile.TemporaryFile() as f:
         self.ds.download(f)
         f.seek(0)
         self.assertEqual(six.b(FOO_DATA), f.read())
Example #22
0
def _server_info(server_val, get_defaults=False, options=None):
    """Show information about a running server

    This method gathers information from a running server. This information is
    returned as a tuple to be displayed to the user in a format specified. The
    information returned includes the following:

    * server connection information
    * version number of the server
    * data directory path
    * base directory path
    * plugin directory path
    * configuration file location and name
    * current binary log file
    * current binary log position
    * current relay log file
    * current relay log position

    server_val[in]    the server connection values or a connected server
    get_defaults[in]  if True, get the default settings for the server
    options[in]       options for connecting to the server

    Return tuple - information about server
    """
    if options is None:
        options = {}
    # Parse source connection values
    source_values = parse_connection(server_val, None, options)

    # Connect to the server
    conn_options = {
        'version': "5.1.30",
    }
    servers = connect_servers(source_values, None, conn_options)
    server = servers[0]

    params_dict = defaultdict(str)

    # Initialize list of warnings
    params_dict['warnings'] = []

    # Identify server by string: 'host:port[:socket]'.
    server_id = "{0}:{1}".format(source_values['host'], source_values['port'])
    if source_values.get('socket', None):
        server_id = "{0}:{1}".format(server_id, source_values.get('socket'))
    params_dict['server'] = server_id

    # Get _SERVER_VARIABLES values from the server
    for server_var in _SERVER_VARIABLES:
        res = server.show_server_variable(server_var)
        if res:
            params_dict[server_var] = res[0][1]
        else:
            raise UtilError("Unable to determine {0} of server '{1}'"
                            ".".format(server_var, server_id))

    # Verify if the server is a local server.
    server_is_local = server.is_alias('localhost')

    # Get _LOG_FILES_VARIABLES values from the server
    for msg, log_tpl in _LOG_FILES_VARIABLES.iteritems():
        res = server.show_server_variable(log_tpl.log_name)
        if res:
            # Check if log is turned off
            params_dict[log_tpl.log_name] = res[0][1]
            # If logs are turned off, skip checking information about the file
            if res[0][1] in ('', 'OFF'):
                continue

            # Logging is enabled, so we can get information about log_file
            # unless it is log_error because in that case we already have it.
            if log_tpl.log_file is not None:  # if it is not log_error
                log_file = server.show_server_variable(log_tpl.log_file)[0][1]
                params_dict[log_tpl.log_file] = log_file
            else:  # log error, so log_file_name is already on params_dict
                log_file = params_dict[log_tpl.log_name]

            # Size can only be obtained from the files of a local server.
            if not server_is_local:
                params_dict[log_tpl.log_file_size] = 'UNAVAILABLE'
                # Show warning about the log size being unavailable.
                params_dict['warnings'].append("Unable to get information "
                                               "regarding variable '{0}' "
                                               "from a remote server."
                                               "".format(msg))
            # If log file is stderr, we cannot get the correct size.
            elif log_file in ["stderr", "stdout"]:
                params_dict[log_tpl.log_file_size] = 'UNKNOWN'
                # Show warning about unknown log size.
                params_dict['warnings'].append("Unable to get size information"
                                               " from '{0}' for '{1}'."
                                               "".format(log_file, msg))
            else:
                # Now get the information about the size of the logs
                try:
                    # log_file might be a relative path, in which case we need
                    # to prepend the datadir path to it
                    if not os.path.isabs(log_file):
                        log_file = os.path.join(params_dict['datadir'],
                                                log_file)
                    params_dict[log_tpl.log_file_size] = "{0} bytes".format(
                        os.path.getsize(log_file))
                except os.error:
                    # if we are unable to get the log_file_size
                    params_dict[log_tpl.log_file_size] = ''
                    warning_msg = _WARNING_TEMPLATE.format(msg, log_file)
                    params_dict['warnings'].append(warning_msg)

        else:
            params_dict['warnings'].append(
                "Unable to get information "
                "regarding variable '{0}'".format(msg))

    # if audit_log plugin is installed and enabled
    if server.supports_plugin('audit'):
        res = server.show_server_variable('audit_log_file')
        if res:
            # Audit_log variable might be a relative path to the datadir,
            # so it needs to be treated accordingly
            if not os.path.isabs(res[0][1]):
                params_dict['audit_log_file'] = os.path.join(
                    params_dict['datadir'], res[0][1])
            else:
                params_dict['audit_log_file'] = res[0][1]

            # Add audit_log field to the _COLUMNS List unless it is already
            # there
            if 'audit_log_file' not in _COLUMNS_SET:
                _COLUMNS.append('audit_log_file')
                _COLUMNS.append('audit_log_file_size')
                _COLUMNS_SET.add('audit_log_file')
            try:
                params_dict['audit_log_file_size'] = "{0} bytes".format(
                    os.path.getsize(params_dict['audit_log_file']))

            except os.error:
                # If we are unable to get the size of the audit_log_file
                params_dict['audit_log_file_size'] = ''
                warning_msg = _WARNING_TEMPLATE.format(
                    "audit log", params_dict['audit_log_file'])
                params_dict['warnings'].append(warning_msg)

    # Build search path for config files
    if os.name == "posix":
        my_def_search = [
            "/etc/my.cnf", "/etc/mysql/my.cnf",
            os.path.join(params_dict['basedir'], "my.cnf"), "~/.my.cnf"
        ]
    else:
        my_def_search = [
            r"c:\windows\my.ini", r"c:\my.ini", r"c:\my.cnf",
            os.path.join(os.curdir, "my.ini")
        ]
    my_def_search.append(os.path.join(os.curdir, "my.cnf"))

    # Get server's default configuration values.
    defaults = []
    if get_defaults:
        # Can only get defaults for local servers (need to access local data).
        if server_is_local:
            try:
                my_def_path = get_tool_path(params_dict['basedir'],
                                            "my_print_defaults",
                                            quote=True)
            except UtilError as err:
                raise UtilError("Unable to retrieve the defaults data "
                                "(requires access to my_print_defaults): {0} "
                                "(basedir: {1})".format(
                                    err.errmsg, params_dict['basedir']))
            out_file = tempfile.TemporaryFile()
            # Execute tool: <basedir>/my_print_defaults mysqld
            cmd_list = shlex.split(my_def_path)
            cmd_list.append("mysqld")
            subprocess.call(cmd_list, stdout=out_file)
            out_file.seek(0)
            # Get defaults data from temp output file.
            defaults.append("\nDefaults for server {0}".format(server_id))
            for line in out_file.readlines():
                defaults.append(line.rstrip())
        else:
            # Remote server; Cannot get the defaults data.
            defaults.append("\nWARNING: The utility can not get defaults from "
                            "a remote host.")

    # Find config file
    config_file = ""
    for search_path in my_def_search:
        if os.path.exists(search_path):
            if len(config_file) > 0:
                config_file = "{0}, {1}".format(config_file, search_path)
            else:
                config_file = search_path
    params_dict['config_file'] = config_file

    # Find binary log, relay log
    params_dict['binary_log'], params_dict['binary_log_pos'] = _get_binlog(
        server)
    params_dict['relay_log'], params_dict['relay_log_pos'] = _get_relay_log(
        server)

    server.disconnect()

    return params_dict, defaults
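
The defaults section above captures the output of my_print_defaults by pointing the subprocess's stdout at a TemporaryFile and rewinding it afterwards. A reduced sketch of that capture idiom with a generic command:

import subprocess
import tempfile

with tempfile.TemporaryFile() as out_file:
    subprocess.call(['echo', 'defaults-for-server'], stdout=out_file)
    out_file.seek(0)
    captured = [line.rstrip() for line in out_file.readlines()]
    print(captured)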
Example #23
0
    print(idf.info(sort_by_group=False, detailed=True))

if True:
    print(zone.info(detailed=False))

# check to_str
if True:
    zone.head_comment = "Hello\n\n\nhello!!"
    zone.tail_comment = "Here is my tail comment\nwritten on several lines..."
    zone.field_comment(
        0,
        zone.field_comment(0) + " **modified with\nline\nbreaks**")
    print(zone.to_str(style="console"))  # idf, console

# check save_as
if True:
    # modify idf comment
    idf.comment = "I HAVE MODIFIED THE COMMENTS\n2 blank lines follow\n\n" + idf.comment

    # modify building
    building.head_comment = "MY BUILDING HEAD COMMENT"
    building.field_comment(
        "terrain",
        building.field_comment("terrain") + " WITH MY NEW COMMENT")
    building.tail_comment = idf.comment

    f = tempfile.TemporaryFile("r+")
    idf.save_as(f)
    f.seek(0)
    print(f.read())
Example #24
0
def IssueCommand(cmd, force_info_log=False, suppress_warning=False,
                 env=None, timeout=DEFAULT_TIMEOUT, cwd=None,
                 raise_on_failure=True,
                 raise_on_timeout=True):
  """Tries running the provided command once.

  Args:
    cmd: A list of strings such as is given to the subprocess.Popen()
        constructor.
    force_info_log: A boolean indicating whether the command result should
        always be logged at the info level. Command results will always be
        logged at the debug level if they aren't logged at another level.
    suppress_warning: A boolean indicating whether the results should
        not be logged at the info level in the event of a non-zero
        return code. When force_info_log is True, the output is logged
        regardless of suppress_warning's value.
    env: A dict of key/value strings, such as is given to the subprocess.Popen()
        constructor, that contains environment variables to be injected.
    timeout: Timeout for the command in seconds. If the command has not finished
        before the timeout is reached, it will be killed. Set timeout to None to
        let the command run indefinitely. If the subprocess is killed, the
        return code will indicate an error, and stdout and stderr will
        contain what had already been written to them before the process was
        killed.
    cwd: Directory in which to execute the command.
    raise_on_failure: A boolean indicating if non-zero return codes should raise
        IssueCommandError.
    raise_on_timeout: A boolean indicating if killing the process due to the
        timeout being hit should raise an IssueCommandTimeoutError.

  Returns:
    A tuple of stdout, stderr, and retcode from running the provided command.

  Raises:
    IssueCommandError: When raise_on_failure=True and retcode is non-zero.
    IssueCommandTimeoutError:  When raise_on_timeout=True and
                               command duration exceeds timeout
  """
  if env:
    logging.debug('Environment variables: %s', env)

  full_cmd = ' '.join(cmd)
  logging.info('Running: %s', full_cmd)

  time_file_path = '/usr/bin/time'

  running_on_windows = RunningOnWindows()
  running_on_darwin = RunningOnDarwin()
  should_time = (not (running_on_windows or running_on_darwin) and
                 os.path.isfile(time_file_path) and FLAGS.time_commands)
  shell_value = running_on_windows
  with tempfile.TemporaryFile() as tf_out, \
      tempfile.TemporaryFile() as tf_err, \
      tempfile.NamedTemporaryFile(mode='r') as tf_timing:

    cmd_to_use = cmd
    if should_time:
      cmd_to_use = [time_file_path,
                    '-o', tf_timing.name,
                    '--quiet',
                    '-f', ',  WallTime:%Es,  CPU:%Us,  MaxMemory:%Mkb '] + cmd

    process = subprocess.Popen(cmd_to_use, env=env, shell=shell_value,
                               stdin=subprocess.PIPE, stdout=tf_out,
                               stderr=tf_err, cwd=cwd)

    did_timeout = _BoxedObject(False)
    was_killed = _BoxedObject(False)

    def _KillProcess():
      did_timeout.value = True
      if not raise_on_timeout:
        logging.warning('IssueCommand timed out after %d seconds. '
                        'Killing command "%s".', timeout, full_cmd)
      process.kill()
      was_killed.value = True

    timer = threading.Timer(timeout, _KillProcess)
    timer.start()

    try:
      process.wait()
    finally:
      timer.cancel()

    tf_out.seek(0)
    stdout = tf_out.read().decode('ascii', 'ignore')
    tf_err.seek(0)
    stderr = tf_err.read().decode('ascii', 'ignore')

    timing_output = ''
    if should_time:
      timing_output = tf_timing.read().rstrip('\n')

  debug_text = ('Ran: {%s}\nReturnCode:%s%s\nSTDOUT: %s\nSTDERR: %s' %
                (full_cmd, process.returncode, timing_output, stdout, stderr))
  if force_info_log or (process.returncode and not suppress_warning):
    logging.info(debug_text)
  else:
    logging.debug(debug_text)

  # Raise timeout error regardless of raise_on_failure - as the intended
  # semantics is to ignore expected errors caused by invoking the command
  # not errors from PKB infrastructure.
  if did_timeout.value and raise_on_timeout:
    debug_text = (
        '{0}\nIssueCommand timed out after {1} seconds.  '
        '{2} by perfkitbenchmarker.'.format(
            debug_text, timeout,
            'Process was killed' if was_killed.value else
            'Process may have been killed'))
    raise errors.VmUtil.IssueCommandTimeoutError(debug_text)
  elif process.returncode and raise_on_failure:
    raise errors.VmUtil.IssueCommandError(debug_text)

  return stdout, stderr, process.returncode
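
IssueCommand redirects stdout and stderr into temporary files and enforces the timeout with a threading.Timer that kills the child. A trimmed sketch of that combination (logging and the /usr/bin/time wrapping stripped out):

import subprocess
import tempfile
import threading

def run_with_timeout(cmd, timeout):
    with tempfile.TemporaryFile() as tf_out, tempfile.TemporaryFile() as tf_err:
        proc = subprocess.Popen(cmd, stdout=tf_out, stderr=tf_err)
        timer = threading.Timer(timeout, proc.kill)
        timer.start()
        try:
            proc.wait()
        finally:
            timer.cancel()
        tf_out.seek(0)
        tf_err.seek(0)
        return tf_out.read(), tf_err.read(), proc.returncode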
Example #25
0
    async def post(self):
        """Return POST parameters."""
        if self._post is not None:
            return self._post
        if self._method not in self.POST_METHODS:
            self._post = MultiDictProxy(MultiDict())
            return self._post

        content_type = self.content_type
        if (content_type not in ('', 'application/x-www-form-urlencoded',
                                 'multipart/form-data')):
            self._post = MultiDictProxy(MultiDict())
            return self._post

        out = MultiDict()

        if content_type == 'multipart/form-data':
            multipart = await self.multipart()

            field = await multipart.next()
            while field is not None:
                size = 0
                max_size = self._client_max_size
                content_type = field.headers.get(hdrs.CONTENT_TYPE)

                if field.filename:
                    # store file in temp file
                    tmp = tempfile.TemporaryFile()
                    chunk = await field.read_chunk(size=2**16)
                    while chunk:
                        chunk = field.decode(chunk)
                        tmp.write(chunk)
                        size += len(chunk)
                        if 0 < max_size < size:
                            raise ValueError(
                                'Maximum request body size exceeded')
                        chunk = await field.read_chunk(size=2**16)
                    tmp.seek(0)

                    ff = FileField(field.name, field.filename, tmp,
                                   content_type, field.headers)
                    out.add(field.name, ff)
                else:
                    value = await field.read(decode=True)
                    if content_type is None or \
                            content_type.startswith('text/'):
                        charset = field.get_charset(default='utf-8')
                        value = value.decode(charset)
                    out.add(field.name, value)
                    size += len(value)
                    if 0 < max_size < size:
                        raise ValueError('Maximum request body size exceeded')

                field = await multipart.next()
        else:
            data = await self.read()
            if data:
                charset = self.charset or 'utf-8'
                out.extend(
                    parse_qsl(data.rstrip().decode(charset),
                              keep_blank_values=True,
                              encoding=charset))

        self._post = MultiDictProxy(out)
        return self._post
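
Each uploaded file is spooled into a TemporaryFile while the running size is checked against the configured limit. A synchronous sketch of that spool-with-cap idea; chunks here is a plain iterable, not the aiohttp field API:

import tempfile

def spool_upload(chunks, max_size):
    tmp = tempfile.TemporaryFile()
    size = 0
    for chunk in chunks:
        size += len(chunk)
        if 0 < max_size < size:
            tmp.close()
            raise ValueError('Maximum request body size exceeded')
        tmp.write(chunk)
    tmp.seek(0)
    return tmp

spooled = spool_upload([b'abc', b'def'], max_size=1024)
print(spooled.read())  # b'abcdef'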
Example #26
0
def test_guess_filename_with_tempfile():
    with tempfile.TemporaryFile() as fp:
        assert (helpers.guess_filename(fp, 'no-throw') is not None)
Example #27
0
def exp_dump(db_name, format):
    with tempfile.TemporaryFile(mode='w+b') as t:
        dump_db(db_name, t, format)
        t.seek(0)
        return base64.b64encode(t.read()).decode()
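
exp_dump() streams the database dump through a temporary file and returns it base64-encoded. A hypothetical inverse helper (not part of the original module) would decode the payload back into a seekable temporary file before restoring:

import base64
import tempfile

def dump_to_tempfile(b64_dump):
    # Hypothetical counterpart to exp_dump(): rebuild a seekable dump file.
    t = tempfile.TemporaryFile(mode='w+b')
    t.write(base64.b64decode(b64_dump))
    t.seek(0)
    return t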
Example #28
0
            if expectedversion.startswith('Version: '):
                return expectedversion.split()[1]
            else:
                return [x.split()[1] for x in datas if x.startswith("Version: ")][0]
        except:
            pass

    @classmethod
    def get_requirements(cls, file='requirements.txt'):
        from pip.req import parse_requirements
        return list(parse_requirements(file))


# Setup initial loggers

tmpfile = tempfile.TemporaryFile('w+', encoding='utf8')
log = logging.getLogger('launcher')
log.setLevel(logging.DEBUG)

sh = logging.StreamHandler(stream=sys.stdout)
sh.setFormatter(logging.Formatter(
    fmt="[%(levelname)s] %(name)s: %(message)s"
))

sh.setLevel(logging.INFO)
log.addHandler(sh)

tfh = logging.StreamHandler(stream=tmpfile)
tfh.setFormatter(logging.Formatter(
    fmt="[%(relativeCreated).9f] %(asctime)s - %(levelname)s - %(name)s: %(message)s"
))
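
Everything the launcher logs is also buffered in the TemporaryFile behind tfh. A sketch of a hypothetical helper that persists that buffer once a destination path is known (the buffer must be rewound before it is read back):

def persist_log_buffer(buffer_file, dest_path):
    buffer_file.flush()
    buffer_file.seek(0)
    with open(dest_path, 'w', encoding='utf8') as dest:
        dest.write(buffer_file.read())

# e.g. persist_log_buffer(tmpfile, 'launcher.log')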
Example #29
0
        raise WindowsError, 'process timeout exceeded'
    return child.exitCode()


if __name__ == '__main__':

    # Pipe commands to a shell and display the output in notepad
    print 'Testing winprocess.py...'

    import tempfile

    timeoutSeconds = 15
    cmdString = """\
REM      Test of winprocess.py piping commands to a shell.\r
REM      This window will close in %d seconds.\r
vol\r
net user\r
_this_is_a_test_of_stderr_\r
""" % timeoutSeconds

    cmd, out = tempfile.TemporaryFile(), tempfile.TemporaryFile()
    cmd.write(cmdString)
    cmd.seek(0)
    print 'CMD.EXE exit code:', run('cmd.exe', show=0, stdin=cmd,
                                    stdout=out, stderr=out)
    cmd.close()
    print 'NOTEPAD exit code:', run('notepad.exe %s' % out.file.name,
                                    show=win32con.SW_MAXIMIZE,
                                    mSec=timeoutSeconds*1000)
    out.close()
Example #30
0
def fastq_strand(argv, working_dir=None):
    """
    Driver for fastq_strand

    Generate strandedness statistics for single FASTQ or
    FASTQ pair, by running STAR using one or more genome
    indexes
    """
    # Process command line
    p = argparse.ArgumentParser(
        description="Generate strandedness statistics "
        "for FASTQ or FASTQpair, by running STAR using "
        "one or more genome indexes")
    p.add_argument('--version', action='version', version=__version__)
    p.add_argument("r1", metavar="READ1", default=None, help="R1 Fastq file")
    p.add_argument("r2",
                   metavar="READ2",
                   default=None,
                   nargs="?",
                   help="R2 Fastq file")
    p.add_argument("-g",
                   "--genome",
                   dest="star_genomedirs",
                   metavar="GENOMEDIR",
                   default=None,
                   action="append",
                   help="path to directory with STAR index "
                   "for genome to use (use as an alternative "
                   "to -c/--conf; can be specified multiple "
                   "times to include additional genomes)")
    p.add_argument("--subset",
                   type=int,
                   default=10000,
                   help="use a random subset of read pairs "
                   "from the input Fastqs; set to zero to "
                   "use all reads (default: 10000)")
    p.add_argument("-o",
                   "--outdir",
                   default=None,
                   help="specify directory to write final "
                   "outputs to (default: current directory)")
    p.add_argument("-c",
                   "--conf",
                   metavar="FILE",
                   default=None,
                   help="specify delimited 'conf' file with "
                   "list of NAME and STAR index directory "
                   "pairs. NB if a conf file is supplied "
                   "then any indices specifed on the command "
                   "line will be ignored")
    p.add_argument("-n",
                   type=int,
                   default=1,
                   help="number of threads to run STAR with "
                   "(default: 1)")
    p.add_argument("--counts",
                   action="store_true",
                   help="include the count sums for "
                   "unstranded, 1st read strand aligned and "
                   "2nd read strand aligned in the output "
                   "file (default: only include percentages)")
    p.add_argument("--keep-star-output",
                   action="store_true",
                   help="keep the output from STAR (default: "
                   "delete outputs on completion)")
    args = p.parse_args(argv)
    # Print parameters
    print("READ1\t: %s" % args.r1)
    print("READ2\t: %s" % args.r2)
    # Check that STAR is on the path
    star_exe = find_program("STAR")
    if star_exe is None:
        logging.critical("STAR not found")
        return 1
    print("STAR\t: %s" % star_exe)
    # Gather genome indices
    genome_names = {}
    if args.conf is not None:
        print("Conf file\t: %s" % args.conf)
        star_genomedirs = []
        with io.open(args.conf, 'rt') as fp:
            for line in fp:
                if line.startswith('#'):
                    continue
                name, star_genomedir = line.rstrip().split('\t')
                star_genomedirs.append(star_genomedir)
                # Store an associated name
                genome_names[star_genomedir] = name
    else:
        star_genomedirs = args.star_genomedirs
    if not star_genomedirs:
        logging.critical("No genome indices specified")
        return 1
    print("Genomes:")
    for genome in star_genomedirs:
        print("- %s" % genome)
    # Output directory
    if args.outdir is None:
        outdir = os.getcwd()
    else:
        outdir = os.path.abspath(args.outdir)
    if not os.path.exists(outdir):
        logging.critical("Output directory doesn't exist: %s" % outdir)
        return 1
    # Output file
    outfile = "%s_fastq_strand.txt" % os.path.join(
        outdir, os.path.basename(strip_ngs_extensions(args.r1)))
    if os.path.exists(outfile):
        logging.warning("Removing existing output file '%s'" % outfile)
        os.remove(outfile)
    # Prefix for temporary output
    prefix = "fastq_strand_"
    # Working directory
    if working_dir is None:
        working_dir = os.getcwd()
    else:
        working_dir = os.path.abspath(working_dir)
        if not os.path.isdir(working_dir):
            raise Exception("Bad working directory: %s" % working_dir)
    print("Working directory: %s" % working_dir)
    # Make subset of input read pairs
    nreads = sum(1 for i in getreads(os.path.abspath(args.r1)))
    print("%d reads" % nreads)
    if args.subset == 0:
        print("Using all read pairs in Fastq files")
        subset = nreads
    elif args.subset > nreads:
        print("Actual number of read pairs smaller than requested subset")
        subset = nreads
    else:
        subset = args.subset
        print("Using random subset of %d read pairs" % subset)
    if subset == nreads:
        subset_indices = [i for i in range(nreads)]
    else:
        subset_indices = random.sample(range(nreads), subset)
    fqs_in = filter(lambda fq: fq is not None, (args.r1, args.r2))
    fastqs = []
    for fq in fqs_in:
        fq_subset = os.path.join(working_dir, os.path.basename(fq))
        if fq_subset.endswith(".gz"):
            fq_subset = '.'.join(fq_subset.split('.')[:-1])
        fq_subset = "%s.subset.fq" % '.'.join(fq_subset.split('.')[:-1])
        with io.open(fq_subset, 'wt') as fp:
            for read in getreads_subset(os.path.abspath(fq), subset_indices):
                fp.write(u'\n'.join(read) + '\n')
        fastqs.append(fq_subset)
    # Make directory to keep output from STAR
    if args.keep_star_output:
        star_output_dir = os.path.join(
            outdir, "STAR.%s.outputs" %
            os.path.basename(strip_ngs_extensions(args.r1)))
        print("Output from STAR will be copied to %s" % star_output_dir)
        # Check if directory already exists from earlier run
        if os.path.exists(star_output_dir):
            # Move out of the way
            i = 0
            backup_dir = "%s.bak" % star_output_dir
            while os.path.exists(backup_dir):
                i += 1
                backup_dir = "%s.bak%s" % (star_output_dir, i)
            logging.warning("Moving existing output directory to %s" %
                            backup_dir)
            os.rename(star_output_dir, backup_dir)
        # Make the directory
        os.mkdir(star_output_dir)
    # Write output to a temporary file
    with tempfile.TemporaryFile(mode='w+t') as fp:
        # Iterate over genome indices
        for star_genomedir in star_genomedirs:
            # Basename for output for this genome
            try:
                name = genome_names[star_genomedir]
            except KeyError:
                name = star_genomedir
            # Build a command line to run STAR
            star_cmd = [star_exe]
            star_cmd.extend([
                '--runMode', 'alignReads', '--genomeLoad', 'NoSharedMemory',
                '--genomeDir',
                os.path.abspath(star_genomedir)
            ])
            star_cmd.extend(['--readFilesIn', fastqs[0]])
            if len(fastqs) > 1:
                star_cmd.append(fastqs[1])
            star_cmd.extend([
                '--quantMode', 'GeneCounts', '--outSAMtype', 'BAM', 'Unsorted',
                '--outSAMstrandField', 'intronMotif', '--outFileNamePrefix',
                prefix, '--runThreadN',
                str(args.n)
            ])
            print("Running %s" % ' '.join(star_cmd))
            try:
                subprocess.check_output(star_cmd, cwd=working_dir)
            except subprocess.CalledProcessError as ex:
                raise Exception("STAR returned non-zero exit code: %s" %
                                ex.returncode)
            # Save the outputs
            if args.keep_star_output:
                # Make a subdirectory for this genome index
                genome_dir = os.path.join(star_output_dir,
                                          name.replace(os.sep, "_"))
                print("Copying STAR outputs to %s" % genome_dir)
                os.mkdir(genome_dir)
                for f in os.listdir(working_dir):
                    if f.startswith(prefix):
                        shutil.copy(os.path.join(working_dir, f),
                                    os.path.join(genome_dir, f))
            # Process the STAR output
            star_tab_file = os.path.join(working_dir,
                                         "%sReadsPerGene.out.tab" % prefix)
            if not os.path.exists(star_tab_file):
                raise Exception("Failed to find .out file: %s" % star_tab_file)
            sum_col2 = 0
            sum_col3 = 0
            sum_col4 = 0
            with io.open(star_tab_file, 'rt') as out:
                for i, line in enumerate(out):
                    if i < 4:
                        # Skip first four lines
                        continue
                    # Process remaining delimited columns
                    cols = line.rstrip('\n').split('\t')
                    sum_col2 += int(cols[1])
                    sum_col3 += int(cols[2])
                    sum_col4 += int(cols[3])
            print("Sums:")
            print("- col2: %d" % sum_col2)
            print("- col3: %d" % sum_col3)
            print("- col4: %d" % sum_col4)
            if sum_col2 > 0.0:
                forward_1st = float(sum_col3) / float(sum_col2) * 100.0
                reverse_2nd = float(sum_col4) / float(sum_col2) * 100.0
            else:
                logging.warning("Sum of mapped reads is zero!")
                forward_1st = 0.0
                reverse_2nd = 0.0
            print("Strand percentages:")
            print("- 1st forward: %.2f%%" % forward_1st)
            print("- 2nd reverse: %.2f%%" % reverse_2nd)
            # Append to output file
            data = [name, "%.2f" % forward_1st, "%.2f" % reverse_2nd]
            if args.counts:
                data.extend([sum_col2, sum_col3, sum_col4])
            fp.write(u"%s\n" % "\t".join([str(d) for d in data]))
        # Finished iterating over genomes
        # Rewind temporary output file
        fp.seek(0)
        with io.open(outfile, 'wt') as out:
            # Header
            out.write(u"#fastq_strand version: %s\t"
                      "#Aligner: %s\t"
                      "#Reads in subset: %s\n" % (__version__, "STAR", subset))
            columns = ["Genome", "1st forward", "2nd reverse"]
            if args.counts:
                columns.extend([
                    "Unstranded", "1st read strand aligned",
                    "2nd read strand aligned"
                ])
            out.write(u"#%s\n" % "\t".join(columns))
            # Copy content from temp to final file
            for line in fp:
                out.write(str(line))
    return 0
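
The per-genome results above are buffered in a mode='w+t' TemporaryFile, which is rewound at the end so the rows can be copied below the header into the final output file. A reduced sketch of that buffer-then-prepend-header idiom:

import io
import tempfile

def write_with_header(outfile, header_lines, rows):
    with tempfile.TemporaryFile(mode='w+t') as buf:
        for row in rows:
            buf.write("\t".join(str(x) for x in row) + "\n")
        buf.seek(0)
        with io.open(outfile, 'wt') as out:
            for line in header_lines:
                out.write(u"#%s\n" % line)
            for line in buf:
                out.write(line)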