Example #1
def mlir_convert_file(graph_def_filename,
                      input_tensors,
                      output_tensors,
                      quantization_params=None,
                      additional_flags=""):
  """Convert a graphdef file into a tflite model with MLIR-based conversion.

  NOTE: this currently shells out to the MLIR binary, but we would like to
  convert to Python API tooling in the future.

  Args:
    graph_def_filename: A GraphDef file.
    input_tensors: List of input tensor tuples `(name, shape, type)`. name
      should be a string. shape should be a tuple of integers. type should be a
      string, for example 'DT_FLOAT'
    output_tensors: List of output tensors (names).
    quantization_params: parameters `(inference_type, min_values, max_values)`
      to quantize the model.
    additional_flags: A string of additional command line flags to be passed
      to MLIR converter.

  Returns:
    output tflite model, log_txt from conversion
    or None, log_txt if it did not convert properly.
  """
  bin_path = resource_loader.get_path_to_datafile(
      "../../../../compiler/mlir/lite/tf_tfl_translate")

  with tempfile.NamedTemporaryFile() as output_file, \
       tempfile.NamedTemporaryFile("w+") as stdout_file:
    input_shapes = []
    for input_tensor in input_tensors:
      shape = input_tensor[1]
      input_shapes.append(",".join([str(dim) for dim in shape]))
    input_shapes_str = ":".join(input_shapes)

    input_types = ",".join([x[2] for x in input_tensors])

    quant_flags = ""
    if quantization_params is not None:
      min_vals = ",".join([str(val) for val in quantization_params[1]])
      max_vals = ",".join([str(val) for val in quantization_params[2]])
      quant_flags = ("-tf-inference-type=" + quantization_params[0] +
                     " -tf-input-min-values='" + min_vals +
                     "' -tf-input-max-values='" + max_vals + "' " +
                     "-emit-quant-adaptor-ops ")
    cmd = ("%s -tf-input-arrays=%s -tf-input-data-types=%s -tf-input-shapes=%s "
           "-tf-output-arrays=%s " + quant_flags + additional_flags +
           "%s -o %s")
    cmd = cmd % (
        bin_path,
        ",".join([x[0] for x in input_tensors]),
        input_types,
        input_shapes_str,
        ",".join(output_tensors),
        graph_def_filename,
        output_file.name,
    )
    exit_code = os.system(cmd)
    log = (
        cmd + "exited with code %d" % exit_code + "\n------------------\n" +
        stdout_file.read())
    return (None if exit_code != 0 else output_file.read()), log
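
A minimal, hypothetical invocation of the helper above (the graph file, tensor names, and shapes are illustrative, not taken from the original source):

# Hypothetical usage sketch for mlir_convert_file; assumes "graph.pb" exists
# and defines a DT_FLOAT input tensor named "input" and an output "output".
input_tensors = [("input", (1, 224, 224, 3), "DT_FLOAT")]
output_tensors = ["output"]
tflite_model, log = mlir_convert_file("graph.pb", input_tensors, output_tensors)
if tflite_model is None:
    print("Conversion failed:\n" + log)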
Example #2
try:
    import llfuse
    # Does this version of llfuse support ns precision?
    have_fuse_mtime_ns = hasattr(llfuse.EntryAttributes, 'st_mtime_ns')
except ImportError:
    have_fuse_mtime_ns = False

try:
    from pytest import raises
except ImportError:
    raises = None

has_lchflags = hasattr(os, 'lchflags') or sys.platform.startswith('linux')
no_lchflags_because = '' if has_lchflags else '(not supported on this platform)'
try:
    with tempfile.NamedTemporaryFile() as file:
        platform.set_flags(file.name, stat.UF_NODUMP)
except OSError:
    has_lchflags = False
    no_lchflags_because = '(the file system at %s does not support flags)' % tempfile.gettempdir()

try:
    import llfuse
    has_llfuse = True or llfuse  # avoids "unused import"
except ImportError:
    has_llfuse = False

# The mtime get/set precision varies on different OS and Python versions
if 'HAVE_FUTIMENS' in getattr(posix, '_have_functions', []):
    st_mtime_ns_round = 0
elif 'HAVE_UTIMES' in sysconfig.get_config_vars():
    st_mtime_ns_round = -6  # microsecond precision
else:
    st_mtime_ns_round = -9  # second precision (assumed completion; the listing is truncated after the elif)
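
A short sketch of how such a rounding exponent is typically applied when comparing timestamps across platforms (illustrative; not necessarily the test suite's own helper):

def same_ts_ns(ts_ns_a, ts_ns_b):
    # round() with negative ndigits drops the digits the platform cannot
    # store: round(n, -6) keeps microseconds, round(n, -9) whole seconds.
    return round(ts_ns_a, st_mtime_ns_round) == round(ts_ns_b, st_mtime_ns_round)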
Example #3
 def __init__(self, config):
     config_file = tempfile.NamedTemporaryFile(delete=False)
     config_file.write(encodeutils.safe_encode(json.dumps(config)))
     config_file.close()
     self.filename = config_file.name
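
A standalone sketch of the same pattern: with delete=False the file survives close(), so the caller owns cleanup (the config contents here are illustrative):

import json
import os
import tempfile

config_file = tempfile.NamedTemporaryFile(mode="w", suffix=".json", delete=False)
json.dump({"debug": True}, config_file)
config_file.close()

with open(config_file.name) as f:
    assert json.load(f) == {"debug": True}
os.unlink(config_file.name)  # delete=False means removal is the caller's job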
Example #4
#!/usr/bin/env python
import subprocess
import tempfile

passed = 0

n_tests = 6

# Run a for loop of tests.
for i in range(1,n_tests+1):
    print "Running test " + str(i) + "... "
    
    # Create a temporary file to contain output
    tmpfile = tempfile.NamedTemporaryFile()

    # Run case i, put output into tmpfile
    cmdstr = "./hashtable_test " + str(i) + " " + tmpfile.name;
    subprocess.call(cmdstr,shell=True)
    
    # Compare output to the ground truth.
    cmdstr = "diff -b " + tmpfile.name + " " + str(i) + ".gt > /dev/null"
    rt = subprocess.call(cmdstr, shell=True)

    # Check if the test was passed.
    if rt == 0:
        passed += 1
        print "passed"
    else:
        print "failed"

    # Delete temporary file (assumed completion: the original listing is
    # truncated here, and closing a NamedTemporaryFile deletes it by default)
    tmpfile.close()
Example #5
 def convert_to_file(self, question):
     metadata_as_dict = dict()
     if type(self.metadata) is dict:
         metadata_as_dict = self.metadata
     elif type(self.metadata) is list:
         for data in self.metadata:
             if type(data) is dict:
                 for key in data:
                     metadata_as_dict[key] = data[key]
     if self.output_format == 'rtf to docx':
         self.output_extension = 'rtf'
     else:
         self.output_extension = self.output_format
     if self.output_format in ('rtf', 'rtf to docx') and self.template_file is None:
         self.template_file = docassemble.base.functions.standard_template_filename('Legal-Template.rtf')
     if self.output_format == 'docx' and self.reference_file is None:
         self.reference_file = docassemble.base.functions.standard_template_filename('Legal-Template.docx')
     if (self.output_format == 'pdf' or self.output_format == 'tex') and self.template_file is None:
         self.template_file = docassemble.base.functions.standard_template_filename('Legal-Template.tex')
     yaml_to_use = list()
     if self.output_format in ('rtf', 'rtf to docx'):
         #logmessage("pre input content is " + str(self.input_content))
         self.input_content = docassemble.base.filter.rtf_prefilter(self.input_content, metadata=metadata_as_dict)
         #logmessage("post input content is " + str(self.input_content))
     if self.output_format == 'docx':
         self.input_content = docassemble.base.filter.docx_filter(self.input_content, metadata=metadata_as_dict, question=question)
     if self.output_format == 'pdf' or self.output_format == 'tex':
         if len(self.initial_yaml) == 0:
             standard_file = docassemble.base.functions.standard_template_filename('Legal-Template.yml')
             if standard_file is not None:
                 self.initial_yaml.append(standard_file)
         for yaml_file in self.initial_yaml:
             if yaml_file is not None:
                 yaml_to_use.append(yaml_file)
         for yaml_file in self.additional_yaml:
             if yaml_file is not None:
                 yaml_to_use.append(yaml_file)
         #logmessage("Before: " + repr(self.input_content))
         self.input_content = docassemble.base.filter.pdf_filter(self.input_content, metadata=metadata_as_dict, question=question)
         #logmessage("After: " + repr(self.input_content))
     if not re.search(r'[^\s]', self.input_content):
         self.input_content = u"\\textbf{}\n"
     if PY3:
         temp_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="w", suffix=".md", delete=False, encoding='utf-8')
         temp_file.write(self.input_content)
     else:
         temp_file = tempfile.NamedTemporaryFile(prefix="datemp", mode="w", suffix=".md", delete=False)
          with io.open(temp_file.name, 'w', encoding='utf-8') as fp:  # io.open accepts encoding on Python 2
             fp.write(self.input_content)
     temp_file.close()
     temp_outfile = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix="." + str(self.output_extension), delete=False)
     temp_outfile.close()
     current_temp_dir = 'epsconv'
     latex_conversion_directory = os.path.join(tempfile.gettempdir(), 'conv')
     if not os.path.isdir(latex_conversion_directory):
         os.makedirs(latex_conversion_directory)
     if not os.path.isdir(latex_conversion_directory):
         raise Exception("Could not create latex conversion directory")
     icc_profile_in_temp = os.path.join(tempfile.gettempdir(), 'sRGB_IEC61966-2-1_black_scaled.icc')
     if not os.path.isfile(icc_profile_in_temp):
         shutil.copyfile(docassemble.base.functions.standard_template_filename('sRGB_IEC61966-2-1_black_scaled.icc'), icc_profile_in_temp)
     subprocess_arguments = [PANDOC_PATH, PANDOC_ENGINE]
     if PANDOC_OLD:
         subprocess_arguments.append("--smart")
     subprocess_arguments.extend(['-M', 'latextmpdir=' + os.path.join('.', 'conv'), '-M', 'pdfa=' + ('true' if self.pdfa else 'false')])
     if len(yaml_to_use) > 0:
         subprocess_arguments.extend(yaml_to_use)
     if self.template_file is not None:
         subprocess_arguments.extend(['--template=%s' % self.template_file])
     if self.reference_file is not None:
         if PANDOC_OLD:
             subprocess_arguments.extend(['--reference-docx=%s' % self.reference_file])
         else:
             subprocess_arguments.extend(['--reference-doc=%s' % self.reference_file])
     subprocess_arguments.extend(['-s', '-o', temp_outfile.name])
     subprocess_arguments.extend([temp_file.name])
     subprocess_arguments.extend(self.arguments)
     #logmessage("Arguments are " + str(subprocess_arguments) + " and directory is " + tempfile.gettempdir())
     try:
         msg = subprocess.check_output(subprocess_arguments, cwd=tempfile.gettempdir(), stderr=subprocess.STDOUT).decode('utf-8', 'ignore')
     except subprocess.CalledProcessError as err:
         raise Exception("Failed to assemble file: " + err.output.decode())
     if msg:
         self.pandoc_message = msg
     os.remove(temp_file.name)
     if os.path.exists(temp_outfile.name):
         if self.output_format in ('rtf', 'rtf to docx'):
             with open(temp_outfile.name, encoding='utf-8') as the_file:
                 file_contents = the_file.read()
             # with open('/tmp/asdf.rtf', 'w') as deb_file:
             #     deb_file.write(file_contents)
             file_contents = docassemble.base.filter.rtf_filter(file_contents, metadata=metadata_as_dict, styles=get_rtf_styles(self.template_file), question=question)
             with open(temp_outfile.name, "wb") as the_file:
                 the_file.write(bytearray(file_contents, encoding='utf-8'))
             if self.output_format == 'rtf to docx':
                 docx_outfile = tempfile.NamedTemporaryFile(prefix="datemp", mode="wb", suffix=".docx", delete=False)
                 success = rtf_to_docx(temp_outfile.name, docx_outfile.name)
                 if not success:
                     raise Exception("Could not convert RTF to DOCX.")
                 temp_outfile = docx_outfile
         if self.output_filename is not None:
             shutil.copyfile(temp_outfile.name, self.output_filename)
         else:
             self.output_filename = temp_outfile.name
         self.output_content = None
         if self.output_format == 'pdf' and self.password:
             pdf_encrypt(self.output_filename, self.password)
     else:
         raise IOError("Failed creating file: %s" % output_filename)
     return
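
A stripped-down sketch of the core pipeline above: write markdown to a temporary .md file, pre-create a closed temporary output file, and let pandoc fill it in (these pandoc arguments are minimal; the real method adds templates, YAML metadata, and filters):

import subprocess
import tempfile

with tempfile.NamedTemporaryFile(mode="w", suffix=".md", delete=False) as md:
    md.write("# Hello\n\nAssembled by pandoc.\n")
out = tempfile.NamedTemporaryFile(suffix=".docx", delete=False)
out.close()  # pandoc reopens the file by name and overwrites it
subprocess.check_output(["pandoc", "-s", "-o", out.name, md.name],
                        stderr=subprocess.STDOUT)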
Example #6
    def do_GET(self):
        """Respond to a GET request."""
        if "?" in self.path:
            page, arg = self.path.split("?", 1)
            arg = dict(urlparse.parse_qsl(arg))
        else:
            page = self.path
            arg = None

#        print self.path,type(self.path)
#        print page
#        print arg

        if page == "/send":
            if arg is None: return
            for key, value in arg.items():
                if key == "gcode":
                    for line in value.split('\n'):
                        httpd.app.queue.put(line + "\n")
                elif key == "cmd":
                    httpd.app.pendant.put(urllib.unquote(value))
            #send empty response so browser does not generate errors
            self.do_HEAD(200, "text/text", cl=len(""))
            self.wfile.write("")

        elif page == "/state":
            tmp = {}
            for name in [
                    "controller", "state", "pins", "color", "msg", "wx", "wy",
                    "wz", "G", "OvFeed", "OvRapid", "OvSpindle"
            ]:
                tmp[name] = OCV.CD[name]
            contentToSend = json.dumps(tmp)
            self.do_HEAD(200, content="text/text", cl=len(contentToSend))
            self.wfile.write(contentToSend)

        elif page == "/config":
            snd = {}
            snd["rpmmax"] = httpd.app.get("CNC", "spindlemax")
            contentToSend = json.dumps(snd)
            self.do_HEAD(200, content="text/text", cl=len(contentToSend))
            self.wfile.write(contentToSend)

        elif page == "/icon":
            if arg is None: return
            filename = os.path.join(iconpath, arg["name"] + ".gif")
            self.do_HEAD(200,
                         content="image/gif",
                         cl=os.path.getsize(filename))
            try:
                f = open(filename, "rb")
                self.wfile.write(f.read())
                f.close()
            except:
                pass

        elif page == "/canvas":
            if not Image: return
            with tempfile.NamedTemporaryFile(suffix='.ps') as tmp:
                httpd.app.canvasFrame.canvas.postscript(
                    file=tmp.name,
                    colormode='color',
                )
                tmp.flush()
                try:
                    with tempfile.NamedTemporaryFile(suffix='.gif') as out:
                        Image.open(tmp.name).save(out.name, 'GIF')
                        out.flush()
                        out.seek(0)
                        self.do_HEAD(200,
                                     content="image/gif",
                                     cl=os.path.getsize(out.name))
                        self.wfile.write(out.read())
                except:
                    filename = os.path.join(iconpath, "warn.gif")
                    self.do_HEAD(200,
                                 content="image/gif",
                                 cl=os.path.getsize(filename))
                    try:
                        f = open(filename, "rb")
                        self.wfile.write(f.read())
                        f.close()
                    except:
                        pass

        elif page == "/camera":
            if not Camera.hasOpenCV(): return
            if Pendant.camera is None:
                Pendant.camera = Camera.Camera("webcam")
                Pendant.camera.start()

            if Pendant.camera.read():
                Pendant.camera.save("camera.jpg")
                #cv.imwrite("camera.jpg",img)
                self.do_HEAD(200,
                             content="image/jpeg",
                             cl=os.path.getsize("camera.jpg"))
                try:
                    f = open("camera.jpg", "rb")
                    self.wfile.write(f.read())
                    f.close()
                except:
                    pass
        else:
            self.mainPage(page[1:])
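
A minimal sketch of the canvas-to-GIF conversion used in the /canvas branch above (assumes Pillow with Ghostscript available for decoding PostScript, plus an existing Tkinter canvas object named canvas):

import tempfile
from PIL import Image

with tempfile.NamedTemporaryFile(suffix=".ps") as ps:
    canvas.postscript(file=ps.name, colormode="color")  # canvas is assumed to exist
    ps.flush()
    Image.open(ps.name).save("canvas.gif", "GIF")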
Example #7
def main(options: Optional[List[str]] = None) -> None:
    """
    Launch Zulip Terminal.
    """

    argv = options if options is not None else sys.argv[1:]
    args = parse_args(argv)

    set_encoding("utf-8")

    if args.debug:
        print("NOTE: Debug mode enabled; API calls being logged to {}.".format(
            in_color("blue", API_CALL_LOG_FILENAME)))
        requests_logfile_handler = logging.FileHandler(API_CALL_LOG_FILENAME)
        requests_logger.addHandler(requests_logfile_handler)
    else:
        requests_logger.addHandler(logging.NullHandler())

    if args.profile:
        import cProfile

        prof = cProfile.Profile()
        prof.enable()

    if args.version:
        print(f"Zulip Terminal {ZT_VERSION}")
        sys.exit(0)

    if args.list_themes:
        print(list_themes())
        sys.exit(0)

    if args.config_file:
        zuliprc_path = args.config_file
    else:
        zuliprc_path = "~/zuliprc"

    try:
        zterm = parse_zuliprc(zuliprc_path)

        if args.autohide:
            zterm["autohide"] = (args.autohide, "on command line")

        if args.theme:
            theme_to_use = (args.theme, "on command line")
        else:
            theme_to_use = zterm["theme"]

        if (zterm["footlinks"][1] == ZULIPRC_CONFIG
                and zterm["maximum-footlinks"][1] == ZULIPRC_CONFIG):
            exit_with_error(
                "Footlinks property is not allowed alongside maximum-footlinks"
            )

        if (zterm["maximum-footlinks"][1] == ZULIPRC_CONFIG
                and int(zterm["maximum-footlinks"][0]) < 0):
            exit_with_error("Minimum value allowed for maximum-footlinks is 0")

        if zterm["footlinks"][1] == ZULIPRC_CONFIG:
            if zterm["footlinks"][0] == DEFAULT_SETTINGS["footlinks"]:
                maximum_footlinks = 3
            else:
                maximum_footlinks = 0
        else:
            maximum_footlinks = int(zterm["maximum-footlinks"][0])

        available_themes = all_themes()
        theme_aliases = aliased_themes()
        is_valid_theme = (theme_to_use[0] in available_themes
                          or theme_to_use[0] in theme_aliases)
        if not is_valid_theme:
            exit_with_error(
                "Invalid theme '{}' was specified {}.".format(*theme_to_use),
                helper_text=list_themes(),
            )
        if theme_to_use[0] not in available_themes:
            # theme must be an alias, as it is valid
            real_theme_name = theme_aliases[theme_to_use[0]]
            theme_to_use = (
                real_theme_name,
                "{} (by alias '{}')".format(theme_to_use[1], theme_to_use[0]),
            )

        if args.color_depth:
            zterm["color-depth"] = (args.color_depth, "on command line")

        color_depth_str = zterm["color-depth"][0]
        if color_depth_str == "24bit":
            color_depth = 2**24
        else:
            color_depth = int(color_depth_str)

        if args.notify:
            zterm["notify"] = (args.notify, "on command line")

        print("Loading with:")
        print("   theme '{}' specified {}.".format(*theme_to_use))
        complete, incomplete = complete_and_incomplete_themes()
        if theme_to_use[0] in incomplete:
            if complete:
                incomplete_theme_warning = (
                    "   WARNING: Incomplete theme; results may vary!\n"
                    "      (you could try: {})".format(", ".join(complete)))
            else:
                incomplete_theme_warning = (
                    "   WARNING: Incomplete theme; results may vary!\n"
                    "      (all themes are incomplete)")
            print(in_color("yellow", incomplete_theme_warning))
        print("   autohide setting '{}' specified {}.".format(
            *zterm["autohide"]))
        if zterm["footlinks"][1] == ZULIPRC_CONFIG:
            print(
                "   maximum footlinks value '{}' specified {} from footlinks.".
                format(maximum_footlinks, zterm["footlinks"][1]))
        else:
            print("   maximum footlinks value '{}' specified {}.".format(
                *zterm["maximum-footlinks"]))
        print("   color depth setting '{}' specified {}.".format(
            *zterm["color-depth"]))
        print("   notify setting '{}' specified {}.".format(*zterm["notify"]))

        # For binary settings
        # Specify setting in order True, False
        valid_settings = {
            "autohide": ["autohide", "no_autohide"],
            "notify": ["enabled", "disabled"],
            "color-depth": ["1", "16", "256", "24bit"],
        }
        boolean_settings: Dict[str, bool] = dict()
        for setting, valid_values in valid_settings.items():
            if zterm[setting][0] not in valid_values:
                helper_text = (
                    ["Valid values are:"] +
                    [f"  {option}" for option in valid_values] +
                    [f"Specify the {setting} option in zuliprc file."])
                exit_with_error(
                    "Invalid {} setting '{}' was specified {}.".format(
                        setting, *zterm[setting]),
                    helper_text="\n".join(helper_text),
                )
            if setting == "color-depth":
                break
            boolean_settings[setting] = zterm[setting][0] == valid_values[0]

        theme_data = generate_theme(theme_to_use[0], color_depth)

        Controller(
            zuliprc_path,
            maximum_footlinks,
            theme_to_use[0],
            theme_data,
            color_depth,
            args.explore,
            **boolean_settings,
        ).main()
    except ServerConnectionFailure as e:
        # Acts as separator between logs
        zt_logger.info(f"\n\n{e}\n\n")
        zt_logger.exception(e)
        exit_with_error(f"\nError connecting to Zulip server: {e}.")
    except (display_common.AttrSpecError, display_common.ScreenError) as e:
        # NOTE: Strictly this is not necessarily just a theme error
        # FIXME: Add test for this - once loading takes place after UI setup

        # Acts as separator between logs
        zt_logger.info(f"\n\n{e}\n\n")
        zt_logger.exception(e)
        exit_with_error(f"\nPossible theme error: {e}.")
    except Exception as e:
        zt_logger.info("\n\n{e}\n\n")
        zt_logger.exception(e)
        if args.debug:
            sys.stdout.flush()
            traceback.print_exc(file=sys.stderr)
            run_debugger = input("Run Debugger? (y/n): ")
            if run_debugger in ["y", "Y", "yes"]:
                # Open PUDB Debugger
                import pudb

                pudb.post_mortem()

        if hasattr(e, "extra_info"):
            print(in_color("red", f"\n{e.extra_info}"),
                  file=sys.stderr)  # type: ignore

        print(
            in_color(
                "red",
                "\nZulip Terminal has crashed!"
                f"\nPlease refer to {TRACEBACK_LOG_FILENAME}"
                " for full log of the error.",
            ),
            file=sys.stderr,
        )
        print(
            "You can ask for help at:"
            "\nhttps://chat.zulip.org/#narrow/stream/206-zulip-terminal",
            file=sys.stderr,
        )
        print("\nThanks for using the Zulip-Terminal interface.\n")
        sys.stderr.flush()

    finally:
        if args.profile:
            prof.disable()
            import tempfile

            with tempfile.NamedTemporaryFile(prefix="zulip_term_profile.",
                                             suffix=".dat",
                                             delete=False) as profile_file:
                profile_path = profile_file.name
            # Dump stats only after temporary file is closed (for Win NT+ case)
            prof.dump_stats(profile_path)
            print("Profile data saved to {0}.\n"
                  "You can visualize it using e.g. `snakeviz {0}`".format(
                      profile_path))

        sys.exit(1)
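
The profiling branch in the finally block above shows a portable pattern worth isolating: close the NamedTemporaryFile first, then write to its name, because Windows will not let a second writer open a file that is still held open. A self-contained sketch:

import cProfile
import tempfile

prof = cProfile.Profile()
prof.enable()
sum(range(1000))  # some work to profile
prof.disable()

with tempfile.NamedTemporaryFile(prefix="zulip_term_profile.",
                                 suffix=".dat", delete=False) as f:
    profile_path = f.name
prof.dump_stats(profile_path)  # safe now that the handle is closed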
Example #8
def createIGVsessionAnnot(genome, uuids, annot_uuids=None, samp_file=None):
    """ Creates session file for selected file uuids, returns newly created filestore uuid 
    
    :param genome: Genome to be used in session file i.e. hg18, dm3
    :type genome: string.
    :param uuids: Array of UUIDs to be used
    :type uuids: array.
    :param uuids: Host URL i.e. 127.0.0.1:8000
    :type uuids: string
    """
    
    # Create IGV Session file and put into Filestore
    """
    http://www.postneo.com/projects/pyxml/
    
    <?xml version="1.0" encoding="UTF-8"?>
        <Global genome="hg18" locus="EGFR" version="3">
        <Resources>
            <Resource name="RNA Genes" path="http://www.broadinstitute.org/igvdata/tcga/gbm/GBM_batch1-8_level3_exp.txt.recentered.080820.gct.tdf"/>
            <Resource name="RNA Genes" path="http://www.broadinstitute.org/igvdata/annotations/hg18/rna_genes.bed"/>
            <Resource name="sno/miRNA" path="http://www.broadinstitute.org/igvdata/tcga/gbm/Sample_info.txt"/>
        </Resources>
    </Global>
    """
    #logger.debug("visualization_manager.views createIGVsessionAnnot called")
    
    # Create the minidom document
    doc = Document()
    
    # Create the <Global> base element
    xml = doc.createElement("Global")
    xml.setAttribute("genome", genome)
    xml.setAttribute("locus", "All")
    xml.setAttribute("version", "4")
    doc.appendChild(xml)
    
    # Add Resources
    xml_resources = doc.createElement("Resources")
    xml.appendChild(xml_resources)        
    
    # adding selected samples to xml file
    addIGVResource(uuids["node_uuid"], xml_resources, doc)
    
    if annot_uuids:
        # adding selected samples to xml file
        addIGVResource(annot_uuids["node_uuid"], xml_resources, doc)
        
        
    # adds sample information file to IGV session file 
    if samp_file:
        #<Resource name="Sample Information" path="http://igv.broadinstitute.org/data/hg18/tcga/gbm/gbmsubtypes/sampleTable.txt.gz"/>
        # creates Resource element 
        res = doc.createElement("Resource")
        res.setAttribute("name", "Sample Information")
        res.setAttribute("path", samp_file)
        xml_resources.appendChild(res)    
    
    #<HiddenAttributes>
    #    <Attribute name="DATA FILE"/>
    #    <Attribute name="Linking_id"/>
    #    <Attribute name="DATA TYPE"/>
    #</HiddenAttributes>
    # Adding parameters to hide basic unnecessary sample info
    hidden_attr = doc.createElement("HiddenAttributes")
    xml.appendChild(hidden_attr) 
    
    attr = doc.createElement("Attribute")
    attr.setAttribute("name", "DATA FILE")
    hidden_attr.appendChild(attr) 
    
    attr = doc.createElement("Attribute")
    attr.setAttribute("name", "Linking_id")
    hidden_attr.appendChild(attr) 
    
    attr = doc.createElement("Attribute")
    attr.setAttribute("name", "DATA TYPE")
    hidden_attr.appendChild(attr) 
    
    
    # Creating temp file to enter into file_store
    tempfilename = tempfile.NamedTemporaryFile(delete=False)
    tempfilename.write(doc.toprettyxml(indent="  "))
    tempfilename.close()
    
    # getting file_store_uuid
    filestore_uuid = create(tempfilename.name, permanent=True, filetype="xml")
    filestore_item = import_file(filestore_uuid, permanent=True, refresh=True)
    
    # file to rename
    temp_name = filestore_item.datafile.name.split('/')
    temp_name = temp_name[len(temp_name)-1] + '.xml'
    
    # rename file by way of file_store
    filestore_item = rename(filestore_uuid, temp_name)
    
    # delete temp file
    os.unlink(tempfilename.name)
    
    # Print our newly created XML
    #logger.info( doc.toprettyxml(indent="  "))
    #print filestore_item.datafile.url
    
    # Url for session file 
    fs_url = filestore_item.get_full_url()
    
    # IGV url for automatic launch of Java Webstart
    igv_url = "http://www.broadinstitute.org/igv/projects/current/igv.php?sessionURL=" + fs_url
    
    return igv_url
Example #9
def addIGVSamples(fields, results_samp, annot_samples=None):
    """ creates phenotype file for IGV 
    
    :param samples: Solr results for samples to be included 
    :type samples: Array.
    :param annot_samples: includes annotation files included with solr results
    :type annot_samples: Array
    """
    
    #logger.debug("visualization_manager.views addIGVSamples called, fields=%s" % fields)
    
    # creates human readable indexes of fields to iterate over
    fields_dict = {}
    for i in fields:
        find_index = i.find("_Characteristics_")
        if find_index > -1:
            new_key = i.split("_Characteristics_")[0]
            fields_dict[i] = new_key
    
    # Creating temp file to enter into file_store
    tempsampname = tempfile.NamedTemporaryFile(delete=False)
    
    # writing header to sample file 
    tempsampname.write("#sampleTable" + "\n")
    
    # writing column names to sample file 
    col_names = "Linking_id"
    for k,v in fields_dict.iteritems():
        col_names = col_names + '\t' + v
    tempsampname.write(col_names + "\n")
    
    # iterating over sample files 
    pheno_results = get_sample_lines(fields_dict, results_samp)
    tempsampname.write(pheno_results)
    
    # if annotations are not null
    if annot_samples:
        #results_annot = annot_samples["response"]["docs"]
        pheno_annot = get_sample_lines(fields_dict, annot_samples)
        tempsampname.write(pheno_annot)
        
    # closing temp file 
    tempsampname.close()

    # getting file_store_uuid
    filestore_uuid = create(tempsampname.name, permanent=True, filetype="txt")
    filestore_item = import_file(filestore_uuid, permanent=True, refresh=True)
    
    # file to rename
    temp_file = filestore_item.datafile.name.split('/')
    temp_file = temp_file[len(temp_file)-1] + '.txt'
    
    # rename file by way of file_store
    filestore_item = rename(filestore_uuid, temp_file)
    
    # getting file information based on file_uuids
    curr_fs = FileStoreItem.objects.get(uuid=filestore_uuid)
    curr_name = curr_fs.datafile.name
    
    # full path to selected UUID File
    curr_url = curr_fs.get_full_url()
    
    # delete temp file
    os.unlink(tempsampname.name)
    
    return curr_url
Example #10
# Licence:     <your licence>
#-------------------------------------------------------------------------------

import os, sys, re
import tempfile


def writeline(string):
    txt.write(string.encode("utf-8"))


##tmp = tempfile.NamedTemporaryFile(prefix='FolderGISInventory',delete=False)
##output = tmp.name + ".txt"
desktop = r'C:\Users\chrism\Desktop'
output = tempfile.NamedTemporaryFile(suffix='.txt',
                                     dir=desktop,
                                     prefix='FolderGISInventory_',
                                     delete=False)
txt = open(output.name, "w")

d_dir = r'C:\Temp'
ext_dict = {
    '.shp': 'shapefile',
    '.mdb': 'pGDB',
    '.lyr': 'layer file',
    '.sde': 'eGDB',
    '.img': 'raster (img)',
    '.jpg': 'raster (jpg)',
    '.mxd': 'map document',
    '.ja1': 'RPF',
    '.csv': 'CSV',
    '.kmz': 'KML',
}  # closing brace assumed; the original listing is truncated here
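
A plausible continuation, reusing d_dir, ext_dict, writeline, and the os import from above, would walk the folder tree and classify files by extension (this sketch is an assumption, not recovered from the original script):

for root, dirs, files in os.walk(d_dir):
    for name in files:
        ext = os.path.splitext(name)[1].lower()
        if ext in ext_dict:
            # record "<full path>\t<friendly type>" in the inventory file
            writeline(u"{0}\t{1}\n".format(os.path.join(root, name), ext_dict[ext]))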
Example #11
	def _gen_group_config (self, gdata) :
		# generate list of users
		#self._log ("_gen_group_config")
		members = []
		#self._log (gdata.keys())
		if 'appSpecName' in gdata.keys() :
			if gdata['appSpecName'] is None :
				self._log ('appSpecName is None, this group is not a twiki group')
				# tell the caller that everything is fine, even if we didn't do anything
				return True
			asn = gdata['appSpecName']
			#self._log ("asn : "+str(asn))
			if ('twiki' in asn.keys()) and ('members' in gdata.keys()) :
				twiki_group_name = asn['twiki']
				#self._log (twiki_group_name)
				gdm = gdata['members']
				for m in gdm :
					n = None
					if 'appSpecName' in m.keys() :
						asn=m['appSpecName']
						try :
							asn = json.loads(asn)
						except ValueError as e :
							pass
						else :
							if 'twiki' in asn :
								n = asn['twiki']
					if (n is None) and ('first_name' in m.keys()) and ('last_name' in m.keys()) :
						n = self.gen_user_name (m['first_name'], m['last_name'])
					if n is None :
						continue
					members.append(n)
				# sort members
				members.sort()
				# generate members list
				members = ['Main.'+s for s in members]
				twiki_group_members = ', '.join(members)
				
				import time
				# generate file
				s = u'%META:TOPICINFO{author="adminToolCore-Twiki-module" date="'+str(int(time.time()))+u'"}%\n'
				s+= u'%META:TOPICPARENT{name="TWikiGroups"}%\n'
				s+= u'---+!! <nop>'+twiki_group_name+u'\n'
				s+= u'\n'
				s+= u'   * Member list:\n'
				s+= u'      * Set GROUP = '+twiki_group_members+u'\n'
				s+= u'\n'
				s+= u'   * Persons/group who can change the list:\n'
				s+= u'      * Set ALLOWTOPICCHANGE = '+twiki_group_name+u'\n'
				s+= u'\n'
				s+= u'__%MAKETEXT{"Related Topics:"}%__ %WIKIUSERSTOPIC%, TWikiGroups, %TWIKIWEB%.TWikiAccessControl\n'
				s+= u'---\n'
				s+= u'<small> <font color="#808080">\n'
				s+= u'_Dernière mise à jour : '+time.strftime('%d %B %Y')+u'_\n'
				s+= u'</small>\n'
				s+= u'\n'
				s+= u'<!--\n'
				s+= u'   * Set CACHEABLE = off\n'
				s+= u'-->\n'
				#self._log (s)
			
				# copy the contents of s to the twiki system, so as to update the contents of the group
				a = af.rem()
				# save to a temporary file
				import tempfile
				f = tempfile.NamedTemporaryFile ()
				f.write (s.encode('utf-8'))
				f.flush ()
				import os
				# call the remote access file copy
				res = a.copy (TWIKI_SERVER, TWIKI_FILE_OWNER, TWIKI_FILE_GROUP, f.name, 
							  os.path.join(TWIKI_MAIN, twiki_group_name+'.txt'), '0664')
				# destroy the temporary file		
				f.close()
				if not res : 
					a.log ('FATAL: Impossible to create twiki group '+twiki_group_name+'\nunable to copy file to its destination')
					return False
				# everything is fine
				return True
			else :
				# need to log this better
				self._log('FATAL: missing bits in group data')
				return False
		else :
			self._log('no appSpecName defined')
			return False
Example #12
    def _execute(self, args, extension, show_output=True):
        now = self._get_now()
        openssl_template = open(os.path.join(self.target_dir, 'ca-material/openssl-template.conf')).read()

        ou_attrs = ('organizational_unit', 'organizational-unit')
        template_args = {}
        for name in ('organization', 'locality', 'state_or_province', 'country'):
            value = self._get_arg(args, name, ca_defaults[name])
            template_args[name.replace('-', '_')] = value

        for name in ou_attrs:
            has_name = self._get_arg(args, name, None)
            if has_name:
                value = self._get_arg(args, name, ca_defaults[name])
                template_args[name.replace('-', '_')] = value
                break
        else:
            if hasattr(self, 'get_organizational_unit'):
                template_args['organizational_unit'] = self.get_organizational_unit(args)
            else:
                template_args['organizational_unit'] = ca_defaults['organizational_unit']

        template_args['common_name'] = self._get_arg(args, 'common_name', default_common_name)
        template_args['target_dir'] = self.target_dir

        f = tempfile.NamedTemporaryFile(mode='w+') # noqa
        f.write(openssl_template.format(**template_args))
        f.flush()

        file_args = {
            'now':now,
            'target_dir':self.target_dir
        }

        for arg in ('cluster_name', 'server_name', 'scheduler_name'):
            if hasattr(args, arg):
                file_args[arg] = getattr(args, arg)

        file_args['file_prefix'] = self.get_file_prefix(file_args)

        csr_name = '{target_dir}/out-csr/{file_prefix}-csr-{now}.pem'.format(**file_args)
        priv_key_name = '{target_dir}/out-priv/{file_prefix}-priv-{now}.pem'.format(**file_args)
        pub_key_name = '{target_dir}/out-pub/{file_prefix}-pub-{now}.pem'.format(**file_args)
        cert_name = '{target_dir}/out-cert/{file_prefix}-cert-{now}.pem'.format(**file_args)

        format_args = {
            'config': f.name,
            'extension': extension,
            'csr_name': csr_name,
            'priv_key_name': priv_key_name,
            'pub_key_name': pub_key_name,
            'cert_name': cert_name,
            'target_dir': self.target_dir
        }

        # Create the CSR and keys ..
        cmd = """openssl req -batch -new -nodes -extensions {extension} \
                  -out {csr_name} \
                  -keyout {priv_key_name} \
                  -pubkey \
                  -newkey rsa:4096 -config {config} \
                  >/dev/null 2>&1""".format(**format_args)
        os.system(cmd)

        # .. note that we were using "-pubkey" flag above so we now have to extract
        # the public key from the CSR.

        split_line = '-----END PUBLIC KEY-----'
        csr_pub = open(csr_name).read()
        csr_pub = csr_pub.split(split_line)

        pub = csr_pub[0] + split_line
        csr = csr_pub[1].lstrip()

        open(csr_name, 'w').write(csr)
        open(pub_key_name, 'w').write(pub)

        # Generate the certificate
        cmd = """openssl ca -batch -passin file:{target_dir}/ca-material/ca-password -config {config} \
                 -out {cert_name} \
                 -extensions {extension} \
                 -in {csr_name} \
                  >/dev/null 2>&1""".format(**format_args)

        os.system(cmd)
        f.close()

        # Now delete the default certificate stored in './', we don't really
        # need it because we have its copy in './out-cert' anyway.
        last_serial = open(os.path.join(self.target_dir, 'ca-material/ca-serial.old')).read().strip()
        os.remove(os.path.join(self.target_dir, last_serial + '.pem'))

        msg = """Crypto material generated and saved in:
  - private key: {priv_key_name}
  - public key: {pub_key_name}
  - certificate {cert_name}
  - CSR: {csr_name}""".format(**format_args)

        if show_output:
            if self.verbose:
                self.logger.debug(msg)
            else:
                self.logger.info('OK')

        # Make sure permissions are tight (GH #440)
        os.chmod(priv_key_name, 0o640)

        # In case someone needs to invoke us directly and wants to find out
        # what the format_args were.
        return format_args
Example #13
def test_tflite(temp_dir, board, west_cmd, tvm_debug):
    """Testing a TFLite model."""
    model = test_utils.ZEPHYR_BOARDS[board]
    input_shape = (1, 49, 10, 1)
    output_shape = (1, 12)
    build_config = {"debug": tvm_debug}

    model_url = "https://github.com/tlc-pack/web-data/raw/25fe99fb00329a26bd37d3dca723da94316fd34c/testdata/microTVM/model/keyword_spotting_quant.tflite"
    model_path = download_testdata(model_url, "keyword_spotting_quant.tflite", module="model")

    # Import TFLite model
    tflite_model_buf = open(model_path, "rb").read()
    try:
        import tflite

        tflite_model = tflite.Model.GetRootAsModel(tflite_model_buf, 0)
    except AttributeError:
        import tflite.Model

        tflite_model = tflite.Model.Model.GetRootAsModel(tflite_model_buf, 0)

    # Load TFLite model and convert to Relay
    relay_mod, params = relay.frontend.from_tflite(
        tflite_model, shape_dict={"input_1": input_shape}, dtype_dict={"input_1": "int8"}
    )

    target = tvm.target.target.micro(model)
    executor = Executor(
        "aot", {"unpacked-api": True, "interface-api": "c", "workspace-byte-alignment": 4}
    )
    runtime = Runtime("crt")
    with tvm.transform.PassContext(opt_level=3, config={"tir.disable_vectorize": True}):
        lowered = relay.build(relay_mod, target, params=params, runtime=runtime, executor=executor)

    sample_url = "https://github.com/tlc-pack/web-data/raw/967fc387dadb272c5a7f8c3461d34c060100dbf1/testdata/microTVM/data/keyword_spotting_int8_6.pyc.npy"
    sample_path = download_testdata(sample_url, "keyword_spotting_int8_6.pyc.npy", module="data")
    sample = np.load(sample_path)

    with tempfile.NamedTemporaryFile() as tar_temp_file:
        with tarfile.open(tar_temp_file.name, "w:gz") as tf:
            with tempfile.TemporaryDirectory() as tar_temp_dir:
                model_files_path = os.path.join(tar_temp_dir, "include")
                os.mkdir(model_files_path)
                header_path = generate_c_interface_header(
                    lowered.libmod_name, ["input_1"], ["output"], [], 0, model_files_path
                )
                tf.add(header_path, arcname=os.path.relpath(header_path, tar_temp_dir))

            test_utils.create_header_file("input_data", sample, "include", tf)
            test_utils.create_header_file(
                "output_data", np.zeros(shape=output_shape, dtype="int8"), "include", tf
            )

        project, _ = test_utils.build_project(
            temp_dir,
            board,
            west_cmd,
            lowered,
            build_config,
            extra_files_tar=tar_temp_file.name,
        )

    project.flash()
    with project.transport() as transport:
        aot_transport_init_wait(transport)
        transport.write(b"infer%", timeout_sec=5)
        result_line = aot_transport_find_message(transport, "result", timeout_sec=60)

    result_line = result_line.strip("\n")
    result_line = result_line.split(":")
    result = int(result_line[1])
    time = int(result_line[2])
    logging.info(f"Result: {result}\ttime: {time} ms")
    assert result == 6
Example #14
def mlir_convert(options, graph_def, input_tensors, output_tensors, **kwargs):
  """Convert a model's graph def into a tflite model with MLIR-based conversion.

  Args:
    options: A lite.testing.generate_examples_lib.Options instance.
    graph_def: A GraphDef object.
    input_tensors: List of input tensor tuples `(name, shape, type)`.
    output_tensors: List of output tensors (names).
    **kwargs: Extra parameters.

  Returns:
    output tflite model, log_txt from conversion
    or None, log_txt if it did not convert properly.
  """
  test_params = kwargs.get("test_params", {})
  # TODO(b/146025965): Rename ExtraTocoOptions to ExtraConvertOptions or
  #                    something else.
  extra_toco_options = kwargs.get("extra_toco_options",
                                  zip_test_utils.ExtraTocoOptions())
  input_arrays = [x[0] for x in input_tensors]
  input_shapes = zip_test_utils.get_input_shapes_map(input_tensors)

  tflite_model = None
  log = ""

  with tempfile.NamedTemporaryFile() as graphdef_file:
    graphdef_file.write(graph_def.SerializeToString())
    graphdef_file.flush()
    converter = tf.lite.TFLiteConverter.from_frozen_graph(
        graphdef_file.name, input_arrays, output_tensors, input_shapes)
    converter.allow_custom_ops = extra_toco_options.allow_custom_ops
    converter.experimental_new_quantizer = options.mlir_quantizer

    if options.run_with_flex:
      converter.supported_ops = set([
          tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS])

    if test_params.get("dynamic_range_quantize", False):
      converter.optimizations = [tf.lite.Optimize.DEFAULT]

    if test_params.get("fully_quantize", False):
      converter.optimizations = [tf.lite.Optimize.DEFAULT]

      # Read the input range for the representative dataset from parameters.
      min_value, max_value = test_params.get("input_range", (-1, 1))

      def representative_dataset(input_tensors):
        calibration_inputs = []
        for _, shape, _ in input_tensors:
          if shape:
            dims = [1 if dim.value is None else dim.value for dim in shape.dims]
            calibration_inputs.append(
                np.random.uniform(min_value, max_value,
                                  tuple(dims)).astype(np.float32))
        return calibration_inputs

      def representative_dataset_gen():
        for _ in range(100):
          yield representative_dataset(input_tensors)

      if test_params.get("quant_16x8", False):
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.\
            EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
        ]
      else:
        converter.target_spec.supported_ops = [
            tf.lite.OpsSet.TFLITE_BUILTINS_INT8
        ]

      converter.representative_dataset = representative_dataset_gen
      if extra_toco_options.inference_input_type:
        converter.inference_input_type = (
            extra_toco_options.inference_input_type)

      if extra_toco_options.inference_output_type:
        converter.inference_output_type = (
            extra_toco_options.inference_output_type)

    try:
      tflite_model = converter.convert()
      if options.expected_ops_in_converted_model:
        ops_list = tflite_test_util.get_ops_list(tflite_model)
        for expected_op in options.expected_ops_in_converted_model:
          if expected_op not in ops_list:
            # Force the test to fail.
            tflite_model = None
            raise ValueError(
                "{} op not found in the converted model".format(expected_op))
    except Exception as e:  # pylint: disable=broad-except
      log = str(e)

  return tflite_model, log
Example #15
  def shell(cls, command, is_verbose, num_retries=DEFAULT_NUM_RETRIES,
    stdin=None):
    """Executes a command on this machine, retrying it up to five times if it
    initially fails.

    Args:
      command: A str representing the command to execute.
      is_verbose: A bool that indicates if we should print the command we are
        executing to stdout.
      num_retries: The number of times we should try to execute the given
        command before aborting.
      stdin: A str that is passes as standard input to the process
    Returns:
      A str with both the standard output and standard error produced when the
      command executes.
    Raises:
      ShellException: If, after five attempts, executing the named command
      failed.
    """
    tries_left = num_retries
    try:
      while tries_left:
        AppScaleLogger.verbose("shell> {0}".format(command), is_verbose)
        the_temp_file = tempfile.NamedTemporaryFile()
        if stdin is not None:
          stdin_strio = tempfile.TemporaryFile()
          stdin_strio.write(stdin)
          stdin_strio.seek(0)
          AppScaleLogger.verbose("       stdin str: {0}"\
            .format(stdin), is_verbose)
          result = subprocess.Popen(command, shell=True, stdout=the_temp_file,
            stdin=stdin_strio, stderr=subprocess.STDOUT)
        else:
          result = subprocess.Popen(command, shell=True, stdout=the_temp_file,
            stderr=subprocess.STDOUT)
        AppScaleLogger.verbose("       stdout buffer: {0}"\
          .format(the_temp_file.name), is_verbose)
        result.wait()
        if stdin is not None:
          stdin_strio.close()
        if result.returncode == 0:
          the_temp_file.seek(0)
          output = the_temp_file.read()
          the_temp_file.close()
          return output
        tries_left -= 1
        if tries_left:
          the_temp_file.close()
          AppScaleLogger.verbose("Command failed. Trying again momentarily." \
            .format(command), is_verbose)
        else:
          the_temp_file.seek(0)
          output = the_temp_file.read()
          the_temp_file.close()
          if stdin:
            raise ShellException("Executing command '{0} {1}' failed:\n{2}"\
                    .format(command, stdin, output))
          else:
            raise ShellException("Executing command '{0}' failed:\n{1}"\
                    .format(command, output))
        time.sleep(1)
    except OSError as os_error:
      if stdin:
        raise ShellException("Error executing command: '{0} {1}':{2}"\
                .format(command, stdin, os_error))
      else:
        raise ShellException("Error executing command: '{0}':{1}"\
                .format(command, os_error))
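
A hypothetical call to the class method above (ShellRunner is a stand-in name for whatever class defines this @classmethod):

output = ShellRunner.shell("ls /tmp", is_verbose=True)  # ShellRunner is a hypothetical name
print(output)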
Example #16
def createIGVsession(genome, uuids, is_file_uuid=False):
    """ Creates session file for selected file uuids, returns newly created filestore uuid 
    
    :param genome: Genome to be used in session file i.e. hg18, dm3
    :type genome: string.
    :param uuids: Array of UUIDs to be used
    :type uuids: array.
    :param uuids: Host URL i.e. 127.0.0.1:8000
    :type uuids: string
    """
    
    # Create IGV Session file and put into Filestore
    """
    http://www.postneo.com/projects/pyxml/
    
    <?xml version="1.0" encoding="UTF-8"?>
        <Global genome="hg18" locus="EGFR" version="3">
        <Resources>
            <Resource name="RNA Genes" path="http://www.broadinstitute.org/igvdata/tcga/gbm/GBM_batch1-8_level3_exp.txt.recentered.080820.gct.tdf"/>
            <Resource name="RNA Genes" path="http://www.broadinstitute.org/igvdata/annotations/hg18/rna_genes.bed"/>
            <Resource name="sno/miRNA" path="http://www.broadinstitute.org/igvdata/tcga/gbm/Sample_info.txt"/>
        </Resources>
    </Global>
    """
    logger.debug("visualization_manager.createIGVsession called")
    
    # Create the minidom document
    doc = Document()
    
    # Create the <Global> base element
    xml = doc.createElement("Global")
    xml.setAttribute("genome", genome)
    xml.setAttribute("locus", "All")
    xml.setAttribute("version", "4")
    doc.appendChild(xml)
    
    # Add Resources
    xml_resources = doc.createElement("Resources")
    xml.appendChild(xml_resources)
    
    # get paths to url 
    for samp in uuids:
        # gets filestore item 
        curr_name, curr_url = get_file_name(samp, is_file_uuid=is_file_uuid)

        logger.debug( 'New resource: ' + curr_name + ' - ' +  curr_url )
        
        # What to do if fs does not exist? 
        if (curr_name):
            
            # creates Resource element 
            res = doc.createElement("Resource")
            res.setAttribute("name", curr_name)
            res.setAttribute("path", curr_url)
            xml_resources.appendChild(res)
            
    
    # Creating temp file to enter into file_store
    tempfilename = tempfile.NamedTemporaryFile(delete=False)
    tempfilename.write(doc.toprettyxml(indent="  "))
    tempfilename.close()
    
    # getting file_store_uuid
    filestore_uuid = create(tempfilename.name, permanent=True, filetype="xml")
    filestore_item = import_file(filestore_uuid, permanent=True, refresh=True)
    
    # file to rename
    temp_name = filestore_item.datafile.name.split('/')
    temp_name = temp_name[len(temp_name)-1] + '.xml'
    
    # rename file by way of file_store
    filestore_item = rename(filestore_uuid, temp_name)
    
    # delete temp file
    os.unlink(tempfilename.name)
    
    # Print our newly created XML
    #print doc.toprettyxml(indent="  ")
    #print filestore_item.datafile.url
    
    # Url for session file 
    fs_url = filestore_item.get_full_url()
    
    # IGV url for automatic launch of Java Webstart
    igv_url = "http://www.broadinstitute.org/igv/projects/current/igv.php?sessionURL=" + fs_url
    
    return igv_url
Example #17
    def __init__(self,
                 env,
                 format='gif',
                 path=None,
                 metadata=None,
                 enabled=True,
                 base_path=None):
        """Overrides original constructor to add support for generating gifs."""
        self.format = format

        modes = env.metadata.get('render.modes', [])
        self.enabled = enabled

        # Don't bother setting anything else if not enabled
        if not self.enabled:
            return

        self.ansi_mode = False
        if 'rgb_array' not in modes:
            if 'ansi' in modes:
                self.ansi_mode = True
            else:
                # Whoops, turns out we shouldn't be enabled after all
                self.enabled = False
                return

        if path is not None and base_path is not None:
            raise error.Error('You can pass one of `path` or `base_path`.')

        self.last_frame = None
        self.env = env

        required_ext = '.json' if self.ansi_mode else '.' + format
        if path is None:
            if base_path is not None:
                # Base path given, append ext
                path = base_path + required_ext
            else:
                # Otherwise, just generate a unique filename
                with tempfile.NamedTemporaryFile(suffix=required_ext,
                                                 delete=False) as f:
                    path = f.name
        self.path = path

        path_base, actual_ext = os.path.splitext(self.path)

        if actual_ext != required_ext:
            hint = " HINT: The environment is text-only, therefore we're recording its text output in a structured JSON format." if self.ansi_mode else ''
            raise error.Error(
                'Invalid path given: {} -- must have file extension {}.{}'.
                format(self.path, required_ext, hint))
        # Touch the file in any case, so we know it's present. (This
        # corrects for platform differences. Using ffmpeg on OS X, the
        # file is precreated, but not on Linux.)
        touch(path)

        self.frames_per_sec = env.metadata.get('video.frames_per_second', 30)
        self.encoder = None  # lazily start the process
        self.broken = False

        # Dump metadata
        self.metadata = metadata or {}
        self.metadata[
            'content_type'] = 'video/vnd.openai.ansivid' if self.ansi_mode else 'video/' + self.format
        self.metadata_path = '{}.meta.json'.format(path_base)

        self.empty = True
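
A hypothetical instantiation of the recorder above (GifRecorder is a stand-in name for whatever class defines this __init__; it assumes an env that supports rgb_array rendering, as gym environments do):

import gym

env = gym.make("CartPole-v1")
recorder = GifRecorder(env, format="gif")  # GifRecorder is a hypothetical name
print(recorder.path)  # a unique temp .gif path, since neither path nor base_path was given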
Example #18
def main():
  """main."""
  args = get_args()

  # Pipe dumpbin to extract all linkable symbols from libs.
  # Good symbols are collected in candidates and also written to
  # a temp file.
  candidates = []
  tmpfile = tempfile.NamedTemporaryFile(mode="w", delete=False)
  for lib_path in args.input:
    proc = subprocess.Popen([DUMPBIN, "/nologo", "/linkermember:1", lib_path],
                            stdout=subprocess.PIPE)
    for line in codecs.getreader("utf-8")(proc.stdout):
      cols = line.split()
      if len(cols) < 2:
        continue
      sym = cols[1]
      tmpfile.file.write(sym + "\n")
      candidates.append(sym)
    exit_code = proc.wait()
    if exit_code != 0:
      print("{} failed, exit={}".format(DUMPBIN, exit_code))
      return exit_code
  tmpfile.file.close()

  # Run the symbols through undname to get their undecorated name
  # so we can filter on something readable.
  with open(args.output, "w") as def_fp:
    # track dupes
    taken = set()

    # Header for the def file.
    def_fp.write("LIBRARY " + args.target + "\n")
    def_fp.write("EXPORTS\n")
    def_fp.write("\t ??1OpDef@tensorflow@@UEAA@XZ\n")

    # Each symbols returned by undname matches the same position in candidates.
    # We compare on undname but use the decorated name from candidates.
    dupes = 0
    proc = subprocess.Popen([UNDNAME, tmpfile.name], stdout=subprocess.PIPE)
    for idx, line in enumerate(codecs.getreader("utf-8")(proc.stdout)):
      decorated = candidates[idx]
      if decorated in taken:
        # Symbol is already in output, done.
        dupes += 1
        continue

      if not INCLUDEPRE_RE.search(line):
        if EXCLUDE_RE.search(line):
          continue
        if not INCLUDE_RE.search(line):
          continue

      if "deleting destructor" in line:
        # Some of the symbols covered by INCLUDEPRE_RE export deleting
        # destructor symbols, which is a bad idea.
        # So we filter out such symbols here.
        continue

      if DATA_EXCLUDE_RE.search(line):
        def_fp.write("\t" + decorated + "\n")
      else:
        def_fp.write("\t" + decorated + " DATA\n")
      taken.add(decorated)
  exit_code = proc.wait()
  if exit_code != 0:
    print("{} failed, exit={}".format(UNDNAME, exit_code))
    return exit_code

  os.unlink(tmpfile.name)

  print("symbols={}, taken={}, dupes={}"
        .format(len(candidates), len(taken), dupes))
  return 0
Example #19
    def test_connect_pgpass_regular(self):
        passfile = tempfile.NamedTemporaryFile('w+t', delete=False)
        passfile.write(textwrap.dedent(R'''
            abc:*:*:user:password from pgpass for user@abc
            localhost:*:*:*:password from pgpass for localhost
            cde:5433:*:*:password from pgpass for cde:5433

            *:*:*:testuser:password from pgpass for testuser
            *:*:testdb:*:password from pgpass for testdb
            # comment
            *:*:test\:db:test\\:password from pgpass with escapes
        '''))
        passfile.close()
        os.chmod(passfile.name, stat.S_IWUSR | stat.S_IRUSR)

        try:
            # passfile path in env
            self.run_testcase({
                'env': {
                    'PGPASSFILE': passfile.name
                },
                'host': 'abc',
                'user': '******',
                'database': 'db',
                'result': (
                    [('abc', 5432)],
                    {
                        'password': '******',
                        'user': '******',
                        'database': 'db',
                    }
                )
            })

            # passfile path as explicit arg
            self.run_testcase({
                'host': 'abc',
                'user': '******',
                'database': 'db',
                'passfile': passfile.name,
                'result': (
                    [('abc', 5432)],
                    {
                        'password': '******',
                        'user': '******',
                        'database': 'db',
                    }
                )
            })

            # passfile path in dsn
            self.run_testcase({
                'dsn': 'postgres://user@abc/db?passfile={}'.format(
                    passfile.name),
                'result': (
                    [('abc', 5432)],
                    {
                        'password': '******',
                        'user': '******',
                        'database': 'db',
                    }
                )
            })

            self.run_testcase({
                'host': 'localhost',
                'user': '******',
                'database': 'db',
                'passfile': passfile.name,
                'result': (
                    [('localhost', 5432)],
                    {
                        'password': '******',
                        'user': '******',
                        'database': 'db',
                    }
                )
            })

            if _system != 'Windows':
                # unix socket gets normalized as localhost
                self.run_testcase({
                    'host': '/tmp',
                    'user': '******',
                    'database': 'db',
                    'passfile': passfile.name,
                    'result': (
                        ['/tmp/.s.PGSQL.5432'],
                        {
                            'password': '******',
                            'user': '******',
                            'database': 'db',
                        }
                    )
                })

            # port matching (also tests that `:` can be part of password)
            self.run_testcase({
                'host': 'cde',
                'port': 5433,
                'user': '******',
                'database': 'db',
                'passfile': passfile.name,
                'result': (
                    [('cde', 5433)],
                    {
                        'password': '******',
                        'user': '******',
                        'database': 'db',
                    }
                )
            })

            # user matching
            self.run_testcase({
                'host': 'def',
                'user': '******',
                'database': 'db',
                'passfile': passfile.name,
                'result': (
                    [('def', 5432)],
                    {
                        'password': '******',
                        'user': '******',
                        'database': 'db',
                    }
                )
            })

            # database matching
            self.run_testcase({
                'host': 'efg',
                'user': '******',
                'database': 'testdb',
                'passfile': passfile.name,
                'result': (
                    [('efg', 5432)],
                    {
                        'password': '******',
                        'user': '******',
                        'database': 'testdb',
                    }
                )
            })

            # test escaping
            self.run_testcase({
                'host': 'fgh',
                'user': R'test\\',
                'database': R'test\:db',
                'passfile': passfile.name,
                'result': (
                    [('fgh', 5432)],
                    {
                        'password': '******',
                        'user': R'test\\',
                        'database': R'test\:db',
                    }
                )
            })

        finally:
            os.unlink(passfile.name)
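
The last testcase above exercises the pgpass escaping rules: \: is a literal colon and \\ a literal backslash. A minimal sketch of a splitter honoring those rules (a hypothetical helper, not asyncpg's implementation):

def _split_pgpass_line(line):
    # Split on ':' while honoring backslash escapes, as libpq does
    # for ~/.pgpass entries.
    fields, buf, it = [], [], iter(line)
    for ch in it:
        if ch == '\\':
            buf.append(next(it, ''))
        elif ch == ':':
            fields.append(''.join(buf))
            buf = []
        else:
            buf.append(ch)
    fields.append(''.join(buf))
    return fields

assert _split_pgpass_line(r'*:*:test\:db:test\\:pw')[2] == 'test:db'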
Example #20
def FindExecutable_ReturnNoneIfFileIsNotExecutable_test():
    with tempfile.NamedTemporaryFile() as non_executable:
        assert_that(None, equal_to(utils.FindExecutable(non_executable.name)))
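
For context, a FindExecutable-style check reduces to an os.access test; this is an assumed equivalent of ycmd's utils.FindExecutable, not its actual code:

import os

def find_executable(path):
    # Return the path only when it is a file with the execute bit set;
    # a fresh NamedTemporaryFile is created with mode 0o600, so it fails.
    if os.path.isfile(path) and os.access(path, os.X_OK):
        return path
    return None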
Example #21
control = os.path.abspath(args.control)

## Set tmp dir
tmpdir = tempfile.mkdtemp(prefix='tmp_sicer_', dir=os.getcwd())
if not args.keeptmp:
    atexit.register(shutil.rmtree, tmpdir)

os.chdir(tmpdir)

## Remove redundant reads
## ======================
#if args.redThresh > 0:
sys.stderr.write("\n*** Preprocess raw files to remove redundant reads\n")

filteredSampleBam = tempfile.NamedTemporaryFile(
    dir=tmpdir, prefix=os.path.basename(treatment) + '.',
    suffix='.rm.bam').name
filteredControlBam = tempfile.NamedTemporaryFile(
    dir=tmpdir, prefix=os.path.basename(control) + '.', suffix='.rm.bam').name

procs = []
for inBam, outBam in zip([treatment, control],
                         [filteredSampleBam, filteredControlBam]):
    # Create a separate dir for each process so tmp files don't bother each other
    tmpRedDir = os.path.join(tmpdir,
                             'tmp_' + os.path.basename(outBam) + '_dir')
    os.makedirs(tmpRedDir)
    cmd= """cd %(tmpRedDir)s
export PYTHONPATH=%(pythonpath)s
%(python)s %(script)s -t %(redThresh)s -b %(inBam)s -o %(outBam)s -f %(requiredFlag)s -F %(filterFlag)s -q %(mapq)s""" \
        %{'tmpRedDir': tmpRedDir,
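
The tmp-dir lifecycle set up near the top of this example is a tidy idiom on its own: reserve a scratch directory with mkdtemp and let atexit guarantee cleanup. A minimal sketch, with keeptmp standing in for args.keeptmp:

import atexit
import shutil
import tempfile

tmpdir = tempfile.mkdtemp(prefix='tmp_demo_')
keeptmp = False  # hypothetical flag standing in for args.keeptmp
if not keeptmp:
    # Registered teardown runs at interpreter exit, even on sys.exit().
    atexit.register(shutil.rmtree, tmpdir, ignore_errors=True)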
Example #22
 def test_save_image_single_pixel(self):
     with tempfile.NamedTemporaryFile(suffix='.png') as f:
         t = torch.rand(1, 3, 1, 1)
         utils.save_image(t, f.name)
         self.assertTrue(os.path.exists(f.name), 'The pixel image is not present after save')

Example #23
 # Fix inline image references
 if len(inline_images) > 0:
   pattern=re.compile(r'src\s*=\s*"?cid:([^"\s]+)\s*"?')
   for cid in re.findall(pattern, body):
     for image in inline_images:
       filename = image['filename']
       if re.search(cid, image['cid']):
         body = body.replace('cid:' + cid, filename)
 
 # Append any attached images to the end if they exist
 for image in append_images:
   body = body + '<img src="' + image['filename']  + '"><p>'
 
 # Write out the HTML file for wkhtmltopdf (text mode so we can write str)
 html_tempfile = tempfile.NamedTemporaryFile(mode='w', suffix='.html', dir=temp_output_dir, delete=False)
 if html_tempfile:
   html_tempfile.write(headers + body)
   html_tempfile.close()
 
 # Run the PDF conversion
 pdf_output_file = tempfile.NamedTemporaryFile(suffix='.pdf', dir=temp_output_dir, delete=False)
 if converter_type == 'cups':
   FNULL = open(os.devnull, 'w')
   if debug:
     FNULL=None
   retcode=call([cupsfilter_bin, '-o', 'media=' + papersize, '-t', msg['subject'], html_tempfile.name], stdout=pdf_output_file, stderr=FNULL)
 elif converter_type == 'wkhtmltopdf':
   call([wkhtmltopdf_bin, '-s', papersize, '--encoding', 'utf-8', '--title', msg['subject'], '-q', html_tempfile.name, pdf_output_file.name])
 else:
   print("Unknown converter type %s, exiting" % converter_type)
Example #24
 def test_creates_named(self):
     # NamedTemporaryFile creates files with names
     f = tempfile.NamedTemporaryFile()
     self.failUnless(os.path.exists(f.name),
                     "NamedTemporaryFile %s does not exist" % f.name)
Example #25
def mpr(end):

    #   ATDF page 47
    expected_atdf = "MPR:"
    #   record length in bytes
    rec_len = 0

    #   STDF v4 page 53
    record = MPR(endian=end)

    test_num = 1
    record.set_value('TEST_NUM', test_num)
    rec_len += 4
    expected_atdf += str(test_num) + "|"

    head_num = 2
    record.set_value('HEAD_NUM', head_num)
    rec_len += 1
    expected_atdf += str(head_num) + "|"

    site_num = 3
    record.set_value('SITE_NUM', site_num)
    rec_len += 1
    expected_atdf += str(site_num) + "|"

    #    The order of fields is different in STDF and ATDF for the MPR record

    #    STDF page 53| ATDF page 47

    #    TEST_NUM    = TEST_NUM
    #    HEAD_NUM    = HEAD_NUM
    #    SITE_NUM    = SITE_NUM
    #    TEST_FLG
    #    PARM_FLG
    #    RTN_ICNT    -> missing

    #    RSLT_CNT    -> missing
    #    RTN_STAT    = RTN_STAT
    #    RTN_RSLT    = RTN_RSLT
    #                -> TEST_FLG bits 6 & 7 PARM_FLG bit 5
    #                -> TEST_FLG bits 0, 2, 3, 4 & 5 PARM_FLG bits 0, 1, 2, 3 & 4
    #    TEST_TXT    = TEST_TXT
    #    ALARM_ID    = ALARM_ID
    #                -> PARM_FLG bits 6 & 7
    #    OPT_FLAG
    #    RES_SCAL
    #    LLM_SCAL
    #    HLM_SCAL
    #    LO_LIMIT
    #    HI_LIMIT
    #    START_IN
    #    INCR_IN
    #    RTN_INDX
    #    UNITS       = UNITS
    #    UNITS_IN
    #    C_RESFMT
    #    C_LLMFMT
    #    C_HLMFMT
    #    LO_SPEC
    #    HI_SPEC
    #                = LO_LIMIT
    #                = HI_LIMIT
    #                = START_IN
    #                = INCR_IN
    #                = UNITS_IN
    #                = RTN_INDX
    #                = C_RESFMT
    #                = C_LLMFMT
    #                = C_HLMFMT
    #                = LO_SPEC
    #                = HI_SPEC
    #                = RES_SCAL
    #                = LLM_SCAL
    #                = HLM_SCAL

    test_flg = ['1', '0', '1', '1', '1', '1', '0', '1']
    record.set_value('TEST_FLG', test_flg)
    rec_len += 1

    parm_flg = ['1', '1', '1', '1', '1', '1', '1', '1']
    record.set_value('PARM_FLG', parm_flg)
    rec_len += 1

    rtn_icnt = 11
    record.set_value('RTN_ICNT', rtn_icnt)
    rec_len += 2

    rslt_cnt = 12
    record.set_value('RSLT_CNT', rslt_cnt)
    rec_len += 2

    rtn_stat = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    record.set_value('RTN_STAT', rtn_stat)
    rec_len += math.ceil(len(rtn_stat) / 2)
    expected_atdf += "1,2,3,4,5,6,7,8,9,10,11|"

    rtn_rslt = [
        0.123, 1.321, 0.4567, 1.22453, 2.32115, 3.87643, 2.214253, 3.24212,
        4.13411, 5.1, 6.1, 7.1
    ]
    record.set_value('RTN_RSLT', rtn_rslt)
    rec_len += len(rtn_rslt) * 4
    expected_atdf += "0.123,1.321,0.4567,1.22453,2.32115,3.87643,2.214253,3.24212,4.13411,5.1,6.1,7.1|"

    #   TEST_FLG bits 6&7 and PARM_FLG bit 5
    expected_atdf += "FA|"
    #   TEST_FLG bits 0,2,3,4 & 5 and PARM_FLG bits 0, 1, 2, 3 & 4
    expected_atdf += "AUTNXSDOHL|"

    test_txt = 'SCAN'
    record.set_value('TEST_TXT', test_txt)
    rec_len += len(test_txt) + 1
    expected_atdf += str(test_txt) + "|"

    alarm_id = 'OVP'
    record.set_value('ALARM_ID', alarm_id)
    rec_len += len(alarm_id) + 1
    expected_atdf += str(alarm_id) + "|"

    #   PARM_FLG bits 6 & 7
    expected_atdf += "LH|"

    opt_flag = ['0', '0', '0', '0', '0', '0', '1', '1']
    record.set_value('OPT_FLAG', opt_flag)
    rec_len += 1

    res_scal = 10
    record.set_value('RES_SCAL', res_scal)
    rec_len += 1

    llm_scal = 10
    record.set_value('LLM_SCAL', llm_scal)
    rec_len += 1

    hlm_scal = 10
    record.set_value('HLM_SCAL', hlm_scal)
    rec_len += 1

    lo_limit = 1.001
    record.set_value('LO_LIMIT', lo_limit)
    rec_len += 4

    hi_limit = 1.999
    record.set_value('HI_LIMIT', hi_limit)
    rec_len += 4

    start_in = 2.5
    record.set_value('START_IN', start_in)
    rec_len += 4

    incr_in = 1.125
    record.set_value('INCR_IN', incr_in)
    rec_len += 4

    rtn_indx = [1, 2, 3, 4000, 5000, 6000, 7000, 8000, 9000, 10000, 11000]
    record.set_value('RTN_INDX', rtn_indx)
    rec_len += len(rtn_indx) * 2

    units = 'V'
    record.set_value('UNITS', units)
    rec_len += len(units) + 1
    expected_atdf += str(units) + "|"

    units_in = 'mV'
    record.set_value('UNITS_IN', units_in)
    rec_len += len(units_in) + 1

    c_resfmt = '%3.3f'
    record.set_value('C_RESFMT', c_resfmt)
    rec_len += len(c_resfmt) + 1

    c_llmfmt = '%3.6f'
    record.set_value('C_LLMFMT', c_llmfmt)
    rec_len += len(c_llmfmt) + 1

    c_hlmfmt = '%3.6f'
    record.set_value('C_HLMFMT', c_hlmfmt)
    rec_len += len(c_hlmfmt) + 1

    lo_spec = 1.001
    record.set_value('LO_SPEC', lo_spec)
    rec_len += 4

    hi_spec = 1.999
    record.set_value('HI_SPEC', hi_spec)
    rec_len += 4

    #   LO_LIMIT
    expected_atdf += str(lo_limit) + "|"
    #   HI_LIMIT
    expected_atdf += str(hi_limit) + "|"
    #   START_IN
    expected_atdf += str(start_in) + "|"
    #   INCR_IN
    expected_atdf += str(incr_in) + "|"
    #   UNITS_IN
    expected_atdf += str(units_in) + "|"
    #   RTN_INDX
    expected_atdf += "1,2,3,4000,5000,6000,7000,8000,9000,10000,11000|"
    #   C_RESFMT
    expected_atdf += str(c_resfmt) + "|"
    #   C_LLMFMT
    expected_atdf += str(c_llmfmt) + "|"
    #   C_HLMFMT
    expected_atdf += str(c_hlmfmt) + "|"
    #   LO_SPEC
    expected_atdf += str(lo_spec) + "|"
    #   HI_SPEC
    expected_atdf += str(hi_spec) + "|"
    #   RES_SCAL
    expected_atdf += str(res_scal) + "|"
    #   LLM_SCAL
    expected_atdf += str(llm_scal) + "|"
    #   HLM_SCAL
    expected_atdf += str(hlm_scal)

    #    Test serialization
    #    1. Save MPR STDF record into a file
    #    2. Read byte by byte and compare with expected value

    tf = tempfile.NamedTemporaryFile(delete=False)

    f = open(tf.name, "wb")

    w_data = record.__repr__()
    f.write(w_data)
    f.close()

    f = open(tf.name, "rb")

    stdfRecTest = STDFRecordTest(f, end)
    #   rec_len, rec_type, rec_sub
    stdfRecTest.assert_file_record_header(rec_len, 15, 15)
    #   Test TEST_NUM, expected value test_num
    stdfRecTest.assert_int(4, test_num)
    #   Test HEAD_NUM, expected value head_num
    stdfRecTest.assert_int(1, head_num)
    #   Test SITE_NUM, expected value site_num
    stdfRecTest.assert_int(1, site_num)
    #   Test TEST_FLG, expected value test_flg
    stdfRecTest.assert_bits(test_flg)
    #   Test PARM_FLG, expected value parm_flg
    stdfRecTest.assert_bits(parm_flg)
    #   Test RTN_ICNT, expected value rtn_icnt
    stdfRecTest.assert_int(2, rtn_icnt)
    #   Test RSLT_CNT, expected value rslt_cnt
    stdfRecTest.assert_int(2, rslt_cnt)
    #   Test RTN_STAT, expected value rtn_stat
    stdfRecTest.assert_nibble_array(rtn_icnt, rtn_stat)
    #   Test RTN_RSLT, expected value rtn_rslt
    stdfRecTest.assert_float_array(rtn_rslt)
    #   Test TEST_TXT, expected length of the string and value of the test_txt
    stdfRecTest.assert_ubyte(len(test_txt))
    stdfRecTest.assert_char_array(len(test_txt), test_txt)
    #   Test ALARM_ID, expected length of the string and value of the alarm_id
    stdfRecTest.assert_ubyte(len(alarm_id))
    stdfRecTest.assert_char_array(len(alarm_id), alarm_id)
    #   Test OPT_FLAG, expected value opt_flag
    stdfRecTest.assert_bits(opt_flag)
    #   Test RES_SCAL, expected value res_scal
    stdfRecTest.assert_int(1, res_scal)
    #   Test LLM_SCAL, expected value llm_scal
    stdfRecTest.assert_int(1, llm_scal)
    #   Test HLM_SCAL, expected value hlm_scal
    stdfRecTest.assert_int(1, hlm_scal)
    #   Test LO_LIMIT, expected value lo_limit
    stdfRecTest.assert_float(lo_limit)
    #   Test HI_LIMIT, expected value hi_limit
    stdfRecTest.assert_float(hi_limit)
    #   Test START_IN, expected value start_in
    stdfRecTest.assert_float(start_in)
    #   Test INCR_IN, expected value incr_in
    stdfRecTest.assert_float(incr_in)
    #   Test RTN_INDX, expected value rtn_indx
    stdfRecTest.assert_int_array(2, rtn_indx)
    #   Test UNITS, expected value units
    stdfRecTest.assert_ubyte(len(units))
    stdfRecTest.assert_char_array(len(units), units)
    #   Test UNITS_IN, expected value units_in
    stdfRecTest.assert_ubyte(len(units_in))
    stdfRecTest.assert_char_array(len(units_in), units_in)
    #   Test C_RESFMT, expected value c_resfmt
    stdfRecTest.assert_ubyte(len(c_resfmt))
    stdfRecTest.assert_char_array(len(c_resfmt), c_resfmt)
    #   Test C_LLMFMT, expected value c_llmfmt
    stdfRecTest.assert_ubyte(len(c_llmfmt))
    stdfRecTest.assert_char_array(len(c_llmfmt), c_llmfmt)
    #   Test C_HLMFMT, expected value c_hlmfmt
    stdfRecTest.assert_ubyte(len(c_hlmfmt))
    stdfRecTest.assert_char_array(len(c_hlmfmt), c_hlmfmt)
    #   Test LO_SPEC, expected value lo_spec
    stdfRecTest.assert_float(lo_spec)
    #   Test HI_SPEC, expected value hi_spec
    stdfRecTest.assert_float(hi_spec)

    f.close()

    #    Test de-serialization
    #    1. Open STDF record from a file
    #    2. Read record fields and compare with the expected value

    inst = MPR('V4', end, w_data)
    #   rec_len, rec_type, rec_sub
    stdfRecTest.assert_instance_record_header(inst, rec_len, 15, 15)
    #   Test TEST_NUM, position 3, value of test_num variable
    stdfRecTest.assert_instance_field(inst, 3, test_num)
    #   Test HEAD_NUM, position 4, value of head_num variable
    stdfRecTest.assert_instance_field(inst, 4, head_num)
    #   Test SITE_NUM, position 5, value of site_num variable
    stdfRecTest.assert_instance_field(inst, 5, site_num)
    #   Test TEST_FLG, position 6, value of test_flg variable
    stdfRecTest.assert_instance_field(inst, 6, test_flg)
    #   Test PARM_FLG, position 7, value of parm_flg variable
    stdfRecTest.assert_instance_field(inst, 7, parm_flg)
    #   Test RTN_ICNT, position 8, value of rtn_icnt variable
    stdfRecTest.assert_instance_field(inst, 8, rtn_icnt)
    #   Test RSLT_CNT, position 9, value of rslt_cnt variable
    stdfRecTest.assert_instance_field(inst, 9, rslt_cnt)
    #   Test RTN_STAT, position 10, value of rtn_stat variable
    stdfRecTest.assert_instance_field(inst, 10, rtn_stat)
    #   Test RTN_RSLT, position 11, value of rtn_rslt variable
    stdfRecTest.assert_instance_field(inst, 11, rtn_rslt)
    #   Test TEST_TXT, position 12, value of test_txt variable
    stdfRecTest.assert_instance_field(inst, 12, test_txt)
    #   Test ALARM_ID, position 13, value of alarm_id variable
    stdfRecTest.assert_instance_field(inst, 13, alarm_id)
    #   Test OPT_FLAG, position 14, value of opt_flag variable
    stdfRecTest.assert_instance_field(inst, 14, opt_flag)
    #   Test RES_SCAL, position 15, value of res_scal variable
    stdfRecTest.assert_instance_field(inst, 15, res_scal)
    #   Test LLM_SCAL, position 16, value of llm_scal variable
    stdfRecTest.assert_instance_field(inst, 16, llm_scal)
    #   Test HLM_SCAL, position 17, value of hlm_scal variable
    stdfRecTest.assert_instance_field(inst, 17, hlm_scal)
    #   Test LO_LIMIT, position 18, value of lo_limit variable
    stdfRecTest.assert_instance_field(inst, 18, lo_limit)
    #   Test HI_LIMIT, position 19, value of hi_limit variable
    stdfRecTest.assert_instance_field(inst, 19, hi_limit)
    #   Test START_IN, position 20, value of start_in variable
    stdfRecTest.assert_instance_field(inst, 20, start_in)
    #   Test INCR_IN, position 21, value of incr_in variable
    stdfRecTest.assert_instance_field(inst, 21, incr_in)
    #   Test RTN_INDX, position 22, value of rtn_indx variable
    stdfRecTest.assert_instance_field(inst, 22, rtn_indx)
    #   Test UNITS, position 23, value of units variable
    stdfRecTest.assert_instance_field(inst, 23, units)
    #   Test UNITS_IN, position 24, value of units_in variable
    stdfRecTest.assert_instance_field(inst, 24, units_in)
    #   Test C_RESFMT, position 25, value of c_resfmt variable
    stdfRecTest.assert_instance_field(inst, 25, c_resfmt)
    #   Test C_LLMFMT, position 26, value of c_llmfmt variable
    stdfRecTest.assert_instance_field(inst, 26, c_llmfmt)
    #   Test C_HLMFMT, position 27, value of c_hlmfmt variable
    stdfRecTest.assert_instance_field(inst, 27, c_hlmfmt)
    #   Test LO_SPEC, position 28, value of lo_spec variable
    stdfRecTest.assert_instance_field(inst, 28, lo_spec)
    #   Test HI_SPEC, position 29, value of hi_spec variable
    stdfRecTest.assert_instance_field(inst, 29, hi_spec)

    #   Test ATDF output
    assert inst.to_atdf() == expected_atdf

    os.remove(tf.name)

    #   Test reset method and compressed data when OPT_FLAG is used and
    #   fields after OPT_FLAG are not set

    record.reset()

    test_num = 11
    record.set_value('TEST_NUM', test_num)
    rec_len = 4

    head_num = 21
    record.set_value('HEAD_NUM', head_num)
    rec_len += 1

    site_num = 31
    record.set_value('SITE_NUM', site_num)
    rec_len += 1

    test_flg = ['1', '0', '1', '1', '1', '1', '0', '1']
    record.set_value('TEST_FLG', test_flg)
    rec_len += 1

    parm_flg = ['1', '1', '1', '1', '1', '1', '1', '1']
    record.set_value('PARM_FLG', parm_flg)
    rec_len += 1

    rtn_icnt = 11
    record.set_value('RTN_ICNT', rtn_icnt)
    rec_len += 2

    rslt_cnt = 12
    record.set_value('RSLT_CNT', rslt_cnt)
    rec_len += 2

    rtn_stat = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
    record.set_value('RTN_STAT', rtn_stat)
    rec_len += math.ceil(len(rtn_stat) / 2)

    rtn_rslt = [
        0.123, 1.321, 0.4567, 1.22453, 2.32115, 3.87643, 2.214253, 3.24212,
        4.13411, 5.1, 6.1, 7.1
    ]
    record.set_value('RTN_RSLT', rtn_rslt)
    rec_len += len(rtn_rslt) * 4

    test_txt = 'SCAN'
    record.set_value('TEST_TXT', test_txt)
    rec_len += len(test_txt) + 1

    alarm_id = 'OVP'
    record.set_value('ALARM_ID', alarm_id)
    rec_len += len(alarm_id) + 1

    tf = tempfile.NamedTemporaryFile(delete=False)

    f = open(tf.name, "wb")
    w_data = record.__repr__()
    f.write(w_data)
    f.close()

    f = open(tf.name, "rb")

    stdfRecTest = STDFRecordTest(f, end)
    #   rec_len, rec_type, rec_sub
    stdfRecTest.assert_file_record_header(rec_len, 15, 15)
    #   Test TEST_NUM, expected value test_num
    stdfRecTest.assert_int(4, test_num)
    #   Test HEAD_NUM, expected value head_num
    stdfRecTest.assert_int(1, head_num)
    #   Test SITE_NUM, expected value site_num
    stdfRecTest.assert_int(1, site_num)
    #   Test TEST_FLG, expected value test_flg
    stdfRecTest.assert_bits(test_flg)
    #   Test PARM_FLG, expected value parm_flg
    stdfRecTest.assert_bits(parm_flg)
    #   Test RTN_ICNT, expected value rtn_icnt
    stdfRecTest.assert_int(2, rtn_icnt)
    #   Test RSLT_CNT, expected value rslt_cnt
    stdfRecTest.assert_int(2, rslt_cnt)
    #   Test RTN_STAT, expected value rtn_stat
    stdfRecTest.assert_nibble_array(rtn_icnt, rtn_stat)
    #   Test RTN_RSLT, expected value rtn_rslt
    stdfRecTest.assert_float_array(rtn_rslt)
    #   Test TEST_TXT, expected length of the string and value of the test_txt
    stdfRecTest.assert_ubyte(len(test_txt))
    stdfRecTest.assert_char_array(len(test_txt), test_txt)
    #   Test ALARM_ID, expected length of the string and value of the alarm_id
    stdfRecTest.assert_ubyte(len(alarm_id))
    stdfRecTest.assert_char_array(len(alarm_id), alarm_id)

    os.remove(tf.name)
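
A note on the rec_len arithmetic above: RTN_STAT values are 4-bit nibbles, two per byte, hence math.ceil(len(rtn_stat) / 2). An illustrative packer, not the library's actual encoder:

import math

def pack_nibbles(values):
    # Two 4-bit values share one byte: even indices fill the low nibble,
    # odd indices the high nibble.
    packed = bytearray(math.ceil(len(values) / 2))
    for i, v in enumerate(values):
        packed[i // 2] |= (v & 0x0F) << (4 if i % 2 else 0)
    return bytes(packed)

assert len(pack_nibbles(list(range(1, 12)))) == 6  # 11 nibbles -> 6 bytes

Example #26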
		convert_alphatex(GRASS, tex_dir+"/blocks/double_plant_fern_bottom.png", "70+120", str(PXSIZE), target_dir("/mods/ITEMS/mcl_flowers/textures")+"/mcl_flowers_double_plant_fern_bottom.png")
		convert_alphatex(GRASS, tex_dir+"/blocks/double_plant_fern_top.png", "70+120", str(PXSIZE), target_dir("/mods/ITEMS/mcl_flowers/textures")+"/mcl_flowers_double_plant_fern_top.png")
		convert_alphatex(GRASS, tex_dir+"/blocks/double_plant_grass_bottom.png", "70+120", str(PXSIZE), target_dir("/mods/ITEMS/mcl_flowers/textures")+"/mcl_flowers_double_plant_grass_bottom.png")
		convert_alphatex(GRASS, tex_dir+"/blocks/double_plant_grass_top.png", "70+120", str(PXSIZE), target_dir("/mods/ITEMS/mcl_flowers/textures")+"/mcl_flowers_double_plant_grass_top.png")

#	TODO: Convert banner masks
#	if os.path.isdir(tex_dir + "/entity/banner"):
# These are the ImageMagick commands needed to convert the mask images
#		os.system("mogrify -transparent-color "+filename)
#		os.system("mogrify -clip-mask "+tex_dir+"/entity/banner/base.png"+" -alpha Copy "+filename)
#		os.system("mogrify -fill white -colorize 100 "+filename)

		print("Textures conversion COMPLETE!")
		if failed_conversions > 0:
			print("WARNING: Number of missing files in original resource pack: "+str(failed_conversions))
		print("NOTE: Please keep in mind this script does not reliably convert all the textures yet.")
		if make_texture_pack:
			print("You can now retrieve the texture pack in "+working_dir+"/texture_pack/")

# ENTRY POINT
if make_texture_pack and not os.path.isdir("./texture_pack"):
	os.mkdir("texture_pack")

tempfile1 = tempfile.NamedTemporaryFile()
tempfile2 = tempfile.NamedTemporaryFile()

convert_textures()

tempfile1.close()
tempfile2.close()
Example #27
def to_text(path, language='fra'):
    """Wraps Tesseract 4 OCR with custom language model.

    Parameters
    ----------
    path : str
        path of electronic invoice in JPG or PNG format
    language : str
        Tesseract language model to use, e.g. 'fra' (default)

    Returns
    -------
    extracted_str : str
        returns extracted text from image in JPG or PNG format

    """
    import subprocess
    from distutils import spawn
    import tempfile

    # Check for dependencies. Needs Tesseract and Imagemagick installed.
    if not spawn.find_executable('tesseract'):
        raise EnvironmentError('tesseract not installed.')
    if not spawn.find_executable('convert'):
        raise EnvironmentError('imagemagick not installed.')
    if not spawn.find_executable('gs'):
        raise EnvironmentError('ghostscript not installed.')

    with tempfile.NamedTemporaryFile(suffix='.tiff') as tf:
        # Step 1: Convert to TIFF
        gs_cmd = [
            'gs',
            '-q',
            '-dNOPAUSE',
            '-r600x600',
            '-sDEVICE=tiff24nc',
            '-sOutputFile=' + tf.name,
            path,
            '-c',
            'quit',
        ]
        # Run Ghostscript to completion rather than sleeping and hoping.
        subprocess.check_call(gs_cmd)

        # Step 2: Enhance TIFF
        magick_cmd = [
            'convert',
            tf.name,
            '-colorspace',
            'gray',
            '-type',
            'grayscale',
            '-contrast-stretch',
            '0',
            '-sharpen',
            '0x1',
            'tiff:-',
        ]

        p1 = subprocess.Popen(magick_cmd, stdout=subprocess.PIPE)

        tess_cmd = ['tesseract', '-l', language, '--oem', '1', '--psm', '3', 'stdin', 'stdout']
        p2 = subprocess.Popen(tess_cmd, stdin=p1.stdout, stdout=subprocess.PIPE)

        out, err = p2.communicate()

        extracted_str = out.decode('utf-8', errors='replace')

        return extracted_str
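
Assuming tesseract, convert, and gs are on PATH and the French model is installed, usage would look like this ('invoice.png' is a placeholder path):

text = to_text('invoice.png', language='fra')
print(text)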
Example #28
def _executeShCmd(cmd, shenv, results, timeoutHelper):
    if timeoutHelper.timeoutReached():
        # Prevent further recursion if the timeout has been hit
        # as we should try avoid launching more processes.
        return None

    if isinstance(cmd, ShUtil.Seq):
        if cmd.op == ';':
            res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
            return _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)

        if cmd.op == '&':
            raise InternalShellError(cmd,"unsupported shell operator: '&'")

        if cmd.op == '||':
            res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
            if res != 0:
                res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
            return res

        if cmd.op == '&&':
            res = _executeShCmd(cmd.lhs, shenv, results, timeoutHelper)
            if res is None:
                return res

            if res == 0:
                res = _executeShCmd(cmd.rhs, shenv, results, timeoutHelper)
            return res

        raise ValueError('Unknown shell command: %r' % cmd.op)
    assert isinstance(cmd, ShUtil.Pipeline)

    # Handle shell builtins first.
    if cmd.commands[0].args[0] == 'cd':
        if len(cmd.commands) != 1:
            raise ValueError("'cd' cannot be part of a pipeline")
        if len(cmd.commands[0].args) != 2:
            raise ValueError("'cd' supports only one argument")
        newdir = cmd.commands[0].args[1]
        # Update the cwd in the parent environment.
        if os.path.isabs(newdir):
            shenv.cwd = newdir
        else:
            shenv.cwd = os.path.realpath(os.path.join(shenv.cwd, newdir))
        # The cd builtin always succeeds. If the directory does not exist, the
        # following Popen calls will fail instead.
        return 0

    # Handle "echo" as a builtin if it is not part of a pipeline. This greatly
    # speeds up tests that construct input files by repeatedly echo-appending to
    # a file.
    # FIXME: Standardize on the builtin echo implementation. We can use a
    # temporary file to sidestep blocking pipe write issues.
    if cmd.commands[0].args[0] == 'echo' and len(cmd.commands) == 1:
        output = executeBuiltinEcho(cmd.commands[0], shenv)
        results.append(ShellCommandResult(cmd.commands[0], output, "", 0,
                                          False))
        return 0

    if cmd.commands[0].args[0] == 'export':
        if len(cmd.commands) != 1:
            raise ValueError("'export' cannot be part of a pipeline")
        if len(cmd.commands[0].args) != 2:
            raise ValueError("'export' supports only one argument")
        updateEnv(shenv, cmd.commands[0])
        return 0

    procs = []
    default_stdin = subprocess.PIPE
    stderrTempFiles = []
    opened_files = []
    named_temp_files = []
    # To avoid deadlock, we use a single stderr stream for piped
    # output. This is null until we have seen some output using
    # stderr.
    for i,j in enumerate(cmd.commands):
        # Reference the global environment by default.
        cmd_shenv = shenv
        if j.args[0] == 'env':
            # Create a copy of the global environment and modify it for this one
            # command. There might be multiple envs in a pipeline:
            #   env FOO=1 llc < %s | env BAR=2 llvm-mc | FileCheck %s
            cmd_shenv = ShellEnvironment(shenv.cwd, shenv.env)
            updateEnv(cmd_shenv, j)

        stdin, stdout, stderr = processRedirects(j, default_stdin, cmd_shenv,
                                                 opened_files)

        # If stderr wants to come from stdout, but stdout isn't a pipe, then put
        # stderr on a pipe and treat it as stdout.
        if (stderr == subprocess.STDOUT and stdout != subprocess.PIPE):
            stderr = subprocess.PIPE
            stderrIsStdout = True
        else:
            stderrIsStdout = False

            # Don't allow stderr on a PIPE except for the last
            # process, this could deadlock.
            #
            # FIXME: This is slow, but so is deadlock.
            if stderr == subprocess.PIPE and j != cmd.commands[-1]:
                stderr = tempfile.TemporaryFile(mode='w+b')
                stderrTempFiles.append((i, stderr))

        # Resolve the executable path ourselves.
        args = list(j.args)
        executable = None
        # For paths relative to cwd, use the cwd of the shell environment.
        if args[0].startswith('.'):
            exe_in_cwd = os.path.join(cmd_shenv.cwd, args[0])
            if os.path.isfile(exe_in_cwd):
                executable = exe_in_cwd
        if not executable:
            executable = lit.util.which(args[0], cmd_shenv.env['PATH'])
        if not executable:
            raise InternalShellError(j, '%r: command not found' % j.args[0])

        # Replace uses of /dev/null with temporary files.
        if kAvoidDevNull:
            for i,arg in enumerate(args):
                if arg == "/dev/null":
                    f = tempfile.NamedTemporaryFile(delete=False)
                    f.close()
                    named_temp_files.append(f.name)
                    args[i] = f.name

        # Expand all glob expressions
        args = expand_glob_expressions(args, cmd_shenv.cwd)

        # On Windows, do our own command line quoting for better compatibility
        # with some core utility distributions.
        if kIsWindows:
            args = quote_windows_command(args)

        try:
            procs.append(subprocess.Popen(args, cwd=cmd_shenv.cwd,
                                          executable = executable,
                                          stdin = stdin,
                                          stdout = stdout,
                                          stderr = stderr,
                                          env = cmd_shenv.env,
                                          close_fds = kUseCloseFDs))
            # Let the helper know about this process
            timeoutHelper.addProcess(procs[-1])
        except OSError as e:
            raise InternalShellError(j, 'Could not create process ({}) due to {}'.format(executable, e))

        # Immediately close stdin for any process taking stdin from us.
        if stdin == subprocess.PIPE:
            procs[-1].stdin.close()
            procs[-1].stdin = None

        # Update the current stdin source.
        if stdout == subprocess.PIPE:
            default_stdin = procs[-1].stdout
        elif stderrIsStdout:
            default_stdin = procs[-1].stderr
        else:
            default_stdin = subprocess.PIPE

    # Explicitly close any redirected files. We need to do this now because we
    # need to release any handles we may have on the temporary files (important
    # on Win32, for example). Since we have already spawned the subprocess, our
    # handles have already been transferred so we do not need them anymore.
    for (name, mode, f, path) in opened_files:
        f.close()

    # FIXME: There is probably still deadlock potential here. Yawn.
    procData = [None] * len(procs)
    procData[-1] = procs[-1].communicate()

    for i in range(len(procs) - 1):
        if procs[i].stdout is not None:
            out = procs[i].stdout.read()
        else:
            out = ''
        if procs[i].stderr is not None:
            err = procs[i].stderr.read()
        else:
            err = ''
        procData[i] = (out,err)

    # Read stderr out of the temp files.
    for i,f in stderrTempFiles:
        f.seek(0, 0)
        procData[i] = (procData[i][0], f.read())

    def to_string(bytes):
        if isinstance(bytes, str):
            return bytes
        # Decode raw bytes so the result is always text.
        return bytes.decode('utf-8', errors='replace')

    exitCode = None
    for i,(out,err) in enumerate(procData):
        res = procs[i].wait()
        # Detect Ctrl-C in subprocess.
        if res == -signal.SIGINT:
            raise KeyboardInterrupt

        # Ensure the resulting output is always of string type.
        try:
            if out is None:
                out = ''
            else:
                out = to_string(out.decode('utf-8', errors='replace'))
        except:
            out = str(out)
        try:
            if err is None:
                err = ''
            else:
                err = to_string(err.decode('utf-8', errors='replace'))
        except:
            err = str(err)

        # Gather the redirected output files for failed commands.
        output_files = []
        if res != 0:
            for (name, mode, f, path) in sorted(opened_files):
                if path is not None and mode in ('w', 'a'):
                    try:
                        with open(path, 'rb') as f:
                            data = f.read()
                    except:
                        data = None
                    if data is not None:
                        output_files.append((name, path, data))
            
        results.append(ShellCommandResult(
            cmd.commands[i], out, err, res, timeoutHelper.timeoutReached(),
            output_files))
        if cmd.pipe_err:
            # Take the last failing exit code from the pipeline.
            if not exitCode or res != 0:
                exitCode = res
        else:
            exitCode = res

    # Remove any named temporary files we created.
    for f in named_temp_files:
        try:
            os.remove(f)
        except OSError:
            pass

    if cmd.negate:
        exitCode = not exitCode

    return exitCode
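
The /dev/null replacement above is a self-contained tempfile trick; isolated as a hypothetical helper, it would read:

import tempfile

def make_null_substitute(named_temp_files):
    # Hand the child an empty throwaway file where /dev/null is not
    # usable; the caller removes the recorded paths afterwards.
    f = tempfile.NamedTemporaryFile(delete=False)
    f.close()
    named_temp_files.append(f.name)
    return f.name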
Example #29
 def setUp(self):
     PySparkTestCase.setUp(self)
     self.checkpointDir = tempfile.NamedTemporaryFile(delete=False)
     os.unlink(self.checkpointDir.name)
     self.sc.setCheckpointDir(self.checkpointDir.name)
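
The setUp above reserves a unique, not-yet-existing path by creating and immediately unlinking a named temp file; Spark then recreates that path as its checkpoint directory. The trick in isolation:

import os
import tempfile

f = tempfile.NamedTemporaryFile(delete=False)
f.close()  # release the handle before unlinking (needed on Windows)
os.unlink(f.name)
checkpoint_dir = f.name  # unique path that does not exist yet

Example #30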
def updateDatabase(
        syn, database, new_dataset, database_synid,
        primary_key_cols, to_delete=False):
    """
    Updates synapse tables by a row identifier with another
    dataset that has the same number and order of columns

    Args:
        syn: Synapse object
        database: The synapse table (pandas dataframe)
        new_dataset: New dataset (pandas dataframe)
        databaseSynId: Synapse Id of the database table
        uniqueKeyCols: Column(s) that make up the unique key
        toDelete: Delete rows, Defaults to False

    Returns:
        Nothing
    """
    primary_key = 'UNIQUE_KEY'
    database = database.fillna("")
    orig_database_cols = database.columns
    col_order = ['ROW_ID', 'ROW_VERSION']
    col_order.extend(orig_database_cols.tolist())
    new_dataset = new_dataset.fillna("")
    # Columns must be in the same order
    new_dataset = new_dataset[orig_database_cols]
    database[primary_key_cols] = database[primary_key_cols].applymap(str)
    database[primary_key] = database[
        primary_key_cols].apply(lambda x: ' '.join(x), axis=1)

    new_dataset[primary_key_cols] = new_dataset[primary_key_cols].applymap(str)
    new_dataset[primary_key] = new_dataset[
        primary_key_cols].apply(lambda x: ' '.join(x), axis=1)

    allupdates = pd.DataFrame(columns=col_order)
    to_append_rows = _append_rows(new_dataset, database, primary_key)
    to_update_rows = _update_rows(new_dataset, database, primary_key)
    if to_delete:
        to_delete_rows = _delete_rows(new_dataset, database, primary_key)
    else:
        to_delete_rows = pd.DataFrame()
    allupdates = allupdates.append(to_append_rows, sort=False)
    allupdates = allupdates.append(to_update_rows, sort=False)

    storedatabase = False
    update_all_file = tempfile.NamedTemporaryFile(dir=SCRIPT_DIR,
                                                  delete=False)

    with open(update_all_file.name, "w") as updatefile:
        # Must write out the headers in case there are no appends or updates
        updatefile.write(",".join(col_order) + "\n")
        if not allupdates.empty:
            '''
            This is done because of pandas typing.
            An integer column with one NA/blank value
            will be cast as a double.
            '''
            updatefile.write(
                allupdates[col_order]
                .to_csv(index=False, header=None)
                .replace(".0,", ",")
                .replace(".0\n", "\n"))
            storedatabase = True
        if not to_delete_rows.empty:
            updatefile.write(
                to_delete_rows
                .to_csv(index=False, header=None)
                .replace(".0,", ",")
                .replace(".0\n", "\n"))
            storedatabase = True
    if storedatabase:
        syn.store(synapseclient.Table(database_synid, update_all_file.name))
    # Delete the update file
    os.unlink(update_all_file.name)
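
The '.0' stripping above compensates for pandas upcasting: an integer column containing a blank is cast to float, so to_csv renders 1 as 1.0. A small demonstration of the effect:

import pandas as pd

df = pd.DataFrame({'id': [1, 2, None]})  # int column with a blank -> float64
csv = df.to_csv(index=False, header=None)
print(repr(csv))                        # '1.0\n2.0\n\n'
print(repr(csv.replace(".0\n", "\n")))  # '1\n2\n\n'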