Example #1
    def __init__(self, client, host, port, username, password, prompt,
                 linesep="\n", log_filename=None, timeout=240,
                 internal_timeout=10):
        """
        Initialization of RemoteRunner. Init a session login to remote host or
        guest.

        :param client: The client to use ('ssh', 'telnet' or 'nc')
        :param host: Hostname or IP address
        :param port: Port to connect to
        :param username: Username (if required)
        :param password: Password (if required)
        :param prompt: Shell prompt (regular expression)
        :param linesep: The line separator to use when sending lines
                (e.g. '\\n' or '\\r\\n')
        :param log_filename: If specified, log all output to this file
        :param timeout: Total time duration to wait for a successful login
        :param internal_timeout: The maximum time (in seconds) to wait for
                each step of the login procedure (e.g. the "Are you sure" prompt
                or the password prompt)
        :see: wait_for_login()
        :raise: Whatever wait_for_login() raises
        """
        self.session = wait_for_login(client, host, port, username, password,
                                      prompt, linesep, log_filename,
                                      timeout, internal_timeout)
        # Init stdout pipe and stderr pipe.
        self.stdout_pipe = tempfile.mktemp()
        self.stderr_pipe = tempfile.mktemp()
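
A note on the two pipe paths above: tempfile.mktemp() only hands back a name, so there is a window in which another process can claim it. A minimal sketch of the same idea using tempfile.mkstemp(), which creates the file atomically and returns its path (the variable names mirror the attributes above):

import os
import tempfile

# mkstemp() returns (fd, path); close the descriptor and keep the path so it
# can be used exactly like the mktemp() names above.
fd_out, stdout_pipe = tempfile.mkstemp()
os.close(fd_out)
fd_err, stderr_pipe = tempfile.mkstemp()
os.close(fd_err)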
Example #2
 def __init__(self, command, input=None, capturestderr=None):
     outfile = tempfile.mktemp()
     command = '( %s ) > %s' % (command, outfile)
     if input:
         infile = tempfile.mktemp()
         tmp = open(infile, 'w')
         tmp.write(input)
         tmp.close()
         command = command + ' <' + infile
     if capturestderr:
         errfile = tempfile.mktemp()
         command = command + ' 2>' + errfile
     try:
         self.err = None
         self.errorlevel = os.system(command) >> 8
         outfd = file(outfile, 'r')
         self.out = outfd.read()
         outfd.close()
         if capturestderr:
             errfd = file(errfile,'r')
             self.err = errfd.read()
             errfd.close()
     finally:
         if os.path.isfile(outfile):
             os.remove(outfile)
         if input and os.path.isfile(infile):
             os.remove(infile)
         if capturestderr and os.path.isfile(errfile):
             os.remove(errfile)
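
The class above shells out via os.system() and reads the results back from mktemp() files. Where the subprocess module is available, roughly the same behaviour (capture stdout, optionally stderr, feed stdin) needs no temporary files at all; a hedged sketch, not a drop-in replacement for the class above:

import subprocess

def run_command(command, input=None, capturestderr=False):
    # shell=True mirrors the "( command )" shell invocation in the example
    proc = subprocess.run(command, shell=True, input=input,
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE if capturestderr else None,
                          universal_newlines=True)
    # returncode plays the role of the "os.system(...) >> 8" errorlevel above
    return proc.returncode, proc.stdout, proc.stderr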
Example #3
def piped_spawn(sh, escape, cmd, args, env, stdout, stderr):
    # There is no direct way to do that in python. What we do
    # here should work for most cases:
    #   In case stdout (stderr) is not redirected to a file,
    #   we redirect it into a temporary file tmpFileStdout
    #   (tmpFileStderr) and copy the contents of this file
    #   to stdout (stderr) given in the argument
    if not sh:
        sys.stderr.write("scons: Could not find command interpreter, is it in your PATH?\n")
        return 127
    else:
        # one temporary file for stdout and stderr
        tmpFileStdout = os.path.normpath(tempfile.mktemp())
        tmpFileStderr = os.path.normpath(tempfile.mktemp())

        # check if output is redirected
        stdoutRedirected = 0
        stderrRedirected = 0
        for arg in args:
            # are there more possibilities to redirect stdout ?
            if arg.find(">", 0, 1) != -1 or arg.find("1>", 0, 2) != -1:
                stdoutRedirected = 1
            # are there more possibilities to redirect stderr ?
            if arg.find("2>", 0, 2) != -1:
                stderrRedirected = 1

        # redirect output of non-redirected streams to our tempfiles
        if stdoutRedirected == 0:
            args.append(">" + str(tmpFileStdout))
        if stderrRedirected == 0:
            args.append("2>" + str(tmpFileStderr))

        # actually do the spawn
        try:
            args = [sh, "/C", escape(" ".join(args))]
            ret = os.spawnve(os.P_WAIT, sh, args, env)
        except OSError, e:
            # catch any error
            try:
                ret = exitvalmap[e[0]]
            except KeyError:
                sys.stderr.write("scons: unknown OSError exception code %d - %s: %s\n" % (e[0], cmd, e[1]))
            if stderr is not None:
                stderr.write("scons: %s: %s\n" % (cmd, e[1]))
        # copy child output from tempfiles to our streams
        # and do clean up stuff
        if stdout is not None and stdoutRedirected == 0:
            try:
                stdout.write(open(tmpFileStdout, "r").read())
                os.remove(tmpFileStdout)
            except (IOError, OSError):
                pass

        if stderr is not None and stderrRedirected == 0:
            try:
                stderr.write(open(tmpFileStderr, "r").read())
                os.remove(tmpFileStderr)
            except (IOError, OSError):
                pass
        return ret
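
The temporary-file indirection in piped_spawn() exists because the child's output must end up in whatever stream objects the caller passed in. When those targets are ordinary files, subprocess can redirect directly and the copy/cleanup step disappears; a small sketch of that idea, with sh, args and env assumed to have the same meaning as above:

import subprocess

def spawn_redirected(sh, args, env, stdout_path, stderr_path):
    # Let the OS write child output straight into the given files instead of
    # routing it through mktemp() files and copying afterwards.
    with open(stdout_path, "w") as out, open(stderr_path, "w") as err:
        return subprocess.call([sh, "/C", " ".join(args)],
                               env=env, stdout=out, stderr=err)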
Example #4
    def test_rename(self):
        "Histogram: rename"
        import tempfile
        outh5 = tempfile.mktemp()
        code = """
from histogram import histogram, arange
h = histogram('h', [('x', arange(10), 'meter')])
h.I = h.x*h.x
h.setAttribute('name', 'abc')
from histogram.hdf import load, dump
dump(h, %r, '/', 'c')
""" % outh5
        script = tempfile.mktemp()
        open(script, 'w').write(code)
        cmd = 'python %s' % script
        import os
        
        if os.system(cmd):
            raise RuntimeError, "%s failed" % cmd
        
        from histogram.hdf import load
        h = load(outh5, 'abc')

        os.remove(outh5)
        os.remove(script)
        return        
Example #5
def test_plot_anat():
    img = _generate_img()

    # Test saving with empty plot
    z_slicer = plot_anat(anat_img=False, display_mode='z')
    filename = tempfile.mktemp(suffix='.png')
    try:
        z_slicer.savefig(filename)
    finally:
        os.remove(filename)

    z_slicer = plot_anat(display_mode='z')
    filename = tempfile.mktemp(suffix='.png')
    try:
        z_slicer.savefig(filename)
    finally:
        os.remove(filename)

    ortho_slicer = plot_anat(img, dim='auto')
    filename = tempfile.mktemp(suffix='.png')
    try:
        ortho_slicer.savefig(filename)
    finally:
        os.remove(filename)

    # Save execution time and memory
    plt.close()
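
The savefig/os.remove pattern repeated above can also be written with tempfile.TemporaryDirectory, which cleans up even when savefig raises. A sketch, assuming a slicer object like those returned by plot_anat in the test:

import os
import tempfile

def save_and_discard(slicer):
    # The directory (and the PNG inside it) is removed automatically when the
    # with-block exits, replacing the try/finally/os.remove dance.
    with tempfile.TemporaryDirectory() as tmpdir:
        slicer.savefig(os.path.join(tmpdir, "plot.png"))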
Example #6
 def test_output_compatible_setup_nooutput(self):
     tmpfile = tempfile.mktemp()
     tmpfile2 = tempfile.mktemp()
     os.chdir(basedir)
     # Verify --silent can be supplied as app argument
     cmd_line = ('./scripts/avocado --silent run --job-results-dir %s '
                 '--sysinfo=off --xunit %s --json %s passtest.py' %
                 (self.tmpdir, tmpfile, tmpfile2))
     result = process.run(cmd_line, ignore_status=True)
     output = result.stdout + result.stderr
     expected_rc = exit_codes.AVOCADO_ALL_OK
     try:
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
         self.assertEqual(output, "", "Output is not empty:\n%s" % output)
         # Check if we are producing valid outputs
         with open(tmpfile2, 'r') as fp:
             json_results = json.load(fp)
             debug_log = json_results['debuglog']
             self.check_output_files(debug_log)
         minidom.parse(tmpfile)
     finally:
         try:
             os.remove(tmpfile)
             os.remove(tmpfile2)
         except OSError:
             pass
Example #7
    def test_document_fusion(self):
        # data source and model are in the same content
        alsoProvides(self.portal.REQUEST, ICollectiveDocumentfusionLayer)
        content = api.content.create(self.portal, type='letter',
                           title=u"En réponse...",
                           file=NamedFile(data=open(TEST_LETTER_ODT).read(),
                                          filename=u'letter.odt',
                                          contentType='application/vnd.oasis.opendocument.text'),
                           sender_name="Thomas Desvenain",
                           sender_address="57 Quai du Pré Long",
                           recipient_name="Vincent Fretin",
                           date=datetime.date(2012, 12, 23))

        notify(ObjectModifiedEvent(content))
        generated_stream = content.unrestrictedTraverse('@@getdocumentfusion')()
        self.assertTrue(generated_stream)
        self.assertEqual(self.portal.REQUEST.response['content-type'],
                         'application/pdf')
        generated_path = tempfile.mktemp(suffix='letter.pdf')
        generated_file = open(generated_path, 'w')
        generated_file.write(generated_stream.read())
        generated_file.close()

        txt_path = tempfile.mktemp(suffix='letter.pdf')
        subprocess.call(['pdftotext', generated_path, txt_path])
        txt = open(txt_path).read()
        self.assertIn('Vincent Fretin', txt)
        self.assertIn('57 Quai du Pré Long', txt)
        self.assertIn('2012', txt)
        self.assertIn(u'EN RÉPONSE...', txt)

        os.remove(txt_path)
        os.remove(generated_path)
Example #8
def test():

    try:
        fname1 = tempfile.mktemp()
        fname2 = tempfile.mktemp()
        f = open(fname1, 'w')
    except:
        raise ImportError, "Cannot test binhex without a temp file"

    start = 'Jack is my hero'
    f.write(start)
    f.close()
    
    binhex.binhex(fname1, fname2)
    if verbose:
        print 'binhex'

    binhex.hexbin(fname2, fname1)
    if verbose:
        print 'hexbin'

    f = open(fname1, 'r')
    finish = f.readline()

    if start <> finish:
        print 'Error: binhex <> hexbin'
    elif verbose:
        print 'binhex == hexbin'

    try:
        import os
        os.unlink(fname1)
        os.unlink(fname2)
    except:
        pass
Example #9
 def test_output_compatible_setup_3(self):
     tmpfile = tempfile.mktemp(prefix='avocado_' + __name__)
     tmpfile2 = tempfile.mktemp(prefix='avocado_' + __name__)
     tmpdir = tempfile.mkdtemp(prefix='avocado_' + __name__)
     tmpfile3 = tempfile.mktemp(dir=tmpdir)
     os.chdir(basedir)
     cmd_line = ('./scripts/avocado run --job-results-dir %s --sysinfo=off '
                 '--xunit %s --json %s --html %s passtest.py' %
                 (self.tmpdir, tmpfile, tmpfile2, tmpfile3))
     result = process.run(cmd_line, ignore_status=True)
     output = result.stdout + result.stderr
     expected_rc = exit_codes.AVOCADO_ALL_OK
     tmpdir_contents = os.listdir(tmpdir)
     self.assertEqual(len(tmpdir_contents), 5,
                      'Not all resource dirs were created: %s' % tmpdir_contents)
     try:
         self.assertEqual(result.exit_status, expected_rc,
                          "Avocado did not return rc %d:\n%s" %
                          (expected_rc, result))
         self.assertNotEqual(output, "", "Output is empty")
         # Check if we are producing valid outputs
         with open(tmpfile2, 'r') as fp:
             json_results = json.load(fp)
             debug_log = json_results['debuglog']
             self.check_output_files(debug_log)
         minidom.parse(tmpfile)
     finally:
         try:
             os.remove(tmpfile)
             os.remove(tmpfile2)
             shutil.rmtree(tmpdir)
         except OSError:
             pass
Example #10
    def test_convert(self):
        tmp = tempfile.mktemp()

        output = romanesco.convert("image", {
            "format": "png.base64",
            "data": self.image
        }, {
            "format": "png",
            "url": "file://" + tmp,
            "mode": "auto"
        })

        value = open(tmp).read()
        os.remove(tmp)
        self.assertEqual(output["format"], "png")
        self.assertEqual(base64.b64encode(value), self.image)

        output = romanesco.convert(
            "image",
            {"format": "png.base64", "data": self.image},
            {"format": "pil"})

        tmp = tempfile.mktemp()

        output = romanesco.convert(
            "image",
            output,
            {"format": "png"})

        io1 = StringIO(base64.b64decode(self.image))
        im1 = Image.open(io1)
        io2 = StringIO(output["data"])
        im2 = Image.open(io2)
        self.assertEqual(compareImages(im1, im2), 0)
Example #11
    def test_build(self):
        client = api.Client(url=url, config_path=tempfile.mktemp(), max_size=1024 * 256)  # 256K
        client.register()
        generated = client.build(cleanup=True)
        self.assertTrue(len(generated))

        client = api.Client(url=url, config_path=tempfile.mktemp(), max_size=1024 * 512)  # 512K
        config = client.config()
        client.register()
        generated = client.build(cleanup=True)
        self.assertTrue(len(generated) == 4)

        result = json.loads(urlopen(url + "/api/online/json").read().decode("utf8"))
        result = [farmers for farmers in result["farmers"] if farmers["btc_addr"] == config["payout_address"]]
        last_seen = result[0]["last_seen"]
        result = json.dumps(result, sort_keys=True)
        expected = json.dumps(
            [
                {
                    "height": 4,
                    "btc_addr": config["payout_address"],
                    "last_seen": last_seen,
                    "payout_addr": config["payout_address"],
                }
            ],
            sort_keys=True,
        )

        self.assertEqual(result, expected)  # check that build send height=4 to the online list
Example #12
def hex_to_obj(_hex):
    if len(_hex) == 0 or len(_hex) % 2 != 0:
        raise Exception('Not valid _hex: %s' % _hex)

    tmp_shell_fp = mktemp()
    tmp_obj_fp = mktemp()

    asm = '.byte '
    for i in xrange(0, len(_hex), 2):
        asm += '0x%s,' % _hex[i:i+2]
    asm = asm.rstrip(',')
    asm += '\n'

    f = open(tmp_shell_fp, 'w')
    f.write(asm)
    f.close()

    cmd = 'as --32 %s -o %s' % (tmp_shell_fp, tmp_obj_fp)
    os.system(cmd)

    obj = open(tmp_obj_fp, 'rb').read()

    os.unlink(tmp_shell_fp)
    os.unlink(tmp_obj_fp)

    return obj
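
hex_to_obj() builds one ".byte" operand per hex pair and shells out to the GNU assembler via os.system(). A variant with the same flow that creates the temporary files safely and checks the assembler's exit status might look like the sketch below; the "as --32" invocation is taken from the example, the rest is illustrative:

import os
import subprocess
import tempfile

def assemble_hex(_hex):
    if not _hex or len(_hex) % 2 != 0:
        raise ValueError('Not valid _hex: %s' % _hex)

    # one ".byte 0x.." operand per hex pair, as in the example above
    asm = '.byte ' + ','.join('0x' + _hex[i:i + 2]
                              for i in range(0, len(_hex), 2)) + '\n'

    src_fd, src_path = tempfile.mkstemp(suffix='.s')
    obj_fd, obj_path = tempfile.mkstemp(suffix='.o')
    os.close(obj_fd)
    try:
        with os.fdopen(src_fd, 'w') as f:
            f.write(asm)
        # check_call raises if the assembler fails, instead of silently
        # returning whatever happens to be in the output file
        subprocess.check_call(['as', '--32', src_path, '-o', obj_path])
        with open(obj_path, 'rb') as f:
            return f.read()
    finally:
        os.unlink(src_path)
        os.unlink(obj_path)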
Example #13
    def test_fit_to_mesh_file_errorsII(self):
        from anuga.load_mesh.loadASCII import import_mesh_file, export_mesh_file
        import tempfile
        import os

        # create a .tsh file, no user outline
        mesh_file = tempfile.mktemp(".tsh")
        fd = open(mesh_file,'w')
        fd.write("unit testing a bad .tsh file \n")
        fd.close()

        # create a points .csv file
        point_file = tempfile.mktemp(".csv")
        fd = open(point_file,'w')
        fd.write("x,y,elevation, stage \n\
        1.0, 1.0,2.,4 \n\
        1.0, 3.0,4,8 \n\
        3.0,1.0,4.,8 \n")
        fd.close()

        mesh_output_file = "new_triangle.tsh"
        try:
            fit_to_mesh_file(mesh_file, point_file,
                             mesh_output_file, display_errors = False)
        except IOError:
            pass
        else:
            raise Exception('Bad file did not raise error!')
            
        #clean up
        os.remove(mesh_file)
        os.remove(point_file)
Example #14
 def convert(self, ptype, data):
     ul_data = []
     in_wave = False
     for line in data.split("\n"):
         if in_wave:
             if line.startswith(".WAVEEND"):
                 try:
                     in_wave = False
                     f1 = tempfile.mktemp()
                     f2 = tempfile.mktemp() + ".png"
                     data = open(f1, "w")
                     data.write("\n".join(ul_data))
                     data.close()
                     os.system("wavegen %s %s" % (f1, f2))
                     self.add_image(f2)
                     os.unlink(f1)
                     os.unlink(f2)
                 except:
                     pass
             else:
                 ul_data.append(line)
         elif line.startswith(".WAVEBEGIN"):
             in_wave = True
             ul_data = []
         else:
             self.write_paragraph(ptype, line)
Example #15
    def __init__( self, com, debug=0, **params ):

        self.com = com

        self.rec_psf = com.rec().getPsfFile()
        self.lig_psf = com.lig().getPsfFile()

        recCode = com.rec().getPdbCode()
        ligCode = com.lig().getPdbCode()

        self.rec_in = tempfile.mktemp( recCode + ".pdb" )
        self.lig_in = tempfile.mktemp( ligCode + ".pdb" )

        self.lig_out = tempfile.mktemp( "lig_out.pdb" )
        self.rec_out = tempfile.mktemp( "rec_out.pdb" )

        self.inp_template = t.dataRoot() +\
            '/xplor/rb_minimize_complex.inp'

        self.param19 = t.dataRoot() + \
            '/xplor/toppar/param19.pro'

        self.result = None

        Xplorer.__init__( self, self.inp_template, debug=debug, **params )
Example #16
def test_test():
    from b4msa.command_line import params, train, test
    from microtc.utils import read_data_labels
    import os
    import sys
    import tempfile
    import json
    output = tempfile.mktemp()
    fname = os.path.dirname(__file__) + '/text.json'
    sys.argv = ['b4msa', '-o', output, '-k', '2', fname, '-s', '2']
    params()
    sys.argv = ['b4msa', '-m', output, fname, '-o', output]
    train()
    output2 = tempfile.mktemp()
    sys.argv = ['b4msa', '-m', output, fname, '-o', output2]
    test()
    X, y = read_data_labels(output2)
    print(y)
    os.unlink(output)
    with open(output2) as fpt:
        a = [json.loads(x) for x in fpt.readlines()]
    os.unlink(output2)
    for x in a:
        assert 'klass' in x
    assert len(y)
Example #17
def test_kfolds():
    from b4msa.command_line import params, kfolds
    import os
    import sys
    import json
    import tempfile
    output = tempfile.mktemp()
    fname = os.path.dirname(__file__) + '/text.json'
    sys.argv = ['b4msa', '-o', output, '-k', '2', fname, '-s', '2']
    params()
    output2 = tempfile.mktemp()
    sys.argv = ['b4msa', '-m', output, fname, '-o', output2]
    print(output, fname)
    kfolds()
    os.unlink(output)
    a = open(output2).readline()
    os.unlink(output2)
    a = json.loads(a)
    assert 'decision_function' in a
    sys.argv = ['b4msa', '--update-klass', '-m', output, fname, '-o', output2]
    try:
        kfolds()
    except AssertionError:
        return
    assert False
Example #18
def read_ply(ply_filename):
    vfile = tempfile.mktemp()
    ffile = tempfile.mktemp()
    reader = ply_reader.PlyReader(ply_filename)
    
    v_id = 0
    f_id = 0

    # Reading the header
    for evt, data in reader.read():
        if evt == ply_reader.EVENT_HEADER:
            n_vertices, n_faces = data
            vertices = np.memmap(vfile, dtype='float64', shape = (n_vertices,3),
                                mode='w+')
            faces = np.memmap(ffile, dtype='int64', shape = (n_faces,3),
                              mode='w+')
            break

    # Reading the vertices and faces
    for evt, data in reader.read():
        if evt == ply_reader.EVENT_VERTEX:
            current_vertex = data
            vertices[v_id] = current_vertex
            v_id += 1

        elif evt == ply_reader.EVENT_FACE:
            faces[f_id] = data
            f_id += 1

    return vertices, faces
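
read_ply() backs its vertex and face arrays with np.memmap files named by mktemp(). numpy only needs a filename here, so mkstemp() can supply pre-created backing files for the same purpose; a minimal sketch, assuming numpy is imported as np:

import os
import tempfile
import numpy as np

def memmap_array(shape, dtype):
    # mkstemp() creates the backing file up front; np.memmap then maps it and
    # grows it to the required size because of mode='w+'.
    fd, path = tempfile.mkstemp(suffix='.dat')
    os.close(fd)
    return np.memmap(path, dtype=dtype, shape=shape, mode='w+'), path

vertices, vfile = memmap_array((100, 3), 'float64')
faces, ffile = memmap_array((50, 3), 'int64')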
Example #19
def configure_network(node_name, ip, fqdn, username='******'):
    # Copy remote /etc/sysconfig/network to local file
    orgfilename = tempfile.mktemp()
    returncode, output, errors = system_command_required('scp ' + username + '@' + ip + ':/etc/sysconfig/network ' + orgfilename)
    
    # Generate new /etc/sysconfig/network file locally
    with open(orgfilename, 'r') as orgfile:
        newfilename = tempfile.mktemp()
        with open(newfilename, 'w') as newfile:
            newfile.write('DHCP_HOSTNAME=' + node_name + '\n')
            for line in orgfile:
                if line == '' or re.match('HOSTNAME=', line) or re.match('DHCP_HOSTNAME', line) or re.match('DOMAINNAME=', line):
                    pass
                else:
                    newfile.write(line)   
        
    # Copy new file to remote server
    returncode, output, errors = system_command_required('scp ' + newfilename + ' ' + username + '@' + ip + ':/etc/sysconfig/network')
    print(output + errors)
    returncode == 0 or die();

    os.remove(orgfilename)
    os.remove(newfilename)

    returncode, output, errors = ssh_command(username, ip, 'service network restart ; hostname ' + fqdn)
Example #20
def show_longdesc():
    if not HAVE_README:
        logging.error(
            "To check the long description, we need the 'readme' package. "
            "(It is included if you install `zest.releaser[recommended]`)"
        )
        sys.exit(1)

    filename = tempfile.mktemp(".html")
    # Note: for the setup.py call we use _execute_command() from our
    # utils module. This makes sure the python path is set up right.
    longdesc = _execute_command(utils.setup_py("--long-description"))
    warnings = io.StringIO()
    html = render(longdesc, warnings)
    if html is None:
        logging.error("Error generating html. Invalid ReST.")
        rst_filename = tempfile.mktemp(".rst")
        with open(rst_filename, "wb") as rst_file:
            rst_file.write(longdesc.encode("utf-8"))
        warning_text = warnings.getvalue()
        warning_text = warning_text.replace("<string>", rst_filename)
        print(warning_text)
        sys.exit(1)

    if "<html" not in html[:20]:
        # Add a html declaration including utf-8 indicator
        html = HTML_PREFIX + html + HTML_POSTFIX

    with open(filename, "wb") as fh:
        fh.write(html.encode("utf-8"))

    url = "file://" + filename
    logging.info("Opening %s in your webbrowser.", url)
    webbrowser.open(url)
Example #21
    def init_rrd(self):
        self.assert_rrd("%s/logstats.rrd" % self.graph_dir,
                        "DS:qsize:GAUGE:30:0:U",
                        "DS:in:COUNTER:30:0:U",
                        "DS:out:COUNTER:30:0:U",
                        "DS:drop:COUNTER:30:0:U")

        self.assert_rrd("%s/logmemory.rrd" % self.graph_dir,
                        "DS:size:GAUGE:30:0:U",
                        "DS:rss:GAUGE:30:0:U",
                        "DS:stack:GAUGE:30:0:U")

        self.graph_args = []
        if self.graph_daemon:
            self.graph_sockfile = mktemp(prefix="rrd_", suffix=".sock")
            self.graph_pidfile  = mktemp(prefix="rrd_", suffix=".pid")
            print("Starting rrdcached -l unix:%s -p %s -b %s -g" %
                  (self.graph_sockfile,self.graph_pidfile, self.graph_dir))
            devnull = file('/dev/null', 'a+')
            self.graph_process = subprocess.Popen(["/usr/bin/rrdcached",
                                                   "-l", "unix:%s" % self.graph_sockfile,
                                                   "-p", self.graph_pidfile,
                                                   "-b", self.graph_dir,
                                                   "-g"], stderr=subprocess.STDOUT)
            self.graph_daemon_args = ["--daemon", "unix:%s" % self.graph_sockfile]
Example #22
def create_app(name=None, urls=None, py_version='26', app_dir='/opt/apps',
    ve_dir='/opt/ve', app_port=8000):
    """
    Creates a new application container

    :param name: Name of application
    :param urls: Application public URLs (semi-colon separated - i.e. "example.com;anotherexample.com")
    :param py_version: Version of Python to use (default: 26)
    :param app_dir: Root directory for applications (default: /opt/apps)
    :param ve_dir: Root directory for virtualenvs (default: /opt/ve)
    :param app_port: Application port

    """
    if not name or not urls:
        raise RuntimeError('You must specify a name and urls')
    with default_settings():
        # create app directory
        sudo('mkdir -p {0}'.format(os.path.join(app_dir, name)))
        # create supervisor config
        uwsgi_tmp_conf = tempfile.mktemp()
        with open(uwsgi_tmp_conf, 'w') as f:
            f.write(generate_uwsgi_config(app_name=name, app_dir=app_dir, ve_dir=ve_dir, user='******', group='www-data'))
        nginx_tmp_conf = tempfile.mktemp()
        # create nginx config
        with open(nginx_tmp_conf, 'w') as f:
            f.write(generate_nginx_config(app_name=name, urls=urls.split(';'),
                app_port=app_port))
        put(uwsgi_tmp_conf, '/etc/supervisor/conf.d/uwsgi-{0}.conf'.format(name), mode=0755, use_sudo=True)
        put(nginx_tmp_conf, '/etc/nginx/conf.d/{0}.conf'.format(name), mode=0755, use_sudo=True)
        # update supervisor
        sudo('supervisorctl update')
        # cleanup
        os.remove(uwsgi_tmp_conf)
        os.remove(nginx_tmp_conf)
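
Going by the docstring, a call to this task could look like the following; the application name and URLs are made-up placeholders:

# hypothetical invocation of the Fabric task above
create_app(name='blog',
           urls='blog.example.com;www.example.com',
           py_version='27',
           app_port=8001)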
Example #23
def make_video(url, target):
    temp_gif = tempfile.mktemp(suffix=".gif")
    temp_output = tempfile.mktemp(suffix=".mp4")
    try:
        # download file
        subprocess.check_call(["timeout", "-s", "KILL", "5s",
                               "curl", "-o", temp_gif, url])

        ffprobe_output = subprocess.check_output(
            ["timeout", "-s", "KILL", "5s", "ffprobe", "-show_packets", temp_gif])

        frame_count = ffprobe_output.count(b"codec_type=video")
        if frame_count <= 5:
            return False

        # and convert
        subprocess.check_call(["timeout", "-s", "KILL", "30s",
                               "ffmpeg", "-y", "-i", temp_gif, "-vf", "scale=trunc(iw/2)*2:trunc(ih/2)*2",
                               "-codec:v", "libx264", "-preset", "medium", "-b:v", "350k", "-an",
                               "-profile:v", "baseline", "-level", "3.0", "-pix_fmt", "yuv420p",
                               "-qmin", "20", "-qmax", "42", temp_output])

        # and move to target
        shutil.copy(temp_output, str(target))
        return True
    except:
        stats.increment(metric_name("error"))
        raise

    finally:
        for temp in temp_gif, temp_output:
            try:
                os.unlink(temp)
            except OSError:
                pass
Example #24
def setup_for_testing(require_indexes=True):
  """Sets up the stubs for testing.

  Args:
    require_indexes: True if indexes should be required for all indexes.
  """
  from google.appengine.api import apiproxy_stub_map
  from google.appengine.api import memcache
  from google.appengine.tools import dev_appserver
  from google.appengine.tools import dev_appserver_index
  import urlfetch_test_stub
  before_level = logging.getLogger().getEffectiveLevel()
  try:
    logging.getLogger().setLevel(100)
    root_path = os.path.realpath(os.path.dirname(__file__))
    dev_appserver.SetupStubs(
        TEST_APP_ID,
        root_path=root_path,
        login_url='',
        datastore_path=tempfile.mktemp(suffix='datastore_stub'),
        history_path=tempfile.mktemp(suffix='datastore_history'),
        blobstore_path=tempfile.mktemp(suffix='blobstore_stub'),
        require_indexes=require_indexes,
        clear_datastore=False)
    dev_appserver_index.SetupIndexes(TEST_APP_ID, root_path)
    apiproxy_stub_map.apiproxy._APIProxyStubMap__stub_map['urlfetch'] = \
        urlfetch_test_stub.instance
    # Actually need to flush, even though we've reallocated. Maybe because the
    # memcache stub's cache is at the module level, not the API stub?
    memcache.flush_all()
  finally:
    logging.getLogger().setLevel(before_level)
Example #25
     def generate_image():
         infile = mktemp(prefix='%s-' % name)
         outfile = mktemp(prefix='%s-' % name)
         try:
             try:
                 f = codecs.open(infile, 'w', 'utf8')
                 try:
                     f.write(content)
                 finally:
                     f.close()
                 cmd = [name, '-a', '-T', type, '-o', outfile, infile]
                 if font:
                     cmd.extend(['-f', font])
                 self.env.log.debug('(%s) command: %r' % (name, cmd))
                 try:
                     proc = Popen(cmd, stderr=PIPE)
                     stderr_value = proc.communicate()[1]
                 except Exception, e:
                     self.env.log.error('(%s) %r' % (name, e))
                     raise ImageGenerationError("Failed to generate diagram. (%s is not found.)" % name)
 
                 if proc.returncode != 0 or not os.path.isfile(outfile):
                     self.env.log.error('(%s) %s' % (name, stderr_value))
                     raise ImageGenerationError("Failed to generate diagram. (rc=%d)" % proc.returncode)
                 f = open(outfile, 'rb')
                 try:
                     data = f.read()
                 finally:
                     f.close()
                 return data
             except ImageGenerationError:
                 raise
             except Exception, e:
                 self.env.log.error('(%s) %r' % (name, e))
                 raise ImageGenerationError("Failed to generate diagram.")
Example #26
def get_surface(pdb_file, PDB_TO_XYZR="pdb_to_xyzr", MSMS="msms"):
    """
    Return a Numeric array that represents
    the vertex list of the molecular surface.

    PDB_TO_XYZR --- pdb_to_xyzr executable (arg. to os.system)
    MSMS --- msms executable (arg. to os.system)
    """
    # extract xyz and set radii
    xyz_tmp=tempfile.mktemp()
    PDB_TO_XYZR=PDB_TO_XYZR+" %s > %s"
    make_xyz=PDB_TO_XYZR % (pdb_file, xyz_tmp)
    os.system(make_xyz)
    assert os.path.isfile(xyz_tmp), \
        "Failed to generate XYZR file using command:\n%s" % make_xyz
    # make surface
    surface_tmp=tempfile.mktemp()
    MSMS=MSMS+" -probe_radius 1.5 -if %s -of %s > "+tempfile.mktemp()
    make_surface=MSMS % (xyz_tmp, surface_tmp)
    os.system(make_surface)
    surface_file=surface_tmp+".vert"
    assert os.path.isfile(surface_file), \
        "Failed to generate surface file using command:\n%s" % make_surface
    # read surface vertices from vertex file
    surface=_read_vertex_array(surface_file)
    # clean up tmp files
    # ...this is dangerous
    #os.system("rm "+xyz_tmp)
    #os.system("rm "+surface_tmp+".vert")
    #os.system("rm "+surface_tmp+".face")
    return surface
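
Per the docstring, get_surface() returns the vertex array of the molecular surface for a PDB file. A hypothetical call, assuming pdb_to_xyzr and msms are on PATH as the defaults suggest, with "protein.pdb" as a placeholder path:

surface = get_surface("protein.pdb")
print("number of surface vertices:", len(surface))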
Example #27
    def run_locally(self, options, handle_files):
        temp_files = []
        for name in handle_files:
            tmp = mktemp(suffix='', dir=TEMP_DIR)
            args = ["kbhs-download", "--handle", name, "-o", tmp]
            subprocess.check_call(args)
            temp_files.append(tmp)

        result_files = list(self._run_locally_internal(options, temp_files))

        if self.cleanup:
            for name in temp_files:
                os.remove(name)

        handles = []
        for name in result_files:
            tmp = mktemp(suffix='.handle', dir=TEMP_DIR)
            args = ["kbhs-upload", "-i", name, "-o", tmp]
            subprocess.check_call(args)
            fh = open(tmp)
            h = json.load(fh)
            fh.close()
            handles.append(h)
            if self.cleanup:
                os.remove(tmp)
                os.remove(name)

        return handles
Example #28
def test_invert_saveload():
    dclab.PolygonFilter.clear_all_filters()
    ddict = example_data_dict(size=1234, keys=["area_um", "deform"])
    # points of polygon filter
    points = [[np.min(ddict["area_um"]), np.min(ddict["deform"])],
              [np.min(ddict["area_um"]), np.max(ddict["deform"])],
              [np.average(ddict["area_um"]), np.max(ddict["deform"])],
              [np.average(ddict["area_um"]), np.min(ddict["deform"])],
              ]
    filt1 = dclab.PolygonFilter(axes=["area_um", "deform"],
                                points=points,
                                inverted=True)
    name = tempfile.mktemp(prefix="test_dclab_polygon_")
    filt1.save(name)
    filt2 = dclab.PolygonFilter(filename=name)
    assert filt2 == filt1

    filt3 = dclab.PolygonFilter(axes=["area_um", "deform"],
                                points=points,
                                inverted=False)
    try:
        os.remove(name)
    except OSError:
        pass

    name = tempfile.mktemp(prefix="test_dclab_polygon_")
    filt3.save(name)
    filt4 = dclab.PolygonFilter(filename=name)
    assert filt4 == filt3
    try:
        os.remove(name)
    except OSError:
        pass
Example #29
    def test_rename_sliced_histogram_using_rename_method(self):
        "Histogram: rename()"
        import tempfile
        outh5 = tempfile.mktemp()
        code = """
from histogram import histogram, arange
h = histogram(
  'h', 
  [('x', arange(10), 'meter'),
   ('y', arange(15), 'meter'),
  ]
  )
h1 = h[(2,5), ()].sum('x')
h1.rename('abc')
from histogram.hdf import load, dump
dump(h1, %r, '/', 'c')
""" % outh5
        script = tempfile.mktemp()
        open(script, 'w').write(code)
        cmd = 'python %s' % script
        import os
        
        if os.system(cmd):
            raise RuntimeError, "%s failed" % cmd
        
        from histogram.hdf import load
        try:
            h = load(outh5, 'abc')
        except:
            raise RuntimeError, "failed to load histogram from %s" %(
                outh5,)

        os.remove(outh5)
        os.remove(script)
        return        
Example #30
    def getPDF(self):
        """
        Fetch PDF and save it locally in a temporary file.
        Tries by order:
        - refereed article
        - refereed article using another machine (set ssh_user & ssh_server)
        - arXiv preprint
        - electronic journal link
        """

        if not self.links:
            return 'failed'

        def filetype(filename):
            return subprocess.Popen('file %s' % filename, shell=True,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE).stdout.read()

        #refereed
        if 'article' in self.links:
            url = self.links['article']
            #try locally
            pdf = tempfile.mktemp() + '.pdf'
            urllib.urlretrieve(url, pdf)
            if 'PDF document' in filetype(pdf):
                return pdf

            #try in remote server
            # you need to set SSH public key authentication
            # for this to work!
            elif ssh_user is not None:
                pdf = tempfile.mktemp() + '.pdf'
                cmd = 'ssh %s@%s \"touch toto.pdf; wget -O toto.pdf \\"%s\\"\"' % (ssh_user, ssh_server, url)
                cmd2 = 'scp -q %s@%s:toto.pdf %s' % (ssh_user, ssh_server, pdf)
                subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
                subprocess.Popen(cmd2, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
                if 'PDF document' in filetype(pdf):
                    return pdf

        #arXiv
        if 'preprint' in self.links:
            #arXiv page
            url = self.links['preprint']
            for line in urllib.urlopen(url).readlines():
                if 'dc:identifier' in line:
                    begin = re.search('dc:identifier="', line).end()
                    url = line[begin:-2].replace('&#38;', '&').lower()
                    #get arXiv PDF
                    pdf = tempfile.mktemp() + '.pdf'
                    urllib.urlretrieve(url.replace('abs', 'pdf'), pdf)
                    if 'PDF document' in filetype(pdf):
                        return pdf
                    else:
                        return url

        #electronic journal
        if 'ejournal' in self.links:
            return self.links['ejournal']

        return 'failed'
Example #31
 def mkstemp():
     """Replacement for mkstemp, calling mktemp."""
     fname = tempfile.mktemp()
     return os.open(fname, os.O_RDWR | os.O_CREAT), fname
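
This shim rebuilds mkstemp() on top of mktemp(), presumably for an environment where only the name-returning call was wanted. The standard tempfile.mkstemp() already creates the file atomically and returns both pieces, so in most code the shim reduces to a single call:

import tempfile

fd, fname = tempfile.mkstemp()  # file already exists; caller closes fd when done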
Example #32
    def submit_pipeline(
        self,
        pipeline,
        arguments=None,
        experiment=None,
        run=None,
        namespace=None,
        artifact_path=None,
        ops=None,
        ttl=None,
    ):

        if isinstance(pipeline, str):
            pipe_file = pipeline
        else:
            pipe_file = tempfile.mktemp(suffix=".yaml")
            conf = new_pipe_meta(artifact_path, ttl, ops)
            kfp.compiler.Compiler().compile(pipeline,
                                            pipe_file,
                                            type_check=False,
                                            pipeline_conf=conf)

        if pipe_file.endswith(".yaml"):
            headers = {"content-type": "application/yaml"}
        elif pipe_file.endswith(".zip"):
            headers = {"content-type": "application/zip"}
        else:
            raise ValueError("pipeline file must be .yaml or .zip")
        if arguments:
            if not isinstance(arguments, dict):
                raise ValueError("arguments must be dict type")
            headers["pipeline-arguments"] = str(arguments)

        if not path.isfile(pipe_file):
            raise OSError("file {} doesnt exist".format(pipe_file))
        with open(pipe_file, "rb") as fp:
            data = fp.read()
        if not isinstance(pipeline, str):
            remove(pipe_file)

        try:
            params = {
                "namespace": namespace,
                "experiment": experiment,
                "run": run
            }
            resp = self.api_call(
                "POST",
                "submit_pipeline",
                params=params,
                timeout=20,
                body=data,
                headers=headers,
            )
        except OSError as err:
            logger.error("error cannot submit pipeline: {}".format(err))
            raise OSError(
                "error: cannot cannot submit pipeline, {}".format(err))

        if not resp.ok:
            logger.error("bad resp!!\n{}".format(resp.text))
            raise ValueError("bad submit pipeline response, {}".format(
                resp.text))

        resp = resp.json()
        logger.info("submitted pipeline {} id={}".format(
            resp["name"], resp["id"]))
        return resp["id"]
Example #33
 def pushDir(self, localDir, remoteDir, retryLimit=None, timeout=None):
     # adb "push" accepts a directory as an argument, but if the directory
     # contains symbolic links, the links are pushed, rather than the linked
     # files; we either zip/unzip or re-copy the directory into a temporary
     # one to get around this limitation
     retryLimit = retryLimit or self.retryLimit
     if self._useZip:
         self.removeDir(remoteDir)
         self.mkDirs(remoteDir + "/x")
         try:
             localZip = tempfile.mktemp() + ".zip"
             remoteZip = remoteDir + "/adbdmtmp.zip"
             proc = ProcessHandler(["zip", "-r", localZip, '.'],
                                   cwd=localDir,
                                   processOutputLine=self._log)
             proc.run()
             proc.wait()
             self.pushFile(localZip,
                           remoteZip,
                           retryLimit=retryLimit,
                           createDir=False)
             mozfile.remove(localZip)
             data = self._runCmd(
                 ["shell", "unzip", "-o", remoteZip, "-d",
                  remoteDir]).output[0]
             self._checkCmd(["shell", "rm", remoteZip],
                            retryLimit=retryLimit,
                            timeout=self.short_timeout)
             if re.search("unzip: exiting", data) or re.search(
                     "Operation not permitted", data):
                 raise Exception("unzip failed, or permissions error")
         except:
             self._logger.warning(traceback.format_exc())
             self._logger.warning(
                 "zip/unzip failure: falling back to normal push")
             self._useZip = False
             self.pushDir(localDir,
                          remoteDir,
                          retryLimit=retryLimit,
                          timeout=timeout)
     else:
         localDir = os.path.normpath(localDir)
         remoteDir = os.path.normpath(remoteDir)
         copyRequired = False
         if self._adb_version >= '1.0.36' and \
            os.path.isdir(localDir) and self.dirExists(remoteDir):
             # See do_sync_push in
             # https://android.googlesource.com/platform/system/core/+/master/adb/file_sync_client.cpp
             # Work around change in behavior in adb 1.0.36 where if
             # the remote destination directory exists, adb push will
             # copy the source directory *into* the destination
             # directory otherwise it will copy the source directory
             # *onto* the destination directory.
             #
             # If the destination directory does exist, push to its
             # parent directory.  If the source and destination leaf
             # directory names are different, copy the source directory
             # to a temporary directory with the same leaf name as the
             # destination so that when we push to the parent, the
             # source is copied onto the destination directory.
             localName = os.path.basename(localDir)
             remoteName = os.path.basename(remoteDir)
             if localName != remoteName:
                 copyRequired = True
                 tempParent = tempfile.mkdtemp()
                 newLocal = os.path.join(tempParent, remoteName)
                 dir_util.copy_tree(localDir, newLocal)
                 localDir = newLocal
             remoteDir = '/'.join(remoteDir.rstrip('/').split('/')[:-1])
         try:
             self._checkCmd(["push", localDir, remoteDir],
                            retryLimit=retryLimit,
                            timeout=timeout)
         except:
             raise
         finally:
             if copyRequired:
                 mozfile.remove(tempParent)
Example #34
 def setUp(self):
     import tempfile
     snippyt.shelve_name = tempfile.mktemp()
Example #35
from reportlab.platypus import SimpleDocTemplate, Paragraph, Spacer
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.units import inch

import cgi
import tempfile
import win32api

source_file_name = "d:/1.txt"
source_file_name = "d:/UNH-IOL_NVMe_Interop_Test_Suite_v9.0.pdf"
pdf_file_name = tempfile.mktemp(".pdf")

styles = getSampleStyleSheet()
h1 = styles["h1"]
normal = styles["Normal"]

doc = SimpleDocTemplate(pdf_file_name)
#
# reportlab expects to see XML-compliant
#  data; need to escape ampersands &c.
#
text = cgi.escape(open(source_file_name).read()).splitlines()

#
# Take the first line of the document as a
#  header; the rest are treated as body text.
#
story = [Paragraph(text[0], h1)]
for line in text[1:]:
    story.append(Paragraph(line, normal))
    story.append(Spacer(1, 0.2 * inch))
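
cgi.escape(), used above to make the input XML-safe for reportlab, was deprecated and later removed from the standard library; on current Python the equivalent escaping step would be (same source_file_name as above):

import html

with open(source_file_name) as f:
    text = html.escape(f.read()).splitlines()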
Example #36
def main():
    """Entry point for MRIQC's CLI."""
    import gc
    import os
    import sys
    from tempfile import mktemp
    import atexit
    from mriqc import config, messages
    from mriqc.cli.parser import parse_args

    atexit.register(config.restore_env)

    # Run parser
    parse_args()

    if config.execution.pdb:
        from mriqc.utils.debug import setup_exceptionhook

        setup_exceptionhook()

    # CRITICAL Save the config to a file. This is necessary because the execution graph
    # is built as a separate process to keep the memory footprint low. The most
    # straightforward way to communicate with the child process is via the filesystem.
    # The config file name needs to be unique, otherwise multiple mriqc instances
    # will create write conflicts.
    config_file = mktemp(dir=config.execution.work_dir,
                         prefix=".mriqc.",
                         suffix=".toml")
    config.to_filename(config_file)

    # Set up participant level
    if "participant" in config.workflow.analysis_level:
        _pool = None
        if config.nipype.plugin in ("MultiProc", "LegacyMultiProc"):
            from contextlib import suppress
            import multiprocessing as mp
            import multiprocessing.forkserver
            from concurrent.futures import ProcessPoolExecutor

            os.environ["OMP_NUM_THREADS"] = "1"

            with suppress(RuntimeError):
                mp.set_start_method("fork")
            gc.collect()

            _pool = ProcessPoolExecutor(
                max_workers=config.nipype.nprocs,
                initializer=config._process_initializer,
                initargs=(config.execution.cwd, config.nipype.omp_nthreads),
            )

        _resmon = None
        if config.execution.resource_monitor:
            from mriqc.instrumentation.resources import ResourceRecorder

            _resmon = ResourceRecorder(
                pid=os.getpid(),
                log_file=mktemp(dir=config.execution.work_dir,
                                prefix=".resources.",
                                suffix=".tsv"),
            )
            _resmon.start()

        # CRITICAL Call build_workflow(config_file, retval) in a subprocess.
        # Because Python on Linux does not ever free virtual memory (VM), running the
        # workflow construction jailed within a process preempts excessive VM buildup.
        from multiprocessing import Manager, Process

        with Manager() as mgr:
            from .workflow import build_workflow

            retval = mgr.dict()
            p = Process(target=build_workflow, args=(str(config_file), retval))
            p.start()
            p.join()

            mriqc_wf = retval.get("workflow", None)
            retcode = p.exitcode or retval.get("return_code", 0)

        # CRITICAL Load the config from the file. This is necessary because the ``build_workflow``
        # function executed constrained in a process may change the config (and thus the global
        # state of MRIQC).
        config.load(config_file)

        retcode = retcode or (mriqc_wf is None) * os.EX_SOFTWARE
        if retcode != 0:
            sys.exit(retcode)

        # Initialize nipype config
        config.nipype.init()
        # Make sure loggers are started
        config.loggers.init()

        if _resmon:
            config.loggers.cli.info(
                f"Started resource recording at {_resmon._logfile}.")

        # Resource management options
        if config.nipype.plugin in ("MultiProc", "LegacyMultiProc") and (
                1 < config.nipype.nprocs < config.nipype.omp_nthreads):
            config.loggers.cli.warning(
                "Per-process threads (--omp-nthreads=%d) exceed total "
                "threads (--nthreads/--n_cpus=%d)",
                config.nipype.omp_nthreads,
                config.nipype.nprocs,
            )

        if mriqc_wf is None:
            sys.exit(os.EX_SOFTWARE)

        if mriqc_wf and config.execution.write_graph:
            mriqc_wf.write_graph(graph2use="colored",
                                 format="svg",
                                 simple_form=True)

        if not config.execution.dry_run:
            # Warn about submitting measures BEFORE
            if not config.execution.no_sub:
                config.loggers.cli.warning(config.DSA_MESSAGE)

            # Clean up master process before running workflow, which may create forks
            gc.collect()
            # run MRIQC
            _plugin = config.nipype.get_plugin()
            if _pool:
                from mriqc.engine.plugin import MultiProcPlugin

                _plugin = {
                    "plugin":
                    MultiProcPlugin(pool=_pool,
                                    plugin_args=config.nipype.plugin_args),
                }
            mriqc_wf.run(**_plugin)

            # Warn about submitting measures AFTER
            if not config.execution.no_sub:
                config.loggers.cli.warning(config.DSA_MESSAGE)
        config.loggers.cli.log(25, messages.PARTICIPANT_FINISHED)

        if _resmon is not None:
            from mriqc.instrumentation.viz import plot
            _resmon.stop()
            plot(
                _resmon._logfile,
                param="mem_rss_mb",
                out_file=str(_resmon._logfile).replace(".tsv", ".rss.png"),
            )
            plot(
                _resmon._logfile,
                param="mem_vsm_mb",
                out_file=str(_resmon._logfile).replace(".tsv", ".vsm.png"),
            )

    # Set up group level
    if "group" in config.workflow.analysis_level:
        from ..reports import group_html
        from ..utils.bids import DEFAULT_TYPES
        from ..utils.misc import generate_tsv  # , generate_pred

        config.loggers.cli.info(messages.GROUP_START)

        # Generate reports
        mod_group_reports = []
        for mod in config.execution.modalities or DEFAULT_TYPES:
            output_dir = config.execution.output_dir
            dataframe, out_tsv = generate_tsv(output_dir, mod)
            # If there are no iqm.json files, nothing to do.
            if dataframe is None:
                continue

            tsv_message = messages.TSV_GENERATED.format(modality=mod,
                                                        path=out_tsv)
            config.loggers.cli.info(tsv_message)

            # out_pred = generate_pred(derivatives_dir, settings['output_dir'], mod)
            # if out_pred is not None:
            #     log.info('Predicted QA CSV table for the %s data generated (%s)',
            #                    mod, out_pred)

            out_html = output_dir / f"group_{mod}.html"
            group_html(
                out_tsv,
                mod,
                csv_failed=output_dir / f"group_variant-failed_{mod}.csv",
                out_file=out_html,
            )
            report_message = messages.GROUP_REPORT_GENERATED.format(
                modality=mod, path=out_html)
            config.loggers.cli.info(report_message)
            mod_group_reports.append(mod)

        if not mod_group_reports:
            raise Exception(messages.GROUP_NO_DATA)

        config.loggers.cli.info(messages.GROUP_FINISHED)

    from mriqc.utils.bids import write_bidsignore, write_derivative_description

    config.loggers.cli.info(messages.BIDS_META)
    write_derivative_description(config.execution.bids_dir,
                                 config.execution.output_dir)
    write_bidsignore(config.execution.output_dir)
    config.loggers.cli.info(messages.RUN_FINISHED)
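
The mktemp() calls in main() only need names that are unique inside work_dir, as the comment about write conflicts explains. The same uniqueness can be had without the name-only race by letting NamedTemporaryFile create and keep the file; a sketch, assuming config.execution.work_dir as above:

from tempfile import NamedTemporaryFile

# delete=False keeps the file after close so the child process can read it,
# mirroring how the mktemp()-named config file is used above.
with NamedTemporaryFile(dir=config.execution.work_dir, prefix=".mriqc.",
                        suffix=".toml", delete=False) as tmp:
    config_file = tmp.name
config.to_filename(config_file)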
Example #37
    def _generate_file(self, cr, uid, context=None):
        if context is None:
            context = {}
        res_users_obj = self.pool.get('res.users')
        period_id = []
        if context and context.get('year_id'):
            period_id.append(context.get('year_id')[0])
        start_date = end_date = False
        if period_id:
            period_data = self.pool.get('account.fiscalyear').browse(cr, uid, period_id[0])
            start_date = period_data.date_start
            end_date = period_data.date_stop
        company_data = res_users_obj.browse(cr, uid, uid).company_id
        purchase_order_obj = self.pool.get('purchase.order')
        acc_invoice_obj = self.pool.get('account.invoice')
        tax_obj = self.pool.get('account.tax')
        tax_code_obj = self.pool.get('account.tax.code')
        move_obj = self.pool.get('account.move')
        journal_obj = self.pool.get('account.journal')
        account_obj = self.pool.get('account.account')
        cur_obj = self.pool.get('res.currency')
        
        cust_arg = [('type', '=', 'out_invoice'), ('state', 'in', ['open', 'paid'])]
        supp_arg = [('type', '=', 'in_invoice'), ('state', 'in', ['open', 'paid'])]

        if start_date:
            cust_arg.append(('date_invoice', '>=', start_date))
            supp_arg.append(('date_invoice', '>=', start_date))
            
        if end_date:
            cust_arg.append(('date_invoice', '<=', end_date))
            supp_arg.append(('date_invoice', '<=', end_date))
                    
        customer_invoice_ids = acc_invoice_obj.search(cr, uid, cust_arg)
        supplier_invoice_ids = acc_invoice_obj.search(cr, uid, supp_arg)
        tgz_tmp_filename = tempfile.mktemp('.' + "txt")
        tmp_file = False
        try:
            tmp_file = open(tgz_tmp_filename, "w")
            company_record = tools.ustr('CompInfoStart|||||||||||||') + \
                            "\r\n" + \
                            tools.ustr('CompanyName|CompanyUEN|GSTNo|PeriodStart|PeriodEnd|IAFCreationDate|ProductVersion|IAFVersion||||||') + \
                            "\r\n" + \
                            tools.ustr(company_data and company_data.name or '') + \
                            '|'.ljust(1) + \
                            tools.ustr(company_data and company_data.company_uen or '') + \
                            '|'.ljust(1) + \
                            tools.ustr(company_data and company_data.gst_no or '') + \
                            '|'.ljust(1) + \
                            tools.ustr(company_data and company_data.period_start and datetime.datetime.strptime(company_data.period_start, DEFAULT_SERVER_DATE_FORMAT).strftime('%d/%m/%Y') or '') + \
                            '|'.ljust(1) + \
                            tools.ustr(company_data and company_data.period_end and datetime.datetime.strptime(company_data.period_end, DEFAULT_SERVER_DATE_FORMAT).strftime('%d/%m/%Y') or '') + \
                            '|'.ljust(1) + \
                            tools.ustr(company_data and company_data.iaf_creation_date and datetime.datetime.strptime(company_data.iaf_creation_date, DEFAULT_SERVER_DATE_FORMAT).strftime('%d/%m/%Y') or '') + \
                            '|'.ljust(1) + \
                            tools.ustr(company_data and company_data.product_version or '') + \
                            '|'.ljust(1) + \
                            tools.ustr(company_data and company_data.iaf_version or '') + \
                            '|'.ljust(1) + \
                            "\r\n" + \
                            tools.ustr('CompInfoEnd|||||||||||||') + \
                            "\r\n" + \
                            tools.ustr('|||||||||||||') + \
                            "\r\n" + \
                            tools.ustr('PurcDataStart|||||||||||||') + \
                            "\r\n" + \
                            tools.ustr('SupplierName|SupplierUEN|InvoiceDate|InvoiceNo|PermitNo|LineNo|ProductDescription|PurchaseValueSGD|GSTValueSGD|TaxCode|FCYCode|PurchaseFCY|GSTFCY') + \
                            "\r\n"
            tmp_file.write(company_record)
            tot_line = 0
            tot_pur_sgd = tot_gst_sg = 0.0
            for supplier in acc_invoice_obj.browse(cr, uid, supplier_invoice_ids):
                line_no = 1
                for line in supplier.invoice_line:
                    SupplierName = supplier.partner_id.name or ''
                    SupplierUEN = supplier.partner_id.supplier_uen or ''
                    InvoiceDate = supplier and supplier.date_invoice and datetime.datetime.strptime(supplier.date_invoice, DEFAULT_SERVER_DATE_FORMAT).strftime('%d/%m/%Y') or ''
                    InvoiceNo = supplier.supplier_invoice_number or ''
                    PermitNo = supplier.permit_no or ''
                    LineNo = line_no
                    ProductDescription = line.name or ''
                    
                    if supplier.currency_id.id == supplier.company_id.currency_id.id:
                        PurchaseValueSGD = line.price_subtotal or 0.0
                        GSTValueSGD = 0.0
                        TaxCode = ''
                        FCYCode = 'XXX'
                        PurchaseFCY = 0.0
                        GSTFCY = 0.0
                        
                    else:
                        PurchaseValueSGD = cur_obj.compute(cr, uid, supplier.currency_id.id, supplier.company_id.currency_id.id, line.price_subtotal, context={'date': supplier.date_invoice})
                        GSTValueSGD = 0.0
                        TaxCode = ''
                        FCYCode = supplier.currency_id.name or ''
                        PurchaseFCY = line.price_subtotal or 0.0
                        GSTFCY = 0.0
                    tot_pur_sgd += PurchaseValueSGD
                    
                    for tax in line.invoice_line_tax_id:
                        tax_amt = tax_amt_foreign = 0.0
                        tax_name = ''
                        tax_data = tax_obj.compute_all(cr, uid, [tax], (line.price_unit * (1 - (line.discount or 0.0) / 100.0)), line.quantity, line.product_id, supplier.partner_id)['taxes']
                        if tax_data:
                            tax_amt = tax_data[0]['amount']
                            tax_name = tax_code_obj.browse(cr, uid, tax_data[0].get('tax_code_id')).code
                        if supplier.currency_id.id == supplier.company_id.currency_id.id:
                            GSTValueSGD = tax_amt
                            TaxCode = tax_name or ''
                            GSTFCY = 0.0
                        else:
                            GSTValueSGD = cur_obj.compute(cr, uid, supplier.currency_id.id, supplier.company_id.currency_id.id, tax_amt, context={'date': supplier.date_invoice})
                            TaxCode = tax_name or ''
                            GSTFCY = tax_amt
                        tot_gst_sg += GSTValueSGD
                        
                    supplier_record = tools.ustr(SupplierName) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(SupplierUEN) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(InvoiceDate) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(InvoiceNo) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(PermitNo) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(int(LineNo)) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(ProductDescription) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(PurchaseValueSGD) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(GSTValueSGD) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(TaxCode) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(FCYCode) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(PurchaseFCY) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(GSTFCY) + \
                                      '|'.ljust(1) + \
                                      "\r\n"
                    tmp_file.write(supplier_record)
                    line_no += 1
                    tot_line += 1
            customer_data = tools.ustr('PurcDataEnd|') + \
                            tools.ustr(float(tot_pur_sgd) or 0.0) + \
                            '|'.ljust(1) + \
                            tools.ustr(float(tot_gst_sg) or 0.0) + \
                            '|'.ljust(1) + \
                            tools.ustr(int(tot_line)) + \
                            '||||||||||'.ljust(1) + \
                            "\r\n" + \
                            tools.ustr('|||||||||||||') + \
                            "\r\n" + \
                            tools.ustr('SuppDataStart|||||||||||||') + \
                            "\r\n" + \
                            tools.ustr('CustomerName|CustomerUEN|InvoiceDate|InvoiceNo|LineNo|ProductDescription|SupplyValueSGD|GSTValueSGD|TaxCode|Country|FCYCode|SupplyFCY|GSTFCY|') + \
                            "\r\n"
            tmp_file.write(customer_data)
            
            tot_supp_line_no = 0
            tot_supp_sgd = tot_gst_sg = 0.0
            for customer in acc_invoice_obj.browse(cr, uid, customer_invoice_ids):
                supp_line_no = 1
                for line in customer.invoice_line:

                    CustomerName = customer.partner_id.name or ''
                    CustomerUEN = customer.partner_id.customer_uen or ''
                    InvoiceDate = customer and customer.date_invoice and datetime.datetime.strptime(customer.date_invoice, DEFAULT_SERVER_DATE_FORMAT).strftime('%d/%m/%Y') or ''
                    InvoiceNo = customer.number or ''
                    LineNo = supp_line_no
                    ProductDescription = line.name or ''
                    Country = customer.partner_id.country_id and customer.partner_id.country_id.name or ''
                    
                    if customer.currency_id.id == customer.company_id.currency_id.id:
                        SupplyValueSGD = line.price_subtotal or 0.0
                        GSTValueSGD = 0.0
                        TaxCode = ''
                        FCYCode = 'XXX'
                        SupplyFCY = 0.0
                        GSTFCY = 0.0
                    else:
                        SupplyValueSGD = cur_obj.compute(cr, uid, customer.currency_id.id, customer.company_id.currency_id.id, line.price_subtotal, context={'date': customer.date_invoice})
                        GSTValueSGD = 0.0
                        TaxCode = ''
                        FCYCode = customer.currency_id.name or ''
                        SupplyFCY = line.price_subtotal or 0.0
                        GSTFCY = 0.0
                    tot_supp_sgd += SupplyValueSGD
                    
                    for tax in line.invoice_line_tax_id:
                        tax_amt = tax_amt_foreign = 0.0
                        tax_name = ''
                        tax_data = tax_obj.compute_all(cr, uid, [tax], (line.price_unit * (1 - (line.discount or 0.0) / 100.0)), line.quantity, line.product_id, customer.partner_id)['taxes']
                        if tax_data:
                            tax_amt = tax_data[0]['amount']
                            tax_name = tax_code_obj.browse(cr, uid, tax_data[0].get('tax_code_id')).code
                        if customer.currency_id.id == customer.company_id.currency_id.id:
                            GSTValueSGD = tax_amt
                            TaxCode = tax_name or ''
                            GSTFCY = 0.0
                        else:
                            GSTValueSGD = cur_obj.compute(cr, uid, customer.currency_id.id, customer.company_id.currency_id.id, tax_amt, context={'date': customer.date_invoice})
                            TaxCode = tax_name or ''
                            GSTFCY = tax_amt
                        tot_gst_sg += GSTValueSGD

                 
                    supplier_record = tools.ustr(CustomerName) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(CustomerUEN) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(InvoiceDate) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(InvoiceNo) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(int(LineNo)) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(ProductDescription).encode('ascii', 'ignore').decode('ascii')+ \
                                      '|'.ljust(1) + \
                                      tools.ustr(SupplyValueSGD) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(GSTValueSGD) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(TaxCode) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(Country) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(FCYCode) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(float(SupplyFCY)) + \
                                      '|'.ljust(1) + \
                                      tools.ustr(GSTFCY) + \
                                      '|'.ljust(1) + \
                                      "\r\n"
                    tmp_file.write(supplier_record)
                    supp_line_no += 1
                    tot_supp_line_no += 1
            account_data = tools.ustr('SuppDataEnd|') + \
                            tools.ustr(float(tot_supp_sgd) or 0.0) + \
                            '|'.ljust(1) + \
                            tools.ustr(float(tot_gst_sg) or 0.0) + \
                            '|'.ljust(1) + \
                            tools.ustr(int(tot_supp_line_no)) + \
                            '||||||||||'.ljust(1) + \
                            "\r\n" + \
                            tools.ustr('|||||||||||||') + \
                            "\r\n" + \
                            tools.ustr('GLDataStart|||||||||||||') + \
                            "\r\n" + \
                            tools.ustr('TransactionDate|AccountID|AccountName|TransactionDescription|Name|TransactionID|SourceDocumentID|SourceType|Debit|Credit|Balance|||') + \
                            "\r\n"
            tmp_file.write(account_data)
            
            
            account = account_obj.browse(cr, uid, context.get('datas').get('chart_account_id')[0])
            child_data = self.get_children_accounts(cr, uid, account, context=context)
            for acc in child_data:
                obj_period = self.pool.get('account.period')
                ctx = {'fiscalyear':context.get('datas').get('fiscalyear_id')[0]}
                period_ids = obj_period.search(cr, uid, [('fiscalyear_id', '=', context.get('datas').get('fiscalyear_id')[0]), ('special', '=', True)])
                ctx.update({'datas': context.get('datas'), 'periods':period_ids})
                
                debit_amt = self._sum_debit_account(cr, uid, acc, context=ctx)
                credit_amt = self._sum_credit_account(cr, uid, acc, context=ctx)
                balance_account = self._sum_balance_account(cr, uid, acc, context=ctx)
                opening_balance = tools.ustr(datetime.datetime.strptime(start_date, DEFAULT_SERVER_DATE_FORMAT).strftime('%d/%m/%Y') or '') + \
                                  '|'.ljust(1) + \
                                  tools.ustr(acc.code) + \
                                  '|'.ljust(1) + \
                                  tools.ustr(acc.name) + \
                                  '|'.ljust(1) + \
                                  tools.ustr('OPENING BALANCE') + \
                                  '|||||'.ljust(1) + \
                                  tools.ustr(debit_amt or 0.0) + \
                                  '|'.ljust(1) + \
                                  tools.ustr(credit_amt or 0.0) + \
                                  '|'.ljust(1) + \
                                  tools.ustr(balance_account or 0.0) + \
                                  '|||'.ljust(1) + \
                                  "\r\n"
                tmp_file.write(opening_balance)
                acc_data = self.lines(cr, uid, acc, context=context)
                for ac in acc_data:
                    move_data = tools.ustr(datetime.datetime.strptime(ac.get('ldate'), DEFAULT_SERVER_DATE_FORMAT).strftime('%d/%m/%Y') or '') + \
                                '|'.ljust(1) + \
                                tools.ustr(acc.code) + \
                                '|'.ljust(1) + \
                                tools.ustr(acc.name) + \
                                '|'.ljust(1) + \
                                tools.ustr(ac.get('lname') or '') + \
                                '|'.ljust(1) + \
                                tools.ustr(ac.get('partner_name') or '') + \
                                '|'.ljust(1) + \
                                tools.ustr(ac.get('mmove_id') or '') + \
                                '|'.ljust(1) + \
                                tools.ustr(ac.get('lref') or '') + \
                                '|'.ljust(1) + \
                                tools.ustr(ac.get('line_corresp') or '') + \
                                '|'.ljust(1) + \
                                tools.ustr(float(ac.get('debit')) or 0.0) + \
                                '|'.ljust(1) + \
                                tools.ustr(float(ac.get('credit')) or 0.0) + \
                                '|'.ljust(1) + \
                                tools.ustr(ac.get('progress') or 0.0) + \
                                '|'.ljust(1) + \
                                "\r\n"
                    tmp_file.write(move_data)
                    
        finally:
            if tmp_file:
                tmp_file.close()
        file = open(tgz_tmp_filename, "rb")
        out = file.read()
        file.close()
        return base64.b64encode(out)
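# Added aside (not part of the module above): the overall pattern -- write a
# report to a temporary path, read it back and return it base64-encoded -- can
# be sketched with the race-free tempfile.mkstemp() instead of mktemp():
import base64
import os
import tempfile

def build_report_b64(lines):
    """Minimal sketch: write `lines` to a temp file and return its base64 bytes."""
    fd, path = tempfile.mkstemp(suffix='.txt')
    try:
        with os.fdopen(fd, 'w') as tmp:
            tmp.write('\r\n'.join(lines) + '\r\n')
        with open(path, 'rb') as handle:
            return base64.b64encode(handle.read())
    finally:
        os.remove(path)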
Example #38
0
def infer(model,
          path,
          detections_file,
          resize,
          max_size,
          batch_size,
          mixed_precision=True,
          is_master=True,
          world=0,
          annotations=None,
          use_dali=True,
          is_validation=False,
          verbose=True,
          logdir=None,
          iteration=100):
    'Run inference on images from path'

    backend = 'pytorch' if isinstance(model, Model) or isinstance(
        model, DDP) else 'tensorrt'

    stride = model.module.stride if isinstance(model, DDP) else model.stride

    # Create annotations if none was provided
    if not annotations:
        annotations = tempfile.mktemp('.json')
        images = [{
            'id': i,
            'file_name': f
        } for i, f in enumerate(os.listdir(path))]
        json.dump({'images': images}, open(annotations, 'w'))
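    # Added note: the stub above only fills the 'images' list of a COCO-style
    # annotations file; a full file would also carry 'annotations' and
    # 'categories' entries. A hypothetical layout would look like:
    #   {"images": [{"id": 0, "file_name": "img0.jpg"}],
    #    "annotations": [], "categories": []}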

    # TensorRT only supports fixed input sizes, so override input size accordingly
    if backend == 'tensorrt': max_size = max(model.input_size)

    # Prepare dataset
    if verbose: print('Preparing dataset...')
    data_iterator = (DaliDataIterator if use_dali else DataIterator)(
        path,
        resize,
        max_size,
        batch_size,
        stride,
        world,
        annotations,
        training=False)
    if verbose: print(data_iterator)

    # Prepare model
    if backend == 'pytorch':
        # If we are doing validation during training,
        # no need to register model with AMP again
        if not is_validation:
            if torch.cuda.is_available(): model = model.cuda()
            model = amp.initialize(model,
                                   None,
                                   opt_level='O2' if mixed_precision else 'O0',
                                   keep_batchnorm_fp32=True,
                                   verbosity=0)

        model.eval()

    if verbose:
        print('   backend: {}'.format(backend))
        print('    device: {} {}'.format(
            world, 'cpu' if not torch.cuda.is_available() else
            'gpu' if world == 1 else 'gpus'))
        print('     batch: {}, precision: {}'.format(
            batch_size, 'unknown' if backend == 'tensorrt' else
            'mixed' if mixed_precision else 'full'))
        print('Running inference...')

    results = []
    profiler = Profiler(['infer', 'fw'])
    with torch.no_grad():
        for i, (data, ids, ratios) in enumerate(data_iterator):
            # Forward pass
            profiler.start('fw')
            scores, boxes, classes = model(data)
            profiler.stop('fw')

            results.append([scores, boxes, classes, ids, ratios])

            profiler.bump('infer')
            if verbose and (profiler.totals['infer'] > 60
                            or i == len(data_iterator) - 1):
                size = len(data_iterator.ids)
                msg = '[{:{len}}/{}]'.format(min((i + 1) * batch_size, size),
                                             size,
                                             len=len(str(size)))
                msg += ' {:.3f}s/{}-batch'.format(profiler.means['infer'],
                                                  batch_size)
                msg += ' (fw: {:.3f}s)'.format(profiler.means['fw'])
                msg += ', {:.1f} im/s'.format(batch_size /
                                              profiler.means['infer'])
                print(msg, flush=True)

                profiler.reset()

    # Gather results from all devices
    if verbose: print('Gathering results...')
    results = [torch.cat(r, dim=0) for r in zip(*results)]
    if world > 1:
        for r, result in enumerate(results):
            all_result = [
                torch.ones_like(result, device=result.device)
                for _ in range(world)
            ]
            torch.distributed.all_gather(list(all_result), result)
            results[r] = torch.cat(all_result, dim=0)

    if is_master:
        # Copy buffers back to host
        results = [r.cpu() for r in results]

        # Collect detections
        detections = []
        processed_ids = set()
        for scores, boxes, classes, image_id, ratios in zip(*results):
            image_id = image_id.item()
            if image_id in processed_ids:
                continue
            processed_ids.add(image_id)

            keep = (scores > 0).nonzero()
            scores = scores[keep].view(-1)
            boxes = boxes[keep, :].view(-1, 4) / ratios
            classes = classes[keep].view(-1).int()

            for score, box, cat in zip(scores, boxes, classes):
                x1, y1, x2, y2 = box.data.tolist()
                cat = cat.item()
                if 'annotations' in data_iterator.coco.dataset:
                    cat = data_iterator.coco.getCatIds()[cat]
                detections.append({
                    'image_id': image_id,
                    'score': score.item(),
                    'bbox': [x1, y1, x2 - x1 + 1, y2 - y1 + 1],
                    'category_id': cat
                })

        if detections:
            # Save detections
            if detections_file and verbose:
                print('Writing {}...'.format(detections_file))
            detections = {'annotations': detections}
            detections['images'] = data_iterator.coco.dataset['images']
            if 'categories' in data_iterator.coco.dataset:
                detections['categories'] = [
                    data_iterator.coco.dataset['categories']
                ]
            if detections_file:
                json.dump(detections, open(detections_file, 'w'), indent=4)

            # Evaluate model on dataset
            if 'annotations' in data_iterator.coco.dataset:
                if verbose: print('Evaluating model...')
                with redirect_stdout(None):
                    coco_pred = data_iterator.coco.loadRes(
                        detections['annotations'])
                    coco_eval = COCOeval(data_iterator.coco, coco_pred, 'bbox')
                    coco_eval.evaluate()
                    coco_eval.accumulate()
                coco_eval.summarize()
                results = coco_eval.stats
                # Create TensorBoard writer
                if logdir is not None:
                    from tensorboardX import SummaryWriter
                    if is_master and verbose:
                        print('Infer writer: Writing TensorBoard logs to: {}'.
                              format(logdir))
                    writer = SummaryWriter(logdir=logdir)
                    if results != []:
                        writer.add_scalar(
                            'Average Precision/IoU=0.50:0.95/area=all/maxDets=100',
                            results[0], iteration)
                        writer.add_scalar(
                            'Average Precision/IoU=0.50/area=all/maxDets=100',
                            results[1], iteration)
                        writer.add_scalar(
                            'Average Precision/IoU=0.75/area=all/maxDets=100',
                            results[2], iteration)
                        writer.add_scalar(
                            'Average Precision/IoU=0.50:0.95/area=small/maxDets=100',
                            results[3], iteration)
                        writer.add_scalar(
                            'Average Precision/IoU=0.50:0.95/area=medium/maxDets=100',
                            results[4], iteration)
                        writer.add_scalar(
                            'Average Precision/IoU=0.50:0.95/area=large/maxDets=100',
                            results[5], iteration)
                        writer.add_scalar(
                            'Average Recall/IoU=0.50:0.95/area=all/maxDets=1',
                            results[6], iteration)
                        writer.add_scalar(
                            'Average Recall/IoU=0.50:0.95/area=all/maxDets=10',
                            results[7], iteration)
                        writer.add_scalar(
                            'Average Recall/IoU=0.50:0.95/area=all/maxDets=100',
                            results[8], iteration)
                        writer.add_scalar(
                            'Average Recall/IoU=0.50:0.95/area= small/maxDets=100',
                            results[9], iteration)
                        writer.add_scalar(
                            'Average Recall/IoU=0.50:0.95/area=medium/maxDets=100',
                            results[10], iteration)
                        writer.add_scalar(
                            'Average Recall/IoU=0.50:0.95/area= large/maxDets=100',
                            results[11], iteration)
                    writer.close()
        else:
            print('No detections!')

        if logdir is not None and detections_file is not None:
            from tensorboardX import SummaryWriter
            if is_master and verbose:
                print('Writing TensorBoard logs to: {}'.format(logdir))
            writer = SummaryWriter(logdir=logdir)

            def get_bounding_boxes(annotations: List, image_id: int) -> List:
                return [a for a in annotations if a["image_id"] == image_id]

            with open(detections_file, "r") as file:
                all_detections = json.load(file)

            with open(annotations, "r") as file:
                all_ground_truths = json.load(file)

            i = 0
            for image_json in all_detections["images"][:3]:
                image_id = image_json["id"]
                image_path = path + '/' + image_json["file_name"]
                image = io.imread(image_path)

                assert (image_json["file_name"] == [
                    x["file_name"] for x in all_ground_truths["images"]
                    if x["id"] == image_id
                ][0])

                fig, ax = plt.subplots(figsize=(16, 16))
                ax.imshow(image)

                detections = get_bounding_boxes(all_detections["annotations"],
                                                image_id)
                detections = [d for d in detections if d["score"] > 0.5]

                ground_truths = get_bounding_boxes(
                    all_ground_truths["annotations"], image_id)

                for d in detections:
                    x, y, width, height = d["bbox"]
                    score = d["score"]
                    category_id = d["category_id"]

                    rectangle = patches.Rectangle(
                        (x, y),
                        width,
                        height,
                        linewidth=2,
                        edgecolor="r",
                        facecolor="none",
                    )
                    ax.add_patch(rectangle)

                    ax.text(
                        x,
                        y - 4,
                        f"{category_id}: {score:0.2f}",
                        color="r",
                        fontsize=20,
                        fontweight="bold",
                    )

                for gt in ground_truths:
                    x, y, width, height = gt["bbox"]

                    rectangle = patches.Rectangle(
                        (x, y),
                        width,
                        height,
                        linewidth=2,
                        edgecolor="b",
                        facecolor="none",
                    )
                    ax.add_patch(rectangle)

                ax.axis("off")
                writer.add_figure('images', fig, i)
                i += 1

            writer.close()
Example #39
0
def main():
    """Exercise the Gnuplot module."""

    print(
        'This program exercises many of the features of Gnuplot.py.  The\n'
        'commands that are actually sent to gnuplot are printed for your\n'
        'enjoyment.'
        )

    wait('Popping up a blank gnuplot window on your screen.')
    g = Gnuplot.Gnuplot(debug=1)
    g.clear()

    # Make two temporary files:
    if hasattr(tempfile, 'mkstemp'):
        (fd, filename1,) = tempfile.mkstemp(text=1)
        f = os.fdopen(fd, 'w')
        (fd, filename2,) = tempfile.mkstemp(text=1)
    else:
        filename1 = tempfile.mktemp()
        f = open(filename1, 'w')
        filename2 = tempfile.mktemp()
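    # Added note: the mkstemp() branch above is preferred when available
    # because it creates the file atomically and returns an open descriptor,
    # whereas mktemp() only returns a name and leaves a window in which
    # another process could create the same path first.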
    try:
        for x in numpy.arange(100.)/5. - 10.:
            f.write('%s %s %s\n' % (x, math.cos(x), math.sin(x)))
        f.close()

        print('############### test Func ###################################')
        wait('Plot a gnuplot-generated function')
        g.plot(Gnuplot.Func('sin(x)'))

        wait('Set title and axis labels and try replot()')
        g.title('Title')
        g.xlabel('x')
        g.ylabel('y')
        g.replot()

        wait('Style linespoints')
        g.plot(Gnuplot.Func('sin(x)', with_='linespoints'))
        wait('title=None')
        g.plot(Gnuplot.Func('sin(x)', title=None))
        wait('title="Sine of x"')
        g.plot(Gnuplot.Func('sin(x)', title='Sine of x'))
        wait('axes=x2y2')
        g.plot(Gnuplot.Func('sin(x)', axes='x2y2', title='Sine of x'))
        wait('Custom tics')
        g.set_tics(
            'x',
            [
                (-2 * math.pi, '-2pi'),
                (-math.pi, '', 2),
                (0,),
                Gnuplot.Tic(math.pi, level=2),
                Gnuplot.Tic(2 * math.pi, '2pi'),
                ],
            )
        g.plot(Gnuplot.Func('sin(x)', title='Sine of x'))
        wait('Reset to default tics')
        g.set_tics('x', 'auto')
        g.plot(Gnuplot.Func('sin(x)', title='Sine of x'))

        print('Change Func attributes after construction:')
        f = Gnuplot.Func('sin(x)')
        wait('Original')
        g.plot(f)
        wait('Style linespoints')
        f.set_option(with_='linespoints')
        g.plot(f)
        wait('title=None')
        f.set_option(title=None)
        g.plot(f)
        wait('title="Sine of x"')
        f.set_option(title='Sine of x')
        g.plot(f)
        wait('axes=x2y2')
        f.set_option(axes='x2y2')
        g.plot(f)

        print('############### test File ###################################')
        wait('Generate a File from a filename')
        g.plot(Gnuplot.File(filename1))

        wait('Style lines')
        g.plot(Gnuplot.File(filename1, with_='lines'))

        wait('using=1, using=(1,)')
        g.plot(Gnuplot.File(filename1, using=1, with_='lines'),
               Gnuplot.File(filename1, using=(1,), with_='points'))
        wait('using=(1,2), using="1:3"')
        g.plot(Gnuplot.File(filename1, using=(1,2)),
               Gnuplot.File(filename1, using='1:3'))

        wait('every=5, every=(5,)')
        g.plot(Gnuplot.File(filename1, every=5, with_='lines'),
               Gnuplot.File(filename1, every=(5,), with_='points'))
        wait('every=(10,None,0), every="10::5"')
        g.plot(Gnuplot.File(filename1, with_='lines'),
               Gnuplot.File(filename1, every=(10,None,0)),
               Gnuplot.File(filename1, every='10::5'))

        wait('title=None')
        g.plot(Gnuplot.File(filename1, title=None))
        wait('title="title"')
        g.plot(Gnuplot.File(filename1, title='title'))

        print('Change File attributes after construction:')
        f = Gnuplot.File(filename1)
        wait('Original')
        g.plot(f)
        wait('Style linespoints')
        f.set_option(with_='linespoints')
        g.plot(f)
        wait('using=(1,3)')
        f.set_option(using=(1,3))
        g.plot(f)
        wait('title=None')
        f.set_option(title=None)
        g.plot(f)

        print('############### test Data ###################################')
        x = numpy.arange(100)/5. - 10.
        y1 = numpy.cos(x)
        y2 = numpy.sin(x)
        d = numpy.transpose((x,y1,y2))

        wait('Plot Data against its index')
        g.plot(Gnuplot.Data(y2, inline=0))

        wait('Plot Data, specified column-by-column')
        g.plot(Gnuplot.Data(x,y2, inline=0))
        wait('Same thing, saved to a file')
        Gnuplot.Data(x,y2, inline=0, filename=filename1)
        g.plot(Gnuplot.File(filename1))
        wait('Same thing, inline data')
        g.plot(Gnuplot.Data(x,y2, inline=1))

        wait('Plot Data, specified by an array')
        g.plot(Gnuplot.Data(d, inline=0))
        wait('Same thing, saved to a file')
        Gnuplot.Data(d, inline=0, filename=filename1)
        g.plot(Gnuplot.File(filename1))
        wait('Same thing, inline data')
        g.plot(Gnuplot.Data(d, inline=1))
        wait('with_="lp lt 4 pt 4"')
        g.plot(Gnuplot.Data(d, with_='lp lt 4 pt 4'))
        wait('cols=0')
        g.plot(Gnuplot.Data(d, cols=0))
        wait('cols=(0,1), cols=(0,2)')
        g.plot(Gnuplot.Data(d, cols=(0,1), inline=0),
               Gnuplot.Data(d, cols=(0,2), inline=0))
        wait('Same thing, saved to files')
        Gnuplot.Data(d, cols=(0,1), inline=0, filename=filename1)
        Gnuplot.Data(d, cols=(0,2), inline=0, filename=filename2)
        g.plot(Gnuplot.File(filename1), Gnuplot.File(filename2))
        wait('Same thing, inline data')
        g.plot(Gnuplot.Data(d, cols=(0,1), inline=1),
               Gnuplot.Data(d, cols=(0,2), inline=1))
        wait('Change title and replot()')
        g.title('New title')
        g.replot()
        wait('title=None')
        g.plot(Gnuplot.Data(d, title=None))
        wait('title="Cosine of x"')
        g.plot(Gnuplot.Data(d, title='Cosine of x'))

        print('############### test compute_Data ###########################')
        x = numpy.arange(100)/5. - 10.

        wait('Plot Data, computed by Gnuplot.py')
        g.plot(
            Gnuplot.funcutils.compute_Data(x, lambda x: math.cos(x), inline=0)
            )
        wait('Same thing, saved to a file')
        Gnuplot.funcutils.compute_Data(
            x, lambda x: math.cos(x), inline=0, filename=filename1
            )
        g.plot(Gnuplot.File(filename1))
        wait('Same thing, inline data')
        g.plot(Gnuplot.funcutils.compute_Data(x, math.cos, inline=1))
        wait('with_="lp lt 4 pt 4"')
        g.plot(Gnuplot.funcutils.compute_Data(x, math.cos, with_='lp lt 4 pt 4'))

        print('############### test hardcopy ###############################')
        print('******** Generating postscript file "gp_test.ps" ********')
        wait()
        g.plot(Gnuplot.Func('cos(0.5*x*x)', with_='linespoints lt 2 pt 2',
                       title='cos(0.5*x^2)'))
        g.hardcopy('gp_test.ps')

        wait('Testing hardcopy options: mode="eps"')
        g.hardcopy('gp_test.ps', mode='eps')
        wait('Testing hardcopy options: mode="landscape"')
        g.hardcopy('gp_test.ps', mode='landscape')
        wait('Testing hardcopy options: mode="portrait"')
        g.hardcopy('gp_test.ps', mode='portrait')
        wait('Testing hardcopy options: eps=1')
        g.hardcopy('gp_test.ps', eps=1)
        wait('Testing hardcopy options: mode="default"')
        g.hardcopy('gp_test.ps', mode='default')
        wait('Testing hardcopy options: enhanced=1')
        g.hardcopy('gp_test.ps', enhanced=1)
        wait('Testing hardcopy options: enhanced=0')
        g.hardcopy('gp_test.ps', enhanced=0)
        wait('Testing hardcopy options: color=1')
        g.hardcopy('gp_test.ps', color=1)
        # For some reason, 
        #    g.hardcopy('gp_test.ps', color=0, solid=1)
        # doesn't work here (it doesn't activate the solid option), even
        # though the command sent to gnuplot looks correct.  I'll
        # tentatively conclude that it is a gnuplot bug. ###
        wait('Testing hardcopy options: color=0')
        g.hardcopy('gp_test.ps', color=0)
        wait('Testing hardcopy options: solid=1')
        g.hardcopy('gp_test.ps', solid=1)
        wait('Testing hardcopy options: duplexing="duplex"')
        g.hardcopy('gp_test.ps', solid=0, duplexing='duplex')
        wait('Testing hardcopy options: duplexing="defaultplex"')
        g.hardcopy('gp_test.ps', duplexing='defaultplex')
        wait('Testing hardcopy options: fontname="Times-Italic"')
        g.hardcopy('gp_test.ps', fontname='Times-Italic')
        wait('Testing hardcopy options: fontsize=20')
        g.hardcopy('gp_test.ps', fontsize=20)

        print('******** Generating svg file "gp_test.svg" ********')
        wait()
        g.plot(Gnuplot.Func('cos(0.5*x*x)', with_='linespoints lt 2 pt 2',
                       title='cos(0.5*x^2)'))
        g.hardcopy('gp_test.svg', terminal='svg')

        wait('Testing hardcopy svg options: enhanced')
        g.hardcopy('gp_test.ps', terminal='svg', enhanced='1')
        

        print('############### test shortcuts ##############################')
        wait('plot Func and Data using shortcuts')
        g.plot('sin(x)', d)

        print('############### test splot ##################################')
        wait('a 3-d curve')
        g.splot(Gnuplot.Data(d, with_='linesp', inline=0))
        wait('Same thing, saved to a file')
        Gnuplot.Data(d, inline=0, filename=filename1)
        g.splot(Gnuplot.File(filename1, with_='linesp'))
        wait('Same thing, inline data')
        g.splot(Gnuplot.Data(d, with_='linesp', inline=1))

        print('############### test GridData and compute_GridData ##########')
        # set up x and y values at which the function will be tabulated:
        x = numpy.arange(35)/2.0
        y = numpy.arange(30)/10.0 - 1.5
        # Make a 2-d array containing a function of x and y.  First create
        # xm and ym which contain the x and y values in a matrix form that
        # can be `broadcast' into a matrix of the appropriate shape:
        xm = x[:,numpy.newaxis]
        ym = y[numpy.newaxis,:]
        m = (numpy.sin(xm) + 0.1*xm) - ym**2
        wait('a function of two variables from a GridData file')
        g('set parametric')
        g('set style data lines')
        g('set hidden')
        g('set contour base')
        g.xlabel('x')
        g.ylabel('y')
        g.splot(Gnuplot.GridData(m,x,y, binary=0, inline=0))
        wait('Same thing, saved to a file')
        Gnuplot.GridData(m,x,y, binary=0, inline=0, filename=filename1)
        g.splot(Gnuplot.File(filename1, binary=0))
        wait('Same thing, inline data')
        g.splot(Gnuplot.GridData(m,x,y, binary=0, inline=1))

        wait('The same thing using binary mode')
        g.splot(Gnuplot.GridData(m,x,y, binary=1))
        wait('Same thing, using binary mode and an intermediate file')
        Gnuplot.GridData(m,x,y, binary=1, filename=filename1)
        g.splot(Gnuplot.File(filename1, binary=1))

        wait('The same thing using compute_GridData to tabulate function')
        g.splot(Gnuplot.funcutils.compute_GridData(
            x,y, lambda x,y: math.sin(x) + 0.1*x - y**2,
            ))
        wait('Same thing, with an intermediate file')
        Gnuplot.funcutils.compute_GridData(
            x,y, lambda x,y: math.sin(x) + 0.1*x - y**2,
            filename=filename1)
        g.splot(Gnuplot.File(filename1, binary=1))

        wait('Use compute_GridData in ufunc and binary mode')
        g.splot(Gnuplot.funcutils.compute_GridData(
            x,y, lambda x,y: numpy.sin(x) + 0.1*x - y**2,
            ufunc=1, binary=1,
            ))
        wait('Same thing, with an intermediate file')
        Gnuplot.funcutils.compute_GridData(
            x,y, lambda x,y: numpy.sin(x) + 0.1*x - y**2,
            ufunc=1, binary=1,
            filename=filename1)
        g.splot(Gnuplot.File(filename1, binary=1))

        wait('And now rotate it a bit')
        for view in range(35,70,5):
            g('set view 60, %d' % view)
            g.replot()
            time.sleep(1.0)

        wait(prompt='Press return to end the test.\n')
    finally:
        os.unlink(filename1)
        os.unlink(filename2)
Example #40
0
import os
import random
import shutil
import sys
import tempfile
import unittest

import ZConfig
from ZConfig.components.logger.tests.test_logger import LoggingTestHelper

from App.config import getConfiguration, setConfiguration
import Products
from ZServer.Zope2.Startup import get_starter
from ZServer.Zope2.Startup.options import ZopeOptions

TEMPNAME = tempfile.mktemp()
TEMPPRODUCTS = os.path.join(TEMPNAME, "Products")
_SCHEMA = {}


def getSchema(schemafile):
    global _SCHEMA
    if schemafile not in _SCHEMA:
        opts = ZopeOptions()
        opts.schemafile = schemafile
        opts.load_schema()
        _SCHEMA[schemafile] = opts.schema
    return _SCHEMA[schemafile]
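# Added usage sketch (not in the original test module): getSchema caches the
# parsed schema per schema file, so repeated calls with the same name are
# cheap and return the same object, e.g. with a hypothetical filename:
#   schema = getSchema('zopeschema.xml')
#   assert schema is getSchema('zopeschema.xml')  # served from the _SCHEMA cache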


# try to preserve logging state so we don't screw up other unit tests
Example #41
0
    def download_version(self, version):

        try:
            os.makedirs(self.install_dir)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(self.install_dir):
                pass
            else:
                raise

        urls = []
        for link_version, link_url in self.links.iteritems():
            if link_version.startswith(version):
                # If we have a "-" in our version, exact match only
                if version.find("-") >= 0:
                    if link_version != version: continue
                elif link_version.find("-") >= 0:
                    continue

                urls.append((link_version, link_url))

        if len(urls) == 0:
            raise Exception("Cannot find a link for version %s, versions %s found." \
                % (version, self.links))

        urls.sort(key=version_tuple)
        full_version = urls[-1][0]
        url = urls[-1][1]
        extract_dir = url.split("/")[-1][:-4]

        # only download if we don't already have the directory
        already_downloaded = os.path.isdir(
            os.path.join(self.install_dir, extract_dir))
        if already_downloaded:
            print "Skipping download for version %s (%s) since the dest already exists '%s'" \
                % (version, full_version, extract_dir)
        else:
            temp_dir = tempfile.mkdtemp()
            temp_file = tempfile.mktemp(suffix=".tgz")

            data = urllib2.urlopen(url)

            print "Downloading data for version %s (%s)..." % (version,
                                                               full_version)

            with open(temp_file, 'wb') as f:
                f.write(data.read())
                print "Uncompressing data for version %s (%s)..." % (
                    version, full_version)

            # Can't use cool with syntax b/c of python 2.6
            tf = tarfile.open(temp_file, 'r:gz')

            try:
                tf.extractall(path=temp_dir)
            except:
                tf.close()
                raise

            tf.close()

            temp_install_dir = os.path.join(temp_dir, extract_dir)

            shutil.move(temp_install_dir, self.install_dir)

            shutil.rmtree(temp_dir)
            os.remove(temp_file)

        self.symlink_version(
            version,
            os.path.abspath(os.path.join(self.install_dir, extract_dir)))
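# Added aside (not part of the project above): the download/extract/cleanup
# dance can be sketched with context managers so the scratch archive and
# directory are removed even when extraction fails:
import os
import shutil
import tarfile
import tempfile

def extract_tgz_into(archive_path, dest_dir):
    """Minimal sketch: unpack a trusted .tgz into dest_dir via a temp directory."""
    os.makedirs(dest_dir, exist_ok=True)
    with tempfile.TemporaryDirectory() as scratch:
        with tarfile.open(archive_path, 'r:gz') as tf:
            tf.extractall(path=scratch)
        for entry in os.listdir(scratch):
            shutil.move(os.path.join(scratch, entry), dest_dir)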
Example #42
0
TEMPLATE_FOLDER = os.path.join(HERE, TEMPLATE_FOLDER_NAME)
CALENDARS_TEMPLATE_FOLDER_NAME = "calendars"
CALENDAR_TEMPLATE_FOLDER = os.path.join(TEMPLATE_FOLDER, CALENDARS_TEMPLATE_FOLDER_NAME)
STATIC_FOLDER_NAME = "static"
STATIC_FOLDER_PATH = os.path.join(HERE, STATIC_FOLDER_NAME)
DHTMLX_LANGUAGES_FILE = os.path.join(STATIC_FOLDER_PATH, "js", "dhtmlx", "locale", "languages.json")

# specification
PARAM_SPECIFICATION_URL = "specification_url"

# globals
app = Flask(__name__, template_folder="templates")
# Check Configuring Flask-Cache section for more details
cache = Cache(app, config={
    'CACHE_TYPE': 'filesystem',
    'CACHE_DIR': tempfile.mktemp(prefix="cache-")})
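# Added note: tempfile.mktemp() only returns a fresh path here; nothing exists
# on disk at this point, and the directory is expected to be created by the
# filesystem cache backend the first time it is used.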

# caching

__URL_CACHE = {}
def cache_url(url, text):
    """Cache the value of a url."""
    __URL_CACHE[url] = text
    try:
        get_text_from_url(url)
    finally:
        del __URL_CACHE[url]

# configuration

def get_configuration():
Example #43
0
 def test(self):
     a = x()
     b = tempfile.mktemp()
     self.assertNotEqual(a, b)
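     # Added note: each mktemp() call returns a new unique candidate path, so
     # the two names are expected to differ.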
Example #44
0
def windows(*args, **kwargs):
    """
    NAME:
       windows
    PURPOSE:
       Generate model APOGEE spectra using Turbospectrum in selected wavelength windows (but the whole APOGEE spectral range is returned): this is a general routine that generates the non-continuum-normalized spectrum, convolves with the LSF and macroturbulence, and optionally continuum normalizes the output; use 'turbosynth' for a direct interface to Turbospectrum
    INPUT ARGUMENTS:
       Windows specification: Provide one of
          (1) Element string: the APOGEE windows for this element will be loaded
          (2) startindxs, endindxs= start and end indexes of the windows on the apStar wavelength grid
          (3) startlams, endlams= start and end wavelengths in \AA
       lists with abundance differences wrt the atmosphere (they don't all have to have the same length, missing ones are filled in with zeros):
          [Atomic number1,diff1_1,diff1_2,diff1_3,...,diff1_N]
          [Atomic number2,diff2_1,diff2_2,diff2_3,...,diff2_N]
          ...
          [Atomic numberM,diffM_1,diffM_2,diffM_3,...,diffM_N]
    INPUT KEYWORDS:
       BASELINE: you can specify the baseline spectrum and the continuous opacity to not always re-compute it
          baseline= baseline c-normalized spectrum on Turbospectrum wavelength grid (obtained from turbosynth)
          mwav= Turbospectrum wavelength grid (obtained from turbosynth)
          cflux= continuum flux from Turbospectrum
          modelopac= (None) 
                     (a) if set to an existing filename: assume babsma_lu has already been run and use this continuous opacity in bsyn_lu
                     (b) if set to a non-existing filename: store the continuous opacity in this file
          Typically, you can obtain these three keywords by doing (kwargs are the keywords you provide to this function as well, and includes modelopac='SOME FILENAME')
          >>> baseline= turbosynth(**kwargs)
          >>> mwav= baseline[0]
          >>> cflux= baseline[2]/baseline[1]
          >>> baseline= baseline[1]
       LSF:
          lsf= ('all') LSF to convolve with; output of apogee.spec.lsf.eval; sparsify for efficiency; if 'all' or 'combo' a pre-computed version will be downloaded from the web
          Either:
             xlsf= (None) pixel offset grid on which the LSF is computed (see apogee.spec.lsf.eval); unnecessary if lsf=='all' or 'combo'
             dxlsf= (None) spacing of pixel offsets
          vmacro= (6.) macroturbulence to apply
       CONTINUUM:
          cont= ('aspcap') continuum-normalization to apply:
             None: no continuum normalization
             'true': Use the true continuum
             'aspcap': Use the continuum normalization method of ASPCAP DR12
             'cannon': Normalize using continuum pixels derived from the Cannon
       SYNTHESIS:
          air= (True) if True, perform the synthesis in air wavelengths (output is still in vacuum); set to False at your own risk, as Turbospectrum expects the linelist in air wavelengths!)
          Hlinelist= (None) Hydrogen linelists to use; can be set to the path of a linelist file or to the name of an APOGEE linelist; if None, then we first search for the Hlinedata.vac in the APOGEE linelist directory (if air=False) or we use the internal Turbospectrum Hlinelist (if air=True)
          linelist= (None) molecular and atomic linelists to use; can be set to the path of a linelist file or to the name of an APOGEE linelist, or lists of such files; if a single filename is given, the code will first search for files with extensions '.atoms', '.molec' or that start with 'turboatoms.' and 'turbomolec.'
          wmin, wmax, dw= (15000.000, 17000.000, 0.10000000, 7.0000000) spectral synthesis limits, step, and width of calculation (see MOOG)
          costheta= (1.) cosine of the viewing angle
       MODEL ATMOSPHERE PARAMETERS:
          Specify one of the following:
             (a) modelatm= (None) model-atmosphere instance
             (b) parameters of a KURUCZ model atmosphere:
                 (1) teff= (4500) Teff
                     logg= (2.5) logg
                     metals= (0.) metallicity
                     cm= (0.) carbon-enhancement
                     am= (0.) alpha-enhancement
                 (2) fparam= standard ASPCAP output format
                 lib= ('kurucz_filled') model atmosphere library
          vmicro= (2.) microturbulence (only used if the MOOG-formatted atmosphere is not found) (can also be part of fparam)
       MISCELLANEOUS:
          dr= return the path corresponding to this data release
          raw= (False) if True, return the raw turbosynth output
    OUTPUT:
       spectra (nspec,nwave)
       (wavelengths,cont-norm. spectrum, spectrum (nwave)) if raw == True
    HISTORY:
       2015-04-17 - Written - Bovy (IAS)
    """
    # Pop some kwargs
    baseline = kwargs.pop('baseline', None)
    mwav = kwargs.pop('mwav', None)
    cflux = kwargs.pop('cflux', None)
    raw = kwargs.pop('raw', False)
    # Check that we have the LSF and store the relevant keywords
    lsf = kwargs.pop('lsf', 'all')
    if isinstance(lsf, str):
        xlsf, lsf = aplsf._load_precomp(dr=kwargs.get('dr', None), fiber=lsf)
        dxlsf = None
    else:
        xlsf = kwargs.pop('xlsf', None)
        dxlsf = kwargs.pop('dxlsf', None)
        if xlsf is None and dxlsf is None:
            raise ValueError(
                'xlsf= or dxlsf= input needs to be given if the LSF is given as an array'
            )
    vmacro = kwargs.pop('vmacro', 6.)
    # Parse continuum-normalization keywords
    cont = kwargs.pop('cont', 'aspcap')
    # Parse the wavelength regions
    apWave = apStarWavegrid()
    if isinstance(args[0], str):  #element string given
        si, ei = apwindow.waveregions(args[0], pad=3, asIndex=True)
        args = args[1:]
    else:
        if isinstance(args[0][0], int):  # assume index
            si, ei = args[0], args[1]
        else:  # assume wavelengths in \AA
            sl, el = args[0], args[1]
            # Convert to index
            si, ei = [], []
            for s, e in zip(sl, el):
                # Find closest index into apWave
                si.append(numpy.argmin(numpy.fabs(s - apWave)))
                ei.append(numpy.argmin(numpy.fabs(e - apWave)))
        args = args[2:]
    # Setup the model atmosphere
    modelatm = kwargs.pop('modelatm', None)
    # Parse fparam, if present
    fparam = kwargs.pop('fparam', None)
    if fparam is not None:
        kwargs['teff'] = fparam[0, paramIndx('TEFF')]
        kwargs['logg'] = fparam[0, paramIndx('LOGG')]
        kwargs['metals'] = fparam[0, paramIndx('METALS')]
        kwargs['am'] = fparam[0, paramIndx('ALPHA')]
        kwargs['cm'] = fparam[0, paramIndx('C')]
        kwargs['vmicro'] = 10.**fparam[0, paramIndx('LOG10VDOP')]
    # Need to pass a model atmosphere instance to turbosynth (needs to be made
    # more efficient, because now turbosynth always writes the atmosphere)
    if modelatm is None:  # Setup a model atmosphere
        modelatm = atlas9.Atlas9Atmosphere(teff=kwargs.get('teff', 4500.),
                                           logg=kwargs.get('logg', 2.5),
                                           metals=kwargs.get('metals', 0.),
                                           am=kwargs.get('am', 0.),
                                           cm=kwargs.get('cm', 0.),
                                           dr=kwargs.get('dr', None))
    if isinstance(modelatm, str) and os.path.exists(modelatm):
        raise ValueError(
            'modelatm= input is an existing filename, but you need to give an Atmosphere object instead'
        )
    elif isinstance(modelatm, str):
        raise ValueError('modelatm= input needs to be an Atmosphere instance')
    # Check temperature
    if modelatm._teff > 7000.:
        warnings.warn(
            'Turbospectrum does not include all necessary physics to model stars hotter than about 7000 K; proceed with caution',
            RuntimeWarning)
    kwargs['modelatm'] = modelatm
    try:
        rmModelopac = False
        if 'modelopac' not in kwargs:
            rmModelopac = True
            kwargs['modelopac'] = tempfile.mktemp('mopac')
            # Make sure opacity is first calculated over the full wav. range
            kwargs['babsma_wmin'] = 15000.
            kwargs['babsma_wmax'] = 17000.
        elif 'modelopac' in kwargs and not isinstance(kwargs['modelopac'],
                                                      str):
            raise ValueError('modelopac needs to be set to a filename')
        # Run synth for the whole wavelength range as a baseline
        if baseline is None or mwav is None or cflux is None:
            baseline = turbosynth(**kwargs)
            mwav = baseline[0]
            cflux = baseline[2] / baseline[1]
            baseline = baseline[1]
        elif isinstance(
                baseline, tuple
        ):  #probably accidentally gave the entire output of turbosynth
            mwav = baseline[0]
            cflux = baseline[2] / baseline[1]
            baseline = baseline[1]
        # Convert the apStarWavegrid windows to turboWavegrid regions
        sm, em = [], []
        for start, end in zip(si, ei):
            if kwargs.get('air', True):
                sm.append(
                    numpy.argmin(numpy.fabs(vac2air(apWave[start]) - mwav)))
                em.append(numpy.argmin(numpy.fabs(vac2air(apWave[end]) -
                                                  mwav)))
            else:
                sm.append(numpy.argmin(numpy.fabs(apWave[start] - mwav)))
                em.append(numpy.argmin(numpy.fabs(apWave[end] - mwav)))
        # Run Turbospectrum synth for all abundances and all windows
        if len(args) == 0:  #special case that there are *no* differences
            args = ([26, 0.], )
        nsynths = numpy.array([len(args[ii]) - 1 for ii in range(len(args))])
        nsynth = numpy.amax(nsynths)  #Take the longest abundance list
        out = numpy.tile(baseline, (nsynth, 1))
        # Run all windows
        for start, end in zip(sm, em):
            kwargs['wmin'] = mwav[start]
            kwargs['wmax'] = mwav[end] + 0.001
            for ii in range(nsynth):
                newargs = ()
                for jj in range(len(args)):
                    tab = [args[jj][0]]
                    if len(args[jj]) > ii + 1:
                        tab.append(args[jj][ii + 1])
                        newargs = newargs + (tab, )
                tmpOut = turbosynth(*newargs, **kwargs)
                if numpy.isnan(tmpOut[1][-1]):
                    # NaN returned for reasons that I don't understand
                    out[ii, start:end] = tmpOut[1][:-1]
                else:
                    out[ii, start:end + 1] = tmpOut[1]
    except:
        raise
    finally:
        if rmModelopac and os.path.exists(kwargs['modelopac']):
            os.remove(kwargs['modelopac'])
            kwargs.pop('modelopac')
    # Now multiply each continuum-normalized spectrum with the continuum
    out *= numpy.tile(cflux, (nsynth, 1))
    if raw: return (mwav, out / numpy.tile(cflux, (nsynth, 1)), out)
    # If the synthesis was done in air, convert wavelength array
    if kwargs.get('air', True):
        mwav = numpy.array([air2vac(w) for w in list(mwav)])
    # Now convolve with the LSF
    out = aplsf.convolve(mwav,
                         out,
                         lsf=lsf,
                         xlsf=xlsf,
                         dxlsf=dxlsf,
                         vmacro=vmacro)
    # Now continuum-normalize
    if cont.lower() == 'true':
        # Get the true continuum on the apStar wavelength grid
        apWave = apStarWavegrid()
        baseline = numpy.polynomial.Polynomial.fit(mwav, cflux, 4)
        ip = interpolate.InterpolatedUnivariateSpline(mwav,
                                                      cflux / baseline(mwav),
                                                      k=3)
        cflux = baseline(apWave) * ip(apWave)
        # Divide it out
        out /= numpy.tile(cflux, (nsynth, 1))
    elif cont is not None:
        cflux = apcont.fit(out, numpy.ones_like(out), type=cont)
        out[cflux > 0.] /= cflux[cflux > 0.]
        out[cflux <= 0.] = numpy.nan
    return out
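# Added usage sketch based on the docstring above (argument values are
# illustrative assumptions, not taken from the APOGEE package):
#   spec = windows('Fe', teff=4750., logg=2.5, metals=0., am=0., cm=0.,
#                  lsf='all', cont='aspcap')
# would synthesize the Fe windows for a Kurucz model atmosphere and return the
# LSF-convolved, continuum-normalized spectra on the apStar wavelength grid.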
Example #45
0
 def setUp(self):
     state_path = tempfile.mktemp(suffix=self.id())
     luigi.configuration.get_config().set('scheduler', 'state_path',
                                          state_path)
     self.start_server()
Example #46
0
def x():
    return tempfile.mktemp()
Example #47
0
# 0 given a bare settings
settings = LazySettings(environments=True)

# 1 Ensure all renamed vars exists in object
for old, new in RENAMED_VARS.items():
    assert old in settings
    assert new in settings

# 2 Ensure pairs has the same value
for old, new in RENAMED_VARS.items():
    assert settings.get(old) == settings.get(new)
    assert getattr(settings, new) == getattr(settings, old)

tempdir = tempfile.mkdtemp()
temptoml = tempfile.mktemp(".toml", dir=tempdir)
temptomlfilename = os.path.basename(temptoml)
temppy = tempfile.mktemp(".py", dir=tempdir)
temppyfilename = os.path.basename(temppy)
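# Added note: tempfile.mktemp() only returns candidate paths -- it does not
# create the files -- so the .toml/.py settings files above still have to be
# written before Dynaconf can load anything from them.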

# 0 given a full old-style configured setting
settings = LazySettings(
    environments=True,
    DYNACONF_NAMESPACE="FOO",
    DYNACONF_SETTINGS_MODULE=str(temptoml),
    PROJECT_ROOT=str(tempdir),
    DYNACONF_SILENT_ERRORS=False,
    DYNACONF_ALWAYS_FRESH_VARS=["baz", "zaz", "caz"],
    BASE_NAMESPACE_FOR_DYNACONF="original",
    GLOBAL_ENV_FOR_DYNACONF="RAZAMANAZ",
)
Example #48
0
File: cli.py Project: zbww/raiden
def smoketest(ctx, debug, **kwargs):
    """ Test, that the raiden installation is sane.
    """
    from raiden.api.python import RaidenAPI
    from raiden.blockchain.abi import get_static_or_compile
    from raiden.utils import get_contract_path

    # Check the solidity compiler early in the smoketest.
    #
    # Binary distributions don't need the solidity compiler but source
    # distributions do. Since this is checked by `get_static_or_compile`
    # function, use it as a proxy for validating the setup.
    get_static_or_compile(
        get_contract_path('HumanStandardToken.sol'),
        'HumanStandardToken',
    )

    report_file = tempfile.mktemp(suffix='.log')
    open(report_file, 'w+')
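    # Added note: this open() call just creates/truncates the report file; the
    # returned handle is discarded, and append_report() below reopens the path
    # in append mode for every write.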

    def append_report(subject, data):
        with open(report_file, 'a') as handler:
            handler.write('{:=^80}'.format(' %s ' % subject.upper()) +
                          os.linesep)
            if data is not None:
                handler.writelines([(data + os.linesep).encode('utf-8')])

    append_report('raiden version', json.dumps(get_system_spec()))
    append_report('raiden log', None)

    print('[1/5] getting smoketest configuration')
    smoketest_config = load_or_create_smoketest_config()

    print('[2/5] starting ethereum')
    ethereum, ethereum_config = start_ethereum(smoketest_config['genesis'])

    print('[3/5] starting raiden')

    # setup logging to log only into our report file
    slogging.configure(':DEBUG', log_file=report_file)
    root = slogging.getLogger()
    for handler in root.handlers:
        if isinstance(handler, slogging.logging.StreamHandler):
            root.handlers.remove(handler)
            break
    # setup cli arguments for starting raiden
    args = dict(
        discovery_contract_address=smoketest_config['contracts']
        ['discovery_address'],
        registry_contract_address=smoketest_config['contracts']
        ['registry_address'],
        eth_rpc_endpoint='http://127.0.0.1:{}'.format(ethereum_config['rpc']),
        keystore_path=ethereum_config['keystore'],
        address=ethereum_config['address'],
    )
    for option in app.params:
        if option.name in args.keys():
            args[option.name] = option.process_value(ctx, args[option.name])
        else:
            args[option.name] = option.default

    password_file = os.path.join(args['keystore_path'], 'password')
    with open(password_file, 'w') as handler:
        handler.write('password')

    args['mapped_socket'] = None
    args['password_file'] = click.File()(password_file)
    args['datadir'] = args['keystore_path']
    args['api_address'] = 'localhost:' + str(
        next(get_free_port('127.0.0.1', 5001)))
    args['sync_check'] = False

    # invoke the raiden app
    app_ = ctx.invoke(app, **args)

    raiden_api = RaidenAPI(app_.raiden)
    rest_api = RestAPI(raiden_api)
    api_server = APIServer(rest_api)
    (api_host, api_port) = split_endpoint(args['api_address'])
    api_server.start(api_host, api_port)

    success = False
    try:
        print('[4/5] running smoketests...')
        error = run_smoketests(app_.raiden, smoketest_config, debug=debug)
        if error is not None:
            append_report('smoketest assertion error', error)
        else:
            success = True
    finally:
        app_.stop()
        ethereum.send_signal(2)

        out, err = ethereum.communicate()
        append_report('geth init stdout',
                      ethereum_config['init_log_out'].decode('utf-8'))
        append_report('geth init stderr',
                      ethereum_config['init_log_err'].decode('utf-8'))
        append_report('ethereum stdout', out)
        append_report('ethereum stderr', err)
        append_report('smoketest configuration', json.dumps(smoketest_config))
    if success:
        print('[5/5] smoketest successful, report was written to {}'.format(
            report_file))
    else:
        print('[5/5] smoketest had errors, report was written to {}'.format(
            report_file))
        sys.exit(1)
Example #49
0
    def get_text_file(self):
        cr, uid, context = self.env.args
        if context is None:
            context = {}
        data = {}
        wiz_data = self.read([])
        if wiz_data:
            data = wiz_data[0]
        context = dict(context)
        start_date = data.get('start_date', False)
        end_date = data.get('end_date', False)
        if start_date >= end_date:
            raise ValidationError(
                _("You must be enter start date less than end date !"))
        context.update({
            'branch_number': data.get('branch_number'),
            'account_number': data.get("account_number"),
            'end_date': data.get('end_date'),
            'start_date': data.get('start_date'),
            'value_date': data.get("value_date"),
            'batch_number': data.get("batch_number"),
            'transaction_code': data.get("transaction_code"),
            'on_behalf_of': data.get("on_behalf_of"),
            'clearing': data.get("clearing"),
            'reference_no': data.get("reference_no"),
            'payment_detail': data.get("payment_detail"),
            'purpose_code': data.get("purpose_code"),
            'invoice_send': data.get("invoice_send"),
            'invoice_send_numebr': data.get("invoice_send_numebr"),
            'record_type': data.get("record_type"),
            'invoice_detail': data.get("invoice_detail"),
            'value_time': data.get("value_time"),
            'submission_date': data.get('submission_date'),
            'debtors_reference': data.get('debtors_reference')
        })
        user_data = self.env["res.users"].browse(uid)
        context.update({'company_name': user_data.company_id.name})
        tgz_tmp_filename = tempfile.mktemp('.txt')
        tmp_file = open(tgz_tmp_filename, "w")
        try:
            if not start_date and end_date:
                return False
#            period_rec = period_obj.browse(cr, uid, context.get("month"), context = context)
            current_date = datetime.today()
            year_month_date = current_date.strftime('%Y%m%d')

            header2_record = ''
            #            header1_record = 'OVERSEA-CHINESE BANKING CORP GROUP'.ljust(34) + \
            #                            ''.ljust(24) + \
            #                            '&&&&&&&&&&&&&&&&&&&&&&&&'.ljust(24) + \
            #                            '\r\n'
            #            tmp_file.write(header1_record)
            header2_record += ('%02s' %
                               context.get('transaction_code')).ljust(2)
            header2_record += ('%03d' % context.get('batch_number')).ljust(3)
            if context.get("submission_date"):
                submission_date = datetime.strptime(
                    context.get("submission_date"), "%Y-%m-%d")
                header2_record += submission_date.strftime('%Y%m%d').ljust(8)
            else:
                header2_record += "".ljust(8)
            emp_ids = self.env['hr.employee'].search(
                [('bank_account_id', '!=', False)], order="name")
            for employee in emp_ids:
                bank_code = employee.bank_account_id and employee.bank_account_id.bank_bic or ''
                if bank_code.__len__() <= 4:
                    header2_record += bank_code.ljust(11, '0')
                else:
                    header2_record += bank_code[0:11].ljust(11)
                header2_record += ('%34d' %
                                   context.get('account_number')).ljust(34)
                header2_record += ''.ljust(3)
                if context.get('on_behalf_of'):
                    header2_record += ('%20s' %
                                       context.get('on_behalf_of')).ljust(20)
                else:
                    header2_record += "".ljust(20)
                header2_record += ''.ljust(120)
                header2_record += ''.ljust(4)
                header2_record += ('%4s' % context.get('clearing')).ljust(4)
                if context.get('reference_no'):
                    header2_record += ('%16s' %
                                       context.get('reference_no')).ljust(16)
                else:
                    header2_record += "".ljust(16)
#            header2_record += ('%03d' % context.get('batch_number')).ljust(3)
#            company_name = context.get("company_name", '')
#            if company_name.__len__() <= 20:
#                company_name = company_name.ljust(20)
#            else:
#                company_name = company_name[0:20].ljust(20)
#            header2_record += company_name
                if context.get('value_date'):
                    value_date = datetime.strptime(context.get("value_date"),
                                                   "%Y-%m-%d")
                    header2_record += value_date.strftime('%Y%m%d').ljust(8)
                else:
                    header2_record += "".ljust(8)
                header2_record += ('%4d' % context.get('value_time')).ljust(4)
                header2_record += ''.ljust(1)
                header2_record += ''.ljust(762)
#            value_date = datetime.strptime(context.get("value_date"), "%Y-%m-%d")
#            header2_record += value_date.strftime('%Y%m%d').ljust(8) + ''.ljust(31)
            header2_record += '\r\n'
            tmp_file.write(header2_record)
            emp_ids = self.env['hr.employee'].search(
                [('bank_account_id', '!=', False)], order="name")
            serial_number = 50000
            payment_detail = ''
            for employee in emp_ids:
                if not employee.bank_account_id:
                    raise ValidationError(
                        _('There is no bank account defined for employee %s.' %
                          (employee.name)))
#                if not employee.gender:
#                    raise ValidationError(_('There is no gender define for %s employee.' % (employee.name)))
#                if not employee.birthday:
#                    raise ValidationError(_('There is no birth date define for %s employee.' % (employee.name)))
#                if not employee.identification_id:
#                    raise ValidationError(_('There is no identification no define for %s employee.' % (employee.name)))
#                if not employee.work_phone or not employee.work_email:
#                    raise ValidationError(_('You must be configure Contact no or email for %s employee.' % (employee.name)))
                payslip_id = self.env['hr.payslip'].search([
                    ('employee_id', '=', employee.id),
                    ('pay_by_cheque', '=', False),
                    ('date_from', '>=', start_date),
                    ('date_to', '<=', end_date),
                    ('state', 'in', ['draft', 'done', 'verify'])
                ])
                if not payslip_id:
                    raise ValidationError(
                        _('There is no payslip from %s to %s for employee %s.'
                          % (start_date, end_date, employee.name)))
#                if serial_number == 99999:
#                    serial_number = 50001
#                payment_detail += str(serial_number).ljust(5)
                bank_code = employee.bank_account_id and employee.bank_account_id.bank_bic or ''
                if bank_code.__len__() == 8:
                    payment_detail += bank_code.rjust(11, 'X')
                else:
                    payment_detail += bank_code[0:11].ljust(11)
#                emp_branch_code = employee.bank_account_id and employee.bank_account_id.branch_id or ''
#                if emp_branch_code.__len__() <= 3:
#                    payment_detail += emp_branch_code.rjust(3, '0')
#                else:
#                    payment_detail += emp_branch_code[0:3].ljust(3)
                emp_bank_ac_no = employee.bank_account_id and employee.bank_account_id.acc_number or ''
                if emp_bank_ac_no.__len__() <= 34:
                    payment_detail += emp_bank_ac_no.ljust(34, ' ')
                else:
                    payment_detail += emp_bank_ac_no[0:34].ljust(34)
#                emp_bank_ac_no = employee.bank_account_id and employee.bank_account_id.acc_number or ''
#                if emp_bank_ac_no.__len__() <= 11:
#                    payment_detail += emp_bank_ac_no.ljust(11, ' ')
#                else:
#                    payment_detail += emp_bank_ac_no[0:11].ljust(11)
#                emp_bank_name = employee.bank_account_id and employee.bank_account_id.owner_name or ''
#                if emp_bank_name:
#                    if emp_bank_name.__len__() <= 20:
#                        payment_detail += emp_bank_name.ljust(20)
#                    else:
#                        payment_detail += emp_bank_name[0:20].ljust(20)
#                else:
#                    if employee.name.__len__() <= 20:
#                        payment_detail += employee.name.ljust(20)
#                    else:
#                        payment_detail += employee.name[0:20].ljust(20)
                emp_bank_name = employee.bank_account_id and employee.bank_account_id.partner_id.name or ''
                if context.get('clearing') == 'giro':
                    if emp_bank_name:
                        if emp_bank_name.__len__() <= 140:
                            payment_detail += emp_bank_name.ljust(140)
                        else:
                            payment_detail += emp_bank_name[0:140].ljust(140)
                    else:
                        if employee.name.__len__() <= 140:
                            payment_detail += employee.name.ljust(140)
                        else:
                            payment_detail += employee.name[0:140].ljust(140)
                    payment_detail += ''.ljust(3)
                else:
                    if emp_bank_name:
                        if emp_bank_name.__len__() <= 35:
                            payment_detail += emp_bank_name.ljust(140)
                        else:
                            payment_detail += emp_bank_name[0:35].ljust(140)
                    else:
                        if employee.name.__len__() <= 35:
                            payment_detail += employee.name.ljust(140)
                        else:
                            payment_detail += employee.name[0:35].ljust(
                                140) + ''.ljust(3)
                total_amount = 0
                for line in self.env['hr.payslip'].browse(
                        payslip_id.ids[0]).line_ids:
                    if line.code == "NET":
                        total_amount = line.amount
                if total_amount:
                    total_amount = total_amount * 100
                    total_amount = int(round(total_amount))
                    payment_detail += ('%017d' % total_amount).ljust(17)
                else:
                    payment_detail += ('%017d' % 0).ljust(17)
                if context.get('payment_detail'):
                    payment_detail += ('%35s' %
                                       context.get('payment_detail')).ljust(35)
                else:
                    payment_detail += "".ljust(35)
                if context.get('purpose_code'):
                    payment_detail += ('%04s' %
                                       context.get('purpose_code')).ljust(4)
                else:
                    payment_detail += "".ljust(4)
                if context.get('purpose_code') == 'COLL':
                    payment_detail += (
                        '%35s' % context.get('debtors_reference')).rjust(35)
                else:
                    payment_detail += ''.rjust(35)
                payment_detail += "".ljust(140)
                payment_detail += "".ljust(140)
                if context.get('invoice_send'):
                    payment_detail += ('%01s' %
                                       context.get('invoice_send')).ljust(1)
                else:
                    payment_detail += "".ljust(1)
                if context.get('invoice_send_numebr'):
                    payment_detail += (
                        '%255s' %
                        context.get('invoice_send_numebr')).ljust(255)
                else:
                    payment_detail += "".ljust(255)
                payment_detail += "".ljust(185)
                payment_detail += '\r\n'
                serial_number += 1
            tmp_file.write(payment_detail)
            invoice_detail = ''
            if context.get('record_type'):
                invoice_detail += ('%03s' %
                                   context.get('record_type')[0:3]).ljust(3)
            else:
                invoice_detail += "".ljust(3)
            if context.get('invoice_detail'):
                invoice_detail += ('%97s' %
                                   context.get('invoice_detail')).ljust(97)
            else:
                invoice_detail += "".ljust(97)
            invoice_detail += '\r\n'
            tmp_file.write(invoice_detail)

        finally:
            tmp_file.close()
        file = open(tgz_tmp_filename, "rb")
        out = file.read()
        file.close()
        res = base64.b64encode(out)
        if not end_date:
            return 'ocbc_txt_file.txt'
        end_date = datetime.strptime(end_date, DEFAULT_SERVER_DATE_FORMAT)
        monthyear = end_date.strftime('%b%Y')
        file_name = 'ocbc_txt_file_' + monthyear + '.txt'
        module_rec = self.env['binary.ocbc.bank.file.wizard'].create({
            'name':
            file_name,
            'cpf_txt_file':
            res
        })
        return {
            'name': _('OCBC Text file'),
            'res_id': module_rec.id,
            'view_type': 'form',
            "view_mode": 'form',
            'res_model': 'binary.ocbc.bank.file.wizard',
            'type': 'ir.actions.act_window',
            'target': 'new',
            'context': context
        }
Example #50
0
 def __enter__(self):
     self.temp_path = tempfile.mktemp(prefix=gs.TMP_PREFIX)
     return self.temp_path
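Example #50 shows only the __enter__ half of a context manager. A self-contained sketch of what the full class could look like is below; the "tmp_" prefix (standing in for gs.TMP_PREFIX) and the cleanup-on-exit policy are assumptions, not code from that project.

import os
import shutil
import tempfile

class TempPath:
    def __enter__(self):
        # hand out a not-yet-existing temporary path, as in Example #50
        self.temp_path = tempfile.mktemp(prefix="tmp_")
        return self.temp_path

    def __exit__(self, exc_type, exc_value, traceback):
        # remove whatever the with-block created at that path, if anything
        if os.path.isdir(self.temp_path):
            shutil.rmtree(self.temp_path, ignore_errors=True)
        elif os.path.exists(self.temp_path):
            os.remove(self.temp_path)

with TempPath() as path:
    with open(path, "w") as fh:
        fh.write("scratch data")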
Example #51
0
    def test_external_program_simulator(self):
        stc = self.form.simulator_tab_controller
        stc.ui.btnAddParticipant.click()
        stc.ui.btnAddParticipant.click()

        stc.simulator_scene.add_counter_action(None, 0)
        action = next(item for item in stc.simulator_scene.items() if isinstance(item, CounterActionItem))
        action.model_item.start = 3
        action.model_item.step = 2
        counter_item_str = "item" + str(action.model_item.index()) + ".counter_value"

        stc.ui.gvSimulator.add_empty_message(42)
        stc.ui.gvSimulator.add_empty_message(42)

        stc.ui.cbViewType.setCurrentIndex(0)
        stc.create_simulator_label(0, 10, 20)
        stc.create_simulator_label(1, 10, 20)

        messages = stc.simulator_config.get_all_messages()
        messages[0].source = stc.project_manager.participants[0]
        messages[0].destination = stc.project_manager.participants[1]
        messages[0].destination.simulate = True
        messages[1].source = stc.project_manager.participants[1]
        messages[1].destination = stc.project_manager.participants[0]

        stc.simulator_scene.add_trigger_command_action(None, 200)
        stc.simulator_scene.add_sleep_action(None, 200)

        lbl1 = messages[0].message_type[0]  # type: SimulatorProtocolLabel
        lbl2 = messages[1].message_type[0]  # type: SimulatorProtocolLabel

        ext_program = get_path_for_data_file("external_program_simulator.py") + " " + counter_item_str
        if sys.platform == "win32":
            ext_program = "python " + ext_program

        lbl1.value_type_index = 3
        lbl1.external_program = ext_program
        lbl2.value_type_index = 3
        lbl2.external_program = ext_program

        action = next(item for item in stc.simulator_scene.items() if isinstance(item, SleepActionItem))
        action.model_item.sleep_time = 0.001
        stc.simulator_scene.clearSelection()
        action = next(item for item in stc.simulator_scene.items() if isinstance(item, TriggerCommandActionItem))
        action.setSelected(True)
        self.assertEqual(stc.ui.detail_view_widget.currentIndex(), 4)
        fname = tempfile.mktemp()
        self.assertFalse(os.path.isfile(fname))
        external_command = "cmd.exe /C copy NUL {}".format(fname) if os.name == "nt" else "touch {}".format(fname)
        stc.ui.lineEditTriggerCommand.setText(external_command)
        self.assertEqual(action.model_item.command, external_command)

        port = self.get_free_port()
        self.alice = NetworkSDRInterfacePlugin(raw_mode=True)
        self.alice.client_port = port

        dialog = stc.get_simulator_dialog()
        name = NetworkSDRInterfacePlugin.NETWORK_SDR_NAME
        dialog.device_settings_rx_widget.ui.cbDevice.setCurrentText(name)
        dialog.device_settings_tx_widget.ui.cbDevice.setCurrentText(name)
        QTest.qWait(10)
        simulator = dialog.simulator
        simulator.sniffer.rcv_device.set_server_port(port)

        port = self.get_free_port()
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        s.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        s.bind(("", port))
        s.listen(1)
        QTest.qWait(10)

        simulator.sender.device.set_client_port(port)
        dialog.ui.btnStartStop.click()
        QTest.qWait(1500)

        conn, addr = s.accept()

        modulator = dialog.project_manager.modulators[0]  # type: Modulator

        self.alice.send_raw_data(modulator.modulate("100"+"10101010"*42), 1)
        time.sleep(0.1)
        self.alice.send_raw_data(np.zeros(self.num_zeros_for_pause, dtype=np.complex64), 1)

        bits = self.__demodulate(conn)
        self.assertEqual(bits[0].rstrip("0"), "101010101")
        time.sleep(0.5)

        QTest.qWait(1000)
        self.assertTrue(simulator.simulation_is_finished())

        conn.close()
        s.close()

        QTest.qWait(100)

        self.assertTrue(os.path.isfile(fname))
Example #52
0
def temporary_filename():
    filename = tempfile.mktemp()
    try:
        yield filename
    finally:
        support.unlink(filename)
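Example #52 is written as a generator; in its original context it is presumably wrapped with contextlib.contextmanager. A runnable sketch of that pattern, with the support.unlink() call replaced by a plain os.unlink() guard, might look like this:

import contextlib
import os
import tempfile

@contextlib.contextmanager
def temporary_filename():
    filename = tempfile.mktemp()
    try:
        yield filename
    finally:
        # remove the file only if the with-block actually created it
        if os.path.exists(filename):
            os.unlink(filename)

with temporary_filename() as name:
    with open(name, "w") as fh:
        fh.write("scratch data")
# the file is gone once the with-block exits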
Example #53
0
        p.cmd('dc')
    password = (''.join(chr(i) for i in res))
    password = password[1:]
    return password

import tempfile
import base64
import time
import os, stat

def make_exec(file):
    os.chmod(file, stat.S_IXUSR | stat.S_IRUSR | stat.S_IWUSR)

# p = process('./server.py')
p = remote('13.234.59.79', 6000)
fil = tempfile.mktemp()
print(fil)
while True:
    t = p.recvline().strip()
    if b'flag' in t:
        print(t)
        break
    t = (base64.b64decode(t))
    # print(fil)
    try:
        os.remove(fil)
    except FileNotFoundError:
        print('asdf')
        pass
    open(fil, 'wb').write(t)
    make_exec(fil)
Example #54
0
    def _do_test_fetch(self, remote, rw_repo, remote_repo):
        # specialized fetch testing to de-clutter the main test
        self._do_test_fetch_info(rw_repo)

        def fetch_and_test(remote, **kwargs):
            progress = TestRemoteProgress()
            kwargs['progress'] = progress
            res = remote.fetch(**kwargs)
            progress.make_assertion()
            self._do_test_fetch_result(res, remote)
            return res
        # END fetch and check

        def get_info(res, remote, name):
            return res["%s/%s" % (remote, name)]

        # put remote head to master as it is guaranteed to exist
        remote_repo.head.reference = remote_repo.heads.master

        res = fetch_and_test(remote)
        # all up to date
        for info in res:
            assert info.flags & info.HEAD_UPTODATE

        # rewind remote head to trigger rejection
        # index must be false as remote is a bare repo
        rhead = remote_repo.head
        remote_commit = rhead.commit
        rhead.reset("HEAD~2", index=False)
        res = fetch_and_test(remote)
        mkey = "%s/%s" % (remote, 'master')
        master_info = res[mkey]
        assert master_info.flags & FetchInfo.FORCED_UPDATE and master_info.note is not None

        # normal fast forward - set head back to previous one
        rhead.commit = remote_commit
        res = fetch_and_test(remote)
        assert res[mkey].flags & FetchInfo.FAST_FORWARD

        # new remote branch
        new_remote_branch = Head.create(remote_repo, "new_branch")
        res = fetch_and_test(remote)
        new_branch_info = get_info(res, remote, new_remote_branch)
        assert new_branch_info.flags & FetchInfo.NEW_HEAD

        # remote branch rename ( causes creation of a new one locally )
        new_remote_branch.rename("other_branch_name")
        res = fetch_and_test(remote)
        other_branch_info = get_info(res, remote, new_remote_branch)
        assert other_branch_info.ref.commit == new_branch_info.ref.commit

        # remove new branch
        Head.delete(new_remote_branch.repo, new_remote_branch)
        res = fetch_and_test(remote)
        # deleted remote will not be fetched
        self.failUnlessRaises(IndexError, get_info, res, remote, new_remote_branch)

        # prune stale tracking branches
        stale_refs = remote.stale_refs
        assert len(stale_refs) == 2 and isinstance(stale_refs[0], RemoteReference)
        RemoteReference.delete(rw_repo, *stale_refs)

        # test single branch fetch with refspec including target remote
        res = fetch_and_test(remote, refspec="master:refs/remotes/%s/master" % remote)
        assert len(res) == 1 and get_info(res, remote, 'master')

        # ... with refspec and no target
        res = fetch_and_test(remote, refspec='master')
        assert len(res) == 1

        # ... multiple refspecs ... works, but git command returns with error if one ref is wrong without
        # doing anything. This is new in later binaries
        # res = fetch_and_test(remote, refspec=['master', 'fred'])
        # assert len(res) == 1

        # add new tag reference
        rtag = TagReference.create(remote_repo, "1.0-RV_hello.there")
        res = fetch_and_test(remote, tags=True)
        tinfo = res[str(rtag)]
        assert isinstance(tinfo.ref, TagReference) and tinfo.ref.commit == rtag.commit
        assert tinfo.flags & tinfo.NEW_TAG

        # adjust tag commit
        Reference.set_object(rtag, rhead.commit.parents[0].parents[0])
        res = fetch_and_test(remote, tags=True)
        tinfo = res[str(rtag)]
        assert tinfo.commit == rtag.commit
        assert tinfo.flags & tinfo.TAG_UPDATE

        # delete remote tag - local one will stay
        TagReference.delete(remote_repo, rtag)
        res = fetch_and_test(remote, tags=True)
        self.failUnlessRaises(IndexError, get_info, res, remote, str(rtag))

        # provoke to receive actual objects to see what kind of output we have to
        # expect. For that we need a remote transport protocol
        # Create a new UN-shared repo and fetch into it after we pushed a change
        # to the shared repo
        other_repo_dir = tempfile.mktemp("other_repo")
        # must clone with a local path for the repo implementation not to freak out
        # as it wants local paths only ( which I can understand )
        other_repo = remote_repo.clone(other_repo_dir, shared=False)
        remote_repo_url = "git://localhost%s" % remote_repo.git_dir

        # put origin to git-url
        other_origin = other_repo.remotes.origin
        other_origin.config_writer.set("url", remote_repo_url)
        # it automatically creates alternates as remote_repo is shared as well.
        # It will use the transport though and ignore alternates when fetching
        # assert not other_repo.alternates  # this would fail

        # assure we are in the right state
        rw_repo.head.reset(remote.refs.master, working_tree=True)
        try:
            self._commit_random_file(rw_repo)
            remote.push(rw_repo.head.reference)

            # here I would expect to see remote-information about packing
            # objects and so on. Unfortunately, this does not happen
            # if we are redirecting the output - git explicitly checks for this
            # and only provides progress information to ttys
            res = fetch_and_test(other_origin)
        finally:
            shutil.rmtree(other_repo_dir)
Example #55
0
    def _generate(self, cr, uid, context):
        module_model = self.pool.get('ir.module.module')
        module_ids = context['active_ids']

        module_index = []

        # create a temporary gzipped tarfile:
        tgz_tmp_filename = tempfile.mktemp('_rst_module_doc.tgz')
        try:
            tarf = tarfile.open(tgz_tmp_filename, 'w:gz')

            modules = module_model.browse(cr, uid, module_ids)
            for module in modules:
                index_dict = {
                    'name': module.name,
                    'shortdesc': module.shortdesc,
                }
                module_index.append(index_dict)

                objects = self._get_objects(cr, uid, module)
                module.test_views = self._get_views(cr,
                                                    uid,
                                                    module.id,
                                                    context=context)
                rstdoc = RstDoc(module, objects)

                # Append Relationship Graph on rst
                graph_mod = False
                module_name = False
                if module.file_graph:
                    graph_mod = base64.decodestring(module.file_graph)
                else:
                    module_data = module_model.get_relation_graph(
                        cr, uid, module.name, context=context)
                    if module_data['module_file']:
                        graph_mod = base64.decodestring(
                            module_data['module_file'])
                if graph_mod:
                    module_name = module.name
                    try:
                        tmp_file_graph = tempfile.NamedTemporaryFile()
                        tmp_file_graph.write(graph_mod)
                        tmp_file_graph.file.flush()
                        tarf.add(tmp_file_graph.name,
                                 arcname=module.name + '_module.png')
                    finally:
                        tmp_file_graph.close()

                out = rstdoc.write(module_name)
                try:
                    tmp_file = tempfile.NamedTemporaryFile()
                    tmp_file.write(out.encode('utf8'))
                    tmp_file.file.flush()  # write content to file
                    tarf.add(tmp_file.name, arcname=module.name + '.rst')
                finally:
                    tmp_file.close()

            # write index file:
            tmp_file = tempfile.NamedTemporaryFile()
            out = self._create_index(module_index)
            tmp_file.write(out.encode('utf8'))
            tmp_file.file.flush()
            tarf.add(tmp_file.name, arcname='index.rst')
        finally:
            tarf.close()

        f = open(tgz_tmp_filename, 'rb')
        out = f.read()
        f.close()

        if os.path.exists(tgz_tmp_filename):
            try:
                os.unlink(tgz_tmp_filename)
            except Exception, e:
                logger = netsvc.Logger()
                msg = "Temporary file %s could not be deleted. (%s)" % (
                    tgz_tmp_filename, e)
                logger.notifyChannel("warning", netsvc.LOG_WARNING, msg)
Example #56
0
def byte_compile(py_files,
                 optimize=0,
                 force=0,
                 prefix=None,
                 base_dir=None,
                 verbose=1,
                 dry_run=0,
                 direct=None):
    """Byte-compile a collection of Python source files to .pyc
    files in a __pycache__ subdirectory.  'py_files' is a list
    of files to compile; any files that don't end in ".py" are silently
    skipped.  'optimize' must be one of the following:
      0 - don't optimize
      1 - normal optimization (like "python -O")
      2 - extra optimization (like "python -OO")
    If 'force' is true, all files are recompiled regardless of
    timestamps.

    The source filename encoded in each bytecode file defaults to the
    filenames listed in 'py_files'; you can modify these with 'prefix' and
    'basedir'.  'prefix' is a string that will be stripped off of each
    source filename, and 'base_dir' is a directory name that will be
    prepended (after 'prefix' is stripped).  You can supply either or both
    (or neither) of 'prefix' and 'base_dir', as you wish.

    If 'dry_run' is true, doesn't actually do anything that would
    affect the filesystem.

    Byte-compilation is either done directly in this interpreter process
    with the standard py_compile module, or indirectly by writing a
    temporary script and executing it.  Normally, you should let
    'byte_compile()' figure out to use direct compilation or not (see
    the source for details).  The 'direct' flag is used by the script
    generated in indirect mode; unless you know what you're doing, leave
    it set to None.
    """

    # Late import to fix a bootstrap issue: _posixsubprocess is built by
    # setup.py, but setup.py uses distutils.
    import subprocess

    # nothing is done if sys.dont_write_bytecode is True
    if sys.dont_write_bytecode:
        raise DistutilsByteCompileError('byte-compiling is disabled.')

    # First, if the caller didn't force us into direct or indirect mode,
    # figure out which mode we should be in.  We take a conservative
    # approach: choose direct mode *only* if the current interpreter is
    # in debug mode and optimize is 0.  If we're not in debug mode (-O
    # or -OO), we don't know which level of optimization this
    # interpreter is running with, so we can't do direct
    # byte-compilation and be certain that it's the right thing.  Thus,
    # always compile indirectly if the current interpreter is in either
    # optimize mode, or if either optimization level was requested by
    # the caller.
    if direct is None:
        direct = (__debug__ and optimize == 0)

    # "Indirect" byte-compilation: write a temporary script and then
    # run it with the appropriate flags.
    if not direct:
        try:
            from tempfile import mkstemp
            (script_fd, script_name) = mkstemp(".py")
        except ImportError:
            from tempfile import mktemp
            (script_fd, script_name) = None, mktemp(".py")
        log.info("writing byte-compilation script '%s'", script_name)
        if not dry_run:
            if script_fd is not None:
                script = os.fdopen(script_fd, "w")
            else:
                script = open(script_name, "w")

            with script:
                script.write("""\
from distutils.util import byte_compile
files = [
""")

                # XXX would be nice to write absolute filenames, just for
                # safety's sake (script should be more robust in the face of
                # chdir'ing before running it).  But this requires abspath'ing
                # 'prefix' as well, and that breaks the hack in build_lib's
                # 'byte_compile()' method that carefully tacks on a trailing
                # slash (os.sep really) to make sure the prefix here is "just
                # right".  This whole prefix business is rather delicate -- the
                # problem is that it's really a directory, but I'm treating it
                # as a dumb string, so trailing slashes and so forth matter.

                #py_files = map(os.path.abspath, py_files)
                #if prefix:
                #    prefix = os.path.abspath(prefix)

                script.write(",\n".join(map(repr, py_files)) + "]\n")
                script.write("""
byte_compile(files, optimize=%r, force=%r,
             prefix=%r, base_dir=%r,
             verbose=%r, dry_run=0,
             direct=1)
""" % (optimize, force, prefix, base_dir, verbose))

        cmd = [sys.executable]
        cmd.extend(subprocess._optim_args_from_interpreter_flags())
        cmd.append(script_name)
        spawn(cmd, dry_run=dry_run)
        execute(os.remove, (script_name, ),
                "removing %s" % script_name,
                dry_run=dry_run)

    # "Direct" byte-compilation: use the py_compile module to compile
    # right here, right now.  Note that the script generated in indirect
    # mode simply calls 'byte_compile()' in direct mode, a weird sort of
    # cross-process recursion.  Hey, it works!
    else:
        from py_compile import compile

        for file in py_files:
            if file[-3:] != ".py":
                # This lets us be lazy and not filter filenames in
                # the "install_lib" command.
                continue

            # Terminology from the py_compile module:
            #   cfile - byte-compiled file
            #   dfile - purported source filename (same as 'file' by default)
            if optimize >= 0:
                opt = '' if optimize == 0 else optimize
                cfile = importlib.util.cache_from_source(file,
                                                         optimization=opt)
            else:
                cfile = importlib.util.cache_from_source(file)
            dfile = file
            if prefix:
                if file[:len(prefix)] != prefix:
                    raise ValueError(
                        "invalid prefix: filename %r doesn't start with %r" %
                        (file, prefix))
                dfile = dfile[len(prefix):]
            if base_dir:
                dfile = os.path.join(base_dir, dfile)

            cfile_base = os.path.basename(cfile)
            if direct:
                if force or newer(file, cfile):
                    log.info("byte-compiling %s to %s", file, cfile_base)
                    if not dry_run:
                        compile(file, cfile, dfile)
                else:
                    log.debug("skipping byte-compilation of %s to %s", file,
                              cfile_base)
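A quick usage sketch for the byte_compile() helper above: "example.py" is a placeholder filename, and dry_run=1 keeps the call from actually compiling anything, so this is illustrative only.

from distutils.util import byte_compile

# with direct=None the function itself decides between direct and indirect
# compilation, as explained in the docstring above
byte_compile(["example.py"], optimize=0, force=1, verbose=1, dry_run=1)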
Example #57
0
 def setUp(self):
     self._old_ckpt_path = tempfile.mktemp()
     self._new_ckpt_path = tempfile.mktemp()
     ops.reset_default_graph()
Example #58
0
    def test_four_clusters(self):
        """Four clusters on vertices of a square."""
        label_to_center = {
            'A': np.array([0.0, 0.0]),
            'B': np.array([0.0, 1.0]),
            'C': np.array([1.0, 0.0]),
            'D': np.array([1.0, 1.0]),
        }

        # generate training data
        train_cluster_id = ['A'] * 400 + ['B'] * 300 + ['C'] * 200 + ['D'] * 100
        random.shuffle(train_cluster_id)
        train_sequence = _generate_random_sequence(train_cluster_id,
                                                   label_to_center,
                                                   sigma=0.01)
        train_sequences = [
            train_sequence[:100, :], train_sequence[100:300, :],
            train_sequence[300:600, :], train_sequence[600:, :]
        ]
        train_cluster_ids = [
            train_cluster_id[:100], train_cluster_id[100:300],
            train_cluster_id[300:600], train_cluster_id[600:]
        ]

        # generate testing data
        test_cluster_id = ['A'] * 10 + ['B'] * 20 + ['C'] * 30 + ['D'] * 40
        random.shuffle(test_cluster_id)
        test_sequence = _generate_random_sequence(test_cluster_id,
                                                  label_to_center,
                                                  sigma=0.01)

        # construct model
        model_args, training_args, inference_args = uisrnn.parse_arguments()
        model_args.rnn_depth = 2
        model_args.rnn_hidden_size = 8
        model_args.observation_dim = 2
        model_args.verbosity = 3
        training_args.learning_rate = 0.01
        training_args.learning_rate_half_life = 50
        training_args.train_iteration = 200
        training_args.enforce_cluster_id_uniqueness = False
        inference_args.test_iteration = 2

        model = uisrnn.UISRNN(model_args)

        # run training, and save the model
        model.fit(train_sequences, train_cluster_ids, training_args)
        temp_file_path = tempfile.mktemp()
        model.save(temp_file_path)

        # run testing
        predicted_label = model.predict(test_sequence, inference_args)

        # run evaluation
        model.logger.print(
            3, 'Asserting the equivalence between'
            '\nGround truth: {}\nPredicted: {}'.format(test_cluster_id,
                                                       predicted_label))
        accuracy = uisrnn.compute_sequence_match_accuracy(
            predicted_label, test_cluster_id)
        self.assertEqual(1.0, accuracy)

        # load new model
        loaded_model = uisrnn.UISRNN(model_args)
        loaded_model.load(temp_file_path)

        # run testing with loaded model
        predicted_label = loaded_model.predict(test_sequence, inference_args)

        # run evaluation with loaded model
        model.logger.print(
            3, 'Asserting the equivalence between'
            '\nGround truth: {}\nPredicted: {}'.format(test_cluster_id,
                                                       predicted_label))
        accuracy = uisrnn.compute_sequence_match_accuracy(
            predicted_label, test_cluster_id)
        self.assertEqual(1.0, accuracy)

        # keep training from loaded model on a subset of training data
        transition_bias_1 = model.transition_bias
        training_args.learning_rate = 0.001
        training_args.train_iteration = 50
        model.fit(train_sequence[:100, :], train_cluster_id[:100],
                  training_args)
        transition_bias_2 = model.transition_bias
        self.assertNotAlmostEqual(transition_bias_1, transition_bias_2)
        model.logger.print(
            3, 'Asserting transition_bias changed from {} to {}'.format(
                transition_bias_1, transition_bias_2))

        # run evaluation
        model.logger.print(
            3, 'Asserting the equivalence between'
            '\nGround truth: {}\nPredicted: {}'.format(test_cluster_id,
                                                       predicted_label))
        accuracy = uisrnn.compute_sequence_match_accuracy(
            predicted_label, test_cluster_id)
        self.assertEqual(1.0, accuracy)
Example #59
0
 def test_config_dictlike_access(self):
     f = mktemp()
     c = config.Config(f)
     assert c['server']['socket']
Example #60
0
    uploadUtil = '%s/s01_rsync_file.py' % DEPLOY_COMMON_HOME
##endregion the upload util

##region loading github ssh key
import sys
sys.stdout.write('Checking github SSH key... ')
sys.stdout.flush()  # print without newline, ref. https://stackoverflow.com/a/493399/248616

# by default, take the local key first
githubKey = CODE_GITHUB_KEY
keyFilename = makeGithubKeyFilename(DEPLOY_ID)

if CODE_GITHUB_KEY:
    from tempfile import mktemp
    githubKey = mktemp()

    import shutil
    shutil.copy2(
        CODE_GITHUB_KEY,
        githubKey)  #copy file ref. http://stackoverflow.com/a/123238/248616

    githubKey = renameFile(githubKey, keyFilename)

if not CODE_GITHUB_KEY:
    #download key from shared url when local key not provided
    githubKey = downloadFile(url=CODE_GITHUB_KEY_URL, chmod='600')

    #rename key file filename
    githubKey = renameFile(githubKey, keyFilename)