Example #1
    def grd_run(self, command, timeout=10):
        # Create temporary files for stdout/stderr
        std_out_file = NamedTemporaryFile(delete=False)
        std_err_file = NamedTemporaryFile(delete=False)

        # Call the command
        subprocess.Popen(
            args=command,
            stderr=std_err_file,
            stdout=std_out_file
        ).wait(timeout=timeout)

        # Reset file pointers
        std_out_file.seek(0)
        std_err_file.seek(0)

        # Read standard out/err
        std_out = std_out_file.read().decode()
        std_err = std_err_file.read().decode()

        # Close and remove the temporary files: delete=False leaves them
        # on disk, so they must be unlinked explicitly
        for tmp_file in (std_out_file, std_err_file):
            tmp_file.close()
            os.unlink(tmp_file.name)

        # Return results for further serialisation
        return {
            'stdout': std_out,
            'stderr': std_err
        }
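For comparison, on Python 3.7+ the same capture needs no temporary files at all; a minimal sketch, assuming command is a list of arguments:

import subprocess

def grd_run(command, timeout=10):
    # capture_output=True collects stdout/stderr in memory,
    # so no temporary files are needed
    result = subprocess.run(command, capture_output=True, timeout=timeout)
    return {
        'stdout': result.stdout.decode(),
        'stderr': result.stderr.decode(),
    }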
Example #2
class ZabbixSender(object):
    """"""

    def __init__(self, zabbix_server, zabbix_port, host, memcached_port):
        self._zabbix_server = zabbix_server
        self._zabbix_port = zabbix_port
        self._host = host
        self._memcached_port = memcached_port
        self._tempfile = None

    def send(self, stats):
        self._write_temporary_file(stats)
        self._send_data_to_zabbix()

    def _write_temporary_file(self, stats):
        self._tempfile = NamedTemporaryFile()
        for item in stats:
            self._tempfile.write(u'%s memcached_stats[%d,%s] %s\n'
                                 % (self._host, self._memcached_port, item.key, item.value))
        self._tempfile.flush()

        self._tempfile.seek(0)
        print self._tempfile.read()

    def _send_data_to_zabbix(self):
        cmd = "zabbix_sender -z %s -p %d -i %s" % (self._zabbix_server, self._zabbix_port, self._tempfile.name)
        print cmd
        os.system(cmd)
        self._tempfile.close()
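A safer variant of _send_data_to_zabbix, sketched from the list form hinted at in the original source, passes the arguments directly to subprocess.call so the shell never parses the server name or file path:

from subprocess import call

def _send_data_to_zabbix(self):
    # the list form avoids shell injection via server name or file path
    cmd = ['zabbix_sender', '-z', self._zabbix_server,
           '-p', str(self._zabbix_port), '-i', self._tempfile.name]
    call(cmd)
    self._tempfile.close()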
Example #3
    def render(self, media_item, width=None, height=None, options=None):
        """Generator that streams the waveform as a PNG image."""

        wav_file = media_item
        pngFile = NamedTemporaryFile(suffix='.png')

        image_width = width if width is not None else 1500
        image_height = height if height is not None else 200

        fft_size = 2048
        args = (wav_file, pngFile.name, image_width, image_height, fft_size,
                self.bg_color, self.color_scheme)
        create_wavform_png(*args)

        chunk = pngFile.read(0xFFFF)
        while chunk:
            yield chunk
            chunk = pngFile.read(0xFFFF)

        pngFile.close()
Example #4
def get_lexer(filename):
    prefix = os.path.basename(filename)[:-4]
    temp = NamedTemporaryFile(prefix=prefix, suffix="lextab.py")
    lex = {}
    pylly.parsespec(filename, temp.name)
    exec temp.read() in lex
    temp.close()
    return lexer.lexer(lex['lexspec'])
Example #5
class TestConfiguration(TestCase):
    initial_data = """cluster:
  name: elasticsearch-testcase
discovery:
  type: ec2
http.enabled: true
node:
  availability_zone: ${EC2_AZ}
  instance_id: ${EC2_INSTANCE_ID}
  name: test_node
  data: false
  master: true
path:
  data: /mnt/elasticsearch/
  logs: /var/log/elasticsearch
routing:
  allocation:
    awareness:
      attributes: instance_id, availability_zone
"""

    def setUp(self):
        self.config_file_instance = NamedTemporaryFile()
        self.config_file_instance.write(self.initial_data)
        self.config_file_instance.seek(0)

    def test_initial_file(self):
        config = ElasticSearchConfig(Cluster("elasticsearch-testcase-master-h0slave-r01"), config_file=self.config_file_instance.name)
        config.save()
        self.assertDictEqual(yaml.load(self.initial_data), yaml.load(self.config_file_instance.read()))
        self.config_file_instance.close()

    def test_client_node(self):
        self.initial_data = self.initial_data.replace("data: false", "data: false")
        self.initial_data = self.initial_data.replace("master: true", "master: false")
        config = ElasticSearchConfig(Cluster("elasticsearch-testcase-client-h0slave-r01"), config_file=self.config_file_instance.name)
        config.save()
        self.assertDictEqual(yaml.load(self.initial_data), yaml.load(self.config_file_instance.read()))
        self.config_file_instance.close()

    def test_master_node(self):
        self.initial_data = self.initial_data.replace("data: false", "data: false")
        config = ElasticSearchConfig(Cluster("elasticsearch-testcase-master-h0slave-r01"), config_file=self.config_file_instance.name)
        config.save()
        self.assertDictEqual(yaml.load(self.initial_data), yaml.load(self.config_file_instance.read()))
        self.config_file_instance.close()

    def test_data_node(self):
        self.initial_data = self.initial_data.replace("master: true", "master: false")
        self.initial_data = self.initial_data.replace("data: false", "data: true")
        config = ElasticSearchConfig(Cluster("elasticsearch-testcase-data-h0slave-r01"), config_file=self.config_file_instance.name)
        config.save()
        self.assertDictEqual(yaml.load(self.initial_data), yaml.load(self.config_file_instance.read()))
        self.config_file_instance.close()
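A side note on the assertions above: yaml.load without an explicit Loader is deprecated in newer PyYAML releases, and yaml.safe_load is the usual replacement for plain config data like this:

        self.assertDictEqual(yaml.safe_load(self.initial_data),
                             yaml.safe_load(self.config_file_instance.read()))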
Example #6
def get_parser(filename):
    #create temporary file for the machine-generated Python stuff
    prefix = os.path.basename(filename)[:-4]
    temp = NamedTemporaryFile(prefix=prefix, suffix="_gramtab.py")
    pyggy.parsespec(filename, temp.name)
    gram = {}
    exec temp.read() in gram
    g = srgram.SRGram(gram['gramspec'])
    p = glr.GLR(g)
    # the temporary file is no longer needed
    temp.close()
    return p
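Both this example and Example #4 use the Python 2 exec statement. Under Python 3 the exec ... in form is gone; the same pattern would read, as a sketch:

    gram = {}
    exec(temp.read(), gram)  # exec() function form; gram receives the executed globals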
Example #7
def run_command(args):
    if DotNetSystem is None:
        # cpython
        stdout = NamedTemporaryFile()
        stderr = NamedTemporaryFile()
        res = subprocess.call(args, stdin=PIPE, stdout=stdout, stderr=stderr)
        # rewind before reading: the child left the shared file offsets
        # at the end of each temporary file
        stdout.seek(0)
        stderr.seek(0)
        return stdout.read(), stderr.read(), res
    else:
        # IronPython
        stdout = StringIO()
        stderr = StringIO()
        res = subprocess.call(args, stdin=PIPE, stdout=stdout, stderr=stderr)
        return stdout.getvalue(), stderr.getvalue(), res
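On CPython the temporary files can be avoided entirely by letting subprocess collect the streams in pipes; a minimal sketch:

def run_command(args):
    proc = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, err = proc.communicate()  # drains both pipes without deadlocking
    return out, err, proc.returncode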
Example #8
def createBlogData(blogFile=None):
    if blogFile is None:
        # edit temp blog data file 
        blogFile = NamedTemporaryFile(suffix='.txt', prefix='blogger_py')
        editor = os.environ.get('EDITOR', Editor)

        stat = os.spawnlp(os.P_WAIT, editor, editor, blogFile.name)

        if stat != 0:
            raise EditError, 'edit tempfile failed: %s %s' % \
                             (editor, blogFile.name)

    # read blog data from temp file, publish a public post.
    title = blogFile.readline().rstrip('\r\n')
    label = blogFile.readline().strip()
    if label == '':
        _title = title.split(':') # title == 'LABEL: string'
        if len(_title) >= 2:
            label = _title[0]

    content = blogFile.read()
    _parts = docutils.core.publish_parts(source=content, writer_name='html')
    body = _parts['body']
    title = title.decode('utf-8')

    saveBlogFile(blogFile.name)

    blogFile.close()  # blogFile will delete automatically

    return title, body, label
Example #9
def download_product_hierarchy(request):
    productlines = ProductLine.objects.all().order_by("id")
    wb = Workbook()
    ws = wb.active
    column = 1
    for header in bigheaders:
        ws.cell(row=1, column=column).value = header
        ws.cell(row=1, column=column).font = Font(bold=True)
        column += 1
    tmp = NamedTemporaryFile(suffix=".xlsx")
    row = 2
    for pl in productlines:
        spls = pl.subproductline_set.all()
        if spls:
            for spl in spls:
                spl.excel_row(ws, row)
                row += 1
        else:
            pl.excel_row(ws, row)
            row += 1
        print("."),
        sys.stdout.flush()
    wb.save(tmp)
    tmp.seek(0)
    response = HttpResponse(content_type='application/xlsx')
    response['Content-Disposition'] = 'attachment; filename="{0}"'.format(os.path.basename(tmp.name))
    response.write(tmp.read())
    return(response)
Example #10
    def java_encrypted_get(self, object_name):
        tmpfile = NamedTemporaryFile()
        args = " ".join([
            "--bucket hello --object " + object_name,
            "--kmsKeyId " + kms_key_id,
            "--intercept http://127.0.0.1:%s/" % self.s3_server.port,
            "--outputFile " + tmpfile.name,
            "--read"
        ])
        
        cwd = dirname(dirname(__file__)) + "/javacompat"
        proc = Popen(["mvn", "-e", "exec:java",
                      "-Dexec.mainClass=org.kanga.dist.S3Test",
                      "-Dexec.args=" + args],
                     stdin=PIPE, stdout=PIPE, stderr=PIPE, cwd=cwd)

        out, err = proc.communicate()
        if proc.returncode != 0:
            stdout.write(out)
            stderr.write(err)
            stdout.flush()
            stderr.flush()
            raise ValueError("Get failed with exitcode %d" % proc.returncode)

        tmpfile.seek(0)
        data = tmpfile.read()
        return data
Example #11
def _save_thumbnails(image, path, size, suffix):
    nm = NamedTemporaryFile(suffix='.%s' % image.format)
    default_storage = get_storage_class()()
    try:
        # Ensure conversion to float in operations
        # Converting to RGBA make the background white instead of black for
        # transparent PNGs/GIFs
        image = image.convert("RGBA")
        image.thumbnail(get_dimensions(image.size, float(size)), Image.ANTIALIAS)
    except ZeroDivisionError:
        pass
    try:
        image.save(nm.name)
    except IOError:
        # e.g. `IOError: cannot write mode P as JPEG`, which gets raised when
        # someone uploads an image in an indexed-color format like GIF
        image.convert('RGB').save(nm.name)

    # Try to delete any existing file with the same name to avoid leaving a
    # useless duplicate: if `file_<suffix>.jpg` already exists, Storage would
    # save `file_<suffix>_<random_string>.jpg`, and nothing in the code would
    # know about that `<random_string>`.
    try:
        default_storage.delete(get_path(path, suffix))
    except IOError:
        pass

    default_storage.save(
        get_path(path, suffix), ContentFile(nm.read()))

    nm.close()
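The explicit close is easy to miss on an exception path; a context manager keeps the temporary file's lifetime explicit. A minimal sketch of the same save-then-read pattern:

    with NamedTemporaryFile(suffix='.png') as nm:
        image.save(nm.name)   # written by name; same inode as the open handle
        data = nm.read()      # handle is still at offset 0
    # the file is removed automatically when the with-block exits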
Example #12
    def test_write_in_existing_file_with_exclude(self):
        config_file = NamedTemporaryFile()
        config_file.write(
            '[buildout]\ndevelop=.\n'
            '[versions]\nexcluded=1.0\negg=0.1'.encode('utf-8'))
        config_file.seek(0)
        with self.assertRaises(SystemExit) as context:
            check_buildout_updates.cmdline(
                '-e excluded -w %s' % config_file.name)
        self.assertEqual(context.exception.code, 0)
        self.assertLogs(
            debug=['-> Last version of egg is 0.3.',
                   '=> egg current version (0.1) and '
                   'last version (0.3) are different.'],
            info=['- 2 versions found in %s.' % config_file.name,
                  '- 1 packages need to be checked for updates.',
                  '> Fetching latest datas for egg...',
                  '- 1 package updates found.',
                  '- %s updated.' % config_file.name],
            warning=['[versions]',
                     'egg                             = 0.3'])
        config_file.seek(0)
        self.assertEquals(
            config_file.read().decode('utf-8'),
            '[buildout]\n'
            'develop                         = .\n\n'
            '[versions]\n'
            'excluded                        = 1.0\n'
            'egg                             = 0.3\n')
        self.assertStdOut(
            '[versions]\n'
            'egg                             = 0.3\n')
Example #13
    def test_write_alpha_sorting(self):
        config_file = NamedTemporaryFile()
        config_parser = VersionsConfigParser()
        config_parser.add_section('Section 1')
        config_parser.add_section('Section 2')
        config_parser.set('Section 1', 'Option', 'Value')
        config_parser.set('Section 1', 'Option-void', None)
        config_parser.set('Section 1', 'Option-add+', 'Value added')
        config_parser.set('Section 2', 'Option-multiline', 'Value1\nValue2')
        config_parser.set('Section 2', '<', 'Value1\nValue2')
        config_parser.write(config_file.name, sorting='alpha')
        config_file.seek(0)
        self.assertEquals(
            config_file.read().decode('utf-8'),
            '[Section 1]\n'
            'Option                          = Value\n'
            'Option-add                     += Value added\n'
            'Option-void                     = \n'
            '\n'
            '[Section 2]\n'
            '<=                                Value1\n'
            '                                  Value2\n'
            'Option-multiline                = Value1\n'
            '                                  Value2\n')
        config_file.close()
Example #14
    def run_filters(self):
        """
        Run this bundle's filters over the specified data. `data` is a
        file-like object. A file-like object is returned.
        """
        if self.filters:
            # Create temporary file for output
            tmpfile = NamedTemporaryFile()

            # Construct a pipe of chained filters
            pipe = pipes.Template()
            for filter in self.filters:
                for step in filter:
                    pipe.append(*step)

            # Write data through the pipe
            w = pipe.open(tmpfile.name, 'w')
            self.data.seek(0)
            w.write(self.data.read())
            w.close()

            # Read tmpfile back in
            tmpfile.seek(0)
            self.data.truncate(0)
            self.data.write(tmpfile.read())
            tmpfile.close()

        return self.data
Example #15
def get_body():
    body = ""

    # Create a temporary file
    body_buffer_file = NamedTemporaryFile(delete=False)
    body_buffer_file_path = body_buffer_file.name
    body_buffer_file.close()

    # Set the default editor
    editor = 'nano'
    if os.name == 'nt':
        editor = 'notepad'

    raw_input('Press Enter to start writing the body of the mail')
    try:
        subprocess.call([editor, body_buffer_file_path])
    except OSError:
        # No suitable text editor found
        # Let the user edit the buffer file himself
        print "Enter the mail body in the file located at '" + body_buffer_file_path + "'"
        raw_input("Press Enter when done!")

    body_buffer_file = open(body_buffer_file_path)
    body = body_buffer_file.read()
    body_buffer_file.close()
    try:
        os.remove(body_buffer_file_path)
    except:
        # Unable to remove the temporary file
        # Stop the exception from propagating further,
        # since removing it is not essential to the working of the program
        pass

    return body
Example #16
    def test_form_clean(self):
        """Test the RecordCreateForm clean method
        """
        f = NamedTemporaryFile()
        f.write('Test data')
        f.seek(0)
        # An accepted file type.
        up_file = SimpleUploadedFile(name=f.name, content=f.read())
        file_data = {'uploaded_file': up_file}
        form_data = {'name': 'Test file'}
        form = RecordCreateForm(data=form_data)
        # Validation should fail (no uploaded file or Infobase ID).
        self.assertFalse(form.is_valid())
        # Add both Infobase ID and uploaded file, validation passes.
        form_data['uploaded_file'] = up_file.name
        form_data['infobase_id'] = 'TestID'
        form = RecordForm(data=form_data, files=file_data)
        self.assertTrue(form.is_valid())
        # Include just the file, validation passes.
        form_data.pop('infobase_id')
        form = RecordForm(data=form_data, files=file_data)
        self.assertTrue(form.is_valid())
        # Include just the Infobase ID, validation passes.
        form_data.pop('uploaded_file')
        form_data['infobase_id'] = 'TestID'
        form = RecordForm(data=form_data)
        self.assertTrue(form.is_valid())
Example #17
def module(minion, name, *args, **kwargs):
    """
    Execute an arbitrary salt module on dev minion.
    """

    script = '/usr/local/sbin/minion-' + minion
    if not os.access(script, os.X_OK):
        raise exc.CommandNotFoundError("Unknown minion " + minion)

    args = [script, name] + list(args)
    for k, v in kwargs.items():
        if k.startswith('__'):
            continue
        args.append('%s=%s' % (k, v))

    logger.debug("Calling %r", args)

    stderr = NamedTemporaryFile(
        prefix=script + '-' + name + '-', suffix='.stderr')
    child = subprocess.Popen(
        args, stdout=stderr, stderr=stderr)
    child.wait()

    stderr.seek(0)
    out = stderr.read()
    stderr.close()

    if child.returncode != 0:
        raise exc.CommandExecutionError(out)

    return out
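Note that the snippet interleaves the child's stdout and stderr in one temporary file. The same merge works without a file by redirecting stderr into the stdout pipe; a sketch:

    child = subprocess.Popen(args, stdout=subprocess.PIPE,
                             stderr=subprocess.STDOUT)
    out, _ = child.communicate()  # both streams interleaved in `out`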
Example #18
def get_subtitle(url, path):
    in_data = urllib2.urlopen(url)
    temp_file = NamedTemporaryFile()

    temp_file.write(in_data.read())
    in_data.close()
    temp_file.seek(0)

    if is_zipfile(temp_file.name):
        zip_file = ZipFile(temp_file)
        for name in zip_file.namelist():
            # don't unzip stub __MACOSX folders
            if '.srt' in name and '__MACOSX' not in name:
                logger.info(' '.join(['Unpacking zipped subtitle', name, 'to', os.path.dirname(path)]))
                zip_file.extract(name, os.path.dirname(path))

        zip_file.close()
    elif is_rarfile(temp_file.name):
        rar_path = path + '.rar'
        logger.info('Saving rared subtitle as %s' % rar_path)
        with open(rar_path, 'wb') as out_file:
            out_file.write(temp_file.read())

        try:
            import subprocess
            #extract all .srt in the rared file
            ret_code = subprocess.call(['unrar', 'e', '-n*srt', rar_path])
            if ret_code == 0:
                logger.info('Unpacking rared subtitle to %s' % os.path.dirname(path))
                os.remove(rar_path)
        except OSError:
            logger.info('Unpacking rared subtitle failed. '
                        'Please install unrar to automate this step.')
    temp_file.close()
Example #19
class TestUploadImage(object):
    def setUp(self):
        self.local_fp = NamedTemporaryFile(suffix=".jpg")
        self.local_path = self.local_fp.name

        self.local_fp.write("abcde")
        self.local_fp.flush()
        self.local_fp.seek(0)

        self.num_bytes = len(self.local_fp.read())
        self.local_fp.seek(0)

        self.dst_uri = "s3://%s/fua/upload.jpg" % app.config["PHOTO_BUCKET"]

    def tearDown(self):
        self.local_fp.close()

        try:
            (bucket, key) = s3.parse_s3_uri(self.dst_uri)
            key.delete()
        except (PinholeBucketNotFound, PinholeKeyNotFound):
            pass

    def test_from_path(self):
        assert_equal(s3.upload_image(self.local_path, self.dst_uri),
                     self.num_bytes)

    def test_from_missing_path(self):
        assert_raises(ValueError, s3.upload_image, "asd", "as")
Example #20
    def update_screenshot(self, save=True):
        screenshot_file = NamedTemporaryFile()
        with io.open(screenshot_file.name, 'w+b') as stream:
            casperjs_capture(
                stream,
                self.url,
                method=self.method,
                width=self.viewport_width,
                height=self.viewport_height,
                selector=self.css_selector,
                data=self.data,
                waitfor=self.waitfor,
                size='%sx%s' % (self.screenshot_width, self.screenshot_height),
                crop=str(self.crop),
                render=self.render)

        screenshot_data = screenshot_file.read()
        file_ext = imghdr.what(screenshot_file.name)
        screenshot_file.close()
        base_filename = self.url.split('/')[2]
        base_filename = '_'.join(base_filename.split('.'))
        dig = md5(screenshot_data).hexdigest()[:8]
        base_filename = '-'.join((base_filename, dig))
        filename = '.'.join((base_filename, file_ext))

        self.screenshot = filename
        self.last_updated = timebase.now()
        self.screenshot.save(filename, ContentFile(screenshot_data), save=save)
Example #21
File: c.py Project: FLuptak/rpg
    def patched(self, project_dir, spec, sack):
        f = NamedTemporaryFile(delete=False, prefix="rpg_plugin_c_")
        file_name = f.name
        f.close()

        out = Command(["find " + str(project_dir)
                       + " -name *.c -o -name *.h"]).execute()

        files_list = [str(s) for s in out.splitlines()]

        makedepend = "makedepend -w10000 -f" + file_name + " -I" \
                     + str(project_dir) + " " + \
                     ' '.join(files_list) + " 2>/dev/null"
        Command(makedepend).execute()

        regex = compile(r'.*\.h')
        regex2 = compile(str(project_dir) + ".*")
        unlink(file_name + ".bak")
        with open(file_name, "r") as f:
            _ret_paths = set([path.dirname(s) for s in f.read().split()
                              if regex.match(s) and not regex2.match(s)])
        unlink(file_name)

        spec.required_files = spec.required_files.union(_ret_paths)
        spec.build_required_files = spec.build_required_files.union(_ret_paths)
Example #22
def get_session_svg(viz_data):
    """Take session visualization data and return svg."""
    
    graph = Dot('graphname', graph_type='digraph')
    
    #loop create all nodes and store by id
    node_dict = {}
    for i, node_data in enumerate(viz_data['nodes']):
        id = node_data['id']
        node_dict[id] = str(i)
        graph.add_node(Node(str(i)))
        
    #add edges by links
    for link_data in viz_data['links']:
        snode = node_dict[viz_data['nodes'][link_data['source']]['id']]
        tnode = node_dict[viz_data['nodes'][link_data['target']]['id']]
        graph.add_edge(Edge(snode, tnode))
    
    #get svg of graph (avoid shadowing the Python 2 builtin `file`)
    svg_file = NamedTemporaryFile()
    graph.write_svg(svg_file.name)
    svg = svg_file.read()
    svg_file.close()
    
    return svg
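pydot can also render straight to memory, which removes the temporary file entirely; a sketch using one of pydot's generated create_<format> methods:

    svg = graph.create_svg()  # returns the rendered SVG bytes directly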
Example #23
    def export(self, out_f=None, format='mp3'):
        out_f = _fd_or_path_or_tempfile(out_f, 'wb+')
        out_f.seek(0)
        data = NamedTemporaryFile(mode="wb", delete=False)

        wave_data = wave.open(data)
        wave_data.setnchannels(self.channels)
        wave_data.setsampwidth(self.sample_width)
        wave_data.setframerate(self.frame_rate)
        wave_data.setnframes(self.frame_count())
        wave_data.writeframesraw(self._data)
        wave_data.close()

        output = NamedTemporaryFile(mode="w+b")

        subprocess.call(['ffmpeg',
                         '-y',  # always overwrite existing files
                         "-f", "wav", "-i", data.name,  # input options (filename last)
                         "-f", format, output.name,  # output options (filename last)
                         ],
                        # make ffmpeg shut up; devnull must be opened writable
                        stderr=open(os.devnull, 'wb'))

        output.seek(0)
        out_f.write(output.read())

        data.close()
        os.unlink(data.name)
        out_f.seek(0)
        return out_f
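The original "read stdin / write stdout" comment points at an alternative worth sketching: ffmpeg can stream through pipes instead of named temporary files. Assuming the WAV bytes are already assembled in memory as wav_bytes:

    proc = subprocess.Popen(
        ['ffmpeg', '-y', '-f', 'wav', '-i', '-',   # read WAV from stdin
         '-f', format, '-'],                       # write encoded audio to stdout
        stdin=subprocess.PIPE, stdout=subprocess.PIPE,
        stderr=subprocess.DEVNULL)
    encoded, _ = proc.communicate(wav_bytes)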
Example #24
def xls_export(request, username, id_string):
    owner = get_object_or_404(User, username=username)
    xform = get_object_or_404(XForm, id_string=id_string, user=owner)
    if not has_permission(xform, owner, request):
        return HttpResponseForbidden(_(u"Not shared."))
    query = request.GET.get("query")
    force_xlsx = request.GET.get("xlsx") == "true"
    xls_df_builder = XLSDataFrameBuilder(username, id_string, query)
    excel_defs = {
        "xls": {"suffix": ".xls", "mime_type": "vnd.ms-excel"},
        "xlsx": {"suffix": ".xlsx", "mime_type": "vnd.openxmlformats"},  # TODO: check xlsx mime type
    }
    ext = "xls" if not force_xlsx else "xlsx"
    if xls_df_builder.exceeds_xls_limits:
        ext = "xlsx"
    try:
        temp_file = NamedTemporaryFile(suffix=excel_defs[ext]["suffix"])
        xls_df_builder.export_to(temp_file.name)

        if request.GET.get("raw"):
            id_string = None
        response = response_with_mimetype_and_name(excel_defs[ext]["mime_type"], id_string, extension=ext)
        response.write(temp_file.read())
        temp_file.seek(0, os.SEEK_END)
        response["Content-Length"] = temp_file.tell()
        temp_file.close()
        return response
    except NoRecordsFoundError:
        return HttpResponse(_("No records found to export"))
Example #25
    def test_seq_pipeline_parallel_run(self):
        'It tests that the pipeline runs ok'
        pipeline = 'sanger_without_qual'

        fhand_adaptors = NamedTemporaryFile()
        fhand_adaptors.write(ADAPTORS)
        fhand_adaptors.flush()
        arabidopsis_genes = 'arabidopsis_genes+'
        univec = os.path.join(TEST_DATA_DIR, 'blast', arabidopsis_genes)
        configuration = {'remove_vectors': {'vectors': univec},
                         'remove_adaptors': {'adaptors': fhand_adaptors.name}}

        in_fhands = {}
        in_fhands['in_seq'] = open(os.path.join(TEST_DATA_DIR, 'seq.fasta'),
                                   'r')
        out_fhand = NamedTemporaryFile()
        writer = SequenceWriter(out_fhand, file_format='fasta')
        writers = {'seq': writer}

        seq_pipeline_runner(pipeline, configuration, in_fhands,
                            processes=4, writers=writers)
        out_fhand = open(out_fhand.name, 'r')

        result_seq = out_fhand.read()
        assert result_seq.count('>') == 6
        #are we keeping the description?
        assert 'mdust' in result_seq
Example #26
    def decrypt(self, enctext):
        """ Decrypting an encrypted text by the use of a private key.

        :param enctext: The encrypted text as a string
        :return: The decrypted text
        """

        if self.log:
            self.log.info("input len: %d" % len(enctext))
        _, fil = make_temp("%s" % enctext, decode=False)
        ntf = NamedTemporaryFile(delete=not DEBUG)

        com_list = [self.xmlsec, "--decrypt",
                     "--privkey-pem", self.key_file,
                     "--output", ntf.name,
                     "--id-attr:%s" % ID_ATTR, ENC_KEY_CLASS,
                     fil]

        if self.debug:
            self.log.debug("Decrypt command: %s" % " ".join(com_list))

        pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
        p_out = pof.stdout.read()
        p_err = pof.stderr.read()

        if self.debug:
            self.log.debug("Decrypt result (out): %s" % (p_out,))
            self.log.debug("Decrypt result (err): %s" % (p_err,))

        ntf.seek(0)
        return ntf.read()
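Reading pof.stdout and then pof.stderr sequentially can deadlock if xmlsec fills the stderr pipe while stdout is still being read; communicate() drains both safely. A sketch of the same call:

        pof = Popen(com_list, stderr=PIPE, stdout=PIPE)
        p_out, p_err = pof.communicate()  # drains both pipes without deadlocking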
Example #27
    def test_merge_sam():
        'It merges two sams'
        reference = NamedTemporaryFile(suffix='.sam')
        reference.write('''>SGN-U572743
atatata
>SGN-U576692
gcgc''')
        reference.flush()
        sam1 = NamedTemporaryFile(suffix='.sam')
        sam1.write('''@SQ	SN:SGN-U576692	LN:1714
@SQ	SN:SGN-U572743	LN:833
@RG	ID:g1	LB:g1	SM:g1
@RG	ID:g2	LB:g2	SM:g2
SGN-E221403	0	SGN-U576692	1416	207	168M	*	0	0	AGCCTGATAAAGGTCTGCCTACGTGTTTTAAGTGGAATCCGTTTCCCCATGTCCAAACCTTCTAAATAGTTTTTTGTGTTAGTTCTTGTATGCCACATACAAAAATTAACAAACTCTTTTGCCACATATGTTCCAGCACGTCAAAGCAACATGTATTTGAGCTACTTT	558<///035EB@;550300094>>FBF>>88>BBB200>@FFMMMJJ@@755225889>0..14444::FMF@@764444448@;;84444<//,4,.,<<QFBB;::/,,,.69FBB>9:2/.409;@@>88.7,//55;BDK@11,,093777777884241<:7	AS:i:160	XS:i:0	XF:i:3	XE:i:4	XN:i:0	RG:Z:g2
SGN-E221664	0	SGN-U572743	317	226	254M24S	*	0	0	GGATGATCTTAGAGCTGCCATTCAAAAGATGTTAGACACTCCTGGGCCATACTTGTTGGATGTGATTGTACCTCATCAGGAGCATGTTCTACCGATGATTCCCAGTGGCGGTGCTTTCAAAAATGTGATTACGGAGGGTGATGGGAGACGTTCCTATTGACTTTGAGAAGCTACATAACTAGTTCAAGGCATTGTATTATCTAAAATAAACTTAATATTTATGTTTACTTAAAAGTTTTTCATTGTGTGAAGGAAAAAAAAAAAAAAAAAAAAAAAAA	999@7<22-2***-,206433>:?9<,,,66:>00066=??EEAAA?B200002<<@@@=DB99777864:..0::@833099???<@488>></...<:B<<.,,8881288@BBDDBD885@@;;9:/9.,,,99B99233885558=?DKKKDDAA??DKBB=440/0<8?DEDFBB??6@152@@FBMFIIDDDDDDKKKOK@@@@DD:N688BBDDDBBBKKDEDDBN977?<9<111:<??==BKMPKKBB==99>QQYYYYYYYYYYYYQQ	AS:i:250	XS:i:0	XF:i:0	XE:i:7	XN:i:0	RG:Z:g1
''')
        sam1.flush()
        sam2 = NamedTemporaryFile(suffix='.sam')
        sam2.write('''@SQ	SN:SGN-U576692	LN:1714
@SQ	SN:SGN-U572743	LN:833
@RG	ID:g1	LB:g1	SM:g1
@RG	ID:g3	LB:g3	SM:g3
SGN-E200000	0	SGN-U572743	317	226	254M24S	*	0	0	GGATGATCTTAGAGKTGCCATTCAAAAGATGTTAGACACTCCTGGGCCATACTTGTTGGATGTGATTGTACCTCATCAGGAGCATGTTCTACCGATGATTCCCAGTGGCGGTGCTTTCAAAAATGTGATTACGGAGGGTGATGGGAGACGTTCCTATTGACTTTGAGAAGCTACATAACTAGTTCAAGGCATTGTATTATCTAAAATAAACTTAATATTTATGTTTACTTAAAAGTTTTTCATTGTGTGAAGGAAAAAAAAAAAAAAAAAAAAAAAAA	999@7<22-2***-,206433>:?9<,,,66:>00066=??EEAAA?B200002<<@@@=DB99777864:..0::@833099???<@488>></...<:B<<.,,8881288@BBDDBD885@@;;9:/9.,,,99B99233885558=?DKKKDDAA??DKBB=440/0<8?DEDFBB??6@152@@FBMFIIDDDDDDKKKOK@@@@DD:N688BBDDDBBBKKDEDDBN977?<9<111:<??==BKMPKKBB==99>QQYYYYYYYYYYYYQQ	AS:i:250	XS:i:0	XF:i:0	XE:i:7	XN:i:0	RG:Z:g1
SGN-E40000	0	SGN-U576692	1416	207	168M	*	0	0	AGCCTGATAAAGGTCTGCCTACGTGTTTTAAGTGGAATCCGTTTCCCCATGTCCAAACCTTCTAAATAGTTTTTTGTGTTAGTTCTTGTATGCCACATACAAAAATTAACAAACTCTTTTGCCACATATGTTCCAGCACGTCAAAGCAACATGTATTTGAGCTACTTT	558<///035EB@;550300094>>FBF>>88>BBB200>@FFMMMJJ@@755225889>0..14444::FMF@@764444448@;;84444<//,4,.,<<QFBB;::/,,,.69FBB>9:2/.409;@@>88.7,//55;BDK@11,,093777777884241<:7	AS:i:160	XS:i:0	XF:i:3	XE:i:4	XN:i:0	RG:Z:g3
''')
        sam2.flush()
        sam3 = NamedTemporaryFile(suffix='.sam')
        merge_sam(infiles=[sam1, sam2], outfile=sam3, reference=reference)
        sam3.seek(0)
        sam3_content = sam3.read()

        assert 'SN:SGN-U572743' in sam3_content
        assert 'SGN-E200000' in sam3_content
        assert 'SGN-E221664' in sam3_content
Example #28
    def sign_csr(self, csr, cakey, cacrt, serial_n, days, passphrase=None, ca_capable=False):
        """Sign the CSR with given CA
        """
        cfg = self.__get_config_file()
        logger.info('Signing CSR')

        if ca_capable:
            extension = "v3_ca"
        else:
            extension = "usr_cert"
        csrfile = NamedTemporaryFile()
        csrfile.write(csr)
        csrfile.seek(0)
        cafile = NamedTemporaryFile()
        cafile.write(cacrt)
        cafile.seek(0)
        cakeyfile = NamedTemporaryFile()
        cakeyfile.write(cakey)
        cakeyfile.seek(0)
        certfile = NamedTemporaryFile()
        
        command = ['x509', '-req', '-set_serial', str(serial_n),
                   '-extfile', cfg.name, '-sha1', '-days', str(days),
                   '-in', csrfile.name, '-CA', cafile.name,
                   '-CAkey', cakeyfile.name, '-passin', 'stdin',
                   '-extensions', extension, '-out', certfile.name]
        self.exec_openssl(command, stdin=passphrase)
        pem = certfile.read()
        return pem
Example #29
    def _render_diffs(self, diffs, section_header):
        """Render diffs. Write the SECTION_HEADER iff there are actually
    any diffs to render."""
        w = self.output.write
        section_header_printed = False
        diff_temp_files = []
        total_diff_size = 0

        for diff in diffs:
            if not diff.diff and not diff.diff_url:
                continue
            if not section_header_printed:
                w(section_header)
                section_header_printed = True

            # Create a temporary file to hold this per-file diff
            diff_temp_file = NamedTemporaryFile()
            diff_temp_files.append(diff_temp_file)
            t = diff_temp_file.write

            if diff.kind == "D":
                t("\nDeleted: %s\n" % diff.base_path)
            elif diff.kind == "C":
                t("\nCopied: %s (from r%d, %s)\n" % (diff.path, diff.base_rev, diff.base_path))
            elif diff.kind == "A":
                t("\nAdded: %s\n" % diff.path)
            else:
                # kind == 'M'
                t("\nModified: %s\n" % diff.path)

            if diff.diff_url:
                t("URL: %s\n" % diff.diff_url)

            if diff.diff:
                t(SEPARATOR + "\n")
                if diff.binary:
                    if diff.singular:
                        t("Binary file. No diff available.\n")
                    else:
                        t("Binary files. No diff available.\n")
                else:
                    for line in diff.content:
                        t(line.raw)

            diff_temp_file.flush()
            temp_file_name = diff_temp_file.name
            total_diff_size = total_diff_size + os.stat(temp_file_name)[stat.ST_SIZE]
            if total_diff_size > MAX_DIFF_SIZE:
                break

        if total_diff_size > MAX_DIFF_SIZE:
            w("\nNOTE:  File difference information exceeds the allowed size.\n")
        else:
            for diff_temp_file in diff_temp_files:
                diff_temp_file.seek(0)
                while 1:
                    chunk = diff_temp_file.read(4096)
                    if not chunk:
                        break
                    w(chunk)
Example #30
    def sign_csr(self, csr, cakey, cacrt, serial, days, passphrase=None, ca_capable=False):
        """Sign the CSR with given CA
        """
        shutil.copy(PKI_OPENSSL_CONF, self.tmpdir)
        confpath = os.path.join(self.tmpdir, os.path.split(PKI_OPENSSL_CONF)[-1])
        logger.info('Signing CSR')

        if ca_capable:
            extension = "v3_ca"
        else:
            extension = "usr_cert"
        csrfile = NamedTemporaryFile()
        csrfile.write(csr)
        csrfile.seek(0)
        cafile = NamedTemporaryFile()
        cafile.write(cacrt)
        cafile.seek(0)
        cakeyfile = NamedTemporaryFile()
        cakeyfile.write(cakey)
        cakeyfile.seek(0)
        serialfile = NamedTemporaryFile()
        serial = "%X" % serial
        serial = serial.rjust(2,"0")
        serialfile.write(serial)
        serialfile.seek(0)
        certfile = NamedTemporaryFile()

        command = ['x509', '-req', '-CAserial', serialfile.name,
                   '-extfile', confpath, '-sha1', '-days', str(days),
                   '-in', csrfile.name, '-CA', cafile.name,
                   '-CAkey', cakeyfile.name, '-passin', 'stdin',
                   '-extensions', extension, '-out', certfile.name]
        self.exec_openssl(command, stdin=passphrase)
        pem = certfile.read()
        return pem
Example #31
def new(request):
    newcodes = []
    subproductform = SubProductForm()
    download = "false"
    productform = ProductForm()
    if request.method == "POST":
        if request.POST.get("select", "") == "subproduct":
            wb = load_workbook(request.FILES['subproduct'])
            ws = wb.active
            headers = [cell.value for cell in ws.rows.next()]
            subprods = []
            for row in ws.iter_rows(row_offset=1):
                if row[0].value:
                    rowDict = {}
                    for cellnum in range(len(row) + 1):
                        try:
                            cell = row[cellnum]
                            rowDict[headers[cellnum]] = cell.value
                        except IndexError:
                            pass
                    try:
                        productline = ProductLine.objects.get(
                            code=rowDict.get("Existing Product Line Code"))
                    except Exception as e:
                        print(u"Could not find ProductLine '{0}' at row {1}".
                              format(rowDict.get("Existing Product Line Code"),
                                     row[0].row))
                        debug()

                    code = get_unused_code()
                    description = rowDict.get("Igor / Sub PL Description", "")
                    usage = rowDict.get("Usage")
                    if usage:
                        usage = Usage.objects.get(name=usage)
                    igoritemclass = rowDict.get("Igor Item Class", None)
                    if igoritemclass:
                        igoritemclass = IgorItemClass.objects.get(
                            name=igoritemclass)
                    if len(description) > 30:
                        print(
                            u"New Subproduct Line '{0}' exceeds 30 characters: {1}. Dropping into the debugger. You decide what to do."
                            .format(description, len(description)))
                        debug()

                    subproductline, created = SubProductLine.objects.get_or_create(
                        fproductline=productline,
                        igor_or_sub_pl=code.code,
                        description=description,
                        igorclass=igoritemclass,
                        usage=usage)
                    if created:
                        msg = subproductline.save()
                        if msg:
                            messages.warning(request, msg)
                        subprods.append(subproductline)
                        code.use(newcodes)

            wb = Workbook()
            ws = wb.active
            count = 1
            tmp = NamedTemporaryFile(suffix=".xlsx")
            for header in bigheaders:
                ws.cell(row=1, column=count).value = header
                count += 1
            count = 2

            for subprod in subprods:
                subprod.excel_row(ws, count)
                count += 1
            wb.save(tmp)
            tmp.flush()
            tmp.seek(0)
            request.session['download'] = base64.b64encode(tmp.read())
            download = "true"

        elif request.POST.get('select') == "product":
            wb = load_workbook(request.FILES['product'])
            ws = wb.get_sheet_by_name(name='Product_Hierarchy')
            headers = [cell.value for cell in ws.rows.next()]
            prods = []
            rowDictList = []

            for row in ws.iter_rows(row_offset=1):
                if row[0].value:
                    rowDict = {}
                    for cellnum in range(len(row) + 1):
                        try:
                            cell = row[cellnum]
                            rowDict[headers[cellnum]] = cell.value
                        except IndexError:
                            pass
                    rowDictList.append(rowDict)
            """All product lines of the same name within a product line request should have the same Igor code, otherwise, abort."""
            productLineGroupIgorDict = {}
            for rowDict in rowDictList:
                productLineGroupIgorDict[rowDict[
                    "Product Line Name"]] = productLineGroupIgorDict.get(
                        rowDict["Product Line Name"],
                        []) + [rowDict["Igor Item Class"]]
            for plName, igorCodeList in productLineGroupIgorDict.iteritems():
                if len(set(igorCodeList)) > 1:
                    newClass = "active"
                    messages.warning(
                        request,
                        u"Igor Item Classes differ for identical Product Line Name: {0}. Aborting..."
                        .format(plName))
                    return (render(request, "hierarchy/new.html", locals()))

            newProductLines = []  #List of ID's
            subprods = []

            for rowDict in rowDictList:
                try:
                    productlinegroup = ProductLineGroup.objects.get(
                        code__iexact=rowDict.get(
                            "Existing Product Line Group Code"))
                except Exception as e:
                    print e
                    print(
                        u"Could not find Product Line Group Code '{0}'".format(
                            rowDict.get("Existing Product Line Group Code")))
                    debug()

                code = get_unused_code()
                igorDescription = rowDict.get("Igor / Sub PL Description", "")
                usage = rowDict.get("Usage")
                productlinename = rowDict.get("Product Line Name")
                if usage:
                    try:
                        usage = Usage.objects.get(name__iexact=usage)
                    except Exception as e:
                        debug()
                        print(u"Usage not found '{0}' at row {1}".format(
                            usage, row[0].row))
                igoritemclass = rowDict.get("Igor Item Class", None)
                if igoritemclass:
                    igoritemclass = IgorItemClass.objects.get(
                        name=igoritemclass)
                """Product line names are not unique in the database, but we don't want to create multiple product lines with the same
                description within a single new product line request. Check if we've created one in this new productline request,
                and if not create a new one. """

                if len(productlinename) > 30:
                    print(
                        u"Product Line Name '{0}' exceeds 30 characters: {1}. Dropping into the debugger. The database will truncate the name if you continue. Abort with ^c."
                        .format(productlinename, len(productlinename)))

                try:
                    productline = ProductLine.objects.get(
                        id__in=newProductLines,
                        name__iexact=productlinename[:30])
                except ProductLine.DoesNotExist:
                    code = get_unused_code()
                    productline = ProductLine(
                        code=code.code,
                        name=productlinename,
                        fproductlinegroup=productlinegroup)
                    productline.save()
                    code.use(newcodes)
                    newProductLines.append(productline.id)
                #Now we have a new productline
                code = get_unused_code()
                subproductline = SubProductLine(fproductline=productline,
                                                igor_or_sub_pl=code.code,
                                                description=igorDescription,
                                                igorclass=igoritemclass,
                                                usage=usage)
                msg = subproductline.save()
                if msg:
                    messages.warning(request, msg)
                subprods.append(subproductline)
                code.use(newcodes)

            wb = Workbook()
            ws = wb.active
            ws.title = "Product_Hierarchy"
            wsProductCodes = wb.create_sheet()
            wsProductCodes.title = "Product_Codes"
            count = 1
            tmp = NamedTemporaryFile(suffix=".xlsx")
            for header in bigheaders:
                ws.cell(row=1, column=count).value = header
                ws.cell(row=1, column=count).font = Font(bold=True)
                count += 1
            count = 2

            for subproduct in subprods:
                subproduct.excel_row(ws, count)
                count += 1

            #Write product codes into a 2nd sheet
            column = 1
            for header in ["Code", "Description", "Type", "Date"]:
                wsProductCodes.cell(row=1, column=column).value = header
                wsProductCodes.cell(row=1,
                                    column=column).font = Font(bold=True)
                column += 1

            row = 2
            for newcodeList in newcodes:
                column = 1
                for value in newcodeList:
                    wsProductCodes.cell(row=row, column=column).value = value
                    column += 1
                row += 1
            wb.save(tmp)
            tmp.flush()
            tmp.seek(0)
            request.session['download'] = base64.b64encode(tmp.read())

    elif request.method == "GET":
        download = request.GET.get("download")
        if download in ["sub-product-template.xlsx", "product-template.xlsx"]:
            filepath = os.path.join(settings.BASE_DIR, "hierarchy", "static",
                                    "hierarchy", "downloads", download)
            response = HttpResponse(content_type="application/xlsx")
            response[
                'Content-Disposition'] = 'attachment; filename="{0}"'.format(
                    download)
            with open(filepath, 'rb') as fh:
                spreadsheet = fh.read()
            response.write(spreadsheet)
            return (response)
        retrieve = "retrieve" in request.GET.keys()
        if retrieve:
            xlsx = base64.b64decode(request.session['download'])
            response = HttpResponse(content_type='application/xlsx')
            response[
                'Content-Disposition'] = 'attachment; filename="{0}"'.format(
                    os.path.basename("productcodes.xlsx"))
            response.write(xlsx)
            return (response)

    newClass = "active"
    return (render(request, "hierarchy/new.html", locals()))
Example #32
    def analyse(self):
        self.ensure_one()
        logger.info('Start analysis of %s', self.name)
        if not self.facturx_file:
            raise UserError(_("Missing Factur-X File"))
        filetype = mimetypes.guess_type(self.facturx_filename)
        logger.debug('Factur-X file mimetype: %s', filetype)
        vals = {'file_type': 'pdf'}
        errors = {
            '1_pdfa3': [],
            '2_xmp': [],
            '3_xml': [],
            '4_xml_schematron': [],
        }
        if filetype:
            if filetype[0] == 'application/xml':
                vals['file_type'] = 'xml'
            elif filetype[0] != 'application/pdf':
                raise UserError(
                    _("The Factur-X file has not been recognised as a PDF file "
                      "(MIME Type: %s). Please check the filename extension.")
                    % filetype[0])
        prefix = self.facturx_filename and self.facturx_filename[:4] + '-'\
            or 'facturx-'
        suffix = '.%s' % vals['file_type']
        f = NamedTemporaryFile('wb+', prefix=prefix, suffix=suffix)
        f.write(base64.decodebytes(self.facturx_file))
        f.seek(0)
        if vals['file_type'] == 'pdf':
            try:
                pdf = PdfFileReader(f)
                pdf_root = pdf.trailer['/Root']
            except Exception:
                raise UserError(_("This is not a PDF file"))
            rest = False
            try:
                logger.info('Connecting to veraPDF via Rest')
                vera_xml_root = self.run_verapdf_rest(vals, f)
                rest = True
            except Exception as e:
                logger.warning(
                    'Failed to connect to veraPDF via Rest. Error: %s. '
                    'Falling back to the subprocess method.' % e)
                vera_xml_root = self.run_verapdf_subprocess(vals, f)
            if rest:
                pdfa_errors = self.analyse_verapdf_rest(vals, vera_xml_root)
            else:
                pdfa_errors = self.analyse_verapdf_subprocess(
                    vals, vera_xml_root)
            if pdfa_errors:
                self.vera_errors_reformat(pdfa_errors, errors)
            xmp_root = self.extract_xmp(vals, pdf_root, errors)

            xml_root = xml_bytes = None
            res_xml = self.extract_xml(vals, pdf_root, errors)
            if res_xml:
                xml_root, xml_bytes = res_xml
            # Set pdfa3_valid later in the code, because
            # there is a check later on AFRelationShip

        elif vals['file_type'] == 'xml':
            xml_bytes = base64.decodebytes(self.facturx_file)
            xml_root = None
            try:
                xml_root = etree.fromstring(xml_bytes)
            except Exception as e:
                errors['3_xml'].append({
                    'name': 'Not a valid XML file',
                    'comment': 'Technical error message:\n%s' % e,
                })
        if xml_root is not None:
            self.analyse_xml_xsd(vals, xml_root, errors)
        else:
            vals['doc_type'] = 'facturx'
        # Starting from here, we have vals['doc_type'] and vals['xml_profile']
        if vals['file_type'] == 'pdf':
            if (vals.get('afrelationship')
                    and vals['afrelationship'] != '/Data'
                    and vals['xml_profile']
                    in ('facturx_minimum', 'facturx_basicwl')):
                errors['1_pdfa3'].append({
                    'name': '/AFRelationship = %s not allowed for this '
                            'Factur-X profile' % vals['afrelationship'],
                    'comment': "For Factur-X profiles Minimum and Basic WL, "
                               "/AFRelationship for attachment factur-x.xml must be "
                               "/Data, it cannot be /Alternative nor /Source. "
                               "In this file, /AFRelationship for attachment "
                               "factur-x.xml is %s." % vals['afrelationship'],
                })
            if xmp_root:
                self.analyse_xmp(vals, xmp_root, errors)
                if not errors['2_xmp']:
                    vals['xmp_valid'] = True
            if vals.get('xml_filename'):
                if (vals['doc_type'] == 'facturx'
                        and vals['xml_filename'] == 'order-x.xml'):
                    errors['1_pdfa3'].append({
                        'name': 'Wrong XML filename',
                        'comment': "The attached XML filename is order-x.xml, but "
                                   "the content of the XML follows the Factur-X standard!",
                    })
                elif (vals['doc_type'] == 'orderx'
                        and vals['xml_filename'] == 'factur-x.xml'):
                    errors['1_pdfa3'].append({
                        'name': 'Wrong XML filename',
                        'comment': "The attached XML filename is factur-x.xml, but "
                                   "the content of the XML follows the Order-X standard!",
                    })
                # Rename xml_filename for easier download
                vals['xml_filename'] = '%s-x_%s.xml' % (
                    vals['doc_type'][:-1], self.name.replace('/', '_'))
        if vals.get('xml_profile') in ('facturx_en16931',
                                       'facturx_basic') and xml_bytes:
            self.analyse_xml_schematron_facturx(vals, xml_bytes, errors,
                                                prefix)
        elif vals.get('xml_profile') in (
                'orderx_extended', 'orderx_comfort',
                'orderx_basic') and xml_root is not None:
            self.analyse_xml_schematron_orderx(vals, xml_root, errors, prefix)
        if not errors['3_xml']:
            vals['xml_valid'] = True
        if (vals.get('xml_profile') in PROFILES_schematron_analysis
                and not errors['4_xml_schematron']):
            vals['xml_schematron_valid'] = True
        if vals['file_type'] == 'pdf':
            if not errors['1_pdfa3']:
                vals['pdfa3_valid'] = True
            if (vals.get('pdfa3_valid') and vals.get('xmp_valid')
                    and vals.get('xml_valid') and vals.get('xmp_profile')
                    and vals.get('xmp_profile') == vals.get('xml_profile')
                    and vals.get('xmp_orderx_type')
                    == vals.get('xml_orderx_type')):
                vals['valid'] = True
        elif vals['file_type'] == 'xml':
            if vals.get('xml_valid'):
                vals['valid'] = True
        if (vals.get('xml_profile') in PROFILES_schematron_analysis
                and not vals.get('xml_schematron_valid')):
            vals['valid'] = False
        facturx_file_size = os.stat(f.name).st_size
        f.seek(0)
        facturx_file_sha1 = hashlib.sha1(f.read()).hexdigest()
        f.close()
        # logger.debug('vals at end of analysis=%s', vals)
        errors_write = self.errors2errors_write(errors)
        vals.update({
            'state': 'done',
            'date': fields.Datetime.now(),
            'facturx_file_sha1': facturx_file_sha1,
            'facturx_file_size': facturx_file_size,
            'error_ids': errors_write,
        })
        self.write(vals)
        logger.info('End analysis of %s', self.name)
        return
Example #33
    def _render_diffs(self, diffs, section_header):
        """Render diffs. Write the SECTION_HEADER iff there are actually
    any diffs to render."""
        w = self.output.write
        section_header_printed = False
        diff_temp_files = []
        total_diff_size = 0

        for diff in diffs:
            if not diff.diff and not diff.diff_url:
                continue
            if not section_header_printed:
                w(section_header)
                section_header_printed = True

            # Create a temporary file to hold this per-file diff
            diff_temp_file = NamedTemporaryFile()
            diff_temp_files.append(diff_temp_file)
            t = diff_temp_file.write

            if diff.kind == 'D':
                t('\nDeleted: %s\n' % diff.base_path)
            elif diff.kind == 'C':
                t('\nCopied: %s (from r%d, %s)\n' %
                  (diff.path, diff.base_rev, diff.base_path))
            elif diff.kind == 'A':
                t('\nAdded: %s\n' % diff.path)
            else:
                # kind == 'M'
                t('\nModified: %s\n' % diff.path)

            if diff.diff_url:
                t('URL: %s\n' % diff.diff_url)

            if diff.diff:
                t(SEPARATOR + '\n')
                if diff.binary:
                    if diff.singular:
                        t('Binary file. No diff available.\n')
                    else:
                        t('Binary files. No diff available.\n')
                else:
                    for line in diff.content:
                        t(line.raw)

            diff_temp_file.flush()
            temp_file_name = diff_temp_file.name
            total_diff_size = total_diff_size + os.stat(temp_file_name)[
                stat.ST_SIZE]
            if total_diff_size > MAX_DIFF_SIZE:
                break

        if total_diff_size > MAX_DIFF_SIZE:
            w("\nNOTE:  File difference information exceeds the allowed size.\n"
              )
        else:
            for diff_temp_file in diff_temp_files:
                diff_temp_file.seek(0)
                while 1:
                    chunk = diff_temp_file.read(4096)
                    if not chunk:
                        break
                    w(chunk)
Example #34
class AbstractWrapper(object):
    '''
        abstract algorithm wrapper
    '''
    def __init__(self):
        '''
            Constructor
        '''

        root = logging.getLogger()
        ch = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter('[%(name)s][%(levelname)s] %(message)s')
        ch.setFormatter(formatter)
        root.handlers = [ch]
        self.logger = logging.getLogger("GenericWrapper")

        self.RESULT_MAPPING = {"SAT": "SUCCESS", "UNSAT": "SUCCESS"}
        self._watcher_file = None
        self._solver_file = None

        self._exit_code = None

        self._crashed_if_non_zero_status = True
        self._use_tmpdir = False

        self._subprocesses = []

        self._DEBUG = True
        if self._DEBUG:
            self.logger.setLevel(logging.DEBUG)

        self.data = None

        self._DELAY2KILL = 1

        self.parser = get_parser()
        self.args = None

    def main(self, exit: bool = True):
        '''
            main method of the generic wrapper
            1. parses cmd arguments; 
            2. calls target algorithm wrapped by runsolver
            3. parses outputs
            4. terminates
            
            Arguments
            ---------
            exit: bool
                exit with sys.exit at the end 
            
        '''

        signal.signal(signal.SIGTERM, signalHandler)
        signal.signal(signal.SIGQUIT, signalHandler)
        signal.signal(signal.SIGINT, signalHandler)

        # returns genericWrapper4AC.data.data.Data
        self.data, self.args = parse(cmd_arguments=sys.argv,
                                     parser=self.parser)
        self.data.tmp_dir, algo_temp_dir = self.set_tmpdir(
            tmp_dir=self.data.tmp_dir)

        # because of legacy reasons,
        # we still pass a dictionary to get_command_line_args
        runargs = {
            "instance": self.data.instance,
            "specifics": self.data.specifics,
            "cutoff": self.data.cutoff,
            "runlength": self.data.runlength,
            "seed": self.data.seed,
            "tmp": algo_temp_dir.name
        }

        try:
            target_cmd = self.get_command_line_args(runargs=runargs,
                                                    config=self.data.config)

            start_time = time.time()
            self.call_target(target_cmd)
            self.data.time = time.time() - start_time
            self.logger.debug("Measured wallclock time: %f" % (self.data.time))
            self.read_runsolver_output()
            self.logger.debug("Measured time by runsolver: %f" %
                              (self.data.time))

            resultMap = self.process_results(self._solver_file, {
                "exit_code": self.data.exit_code,
                "instance": self.data.instance
            })

            if 'status' in resultMap:
                self.data.status = self.RESULT_MAPPING.get(
                    resultMap['status'], resultMap['status'])
            if 'runtime' in resultMap:
                self.data.time = resultMap['runtime']
            if 'quality' in resultMap:
                self.data.cost = resultMap['quality']
            if 'cost' in resultMap:  # overrides quality
                self.data.cost = resultMap['cost']
            if 'misc' in resultMap:
                self.data.additional += "; " + resultMap['misc']

            # if quality is still set to 2**32 - 1 and we use the new format,
            # overwrite quality with runtime, since irace only looks at the
            # cost field
            if self.data.new_format and self.data.cost == 2**32 - 1:
                self.data.cost = self.data.time

            sys.exit()

        except (KeyboardInterrupt, SystemExit):
            self.cleanup()
            self.print_result_string()
            if exit:
                if self.data.exit_code:
                    sys.exit(self.data.exit_code)
                else:
                    sys.exit(0)

    def set_tmpdir(self, tmp_dir):
        '''
            set temporary directory for log files;
            if not set, try to use $TMPDIR,
            otherwise use "."
        '''

        if tmp_dir is None:
            if "TMPDIR" in os.environ:
                self._use_tmpdir = True
                tmp_dir = os.environ["TMPDIR"]
            else:
                tmp_dir = "."
        else:
            self._use_tmpdir = True

        if not os.path.isdir(tmp_dir):
            self.data.status = "ABORT"
            self.data.additional = "temp directory is missing - should have been at %s." % (
                args.tmp_dir)
            self.data.exit_code = 1
            sys.exit(1)

        # create tmp dir for target algorithm files
        algo_tmp_dir = tempfile.TemporaryDirectory(dir=tmp_dir)

        return tmp_dir, algo_tmp_dir

    def call_target(self, target_cmd: str):
        '''
            extends the target algorithm command line call with the runsolver
            and executes it

            Arguments
            --------
            target_cmd: str
                target cmd (from get_command_line_args)
                
        '''
        random_id = random.randint(0, 1000000)
        self._watcher_file = NamedTemporaryFile(suffix=".log",
                                                prefix="watcher-%d-" %
                                                (random_id),
                                                dir=self.data.tmp_dir,
                                                delete=False)
        self._solver_file = NamedTemporaryFile(suffix=".log",
                                               prefix="solver-%d-" %
                                               (random_id),
                                               dir=self.data.tmp_dir,
                                               delete=False)

        runsolver_cmd = [
            self.data.runsolver, "-M", self.data.mem_limit, "-C",
            self.data.cutoff, "-w",
            "\"%s\"" % (self._watcher_file.name), "-o",
            "\"%s\"" % (self._solver_file.name)
        ]

        runsolver_cmd = " ".join(map(str, runsolver_cmd)) + " " + target_cmd
        # for debugging
        self.logger.debug("Calling runsolver. Command-line:")
        self.logger.debug(runsolver_cmd)

        # run
        try:
            io = Popen(runsolver_cmd,
                       shell=True,
                       preexec_fn=os.setpgrp,
                       universal_newlines=True)
            self._subprocesses.append(io)
            io.wait()
            self._subprocesses.remove(io)
            if io.stdout:
                io.stdout.flush()
        except OSError:
            self.data.status = "ABORT"
            self.data.additional = "execution failed: %s" % (" ".join(
                map(str, runsolver_cmd)))
            self._exit_code = 1
            sys.exit(1)
        self._solver_file.seek(0)
        self._watcher_file.seek(0)

    def float_regex(self):
        # raw string avoids invalid \d escapes; the exponent sign is optional
        return r'[+-]?\d+(?:\.\d+)?(?:[eE][+-]?\d+)?'
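
    # Illustrative matches for float_regex (not in the original source):
    # "12.5", "-3e+07", "+4" and "0.0019" all match. The pattern is reused
    # below to extract CPU times from the watcher file.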

    def read_runsolver_output(self):
        '''
            reads self._watcher_file, 
            extracts runtime
            and returns if memout or timeout found
        '''
        self.logger.debug("Reading runsolver output from %s" %
                          (self._watcher_file.name))
        try:
            data = str(self._watcher_file.read().decode("utf8"))
        except Exception:
            # due to a rare runsolver bug,
            # the watcher file can be corrupted and fail to be read
            self.data.exit_code = 0
            self.logger.warning(
                "Failed to read runsolver's watcher file---trusting our own wallclock time measurement"
            )
            return

        if (re.search('runsolver_max_cpu_time_exceeded', data)
                or re.search('Maximum CPU time exceeded', data)):
            self.data.status = "TIMEOUT"

        if (re.search('runsolver_max_memory_limit_exceeded', data)
                or re.search('Maximum VSize exceeded', data)):
            self.data.status = "TIMEOUT"
            self.data.additional += " memory limit was exceeded"

        cpu_pattern1 = re.compile(
            '^runsolver_cputime: (%s)' % (self.float_regex()), re.MULTILINE)
        cpu_match1 = re.search(cpu_pattern1, data)

        cpu_pattern2 = re.compile(
            '^CPU time \\(s\\): (%s)' % (self.float_regex()), re.MULTILINE)
        cpu_match2 = re.search(cpu_pattern2, data)

        if (cpu_match1):
            self.data.time = float(cpu_match1.group(1))
        if (cpu_match2):
            self.data.time = float(cpu_match2.group(1))

        exitcode_pattern = re.compile('Child status: ([0-9]+)')
        exitcode_match = re.search(exitcode_pattern, data)

        if (exitcode_match):
            self.data.exit_code = int(exitcode_match.group(1))
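
    # A typical watcher-file excerpt this parser targets (illustrative):
    #     CPU time (s): 4.312
    #     Maximum CPU time exceeded
    #     Child status: 0
    # would set self.data.time = 4.312, self.data.status = "TIMEOUT"
    # and self.data.exit_code = 0.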

    def print_result_string(self):
        '''
            print result in the old ParamILS format,
            and also in the new AClib format
            if the new call string format was used
        '''

        # ensure a minimal runtime of 0.0005
        self.data.time = max(0.0005, self.data.time)

        if self.args.overwrite_cost_runtime:
            self.data.cost = self.data.time

        if self.data.new_format:
            aclib2_out_dict = {
                "status": str(self.data.status),
                "cost": float(self.data.cost),
                "runtime": float(self.data.time),
                "misc": str(self.data.additional)
            }
            print("Result of this algorithm run: %s" %
                  (json.dumps(aclib2_out_dict)))

        sys.stdout.write(
            "Result for ParamILS: %s, %.4f, %s, %s, %s" %
            (self.data.status, self.data.time, str(self.data.runlength),
             str(self.data.cost), str(self.data.seed)))

        if self.data.additional != "":
            sys.stdout.write(", %s" % (self.data.additional))
        sys.stdout.write("\n")
        sys.stdout.flush()

    def cleanup(self):
        '''
            cleanup if error occurred or external signal handled
        '''
        if (len(self._subprocesses) > 0):
            print("killing the target run!")
            try:
                for sub in self._subprocesses:
                    # sub.terminate()
                    Popen(["pkill", "-TERM", "-P",
                           str(sub.pid)],
                          universal_newlines=True)
                    self.logger.debug("Wait %d seconds ..." %
                                      (self._DELAY2KILL))
                    time.sleep(self._DELAY2KILL)
                    if sub.returncode is None:  # still running
                        sub.kill()

                self.logger.debug(
                    "done... If anything in the subprocess tree fork'd a new process group, we may not have caught everything..."
                )
                self.data.additional += "; forced to exit by signal or keyboard interrupt."
                self.data.time = self.data.cutoff
            except (OSError, KeyboardInterrupt, SystemExit):
                self.data.additional += "; forced to exit by multiple signals/interrupts."
                self.data.time = self.data.cutoff

        if self.data.status in ("ABORT", "CRASHED"):
            if self.data.exit_code:
                self.data.additional += '; Problem with run. Exit code was %d.' % (
                    self.data.exit_code)
            else:
                self.data.additional += '; Problem with run. Exit code was N/A.'

            if (self._watcher_file and self._solver_file):
                self.data.additional += '; Preserving runsolver output at %s - preserving target algorithm output at %s' % (
                    self._watcher_file.name or "<none>", self._solver_file.name
                    or "<none>")

        try:
            if self._watcher_file:
                self._watcher_file.close()
            if self._solver_file:
                self._solver_file.close()

            if self.data.status not in ["ABORT", "CRASHED"]:
                os.remove(self._watcher_file.name)
                os.remove(self._solver_file.name)
            elif self._use_tmpdir:
                shutil.copy(self._watcher_file.name, ".")
                shutil.copy(self._solver_file.name, ".")
                os.remove(self._watcher_file.name)
                os.remove(self._solver_file.name)

        except (OSError, KeyboardInterrupt, SystemExit):
            self.data.additional = "problems removing temporary cd files during cleanup."
        except AttributeError:
            pass  # in internal mode, these files are not generated

    def get_command_line_args(self, runargs, config):
        '''
        Returns the command call list containing arguments to execute the implementing subclass' solver.
        The default implementation delegates to get_command_line_args_ext. If this is not implemented, a
        NotImplementedError will be raised.

        Args:
            runargs: a map of any non-configuration arguments required for the execution of the solver.
            config: a mapping from parameter name (with prefix) to parameter value.
        Returns:
            A command call list to execute a target algorithm.
        '''
        raise NotImplementedError()

    def process_results(self, filepointer, out_args):
        '''
        Parse a results file to extract the run's status (SUCCESS/CRASHED/etc) and other optional results.

        Args:
            filepointer: a pointer to the file containing the solver execution standard out.
            out_args: a map with additional run information, e.g., the exit code of the target algorithm.
        Returns:
            A map containing the standard AClib run results. The current standard result map as of AClib 2.06 is:
            {
                "status" : <"SAT"/"UNSAT"/"TIMEOUT"/"CRASHED"/"ABORT">,
                "runtime" : <runtime of target algrithm>,
                "quality" : <a domain specific measure of the quality of the solution [optional]>,
                "misc" : <a (comma-less) string that will be associated with the run [optional]>
            }
            ATTENTION: The return values will overwrite the measured results of the runsolver (if runsolver was used). 
        '''
        raise NotImplementedError()
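
A minimal, hypothetical subclass showing the two hooks a concrete wrapper
implements. The solver binary and the output strings scanned below are
assumptions for illustration, not part of the original source.

class MyMiniSatWrapper(AbstractWrapper):

    def get_command_line_args(self, runargs, config):
        # Build the target algorithm call string (hypothetical solver binary).
        params = " ".join("%s %s" % (k, v) for k, v in config.items())
        return "minisat %s -seed %d %s" % (
            runargs["instance"], runargs["seed"], params)

    def process_results(self, filepointer, out_args):
        # Scan the solver's stdout for a status line (assumed output format).
        data = filepointer.read()
        if isinstance(data, bytes):
            data = data.decode("utf8", errors="ignore")
        if "UNSATISFIABLE" in data:
            return {"status": "UNSAT"}
        if "SATISFIABLE" in data:
            return {"status": "SAT"}
        return {"status": "CRASHED"}

# if __name__ == "__main__":
#     MyMiniSatWrapper().main()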
Example #35
0
class LazyZipOverHTTP:
    """File-like object mapped to a ZIP file over HTTP.

    This uses HTTP range requests to lazily fetch the file's content,
    which is supposed to be fed to ZipFile.  If such requests are not
    supported by the server, raise HTTPRangeRequestUnsupported
    during initialization.
    """

    def __init__(self, url, session, chunk_size=CONTENT_CHUNK_SIZE):
        # type: (str, PipSession, int) -> None
        head = session.head(url, headers=HEADERS)
        raise_for_status(head)
        assert head.status_code == 200
        self._session, self._url, self._chunk_size = session, url, chunk_size
        self._length = int(head.headers['Content-Length'])
        self._file = NamedTemporaryFile()
        self.truncate(self._length)
        self._left = []  # type: List[int]
        self._right = []  # type: List[int]
        if 'bytes' not in head.headers.get('Accept-Ranges', 'none'):
            raise HTTPRangeRequestUnsupported('range request is not supported')
        self._check_zip()

    @property
    def mode(self):
        # type: () -> str
        """Opening mode, which is always rb."""
        return 'rb'

    @property
    def name(self):
        # type: () -> str
        """Path to the underlying file."""
        return self._file.name

    def seekable(self):
        # type: () -> bool
        """Return whether random access is supported, which is True."""
        return True

    def close(self):
        # type: () -> None
        """Close the file."""
        self._file.close()

    @property
    def closed(self):
        # type: () -> bool
        """Whether the file is closed."""
        return self._file.closed

    def read(self, size=-1):
        # type: (int) -> bytes
        """Read up to size bytes from the object and return them.

        As a convenience, if size is unspecified or -1,
        all bytes until EOF are returned.  Fewer than
        size bytes may be returned if EOF is reached.
        """
        download_size = max(size, self._chunk_size)
        start, length = self.tell(), self._length
        stop = length if size < 0 else min(start+download_size, length)
        start = max(0, stop-download_size)
        self._download(start, stop-1)
        return self._file.read(size)

    def readable(self):
        # type: () -> bool
        """Return whether the file is readable, which is True."""
        return True

    def seek(self, offset, whence=0):
        # type: (int, int) -> int
        """Change stream position and return the new absolute position.

        Seek to position offset, relative to the position indicated by whence:
        * 0: Start of stream (the default).  pos should be >= 0;
        * 1: Current position - pos may be negative;
        * 2: End of stream - pos usually negative.
        """
        return self._file.seek(offset, whence)

    def tell(self):
        # type: () -> int
        """Return the current possition."""
        return self._file.tell()

    def truncate(self, size=None):
        # type: (Optional[int]) -> int
        """Resize the stream to the given size in bytes.

        If size is unspecified resize to the current position.
        The current stream position isn't changed.

        Return the new file size.
        """
        return self._file.truncate(size)

    def writable(self):
        # type: () -> bool
        """Return False."""
        return False

    def __enter__(self):
        # type: () -> LazyZipOverHTTP
        self._file.__enter__()
        return self

    def __exit__(self, *exc):
        # type: (*Any) -> Optional[bool]
        return self._file.__exit__(*exc)

    @contextmanager
    def _stay(self):
        # type: () -> Iterator[None]
        """Return a context manager keeping the position.

        At the end of the block, seek back to original position.
        """
        pos = self.tell()
        try:
            yield
        finally:
            self.seek(pos)

    def _check_zip(self):
        # type: () -> None
        """Check and download until the file is a valid ZIP."""
        end = self._length - 1
        for start in reversed(range(0, end, self._chunk_size)):
            self._download(start, end)
            with self._stay():
                try:
                    # For read-only ZIP files, ZipFile only needs
                    # methods read, seek, seekable and tell.
                    ZipFile(self)  # type: ignore
                except BadZipfile:
                    pass
                else:
                    break

    def _stream_response(self, start, end, base_headers=HEADERS):
        # type: (int, int, Dict[str, str]) -> Response
        """Return HTTP response to a range request from start to end."""
        headers = base_headers.copy()
        headers['Range'] = f'bytes={start}-{end}'
        # TODO: Get range requests to be correctly cached
        headers['Cache-Control'] = 'no-cache'
        return self._session.get(self._url, headers=headers, stream=True)

    def _merge(self, start, end, left, right):
        # type: (int, int, int, int) -> Iterator[Tuple[int, int]]
        """Return an iterator of intervals to be fetched.

        Args:
            start (int): Start of needed interval
            end (int): End of needed interval
            left (int): Index of first overlapping downloaded data
            right (int): Index after last overlapping downloaded data
        """
        lslice, rslice = self._left[left:right], self._right[left:right]
        i = start = min([start]+lslice[:1])
        end = max([end]+rslice[-1:])
        for j, k in zip(lslice, rslice):
            if j > i:
                yield i, j-1
            i = k + 1
        if i <= end:
            yield i, end
        self._left[left:right], self._right[left:right] = [start], [end]
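
    # Illustrative trace (not part of the original source): with downloaded
    # intervals _left=[0], _right=[9], a request for bytes 5..15 calls
    # _merge(5, 15, 0, 1), which yields only the missing piece (10, 15)
    # and afterwards records the union as _left=[0], _right=[15].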

    def _download(self, start, end):
        # type: (int, int) -> None
        """Download bytes from start to end inclusively."""
        with self._stay():
            left = bisect_left(self._right, start)
            right = bisect_right(self._left, end)
            for start, end in self._merge(start, end, left, right):
                response = self._stream_response(start, end)
                response.raise_for_status()
                self.seek(start)
                for chunk in response_chunks(response, self._chunk_size):
                    self._file.write(chunk)
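
A hedged usage sketch of the class above; it assumes a pip-internal
PipSession instance and a wheel URL whose server honours range requests,
so it is left as comments.

# from zipfile import ZipFile
# with LazyZipOverHTTP(url, session) as lazy:
#     with ZipFile(lazy) as wheel:
#         print(wheel.namelist())
#
# Only the byte ranges ZipFile actually touches (the central directory,
# then the requested members) are ever downloaded.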
Example #36
0
    def _sign_facturae(self, xml_string, certificate_password):
        """
        Inspired by https://github.com/pedrobaeza/l10n-spain/blob/d01d049934db55130471e284012be7c860d987eb/l10n_es_facturae/wizard/create_facturae.py
        """
        if not self.company.facturae_certificate:
            raise UserError(
                gettext('account_invoice_facturae.missing_certificate',
                        company=self.company.rec_name))

        logger = logging.getLogger('account_invoice_facturae')

        unsigned_file = NamedTemporaryFile(suffix='.xml', delete=False)
        unsigned_file.write(xml_string)
        unsigned_file.close()

        cert_file = NamedTemporaryFile(suffix='.pfx', delete=False)
        cert_file.write(self.company.facturae_certificate)
        cert_file.close()

        signed_file = NamedTemporaryFile(suffix='.xsig', delete=False)

        env = {}
        env.update(os.environ)
        libs = os.path.join(module_path(), 'java', 'lib', '*.jar')
        env['CLASSPATH'] = ':'.join(glob.glob(libs))

        # TODO: implement Signer with python
        # http://www.pyopenssl.org/en/stable/api/crypto.html#OpenSSL.crypto.load_pkcs12
        signature_command = [
            'java', '-Djava.awt.headless=true', 'com.nantic.facturae.Signer',
            '0', unsigned_file.name, signed_file.name, 'facturae31',
            cert_file.name, certificate_password
        ]
        signature_process = Popen(signature_command,
                                  stdout=PIPE,
                                  stderr=PIPE,
                                  env=env,
                                  cwd=os.path.join(module_path(), 'java'))
        output, err = signature_process.communicate()
        rc = signature_process.returncode
        if rc != 0:
            logger.warning(
                'Error %s signing invoice "%s" with command '
                '"%s <password>": %s %s', rc, self.id, signature_command[:-1],
                output, err)
            raise UserError(
                gettext('account_invoice_facturae.error_signing',
                        invoice=self.rec_name,
                        process_output=output))

        logger.info("Factura-e for invoice %s (%s) generated and signed",
                    self.rec_name, self.id)

        signed_file_content = signed_file.read()
        signed_file.close()

        os.unlink(unsigned_file.name)
        os.unlink(cert_file.name)
        os.unlink(signed_file.name)

        return signed_file_content
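
Regarding the TODO above, a pure-Python starting point could be the
cryptography package's PKCS#12 loader. This sketch only extracts the key
material; it does not replicate the XAdES signature the Java Signer produces.

from cryptography.hazmat.primitives.serialization import pkcs12

def load_facturae_certificate(pfx_bytes, password):
    # Returns (private_key, certificate, additional_certificates).
    return pkcs12.load_key_and_certificates(
        pfx_bytes, password.encode('utf8'))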
Example #37
0
class AbstractWrapper(object):
    """
    abstract solver wrapper
    """

    def __init__(self):
        """
        Constructor
        """
        # program_name = os.path.basename(sys.argv[0])
        program_version = "v%s" % __version__
        program_build_date = str(__updated__)
        program_version_message = "%%(prog)s %s (%s)" % (
            program_version,
            program_build_date,
        )
        program_shortdesc = __import__("__main__").__doc__.split("\n")[1]
        self._program_license = """%s

          Created by %s on %s.
          Copyright 2014 - AClib. All rights reserved.

          Licensed under the GPLv2
          http://www.gnu.org/licenses/gpl-2.0.html

          Distributed on an "AS IS" basis without warranties
          or conditions of any kind, either express or implied.

          Version: %s

          USAGE
        """ % (
            program_shortdesc,
            str(__authors__),
            str(__date__),
            program_version_message,
        )
        self.parser = OArgumentParser()
        self.args = None

        self.RESULT_MAPPING = {"SUCCESS": "SAT"}
        self._watcher_file = None
        self._solver_file = None

        self._instance = ""
        self._specifics = ""
        self._cutoff = 0.0
        self._runlength = 0
        self._seed = 0
        self._config_dict = {}

        self._exit_code = None

        self._runsolver = None
        self._mem_limit = 2048
        self._tmp_dir = None
        self._tmp_dir_algo = None

        self._crashed_if_non_zero_status = True

        self._subprocesses = []

        self._DEBUG = True
        self._DELAY2KILL = 2

        self._ta_status = "EXTERNALKILL"
        self._ta_runtime = 999999999.0
        self._ta_runlength = -1
        self._ta_quality = -1
        self._ta_exit_code = None
        self._ta_misc = ""

    def print_d(self, str_):
        if self._DEBUG:
            print(str_)

    def main(self, argv=None):
        """parse command line"""
        if argv is None:
            argv = sys.argv
        else:
            sys.argv.extend(argv)

        try:
            signal.signal(signal.SIGTERM, signalHandler)
            signal.signal(signal.SIGQUIT, signalHandler)
            signal.signal(signal.SIGINT, signalHandler)

            # Setup argument parser

            self.parser.add_argument(
                "--runsolver-path",
                dest="runsolver",
                default="examples/commandline/spear_qcp/target_algorithm/runsolver/runsolver",
                help="path to runsolver binary (if None, the runsolver is deactivated)",
            )
            self.parser.add_argument(
                "--temp-file-dir",
                dest="tmp_dir",
                default=None,
                help="""directory for temporary files (relative to -exec-dir in SMAC scenario).
                                             If 'NONE' use $TMPDIR if available, otherwise './'""",
            )
            self.parser.add_argument(
                "--temp-file-dir-algo",
                dest="tmp_dir_algo",
                default=False,
                type=bool,
                help="create a directory for temporary files from target algo",
            )
            self.parser.add_argument(
                "--mem-limit",
                dest="mem_limit",
                default=self._mem_limit,
                type=int,
                help="memory limit in MB",
            )
            self.parser.add_argument(
                "--internal",
                dest="internal",
                default=False,
                type=bool,
                help="skip calling an external target algorithm",
            )
            self.parser.add_argument(
                "--log",
                dest="log",
                default=True,
                type=bool,
                help='logs all runs in "target_algo_runs.json" in --temp-file-dir',
            )
            self.parser.add_argument(
                "--ext-callstring",
                dest="ext_callstring",
                default=None,
                help="""Command to get call string via external program;
                                             your programm gets a file with
                                             first line: instance name,
                                             second line: seed
                                             further lines: parameter name, parameter value;
                                             output: one line with callstring for target algorithm""",
            )
            self.parser.add_argument(
                "--ext-parsing",
                dest="ext_parsing",
                default=None,
                help="""Command to use an external program to parse the output of your target algorihm;
                                             only paramter: name of output file;
                                             output of your progam:
                                             status: SAT|UNSAT|TIMEOUT|CRASHED
                                             quality: <integer>
                                             misc: <string>""",
            )
            self.parser.add_argument(
                "--help", dest="show_help", default=False, type=bool, help="shows help"
            )

            # Process arguments
            self.args, target_args = self.parser.parse_cmd(sys.argv[1:])
            args = self.args

            if args.show_help:
                self.parser.print_help()
                self._ta_status = "ABORT"
                self._ta_misc = "help was requested..."
                self._exit_code = 1
                sys.exit(1)

            if (
                args.runsolver != "None"
                and not os.path.isfile(args.runsolver)
                and not args.internal
            ):
                self._ta_status = "ABORT"
                self._ta_misc = "runsolver is missing - should have been at %s." % (
                    args.runsolver
                )
                self._exit_code = 1
                sys.exit(1)
            else:
                self._runsolver = args.runsolver
                self._mem_limit = args.mem_limit

            if args.tmp_dir is None:
                if "TMPDIR" in os.environ:
                    args.tmp_dir = os.environ["TMPDIR"]
                else:
                    args.tmp_dir = "."

            if not os.path.isdir(args.tmp_dir):
                self._ta_status = "ABORT"
                self._ta_misc = (
                    "temp directory is missing - should have been at %s."
                    % (args.tmp_dir)
                )
                self._exit_code = 1
                sys.exit(1)
            else:
                self._tmp_dir = args.tmp_dir

            if len(target_args) < 5:
                self._ta_status = "ABORT"
                self._ta_misc = f"""some required TA parameters (instance, specifics, cutoff, runlength, seed) missing
                                    - was {' '.join(target_args)}"""
                self._exit_code = 1
                sys.exit(1)

            self._config_dict = self.build_parameter_dict(target_args)

            if args.tmp_dir_algo:
                try:
                    self._tmp_dir_algo = mkdtemp(dir="/tmp/")
                except OSError:
                    sys.stderr.write("Creating directory for temporary files failed")
                    pass

            runargs = {
                "instance": self._instance,
                "specifics": self._specifics,
                "cutoff": self._cutoff,
                "runlength": self._runlength,
                "seed": self._seed,
                "tmp": self._tmp_dir_algo,
            }

            if args.ext_callstring:
                target_cmd = self.get_command_line_args_ext(
                    runargs=runargs,
                    config=self._config_dict,
                    ext_call=args.ext_callstring,
                )
            else:
                target_cmd = self.get_command_line_args(
                    runargs=runargs, config=self._config_dict
                )

            target_cmd = target_cmd.split(" ")
            target_cmd = [x for x in target_cmd if x != ""]

            if not args.internal:
                self.call_target(target_cmd)
                self.read_runsolver_output()

            try:
                if "core" in os.listdir("."):
                    os.remove("core")
            except Exception:
                traceback.print_exc()

            if args.ext_parsing:
                resultMap = self.process_results_ext(
                    self._solver_file,
                    {"exit_code": self._ta_exit_code},
                    ext_call=args.ext_parsing,
                )
            else:
                resultMap = self.process_results(
                    self._solver_file, {"exit_code": self._ta_exit_code}
                )

            if "status" in resultMap:
                self._ta_status = self.RESULT_MAPPING.get(
                    resultMap["status"], resultMap["status"]
                )
            if "runtime" in resultMap:
                self._ta_runtime = resultMap["runtime"]
            if "quality" in resultMap:
                self._ta_quality = resultMap["quality"]
            if "misc" in resultMap and not self._ta_misc:
                self._ta_misc = resultMap["misc"]
            if "misc" in resultMap and self._ta_misc:
                self._ta_misc += " - " + resultMap["misc"]

            # if still no status was determined, something went wrong and output files should be kept
            if self._ta_status == "EXTERNALKILL":
                self._ta_status = "CRASHED"
            sys.exit()
        except (KeyboardInterrupt, SystemExit):
            self.cleanup()
            self.print_result_string()
            # traceback.print_exc()
            if self._ta_exit_code:
                sys.exit(self._ta_exit_code)
            elif self._exit_code:
                sys.exit(self._exit_code)
            else:
                sys.exit(0)

    def build_parameter_dict(self, arg_list):
        """
        Reads all arguments which were not parsed by ArgumentParser,
        extracts all meta information
        and builds a mapping: parameter name -> parameter value
        Format Assumption: <instance> <specifics> <runtime cutoff> <runlength> <seed> <solver parameters>
        Args:
            list of all options not parsed by ArgumentParser
        """
        self._instance = arg_list[0]
        self._specifics = arg_list[1]
        self._cutoff = int(
            float(arg_list[2]) + 1
        )  # runsolver only rounds down to integer
        self._ta_runtime = self._cutoff
        self._runlength = int(arg_list[3])
        self._seed = int(arg_list[4])

        params = arg_list[5:]
        if len(params) % 2 != 0:
            self._ta_status = "ABORT"
            self._ta_misc = (
                "target algorithm parameter list MUST have even length, found %d arguments."
                % (len(params))
            )
            self.print_d(" ".join(params))
            self._exit_code = 1
            sys.exit(1)

        return dict(
            (name, value.strip("'")) for name, value in zip(params[::2], params[1::2])
        )
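
    # Illustrative call (not in the original source):
    #   build_parameter_dict(["inst.cnf", "spec", "4.9", "100", "42",
    #                         "-alpha", "'0.7'", "-beta", "2"])
    # sets self._cutoff to 5 (runsolver only rounds down, so we round up)
    # and returns {"-alpha": "0.7", "-beta": "2"}.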

    def call_target(self, target_cmd):
        """
        extends the target algorithm command line call with the runsolver
        and executes it
        Args:
            list of target cmd (from getCommandLineArgs)
        """
        logging.warning("genericWrapper: falling back to non-deterministic behaviour")
        random_id = random.randint(0, 1000000)
        self._watcher_file = NamedTemporaryFile(
            suffix=".log",
            prefix="watcher-%d-" % (random_id),
            dir=self._tmp_dir,
            delete=False,
        )
        self._solver_file = NamedTemporaryFile(
            suffix=".log",
            prefix="solver-%d-" % (random_id),
            dir=self._tmp_dir,
            delete=False,
        )

        runsolver_cmd = []
        if self._runsolver != "None":
            runsolver_cmd = [
                self._runsolver,
                "-M",
                self._mem_limit,
                "-C",
                self._cutoff,
                "-w",
                self._watcher_file.name,
                "-o",
                self._solver_file.name,
            ]

        runsolver_cmd.extend(target_cmd)
        # for debugging
        self.print_d("Calling runsolver. Command-line:")
        self.print_d(" ".join(map(str, runsolver_cmd)))

        # run
        try:
            if self._runsolver != "None":
                # if there are quotes in the call, we cannot split it into individual list elements.
                # We have to call it via shell as a string; problematic solver: SparrowToRiss
                if any('"' in str(c) for c in runsolver_cmd):
                    runsolver_cmd = " ".join(map(str, runsolver_cmd))
                    io = Popen(
                        runsolver_cmd,
                        shell=True,
                        preexec_fn=os.setpgrp,
                        universal_newlines=True,
                    )
                else:
                    io = Popen(
                        map(str, runsolver_cmd),
                        shell=False,
                        preexec_fn=os.setpgrp,
                        universal_newlines=True,
                    )
            else:
                io = Popen(
                    map(str, runsolver_cmd),
                    stdout=self._solver_file,
                    shell=False,
                    preexec_fn=os.setpgrp,
                    universal_newlines=True,
                )
            self._subprocesses.append(io)
            io.wait()
            self._subprocesses.remove(io)
            if io.stdout:
                io.stdout.flush()
        except OSError:
            self._ta_status = "ABORT"
            self._ta_misc = "execution failed: %s" % (" ".join(map(str, runsolver_cmd)))
            self._exit_code = 1
            sys.exit(1)

        self._solver_file.seek(0)

    def float_regex(self):
        return "[+-]?\d+(?:\.\d+)?(?:[eE][+-]\d+)?"

    def read_runsolver_output(self):
        """
        reads self._watcher_file,
        extracts runtime
        and returns if memout or timeout found
        """
        if self._runsolver == "None":
            self._ta_exit_code = 0
            return

        self.print_d("Reading runsolver output from %s" % (self._watcher_file.name))
        data = self._watcher_file.read().decode("utf8", errors="ignore")

        if re.search("runsolver_max_cpu_time_exceeded", data) or re.search(
            "Maximum CPU time exceeded", data
        ):
            self._ta_status = "TIMEOUT"

        if re.search("runsolver_max_memory_limit_exceeded", data) or re.search(
            "Maximum VSize exceeded", data
        ):
            self._ta_status = "TIMEOUT"
            self._ta_misc = "memory limit was exceeded"

        cpu_pattern1 = re.compile("runsolver_cputime: (%s)" % (self.float_regex()))
        cpu_match1 = re.search(cpu_pattern1, data)

        cpu_pattern2 = re.compile("CPU time \\(s\\): (%s)" % (self.float_regex()))
        cpu_match2 = re.search(cpu_pattern2, data)

        if cpu_match1:
            self._ta_runtime = float(cpu_match1.group(1))
        if cpu_match2:
            self._ta_runtime = float(cpu_match2.group(1))

        exitcode_pattern = re.compile("Child status: ([0-9]+)")
        exitcode_match = re.search(exitcode_pattern, data)

        if exitcode_match:
            self._ta_exit_code = int(exitcode_match.group(1))

    def print_result_string(self):

        if self.args and self.args.log:
            # if not os.path.isfile("target_algo_runs.csv"):
            #    with open("target_algo_runs.csv", "a") as fp:
            #        fp.write("instance,seed,status,performance,config,[misc]\n")
            with open("target_algo_runs.json", "a") as fp:
                out_dict = {
                    "instance": self._instance,
                    "seed": self._seed,
                    "status": self._ta_status,
                    "time": self._ta_runtime,
                    "config": self._config_dict,
                    "misc": self._ta_misc,
                }
                json.dump(out_dict, fp)
                fp.write("\n")
                fp.flush()

        sys.stdout.write(
            "Result for ParamILS: %s, %s, %s, %s, %s"
            % (
                self._ta_status,
                str(self._ta_runtime),
                str(self._ta_runlength),
                str(self._ta_quality),
                str(self._seed),
            )
        )
        if len(self._ta_misc) > 0:
            sys.stdout.write(", %s" % (self._ta_misc))
        print("")
        sys.stdout.flush()

    def cleanup(self):
        """
        cleanup if error occurred or external signal handled
        """
        if len(self._subprocesses) > 0:
            print("killing the target run!")
            try:
                for sub in self._subprocesses:
                    # sub.terminate()
                    Popen(["pkill", "-TERM", "-P", str(sub.pid)])
                    self.print_d("Wait %d seconds ..." % (self._DELAY2KILL))
                    time.sleep(self._DELAY2KILL)
                    if sub.returncode is None:  # still running
                        sub.kill()

                self.print_d(
                    "done... If anything in the subprocess tree fork'd a new process group"
                    ", we may not have caught everything..."
                )
                self._ta_misc = "forced to exit by signal or keyboard interrupt."
                self._ta_runtime = self._cutoff
            except (OSError, KeyboardInterrupt, SystemExit):
                self._ta_misc = "forced to exit by multiple signals/interrupts."
                self._ta_runtime = self._cutoff

        if self._ta_status == "ABORT" or self._ta_status == "CRASHED":
            if len(self._ta_misc) == 0:
                if self._ta_exit_code:
                    self._ta_misc = "Problem with run. Exit code was %d." % (
                        self._ta_exit_code
                    )
                else:
                    self._ta_misc = "Problem with run. Exit code was N/A."

            if self._watcher_file and self._solver_file:
                self._ta_misc = f"""{self._ta_misc};
                                    Preserving runsolver output at {self._watcher_file.name or "None"}
                                    Preserving target algorithm output at {self._solver_file.name or "None"}
                                 """

        try:
            if self._watcher_file:
                self._watcher_file.close()
            if self._solver_file:
                self._solver_file.close()

            if self._ta_status != "ABORT" and self._ta_status != "CRASHED":
                os.remove(self._watcher_file.name)
                os.remove(self._solver_file.name)

            if self._tmp_dir_algo:
                shutil.rmtree(self._tmp_dir_algo)

        except (OSError, KeyboardInterrupt, SystemExit):
            self._ta_misc = "problems removing temporary files during cleanup."
        except AttributeError:
            pass  # in internal mode, these files are not generated

        if self._ta_status == "EXTERNALKILL":
            self._ta_status = "CRASHED"
            self._exit_code = 3

    def get_command_line_args(self, runargs, config):
        """
        Returns the command call list containing arguments to execute the implementing subclass' solver.
        The default implementation delegates to get_command_line_args_ext. If this is not implemented, a
        NotImplementedError will be raised.

        Args:
            runargs: a map of any non-configuration arguments required for the execution of the solver.
            config: a mapping from parameter name (with prefix) to parameter value.
        Returns:
            A command call list to execute a target algorithm.
        """
        raise NotImplementedError()

    def get_command_line_args_ext(self, runargs, config, ext_call):
        """
        When the target algorithm call string is produced by something other than Python,
        override this method to return a command call list that executes whatever produces the command line.

        Args:
            runargs: a map of any non-configuration arguments required for the execution of the solver.
            config: a mapping from parameter name (with prefix) to parameter value.
            ext_call: string to call external program to get callstring of target algorithm
        Returns:
            A command call list to execute the command producing a single line of output containing the solver command
            string
        """
        callstring_in = NamedTemporaryFile(
            mode="w", suffix=".csv", prefix="callstring", dir=self._tmp_dir,
            delete=False
        )
        callstring_in.write("%s\n" % (runargs["instance"]))
        callstring_in.write("%d\n" % (runargs["seed"]))
        for name, value in config.items():
            callstring_in.write("%s,%s\n" % (name, value))
        callstring_in.flush()

        cmd = ext_call.split(" ")
        cmd.append(callstring_in.name)
        self.print_d(" ".join(cmd))
        try:
            io = Popen(
                cmd,
                shell=False,
                preexec_fn=os.setpgrp,
                stdout=PIPE,
                universal_newlines=True,
            )
            self._subprocesses.append(io)
            out_, _ = io.communicate()
            self._subprocesses.remove(io)
        except OSError:
            self._ta_misc = "failed to run external program for output parsing : %s" % (
                " ".join(cmd)
            )
            self._ta_runtime = self._cutoff
            self._exit_code = 2
            sys.exit(2)
        if not out_:
            self._ta_misc = (
                "external program for output parsing yielded empty output: %s"
                % (" ".join(cmd))
            )
            self._ta_runtime = self._cutoff
            self._exit_code = 2
            sys.exit(2)
        callstring_in.close()
        os.remove(callstring_in.name)
        self._instance = runargs["instance"]
        return out_.strip("\n\r\b")
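
    # Example callstring file handed to the external program (illustrative,
    # matching the writes above):
    #     inst.cnf
    #     42
    #     -alpha,0.7
    #     -beta,2
    # The external program must answer with a single output line: the full
    # target algorithm call string.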

    def process_results(self, filepointer, out_args):
        """
        Parse a results file to extract the run's status (SUCCESS/CRASHED/etc) and other optional results.

        Args:
            filepointer: a pointer to the file containing the solver execution standard out.
            out_args: a map with additional run information, e.g., the exit code of the target algorithm.
        Returns:
            A map containing the standard AClib run results. The current standard result map as of AClib 2.06 is:
            {
                "status" : <"SAT"/"UNSAT"/"TIMEOUT"/"CRASHED"/"ABORT">,
                "runtime" : <runtime of target algrithm>,
                "quality" : <a domain specific measure of the quality of the solution [optional]>,
                "misc" : <a (comma-less) string that will be associated with the run [optional]>
            }
            ATTENTION: The return values will overwrite the measured results of the runsolver (if runsolver was used).
        """
        raise NotImplementedError()

    def process_results_ext(self, filepointer, out_args, ext_call):
        """
        Args:
            filepointer: a pointer to the file containing the solver execution standard out.
            out_args: a map with additional run information, e.g., the exit code of the target algorithm.
        Returns:
            A map containing the standard AClib run results. The current standard result map as of AClib 2.06 is:
            {
                "status" : <"SAT"/"UNSAT"/"TIMEOUT"/"CRASHED"/"ABORT">,
                "quality" : <a domain specific measure of the quality of the solution [optional]>,
                "misc" : <a (comma-less) string that will be associated with the run [optional]>
            }
        """

        cmd = ext_call.split(" ")
        cmd.append(filepointer.name)
        self.print_d(" ".join(cmd))
        try:
            io = Popen(
                cmd,
                shell=False,
                preexec_fn=os.setpgrp,
                stdout=PIPE,
                universal_newlines=True,
            )
            self._subprocesses.append(io)
            out_, _ = io.communicate()
            self._subprocesses.remove(io)
        except OSError:
            self._ta_misc = "failed to run external program for output parsing"
            self._ta_runtime = self._cutoff
            self._exit_code = 2
            sys.exit(2)

        result_map = {}
        for line in out_.split("\n"):
            if line.startswith("status:"):
                result_map["status"] = line.split(":")[1].strip(" ")
            elif line.startswith("quality:"):
                result_map["quality"] = line.split(":")[1].strip(" ")
            elif line.startswith("misc:"):
                result_map["misc"] = line.split(":")[1]

        return result_map
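
For reference, a minimal external parser satisfying the status:/quality:/misc:
contract read by process_results_ext might look like this. The solver output
strings scanned here are assumptions for illustration.

#!/usr/bin/env python
import sys

if __name__ == "__main__":
    with open(sys.argv[1]) as fp:
        out = fp.read()
    if "UNSATISFIABLE" in out:
        print("status: UNSAT")
    elif "SATISFIABLE" in out:
        print("status: SAT")
    else:
        print("status: TIMEOUT")
    print("misc: parsed by external script")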
Example #38
0
class Pyforce(object):
    def __init__(self, *args):
        """
        Create an iterator over results of a p4 call. The args here are p4
        CLI arguments. See p4 help for more information.
        """
        self.args = [str(arg) for arg in args]
        from subprocess import Popen, PIPE
        from tempfile import NamedTemporaryFile
        self.stderr = NamedTemporaryFile()
        if os.environ.get('DEBUG', ''):
            print('## p4', *self.args, file=sys.stderr)
        try:
            timeout = abs(int(os.environ['O4_P4_TIMEOUT']))
        except (KeyError, ValueError):
            timeout = 120
        self.pope = Popen(['p4', f'-vnet.maxwait={timeout}', '-G'] + self.args,
                          stdout=PIPE,
                          stderr=self.stderr)
        self.transform = Pyforce.to_str
        self.errors = []

    def __iter__(self):
        return self

    def __next__(self):
        """
        Returns the next p4 result object from the command. If the p4
        command experiences a timeout, raise P4TimeoutError. All other
        errors are accumulated during the run and raised as arguments
        on a single P4Error object after the p4 process has been
        exhausted.

        Certain errors are not really errors; it's just p4 being
        silly, such as the error "No files to reconcile" when you
        reconcile files that already have the correct content. Such records
        have their 'code' member reset to a different value and
        returned.  Some may also produce a '#o4pass'-prefixed line
        on stdout, which, in a complete run, will make their way to
        "o4 fail" and be reported.

        The returned record will be sent on to the next item process of
        the o4 pipeline, unless the 'code' member is 'pass'.
        Records with code 'error' will be saved up and returned after
        the iteration is done via a P4Error exception.
        """
        import marshal
        try:
            while True:
                res = marshal.load(self.pope.stdout)
                data = res.get(b'data')
                if res.get(b'code') == b'info' and data:
                    if data.startswith(b'Diff chunks') and not data.endswith(
                            b'+ 0 conflicting'):
                        # This implies a resolution, but there's no information.
                        # A separate record (resolve skipped) identifies the
                        # file if there are conflicts.
                        pass
                    elif (b"can't move (already opened for edit)" in data
                          or b"is opened for add and can't be replaced" in data
                          or
                          # b"is opened and not being changed" in res[b'data'] or
                          # b"must resolve" in res[b'data'] or
                          b"- resolve skipped" in data):
                        res[b'code'] = b'mute'
                        print(
                            f'#o4pass-err#{data.decode("utf-8",errors="ignore")}'
                        )
                if res.get(b'code') != b'error':
                    return self.transform(res)
                if data:
                    # For messages that aren't errors at all, change their code and return
                    if (b'file(s) up-to-date' in data
                            or b'no file(s) to reconcile' in data
                            or b'no file(s) to resolve' in data
                            or b'no file(s) to unshelve' in data
                            or b'file(s) not on client' in data
                            or b'No shelved files in changelist to delete'
                            in data):
                        res[b'code'] = b'stat'
                    elif (b'no file(s) at that changelist number' in data or
                          b'no revision(s) above those at that changelist number'
                          in data):
                        print(
                            f'#o4pass-info#{data.decode("utf-8",errors="ignore")}'
                        )
                        res[b'code'] = b'mute'
                    elif b'must refer to client' in data:
                        res[b'data'] += b'This is likely due to a bad Root in your clientspec.'
                    # Other specific errors we pass along
                    elif b'clobber writable file' in data:
                        res[b'code'] = b'error'

                    # {b'code': b'error', b'data': b'SSL receive failed.\nread: Connection timed out: Connection timed out\n', b'severity': 3, b'generic': 38}
                    # 'data': 'TCP receive exceeded maximum configured duration of 60 seconds.\n', 'severity': 3, 'generic': 38
                    # This seems like it could be 100 different messages;
                    # TODO: find out what 'generic' means.
                    elif b'Connection timed out' in data or b'TCP receive exceeded' in data:
                        raise P4TimeoutError(res, self.args)
                    # At this point, res must either be complete or have
                    # code == 'mute'.
                    if res[b'code'] != b'error':
                        return self.transform(res)
                # Allow operation to complete and report errors after
                self.errors.append(Pyforce.to_str(res))
        except EOFError:
            pass
        if self.stderr.tell():
            self.stderr.seek(0)
            err = self.stderr.read().decode(sys.stdout.encoding)
            if 'timed out' in err:
                raise P4TimeoutError(err)
            nl = '\n'
            print(f'#o4pass-err#{err.replace(nl, " ")}')
        if self.errors:
            raise P4Error(*self.errors)
        raise StopIteration()

    def __del__(self):
        if hasattr(self, 'pope'):
            try:
                self.pope.kill()
                self.pope.wait()
            except OSError:
                pass

    @staticmethod
    def to_str(r):
        """
        Converts a dictionary of bytes key-values to strings using stdout
        encoding.
        """
        def dec(a):
            if hasattr(a, 'decode'):
                return a.decode(sys.stdout.encoding, errors='ignore')
            return a

        return {dec(k): dec(v) for k, v in r.items()}

    @staticmethod
    def unescape(path):
        """Reverts p4 path escaping."""
        return path.replace('%40',
                            '@').replace('%23',
                                         '#').replace('%2a',
                                                      '*').replace('%25', '%')

    @staticmethod
    def escape(path):
        """Escapes a path like perforce would."""
        return path.replace('%', '%25').replace('#', '%23').replace(
            '*', '%2a').replace('@', '%40')
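
    # Round trip (follows directly from the two methods above):
    #   Pyforce.escape('//depot/a@1#2')     -> '//depot/a%401%232'
    #   Pyforce.unescape('//depot/a%401')   -> '//depot/a@1'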

    @staticmethod
    def checksum(fname, fileSize):
        """
        Probably the only complete resource to how perforce computes a
        checksum. Fundamentally it's a MD5 checksum of the file's
        content. However utf16 files must first be converted to utf8,
        and if the file system file size is 3 bytes larger than the
        stated file size, then if those three bytes are the utf8 BOM,
        they must not be included in the checksum.

        Hence the fileSize argument can be an integer, or in the case
        of utf8 files <int>/utf8, and in the utf16 case <int>/utf16.
        """
        import hashlib
        hash_md5 = hashlib.md5()
        headType = ''
        if not isinstance(fileSize, int):
            if '/' in fileSize:
                fileSize, headType = fileSize.split('/', 1)
            fileSize = int(fileSize)
        try:
            with open(fname, 'rb') as f:
                if headType.startswith('utf16'):
                    # FIXME: Don't overflow and die if there is a giant utf16 file
                    u = f.read().decode('utf16')
                    hash_md5.update(u.encode('utf8'))
                else:
                    if headType.startswith('utf8'):
                        fs = os.fstat(f.fileno())
                        if fs.st_size > fileSize:
                            # Skip utf8 BOM when computing digest, if filesize differs from st_size
                            bom = f.read(3)
                            if bom != b'\xef\xbb\xbf':
                                f.seek(0)
                    for chunk in iter(lambda: f.read(1024 * 1024), b''):
                        hash_md5.update(chunk)
            return hash_md5.hexdigest().upper()
        except (FileNotFoundError, IsADirectoryError):
            return None
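
A usage sketch for the iterator above; it needs a working p4 client and a
configured workspace, so it is left as comments.

# for record in Pyforce('sync', '//depot/project/...@head'):
#     print(record.get('depotFile'), record.get('action'))
#
# Non-fatal errors accumulate and surface as one P4Error after the p4
# process is exhausted; timeouts raise P4TimeoutError immediately.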
Example #39
0
options = parse_config('NaiveLogger.yml')

logging_file = NamedTemporaryFile()
logging_queue = queue.Queue()
options['log']['filename'] = logging_file.name

logging_thread = LoggingThread(logging_queue, **options['log'])

if __name__ == "__main__":
    logging_thread.start()

    # Note that logging handling is in a separate thread, so that printout may
    # be out of order.
    logger.info("Test message with level info.")
    logger.warning("Test message with level WARNING.")
    logger.critical("Test message with level CRITICAL.")
    print(
        'This message should be printed out immediately by the master thread.')
    logger.critical(
        "Test message with level CRITICAL, should be suppressed by email handler."
    )

    sleep(65)
    logger.critical("Test message with level CRITICAL, reprise.")

    logging_thread.stop()

    print('Now inspect the full logging file:')
    cprint(logging_file.read().decode("utf-8").strip('\n'))
    logging_file.close()
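
For reference, a LoggingThread along these lines could be built on
logging.handlers.QueueListener. A minimal sketch under that assumption
follows; the real class used in this example may be implemented differently.

import logging
import logging.handlers

class MinimalLoggingThread:
    def __init__(self, log_queue, filename, level=logging.INFO, **_ignored):
        handler = logging.FileHandler(filename)
        handler.setLevel(level)
        self._listener = logging.handlers.QueueListener(log_queue, handler)
        # Route every record through the queue to the background listener.
        logging.getLogger().addHandler(logging.handlers.QueueHandler(log_queue))

    def start(self):
        self._listener.start()

    def stop(self):
        self._listener.stop()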
Example #40
0
def write(self, handle):
    """How to write a CTD NetCDF file."""
    UNKNOWN = 'UNKNOWN'
    UNSPECIFIED_UNITS = 'unspecified'
    STRLEN = 40

    # Prepare file handles and datetime information
    tmp = NamedTemporaryFile()
    strdate = str(self.globals['DATE'])
    strtime = str(self.globals['TIME'])
    isowocedate = datetime.strptime(strdate + strtime, '%Y%m%d%H%M')
    nc_file = nc.Dataset(tmp.name, 'w', format='NETCDF3_CLASSIC')

    # Define dimensions variables
    makeDim = nc_file.createDimension
    makeDim('time', 1)
    makeDim('depth', len(self))
    makeDim('latitude', 1)
    makeDim('longitude', 1)
    makeDim('string_dimension', STRLEN)

    # Define dataset attributes
    nc_file.EXPOCODE = self.globals['EXPOCODE']
    nc_file.Conventions = 'COARDS-DEV/WOCE'
    nc_file.WOCE_VERSION = '3.0'
    nc_file.WOCE_ID = self.globals['SECT_ID'] if 'SECT_ID' in self.globals \
            else UNKNOWN
    nc_file.DATA_TYPE = 'WOCE CTD'
    nc_file.STATION_NUMBER = self.globals['STNNBR'] or UNKNOWN
    nc_file.CAST_NUMBER = self.globals['CASTNO'] or UNKNOWN
    nc_file.BOTTOM_DEPTH_METERS = nc.simplest_str(float(self.globals['DEPTH']))
    nc_file.Creation_Time = strftime_iso(datetime.utcnow())
    nc_file.ORIGINAL_HEADER = self.globals['header']
    nc_file.WOCE_CTD_FLAG_DESCRIPTION = woce.CTD_FLAG_DESCRIPTION

    def MISSING_COORD_VAR(param):
        return ("expected global coordinate variable %s "
                "but not found (XXX)") % param

    # Coordinate variables
    if 'TIME' not in self.globals:
        raise AttributeError(MISSING_COORD_VAR('TIME'))
    var_time = nc_file.createVariable('time', 'i', ('time', ))
    var_time.long_name = 'time'
    var_time.units = 'minutes since %s' % strftime_iso(nc.EPOCH)
    var_time.data_min = nc.minutes_since_epoch(isowocedate)
    var_time.data_max = var_time.data_min
    var_time.C_format = '%10d'
    var_time[:] = var_time.data_min

    if 'LATITUDE' not in self.globals:
        raise AttributeError(MISSING_COORD_VAR('LATITUDE'))
    var_latitude = nc_file.createVariable('latitude', 'f', ('latitude',))
    var_latitude.long_name = 'latitude'
    var_latitude.units = 'degrees_N'
    var_latitude.data_min = float(self.globals['LATITUDE'])
    var_latitude.data_max = var_latitude.data_min
    var_latitude.C_format = '%9.4f'
    var_latitude[:] = var_latitude.data_min

    if 'LONGITUDE' not in self.globals:
        raise AttributeError(MISSING_COORD_VAR('LONGITUDE'))
    var_longitude = nc_file.createVariable('longitude', 'f', ('longitude',))
    var_longitude.long_name = 'longitude'
    var_longitude.units = 'degrees_E'
    var_longitude.data_min = float(self.globals['LONGITUDE'])
    var_longitude.data_max = var_longitude.data_min
    var_longitude.C_format = '%9.4f'
    var_longitude[:] = var_longitude.data_min

    woce_datetime = woce.strftime_woce_date_time(isowocedate)

    if 'DATE' not in self.globals:
        raise AttributeError(MISSING_COORD_VAR('DATE'))
    var_woce_date = nc_file.createVariable('woce_date', 'i', ('time',))
    var_woce_date.long_name = 'WOCE date'
    var_woce_date.units = 'yyyymmdd UTC'
    var_woce_date.data_min = int(woce_datetime[0] or -9)
    var_woce_date.data_max = var_woce_date.data_min
    var_woce_date.C_format = '%8d'
    var_woce_date[:] = var_woce_date.data_min

    var_woce_time = nc_file.createVariable('woce_time', 'i2', ('time',))
    var_woce_time.long_name = 'WOCE time'
    var_woce_time.units = 'hhmm UTC'
    var_woce_time.data_min = int(woce_datetime[1] or -9)
    var_woce_time.data_max = var_woce_time.data_min
    var_woce_time.C_format = '%4d'
    var_woce_time[:] = var_woce_time.data_min

    var_station = nc_file.createVariable('station', 'c', ('string_dimension', ))
    var_station.long_name = 'STATION'
    var_station.units = UNSPECIFIED_UNITS
    var_station.C_format = '%s'
    var_station[:] = nc.simplest_str(self.globals['STNNBR']).ljust(len(var_station))
    
    var_cast = nc_file.createVariable('cast', 'c', ('string_dimension', ))
    var_cast.long_name = 'CAST'
    var_cast.units = UNSPECIFIED_UNITS
    var_cast.C_format = '%s'
    var_cast[:] = nc.simplest_str(self.globals['CASTNO']).ljust(len(var_cast))

    # Create data variables and fill them
    for param, column in self.columns.iteritems():
        parameter = column.parameter
        parameter_name = parameter.mnemonic_woce()
        if parameter_name in nc.STATIC_PARAMETERS_PER_CAST:
            continue
        var = nc_file.createVariable(
                  parameter.full_name.encode('ascii', 'replace'), 'f8',
                  ('time', 'depth', 'latitude', 'longitude', ))
        var.long_name = parameter.full_name.encode('ascii', 'replace')
        var.units = parameter.units.name.encode('ascii', 'replace') if \
                        parameter.units else UNSPECIFIED_UNITS
        compact_column = filter(None, column)
        if compact_column:
            var.data_min = min(compact_column)
            var.data_max = max(compact_column)
        else:
            var.data_min = float('-inf')
            var.data_max = float('inf')
        var.C_format = parameter.format.encode('ascii', 'replace')
        print parameter_name, len(var), len(column.values)
        var[:] = column.values

        if column.is_flagged_woce():
            vfw = nc_file.createVariable(parameter.name + nc.QC_SUFFIX, 'i2',
                    ('time', 'depth', 'latitude', 'longitude', ))
            vfw.long_name = parameter.name + nc.QC_SUFFIX
            vfw[:] = column.flags_woce

    # Transfer finished NetCDF file to provided handle
    nc_file.close()
    handle.write(tmp.read())
    tmp.close()
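The skeleton of the pattern used here, shown with plain file I/O standing in for the NetCDF library (a sketch, not this module's API): the library writes the file by name, and the still-open NamedTemporaryFile handle is then used to stream the finished bytes back to the caller's handle.

from tempfile import NamedTemporaryFile

def write_payload(handle):
    tmp = NamedTemporaryFile()
    # Stand-in for nc.Dataset(tmp.name, 'w'); re-opening by name
    # does not work on Windows.
    with open(tmp.name, 'wb') as out:
        out.write(b'payload\n')
    tmp.seek(0)              # rewind defensively before streaming
    handle.write(tmp.read())
    tmp.close()              # closing deletes the temporary file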
Example #41
def crossGLPK(X, U, optimize="MIN", verbose=False):
    """
    based on the given conditions, 
    1) generate a MathProg file
    2) solve it
    3) parse result
    """
    assert optimize[:3] in ("MIN", "MAX"), optimize

    if verbose:
        print "Crossing:", sum(X.values()), X
        print "Non-Crossing:", sum(U.values()), U

    from tempfile import NamedTemporaryFile
    import glpk  # based on python-glpk example.py
    import string

    # vars are unique, but the objective function is not
    varS = {}
    varT = {}
    varX = {}
    varU = {}
    stX = {}
    stU = {}
    objs = []
    # s: node in src
    # t: node in trg
    # x: crossing variable s*t
    # u: non-crossing variable s*t
    for ((s, t), w) in X.iteritems():
        objs.append("(1 - s_%d - t_%d + 2 * x_%d_%d) * %d" % (s, t, s, t, w))
        varS["var s_%d binary >= 0;" % s] = ''
        varT["var t_%d binary >= 0;" % t] = ''
        varX["var x_%d_%d binary >= 0;" % (s, t)] = ''
        if optimize[:3] == "MIN":
            stX["s.t. c_x_%d_%d: x_%d_%d >=  s_%d + t_%d  - 1;" %
                (s, t, s, t, s, t)] = ''
        elif optimize[:3] == "MAX":
            stX["s.t. c_x_%d_%d: x_%d_%d <= (s_%d + t_%d) / 2;" %
                (s, t, s, t, s, t)] = ''

    for ((s, t), w) in U.iteritems():
        objs.append("(    s_%d + t_%d - 2 * u_%d_%d) * %d" % (s, t, s, t, w))
        varS["var s_%d binary >= 0;" % s] = ''
        varT["var t_%d binary >= 0;" % t] = ''
        varU["var u_%d_%d binary >= 0;" % (s, t)] = ''
        if optimize[:3] == "MIN":
            stU["s.t. c_u_%d_%d: u_%d_%d <= (s_%d + t_%d) / 2;" %
                (s, t, s, t, s, t)] = ''
        elif optimize[:3] == "MAX":
            stU["s.t. c_u_%d_%d: u_%d_%d >=  s_%d + t_%d  - 1;" %
                (s, t, s, t, s, t)] = ''

    #print "objs:\n\t", string.join(objs, " +\n\t")
    #print "vars:", string.join(varS.keys()+varT.keys()+varX.keys()+varU.keys(), ", ")
    #rint "s.t.:\n", string.join(stX.keys()+stU.keys(), "\n")

    # 'r+' for both reading and writing
    # FIXME: NamedTemporaryFile.name not usable in Windows
    modFile = NamedTemporaryFile(mode='r+', suffix=".mod")

    modFile.write("/* vars */\n%s\n\n" % string.join(
        varS.keys() + varT.keys() + varX.keys() + varU.keys(), "\n"))
    modFile.write("/* objective */\n")
    modFile.write("%s optCross:\n\t%s;\n\n" %
                  ("minimize" if optimize[:3] == "MIN" else "maximize",
                   string.join(objs, " +\n\t")))
    modFile.write("/* subject to */\n%s\n\n" %
                  string.join(stX.keys() + stU.keys(), "\n"))

    modFile.write("end;\n")

    modFile.seek(0)  # necessary for even only reading the file
    if verbose:
        print "name:", modFile.name
        print modFile.read()
    # the file should be self-destructive upon close
    glpkSol = glpk.lpx(modFile.name)
    modFile.close()

    glpkSol.solve()
    if verbose: print glpkSol.sol

    # TODO: if optimize[-3:] == "MOD": ...

    return int(glpkSol.sol['optCross']), None, None
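Why the formulation above is correct: for a pair (s, t) with weight w, the crossing term w * (1 - s - t + 2*s*t) evaluates to w exactly when s = t, and the non-crossing term w * (s + t - 2*s*t) evaluates to w exactly when s != t. Since MathProg objectives must be linear, the binary products s*t are replaced by auxiliary binaries x and u, each of which would in general need the standard pair of linearization inequalities x >= s + t - 1 and x <= (s + t)/2; the optimization direction makes the single one-sided constraint emitted above sufficient (e.g. when minimizing, the objective already pushes x down, so only the lower bound is needed). The CPLEX variant in Example #42 keeps the products quadratic instead and moves the constant term into a separate offset.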
Example #42
def crossCPLEX(X, U, optimize="MIN", verbose=False):
    """
    CAUTION: 
    1) the QP objective function in CPLEX does not take constant term (an offset)
    2) the form [x*y + ... ] / 2 is mandatory for quadratic terms?!
    3) QP obj func can't have repeated terms. need to pre-process and merge them as coefficients  

    using CPLEX to solve the exact two tree corssing min in QP (quadratic objective)
    1) generate a LP file (cplex format) with offset objective
    2) ... (not sure if there will be a second step for now)  
    """
    assert optimize[:3] in ("MIN", "MAX"), optimize

    if verbose:
        print "Crossing:", len(X), X
        print "Non-Crossing:", len(U), U

    from tempfile import NamedTemporaryFile
    import string  # string.join
    from os import system  # the ugly...
    from os.path import basename

    # vars for the formulation
    varX = set()
    varY = set()
    objs = []
    offset = sum(X.values())

    # crossing terms: -x -y + [ 4 x * y ] / 2; offset ++
    for ((x, y), w) in X.iteritems():
        objs.append("- %d x%d - %d y%d + [ %d x%d * y%d ] / 2" %
                    (w, x, w, y, 4 * w, x, y))
        varX.add("x%d" % x)
        varY.add("y%d" % y)

    # non-crossing terms: x + y - [ 4 x * y ] / 2
    for ((x, y), w) in U.iteritems():
        objs.append("+ %d x%d + %d y%d - [ %d x%d * y%d ] / 2" %
                    (w, x, w, y, 4 * w, x, y))
        varX.add("x%d" % x)
        varY.add("y%d" % y)

    # 'r+' for both reading and writing
    # FIXME: NamedTemporaryFile.name not usable in Windows
    lpFile = NamedTemporaryFile(mode='r+', suffix=".lp")

    lpFile.write("\* Problem: tanglegrams *\ \n")
    lpFile.write("\* offset the objective by: %d *\ \n" % offset)
    lpFile.write("\* objective *\ \n")
    lpFile.write("%s\n" %
                 ("Minimize" if optimize[:3] == "MIN" else "Maximize"))
    lpFile.write(" Crossing:\n %s\n\n" % string.join(objs, " \n "))
    lpFile.write("\* vars - hey, Mom, no constraints! *\ \nBinary")
    lpFile.write("\n %s\n" % string.join(varX, "\n "))
    lpFile.write(" %s\n" % string.join(varY, "\n "))
    lpFile.write("\nEnd\n")

    lpFile.seek(0)  # necessary for even only reading the file
    if verbose:
        print "name:", lpFile.name
        print lpFile.read()
    # the file should be self-destructive upon close
    #print lpFile.read()

    # the ugly
    runFile = NamedTemporaryFile(mode='r+', suffix=".runme")
    #runFile.write("help\n")
    runFile.write("read /home/wcchang/prog/cplex/%s\n" % basename(lpFile.name))
    runFile.write("display problem all\n")
    runFile.write("optimize\n")
    runFile.write("write /home/wcchang/prog/cplex/%s.sol sol\n" %
                  basename(lpFile.name))
    runFile.write("quit\n")
    runFile.seek(0)

    system("scp %s pluto:~/prog/cplex/%s" %
           (lpFile.name, basename(lpFile.name)))
    #system("scp %s pluto:~/prog/cplex/%s" % (runFile.name, basename(runFile.name)))
    system("ssh pluto cplex101 < %s" % runFile.name)
    system("scp pluto:~/prog/cplex/%s.sol /tmp" % basename(lpFile.name))

    import xml.dom.minidom as minidom
    sol = minidom.parse("/tmp/%s.sol" % basename(lpFile.name))
    u0 = sol.getElementsByTagName("header")[0].getAttribute("objectiveValue")
    c0 = round(float(u0))
    crossing = offset + int(c0)
    if verbose: print "offset", offset, u0, c0, int(c0)

    runFile.close()
    lpFile.close()

    return (crossing, None, None)
Example #43
    def export(self,
               out_f=None,
               format='mp3',
               codec=None,
               bitrate=None,
               parameters=None,
               tags=None,
               id3v2_version='4',
               cover=None):
        """
        Export an AudioSegment to a file with given options

        out_f (string):
            Path to destination audio file. Also accepts os.PathLike objects on
            python >= 3.6

        format (string)
            Format for destination audio file.
            ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)

        codec (string)
            Codec used to encode the destination file.

        bitrate (string)
            Bitrate used when encoding destination file. (64, 92, 128, 256, 312k...)
            Each codec accepts different bitrate arguments so take a look at the
            ffmpeg documentation for details (bitrate usually shown as -b, -ba or
            -a:b).

        parameters (list of strings)
            Additional ffmpeg/avconv parameters

        tags (dict)
            Set metadata information to destination files
            usually used as tags. ({title='Song Title', artist='Song Artist'})

        id3v2_version (string)
            Set ID3v2 version for tags. (default: '4')

        cover (file)
            Set cover for audio file from image file. (png or jpg)
        """
        id3v2_allowed_versions = ['3', '4']

        if format == "raw" and (codec is not None or parameters is not None):
            raise AttributeError(
                'Can not invoke ffmpeg when export format is "raw"; '
                'specify an ffmpeg raw format like format="s16le" instead '
                'or call export(format="raw") with no codec or parameters')

        out_f, _ = _fd_or_path_or_tempfile(out_f, 'wb+')
        out_f.seek(0)

        if format == "raw":
            out_f.write(self._data)
            out_f.seek(0)
            return out_f

        # wav with no ffmpeg parameters can just be written directly to out_f
        easy_wav = format == "wav" and codec is None and parameters is None

        if easy_wav:
            data = out_f
        else:
            data = NamedTemporaryFile(mode="wb", delete=False)

        pcm_for_wav = self._data
        if self.sample_width == 1:
            # convert to unsigned integers for wav
            pcm_for_wav = audioop.bias(self._data, 1, 128)

        wave_data = wave.open(data, 'wb')
        wave_data.setnchannels(self.channels)
        wave_data.setsampwidth(self.sample_width)
        wave_data.setframerate(self.frame_rate)
        # For some reason packing the wave header struct with
        # a float in python 2 doesn't throw an exception
        wave_data.setnframes(int(self.frame_count()))
        wave_data.writeframesraw(pcm_for_wav)
        wave_data.close()

        # for easy wav files, we're done (wav data is written directly to out_f)
        if easy_wav:
            out_f.seek(0)
            return out_f

        output = NamedTemporaryFile(mode="w+b", delete=False)

        # build converter command to export
        conversion_command = [
            self.converter,
            '-y',  # always overwrite existing files
            "-f",
            "wav",
            "-i",
            data.name,  # input options (filename last)
        ]

        if codec is None:
            codec = self.DEFAULT_CODECS.get(format, None)

        if cover is not None:
            if cover.lower().endswith(('.png', '.jpg', '.jpeg', '.bmp', '.tif',
                                       '.tiff')) and format == "mp3":
                conversion_command.extend(
                    ["-i", cover, "-map", "0", "-map", "1", "-c:v", "mjpeg"])
            else:
                raise AttributeError(
                    "Currently cover images are only supported by MP3 files. The allowed image formats are: .tif, .jpg, .bmp, .jpeg and .png."
                )

        if codec is not None:
            # force audio encoder
            conversion_command.extend(["-acodec", codec])

        if bitrate is not None:
            conversion_command.extend(["-b:a", bitrate])

        if parameters is not None:
            # extend arguments with arbitrary set
            conversion_command.extend(parameters)

        if tags is not None:
            if not isinstance(tags, dict):
                raise InvalidTag("Tags must be a dictionary.")
            else:
                # Extend converter command with tags
                # print(tags)
                for key, value in tags.items():
                    conversion_command.extend(
                        ['-metadata', '{0}={1}'.format(key, value)])

                if format == 'mp3':
                    # set id3v2 tag version
                    if id3v2_version not in id3v2_allowed_versions:
                        raise InvalidID3TagVersion(
                            "id3v2_version not allowed, allowed versions: %s" %
                            id3v2_allowed_versions)
                    conversion_command.extend(
                        ["-id3v2_version", id3v2_version])

        if sys.platform == 'darwin' and codec == 'mp3':
            conversion_command.extend(["-write_xing", "0"])

        conversion_command.extend([
            "-f",
            format,
            output.name,  # output options (filename last)
        ])

        log_conversion(conversion_command)

        # read stdin / write stdout
        with open(os.devnull, 'rb') as devnull:
            p = subprocess.Popen(conversion_command,
                                 stdin=devnull,
                                 stdout=subprocess.PIPE,
                                 stderr=subprocess.PIPE)
        p_out, p_err = p.communicate()

        log_subprocess_output(p_out)
        log_subprocess_output(p_err)

        if p.returncode != 0:
            raise CouldntEncodeError(
                "Encoding failed. ffmpeg/avlib returned error code: {0}\n\nCommand:{1}\n\nOutput from ffmpeg/avlib:\n\n{2}"
                .format(p.returncode, conversion_command,
                        p_err.decode(errors='ignore')))

        output.seek(0)
        out_f.write(output.read())

        data.close()
        output.close()

        os.unlink(data.name)
        os.unlink(output.name)

        out_f.seek(0)
        return out_f
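A typical call of this method, assuming it is bound as AudioSegment.export as in pydub (file names and tag values are illustrative):

from pydub import AudioSegment

seg = AudioSegment.from_file("input.wav")
seg.export(
    "output.mp3",
    format="mp3",
    bitrate="192k",
    tags={"title": "Song Title", "artist": "Song Artist"},
    id3v2_version="3",
)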
Example #44
def _screen_shot(self):
    from tempfile import NamedTemporaryFile as TempFile
    tmp = TempFile(suffix='.png')
    self.save_screenshot(tmp.name)
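    # save_screenshot() writes the PNG via tmp.name; this handle's position
    # is still at 0, so read() returns the freshly written image bytes.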
    return tmp.read()
Example #45
def __getstate__(self):
    # This is really really really hacky, but it works
    N = NamedTemporaryFile()
    self.model.save(N.name)
    S = N.read()
    return S, self.output_probability, self.names
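A matching __setstate__ sketch for the unpickling side (load_model is a hypothetical stand-in for however the saved bytes are loaded back, and NamedTemporaryFile is assumed imported as above; the original class does not show this half):

def __setstate__(self, state):
    S, self.output_probability, self.names = state
    N = NamedTemporaryFile()
    N.write(S)
    N.flush()
    self.model = load_model(N.name)  # hypothetical loader for the saved format
    N.close()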
Example #46
import os
from subprocess import Popen, PIPE, DEVNULL
from tempfile import NamedTemporaryFile
# get_argument_parser and crop_svg are project-local helpers.

parser = get_argument_parser()
arguments = parser.parse_args()


# go through the specified markdown files
for i, name in enumerate(arguments.files):
    # check if the file exists
    if not os.path.exists(name):
        print(f"'{name}' not found, skipping.")
        continue

    # get the svg from Xournal++, storing it in a temporary file
    tmp = NamedTemporaryFile(mode="w+", suffix=".svg")
    Popen(["xournalpp", f"--create-img={tmp.name}", name], stderr=DEVNULL).communicate()

    # crop the SVG
    cropped_svg = crop_svg(tmp.read())

    # possibly use scour to compress the SVG; else output the cropped SVG as-is
    if arguments.compress:
        out = (
            Popen(["scour"], stdin=PIPE, stdout=PIPE, stderr=DEVNULL)
            .communicate(input=cropped_svg.encode())[0]
            .decode()
        )
    else:
        out = cropped_svg

    out_name = name[:-4] + 'svg'
    with open(out_name, "w") as f:
        f.write(out)
        print(f"{name} -> {out_name}")
Example #47
class DsspApp(LocalApp):
    r"""
    Annotate the secondary structure of a protein structure using the
    DSSP software.
    
    Internally this creates a :class:`Popen` instance, which handles
    the execution.
    
    DSSP differentiates between 8 different types of secondary
    structure elements:
    
       - C: loop, coil or irregular
       - H: :math:`{\alpha}`-helix
       - B: :math:`{\beta}`-bridge
       - E: extended strand, participation in :math:`{\beta}`-ladder
       - G: 3 :sub:`10`-helix
       - I: :math:`{\pi}`-helix
       - T: hydrogen bonded turn
       - S: bend 
    
    Parameters
    ----------
    atom_array : AtomArray
        The atom array to be annotated.
    bin_path : str, optional
        Path of the DSSP binary.
    
    Examples
    --------

    >>> app = DsspApp(atom_array)
    >>> app.start()
    >>> app.join()
    >>> print(app.get_sse())
    ['C' 'H' 'H' 'H' 'H' 'H' 'H' 'H' 'T' 'T' 'G' 'G' 'G' 'G' 'T' 'C' 'C' 'C'
     'C' 'C']
    """
    def __init__(self, atom_array, bin_path="mkdssp"):
        super().__init__(bin_path)
        self._array = atom_array
        self._in_file = NamedTemporaryFile("w", suffix=".pdb", delete=False)
        self._out_file = NamedTemporaryFile("r", suffix=".dssp", delete=False)

    def run(self):
        in_file = PDBFile()
        in_file.set_structure(self._array)
        in_file.write(self._in_file)
        self._in_file.flush()
        self.set_arguments(
            ["-i", self._in_file.name, "-o", self._out_file.name])
        super().run()

    def evaluate(self):
        super().evaluate()
        lines = self._out_file.read().split("\n")
        # Index where SSE records start
        sse_start = None
        for i, line in enumerate(lines):
            if line.startswith("  #  RESIDUE AA STRUCTURE"):
                sse_start = i + 1
        if sse_start is None:
            raise ValueError("DSSP file does not contain SSE records")
        lines = [line for line in lines[sse_start:] if len(line) != 0]
        self._sse = np.zeros(len(lines), dtype="U1")
        # Parse file for SSE letters
        for i, line in enumerate(lines):
            self._sse[i] = line[16]
        # Remove "!" for missing residues
        self._sse = self._sse[self._sse != "!"]
        self._sse[self._sse == " "] = "C"

    def clean_up(self):
        super().clean_up()
        cleanup_tempfile(self._in_file)
        cleanup_tempfile(self._out_file)

    @requires_state(AppState.JOINED)
    def get_sse(self):
        """
        Get the resulting secondary structure assignment.
        
        Returns
        -------
        sse : ndarray, dtype="U1"
            An array containing DSSP secondary structure symbols
            corresponding to the residues in the input atom array.
        """
        return self._sse

    @staticmethod
    def annotate_sse(atom_array, bin_path="mkdssp"):
        """
        Perform a secondary structure assignment to an atom array.
        
        This is a convenience function, that wraps the :class:`DsspApp`
        execution.
        
        Parameters
        ----------
        atom_array : AtomArray
            The atom array to be annotated.
        bin_path : str, optional
            Path of the DSSP binary.
        
        Returns
        -------
        sse : ndarray, dtype="U1"
            An array containing DSSP secondary structure symbols
            corresponding to the residues in the input atom array.
        """
        app = DsspApp(atom_array, bin_path)
        app.start()
        app.join()
        return app.get_sse()
Example #48
class GitIndexTests(unittest.TestCase):
    def setUp(self):
        self.dir = TemporaryDirectory()
        self.file = NamedTemporaryFile(dir=self.dir.name, delete=False)
        self.filename = path.basename(self.file.name)
        self.author = Signature('QuitStoreTest', '*****@*****.**')
        self.comitter = Signature('QuitStoreTest', '*****@*****.**')
        self.repo = quit.git.Repository(self.dir.name, create=True)

    def tearDown(self):
        self.file = None
        self.filename = None
        self.dir.cleanup()
        self.dir = None
        self.repo = None

    def addfile(self):
        """Create a repository and add a file to the git index."""
        # Write to file
        self.file.write(b'First Line\n')
        self.file.read()

        # Add file to index
        repo = Repository(self.dir.name)
        index = repo.index
        index.read()
        index.add(self.filename)
        index.write()
        self.repo = quit.git.Repository(self.dir.name)

    def createcommit(self):
        """Prepare a git repository with one existing commit.

        Create a directory, initialize a git Repository, add
        and commit a file.

        Returns:
            A list containing the directory and file
        """
        self.addfile()
        # Create commit
        repo = Repository(self.dir.name)
        index = repo.index
        index.read()
        tree = index.write_tree()
        message = "First commit of temporary test repo"
        repo.create_commit('HEAD', self.author, self.comitter, message, tree,
                           [])
        self.repo = quit.git.Repository(self.dir.name)

    def testIndexSetRevision(self):
        self.createcommit()
        revisions = self.repo.revisions()
        index = quit.git.Index(self.repo)

        index.set_revision(revisions[0].id)

        with self.assertRaises(Exception) as context:
            index.set_revision('not.existing.revision')

    def testIndexAddFile(self):
        index = quit.git.Index(self.repo)
        self.assertEqual(len(index.stash), 0)
        index.add(self.filename, b'First Line\n')
        self.assertEqual(len(index.stash), 1)

    def testIndexCommit(self):
        index = quit.git.Index(self.repo)

        self.assertFalse(index.dirty)

        commit = index.commit("First commit from quit test", "QuitTest",
                              "*****@*****.**")

        self.assertTrue(index.dirty)

        with self.assertRaises(Exception) as context:
            index.commit("Second commit from quit test", "QuitTest",
                         "*****@*****.**")
Example #49
    def export(self, out_f=None, format='mp3', codec=None, bitrate=None, parameters=None, tags=None, id3v2_version='4'):
        """
        Export an AudioSegment to a file with given options

        out_f (string):
            Path to destination audio file

        format (string)
            Format for destination audio file.
            ('mp3', 'wav', 'raw', 'ogg' or other ffmpeg/avconv supported files)

        codec (string)
            Codec used for encoding the destination file.

        bitrate (string)
            Bitrate used when encoding destination file. (64, 92, 128, 256, 312k...)
            Each codec accepts different bitrate arguments so take a look at the
            ffmpeg documentation for details (bitrate usually shown as -b, -ba or
            -a:b).

        parameters (string)
            Additional ffmpeg/avconv parameters

        tags (dict)
            Set metadata information to destination files
            usually used as tags. ({title='Song Title', artist='Song Artist'})

        id3v2_version (string)
            Set ID3v2 version for tags. (default: '4')
        """
        id3v2_allowed_versions = ['3', '4']

        out_f = _fd_or_path_or_tempfile(out_f, 'wb+')
        out_f.seek(0)

        if format == "raw":
            out_f.write(self._data)
            out_f.seek(0)
            return out_f

        # for wav output we can just write the data directly to out_f
        if format == "wav":
            data = out_f
        else:
            data = NamedTemporaryFile(mode="wb", delete=False)

        wave_data = wave.open(data, 'wb')
        wave_data.setnchannels(self.channels)
        wave_data.setsampwidth(self.sample_width)
        wave_data.setframerate(self.frame_rate)
        # For some reason packing the wave header struct with
        # a float in python 2 doesn't throw an exception
        wave_data.setnframes(int(self.frame_count()))
        wave_data.writeframesraw(self._data)
        wave_data.close()

        # for wav files, we're done (wav data is written directly to out_f)
        if format == 'wav':
            return out_f

        output = NamedTemporaryFile(mode="w+b", delete=False)

        # build converter command to export
        conversion_command = [
            self.converter,
            '-y',  # always overwrite existing files
            "-f", "wav", "-i", data.name,  # input options (filename last)
        ]

        if codec is None:
            codec = self.DEFAULT_CODECS.get(format, None)

        if codec is not None:
            # force audio encoder
            conversion_command.extend(["-acodec", codec])

        if bitrate is not None:
            conversion_command.extend(["-b:a", bitrate])

        if parameters is not None:
            # extend arguments with arbitrary set
            conversion_command.extend(parameters)

        if tags is not None:
            if not isinstance(tags, dict):
                raise InvalidTag("Tags must be a dictionary.")
            else:
                # Extend converter command with tags
                # print(tags)
                for key, value in tags.items():
                    conversion_command.extend(
                        ['-metadata', '{0}={1}'.format(key, value)])

                if format == 'mp3':
                    # set id3v2 tag version
                    if id3v2_version not in id3v2_allowed_versions:
                        raise InvalidID3TagVersion(
                            "id3v2_version not allowed, allowed versions: %s" % id3v2_allowed_versions)
                    conversion_command.extend([
                        "-id3v2_version",  id3v2_version
                    ])

        if sys.platform == 'darwin':
            conversion_command.extend(["-write_xing", "0"])

        conversion_command.extend([
            "-f", format, output.name,  # output options (filename last)
        ])

        log_conversion(conversion_command)

        # read stdin / write stdout
        p = subprocess.Popen(conversion_command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        p_out, p_err = p.communicate()

        if p.returncode != 0:
            raise CouldntEncodeError("Encoding failed. ffmpeg/avlib returned error code: {0}\n\nOutput from ffmpeg/avlib:\n\n{1}".format(p.returncode, p_err))

        output.seek(0)
        out_f.write(output.read())

        data.close()
        output.close()

        os.unlink(data.name)
        os.unlink(output.name)

        out_f.seek(0)
        return out_f
Example #50
def macamal(target, **kwargs):
    """Returns a list of syscalls made by a target.

    Every syscall is a named tuple with the following properties:
    name (string), args (list), result (int), errno (int),
    timestamp(int) and pid(int).
    """

    # grey-cuckoo timeout (seconds). Note: we need extra time to obtain the
    # log and send it back to the host.
    timeout = str(int(kwargs.get("gctimeout", 120)))
    log.warning("timeout: %s", timeout)

    if not target:
        raise Exception("Invalid target")

    output_file = NamedTemporaryFile()

    if "runas" in kwargs:
        runas = kwargs["runas"]
    else:
        runas = False
    log.warning("sudo /tmp/grey-cuckoo /usr/bin/open %s %s", timeout,
                output_file.name)
    if "args" in kwargs:
        target_cmd = "\"%s\" %s" % (target, " ".join(kwargs["args"]))
    else:
        target_cmd = target

    target_cmd = target_cmd.strip()
    target = target.strip()

    if target.endswith('.app') or target.endswith('.docx') or target.endswith(
            '.doc') or target.endswith('.dmg') or target.endswith('.pkg'):
        p1 = Popen([
            "sudo", "/tmp/grey-cuckoo", "/usr/bin/open", timeout,
            output_file.name
        ],
                   cwd=current_directory(),
                   stdout=PIPE)
    elif target.endswith('.pl'):
        p1 = Popen([
            "sudo", "/tmp/grey-cuckoo", "/usr/bin/perl", timeout,
            output_file.name
        ],
                   cwd=current_directory(),
                   stdout=PIPE)
    elif target.endswith('.jar'):
        p1 = Popen([
            "sudo", "/tmp/grey-cuckoo", "/usr/bin/java", timeout,
            output_file.name
        ],
                   cwd=current_directory(),
                   stdout=PIPE)
    else:
        p1 = Popen(
            ["sudo", "/tmp/grey-cuckoo", target, timeout, output_file.name],
            cwd=current_directory(),
            stdout=PIPE)
    # Wait for p1 initialization
    time.sleep(2)
    log.warning("target_cmd: %s ; target: %s; shlex: %s", target_cmd, target,
                shlex.split(target_cmd))
    if runas:
        #Set the whole running directory for executable
        parentdir = os.path.dirname(target)
        if parentdir != "/tmp" and parentdir != "/tmp/" and parentdir.startswith(
                '/usr'):
            Popen(["chown", "-R", runas + ":" + runas, parentdir],
                  cwd=current_directory())
            print "Chown parent!"
        Popen(["chown", "-R", runas + ":" + runas, target],
              cwd=current_directory())
        print "Chown target!"
        if target.endswith('.pl'):
            # This is quick and dirty; perl / python / ... should be handled
            # in a package class instead.
            p2 = Popen(["sudo", "-u", runas, "perl", target],
                       cwd=current_directory())
        elif target == '/usr/bin/python':
            p2 = Popen(("sudo -u " + runas + " " + target_cmd.replace(
                '"/usr/bin/python"', '/usr/bin/python')).split(),
                       cwd=current_directory())
        elif target == '/usr/bin/java':
            p2 = Popen(("sudo -u " + runas + " " + target_cmd.replace(
                '"/usr/bin/java"', '/usr/bin/java')).split(),
                       cwd=current_directory())
        elif target == '/bin/bash':
            p2 = Popen(
                ("sudo -u " + runas + " " +
                 target_cmd.replace('"/bin/bash"', '/bin/bash')).split(),
                cwd=current_directory())
        else:
            p2 = Popen(["sudo", "-u", runas, "open", target],
                       cwd=current_directory())

    else:
        if target.endswith('.pl'):
            p2 = Popen(["perl", target_cmd], cwd=current_directory())
        elif target.endswith('.jar'):
            p2 = Popen((target_cmd.replace('"/usr/bin/java"',
                                           '/usr/bin/java')).split(),
                       cwd=current_directory(),
                       stdout=subprocess.PIPE)
        else:
            p2 = Popen(["open", target_cmd], cwd=current_directory()
                       )  # Open sandbox drops root priv. to normal user priv.
            # p2 = Popen([target], cwd=current_directory())

    p1.communicate()
    for entry in output_file.read().split("{\"sc\":"):
        value = "{\"sc\":" + unicode(entry.strip(), errors='replace')
        if len(value) == 0:
            continue
        syscall = _parse_syscall(value)
        if syscall is None:
            continue
        yield syscall
    output_file.close()
Example #51
class ChecksummingStreamer(OutputCoprocess):
    """
    This checksums data and streams it to a named temporary file on disk,
    which can then be read back or linked to a final location.
    """
    def __init__(self, dir=None):
        self.tempfile = NamedTemporaryFile(dir=dir, delete=False)

        self.outer_md5sum = None
        self.outer_md5 = hashlib.md5()
        self.md5filter = IOFilter(self.tempfile, self._outer_md5_callback)
        self.fd = self.md5filter.writer()

        self.saved = False
        self.finished = False
        self._write_preamble()
        OutputCoprocess.__init__(self, self._mk_command(), self.fd)

    def _mk_command(self):
        return None

    def finish(self):
        if self.finished:
            return
        self.finished = True
        OutputCoprocess.close(self)
        self._write_postamble()
        self.fd.close()
        self.md5filter.join()
        self.md5filter.close()
        self.tempfile.seek(0, 0)

    def close(self):
        self.finish()
        self.tempfile.close()

    def save(self, filename, finish=True):
        if finish:
            self.finish()
        if not self.saved:
            # 1st save just renames the tempfile
            os.rename(self.tempfile.name, filename)
            self.saved = True
        else:
            # 2nd save creates a copy
            with open(filename, 'wb') as out:
                self.save_copy(out)

    def save_copy(self, ofd):
        self.tempfile.seek(0, 0)
        data = self.tempfile.read(4096)
        while data != '':
            ofd.write(data)
            data = self.tempfile.read(4096)

    def _outer_md5_callback(self, data):
        if data is None:
            # EOF...
            self.outer_md5sum = self.outer_md5.hexdigest()
            return ''
        else:
            # We calculate the MD5 sum as if the data used the CRLF linefeed
            # convention, whether it's actually using that or not.
            self.outer_md5.update(data.replace('\r', '').replace('\n', '\r\n'))
            return data

    def _write_preamble(self):
        pass

    def _write_postamble(self):
        pass
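A minimal sketch of the CRLF-normalizing digest computed by _outer_md5_callback: both inputs below hash identically because line endings are canonicalized before updating the MD5.

import hashlib

def crlf_md5(data):
    # Canonicalize any line-ending convention to CRLF before hashing.
    return hashlib.md5(data.replace(b'\r', b'').replace(b'\n', b'\r\n')).hexdigest()

assert crlf_md5(b'a\nb\n') == crlf_md5(b'a\r\nb\r\n')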
Example #52
class GitRepositoryTests(unittest.TestCase):
    def setUp(self):

        self.dir = TemporaryDirectory()
        self.remotedir = TemporaryDirectory()
        self.file = NamedTemporaryFile(dir=self.dir.name, delete=False)
        self.filename = path.basename(self.file.name)
        self.author = Signature('QuitStoreTest', '*****@*****.**')
        self.comitter = Signature('QuitStoreTest', '*****@*****.**')

        # Initialize repository
        init_repository(self.dir.name, False)

    def tearDown(self):
        self.file = None
        self.filename = None
        self.dir.cleanup()
        self.dir = None
        self.remotedir.cleanup()
        self.remotedir = None

    def addfile(self):
        """Create a repository and add a file to the git index."""
        # Write to file
        self.file.write(b'First Line\n')
        self.file.read()

        # Add file to index
        repo = Repository(self.dir.name)
        index = repo.index
        index.read()
        index.add(self.filename)
        index.write()

    def createcommit(self):
        """Prepare a git repository with one existing commit.

        Create a directory, initialize a git Repository, add
        and commit a file.

        Returns:
            A list containing the directory and file
        """
        self.addfile()
        # Create commit
        repo = Repository(self.dir.name)
        index = repo.index
        index.read()
        tree = index.write_tree()
        message = "First commit of temporary test repo"
        repo.create_commit('HEAD', self.author, self.comitter, message, tree,
                           [])

    def testInitNotExistingsRepo(self):
        dir = TemporaryDirectory()

        repo = quit.git.Repository(dir.name, create=True)
        self.assertFalse(repo.is_bare)
        self.assertEqual(len(repo.revisions()), 0)

        dir.cleanup()

    def testInitEmptyRepo(self):
        self.addfile()
        repo = quit.git.Repository(self.dir.name, create=True)
        self.assertFalse(repo.is_bare)
        self.assertEqual(len(repo.revisions()), 0)

    def testInitRepoWithExistingCommit(self):
        self.createcommit()
        repo = quit.git.Repository(self.dir.name)
        self.assertFalse(repo.is_bare)
        self.assertEqual(len(repo.revisions()), 1)

    def testCloneRepo(self):
        REMOTE_NAME = 'origin'
        REMOTE_URL = 'git://github.com/AKSW/QuitStore.example.git'

        dir = TemporaryDirectory()
        repo = quit.git.Repository(dir.name, create=True, origin=REMOTE_URL)
        self.assertTrue(path.exists(path.join(dir.name, 'example.nq')))
        self.assertFalse(repo.is_bare)
        dir.cleanup()

    @unittest.skip("Currently fails on travis")
    def testCloneRepoViaSSH(self):
        environ["QUIT_SSH_KEY_HOME"] = "./tests/assets/sshkey/"

        REMOTE_URL = '[email protected]:AKSW/QuitStore.example.git'

        dir = TemporaryDirectory()
        repo = quit.git.Repository(dir.name, create=True, origin=REMOTE_URL)
        self.assertTrue(path.exists(path.join(dir.name, 'example.nq')))
        self.assertFalse(repo.is_bare)
        dir.cleanup()

    def testCloneRepoViaSSHNoKeyFiles(self):
        environ["QUIT_SSH_KEY_HOME"] = "./tests/assets/nosshkey/"
        if "SSH_AUTH_SOCK" in environ:
            del environ["SSH_AUTH_SOCK"]

        REMOTE_URL = '[email protected]:AKSW/QuitStore.example.git'

        dir = TemporaryDirectory()
        with self.assertRaises(Exception) as context:
            quit.git.Repository(dir.name, create=True, origin=REMOTE_URL)
        dir.cleanup()

    def testCloneNotExistingRepo(self):
        environ["QUIT_SSH_KEY_HOME"] = "./tests/assets/sshkey/"

        REMOTE_URL = '[email protected]:AKSW/ThereIsNoQuitStoreRepo.git'

        dir = TemporaryDirectory()
        with self.assertRaises(Exception) as context:
            quit.git.Repository(dir.name, create=True, origin=REMOTE_URL)
        dir.cleanup()

    def testPushRepo(self):
        """Test if it is possible to push to an empty remote repository."""
        with TemporaryRepository(True) as remote:
            graphContent = """
                <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
            with TemporaryRepositoryFactory().withGraph(
                    "http://example.org/", graphContent) as local:
                local.remotes.create("origin", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertTrue(remote.is_empty)
                self.assertFalse(local.is_empty)

                quitRepo.push("origin", "master")

                self.assertFalse(remote.is_empty)
                self.assertFalse(local.is_empty)

    def testPushRefspecs(self):
        """Test if it is possible to push to an empty remote repository."""
        for refspec in [
                'master', 'refs/heads/master', 'refs/heads/master:master',
                'master:master', 'master:refs/heads/master',
                'refs/heads/master:refs/heads/master'
        ]:
            with TemporaryRepository(True) as remote:
                graphContent = """
                    <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
                with TemporaryRepositoryFactory().withGraph(
                        "http://example.org/", graphContent) as local:
                    local.remotes.create("origin", remote.path)
                    quitRepo = quit.git.Repository(local.workdir)

                    self.assertTrue(remote.is_empty)
                    self.assertFalse(local.is_empty)

                    quitRepo.push("origin", refspec)

                    self.assertFalse(remote.is_empty)
                    self.assertFalse(local.is_empty)

    def testPushRepoNotConfiguredRemote(self):
        """Test if the push failes if the origin remote was not defined."""
        with TemporaryRepository(True) as remote:
            graphContent = """
                <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
            with TemporaryRepositoryFactory().withGraph(
                    "http://example.org/", graphContent) as local:
                local.remotes.create("upstream", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertTrue(remote.is_empty)
                self.assertFalse(local.is_empty)

                with self.assertRaises(RemoteNotFound):
                    quitRepo.push("origin", "master")

                self.assertTrue(remote.is_empty)
                self.assertFalse(local.is_empty)

    def testPushRepoWithRemoteName(self):
        """Test if it is possible to push to a remote repository, which is not called orign."""
        with TemporaryRepository(True) as remote:
            graphContent = "<http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."
            with TemporaryRepositoryFactory().withGraph(
                    "http://example.org/", graphContent) as local:
                local.remotes.create("upstream", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertTrue(remote.is_empty)
                self.assertFalse(local.is_empty)

                quitRepo.push("upstream", "master")

                self.assertFalse(remote.is_empty)
                self.assertFalse(local.is_empty)

    def testPushRepoNotConfiguredNamedRemote(self):
        """Test if the push failes if the specified remote was not defined."""
        with TemporaryRepository(is_bare=True) as remote:
            graphContent = """
                <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
            with TemporaryRepositoryFactory().withGraph(
                    "http://example.org/", graphContent) as local:
                local.remotes.create("origin", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertTrue(remote.is_empty)
                self.assertFalse(local.is_empty)

                with self.assertRaises(RemoteNotFound):
                    quitRepo.push("upstream", "master")

                self.assertTrue(remote.is_empty)
                self.assertFalse(local.is_empty)

    def testPushRepoWithDivergedRemote(self):
        """Test for an exception, if the local and remote repositories are diverged."""
        with TemporaryRepositoryFactory().withEmptyGraph(
                "http://example.org/") as remote:
            graphContent = """
                <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
            with TemporaryRepositoryFactory().withGraph(
                    "http://example.org/", graphContent) as local:
                local.remotes.create("origin", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertFalse(remote.is_empty)
                self.assertFalse(local.is_empty)

                with self.assertRaises(pygit2.GitError):
                    quitRepo.push("origin", "master")

    @unittest.skip("requires a remote with pre-receive hook")
    def testPushRepoWithRemoteReject(self):
        """Test for an exception, if the remote repositories rejects a push.

        CAUTION: This test is disabled, because it requires a remote with pre-receive hook.
        Unfortunately the libgit2 does not execute pre-receive hooks on local repositories.
        """
        graphContent = """
            <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as local:
            local.remotes.create("origin", "ssh://[email protected]/testing.git")
            quitRepo = quit.git.Repository(local.workdir)

            self.assertFalse(local.is_empty)

            with self.assertRaises(QuitGitPushError):
                quitRepo.push()

    def testFetchRepo(self):
        """Test if it is possible to fetch from a remote repository."""
        graphContent = """
            <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryRepository(False) as local:
                local.remotes.create("origin", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertFalse(remote.is_empty)
                self.assertTrue(local.is_empty)
                self.assertTrue(quitRepo.is_empty)

                remoteHead = remote.revparse_single('HEAD').hex

                with self.assertRaises(RevisionNotFound):
                    quitRepo.revision('HEAD')

                quitRepo.fetch()

                self.assertEqual(
                    quitRepo.revision('origin/master').id, remoteHead)

                self.assertFalse(remote.is_empty)
                self.assertFalse(local.is_empty)
                self.assertFalse(quitRepo.is_empty)

    def testFetchUpstreamRepo(self):
        """Test if it is possible to from from a remote, which set as upstream."""
        graphContent = """
            <http://ex.org/x> <http://ex.org/x> <http://ex.org/x> <http://example.org/> ."""
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryRepository(clone_from_repo=remote) as local:
                quitRepo = quit.git.Repository(local.workdir)

                self.assertFalse(remote.is_empty)
                self.assertFalse(local.is_empty)
                self.assertFalse(quitRepo.is_empty)

                with open(path.join(remote.workdir, "graph.nq"),
                          "a") as graphFile:
                    graphContent = """
                        <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
                    graphFile.write(graphContent)

                createCommit(repository=remote)

                remoteHead = remote.revparse_single('HEAD').hex
                localHead = local.revparse_single('HEAD').hex

                self.assertNotEqual(localHead, remoteHead)

                quitRepo.fetch()

                self.assertEqual(
                    quitRepo.revision('origin/master').id, remoteHead)

                self.assertFalse(remote.is_empty)
                self.assertFalse(local.is_empty)
                self.assertFalse(quitRepo.is_empty)

    def testPullRepo(self):
        """Test if it is possible to pull from a remote repository."""
        graphContent = """
            <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryRepository(False) as local:
                local.remotes.create("origin", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertFalse(remote.is_empty)
                self.assertTrue(local.is_empty)
                self.assertTrue(quitRepo.is_empty)

                remoteHead = remote.revparse_single('HEAD').hex

                with self.assertRaises(RevisionNotFound):
                    quitRepo.revision('HEAD')

                quitRepo.pull("origin", "master")

                self.assertEqual(quitRepo.revision('HEAD').id, remoteHead)

                self.assertFalse(remote.is_empty)
                self.assertFalse(local.is_empty)
                self.assertFalse(quitRepo.is_empty)

    def testPullRepoWithUnbornHead(self):
        """Test if it is possible to pull from a remote repository."""
        graphContent = """
            <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryRepository(False) as local:
                local.remotes.create("origin", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertFalse(remote.is_empty)
                self.assertTrue(local.is_empty)
                self.assertTrue(quitRepo.is_empty)

                remoteHead = remote.revparse_single('HEAD').hex

                with self.assertRaises(RevisionNotFound):
                    quitRepo.revision('HEAD')

                quitRepo.pull()

                self.assertEqual(
                    quitRepo.revision('origin/master').id, remoteHead)

                self.assertFalse(remote.is_empty)
                self.assertFalse(local.is_empty)
                self.assertFalse(quitRepo.is_empty)

    def testPullRepoClonedNoChanges(self):
        """Test pull if both repos are at the same state."""
        graphContent = """
            <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryDirectory() as localDirectory:
                quitRepo = quit.git.Repository(localDirectory,
                                               create=True,
                                               origin=remote.path)

                self.assertFalse(remote.is_empty)
                self.assertFalse(quitRepo.is_empty)

                remoteHead = remote.revparse_single('HEAD').hex

                self.assertEqual(quitRepo.revision('HEAD').id, remoteHead)

                quitRepo.pull("origin", "master")

                self.assertEqual(quitRepo.revision('HEAD').id, remoteHead)

                self.assertFalse(remote.is_empty)
                self.assertFalse(quitRepo.is_empty)

    def testPullRepoClonedAndPullWithChanges(self):
        """Test clone, commit on remote and pull."""
        graphContent = """
            <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryDirectory() as localDirectory:
                quitRepo = quit.git.Repository(localDirectory,
                                               create=True,
                                               origin=remote.path)

                self.assertFalse(remote.is_empty)
                self.assertFalse(quitRepo.is_empty)

                remoteHead = remote.revparse_single('HEAD').hex

                self.assertEqual(quitRepo.revision('HEAD').id, remoteHead)

                quitRepo.pull()

                self.assertEqual(quitRepo.revision('HEAD').id, remoteHead)

                self.assertFalse(remote.is_empty)
                self.assertFalse(quitRepo.is_empty)

                remoteQuitRepo = quit.git.Repository(remote.workdir)
                index = remoteQuitRepo.index(remoteHead)
                graphContent += """
                    <http://ex.org/x> <http://ex.org/z> <http://ex.org/z> <http://example.org/> ."""
                index.add("graph.nq", graphContent)

                author = Signature('QuitStoreTest', '*****@*****.**')
                commitid = index.commit("from test", author.name, author.email)

                quitRepo.pull()

                self.assertEqual(quitRepo.revision('HEAD').id, str(commitid))

    def testPullRepoClonedAndPullWithMerge(self):
        """Test clone, commit on remote and pull with merge, which resolves without conflicts."""
        graphContent = "<http://ex.org/a> <http://ex.org/b> <http://ex.org/c> <http://example.org/> .\n"
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryDirectory() as localDirectory:
                quitRepo = quit.git.Repository(localDirectory,
                                               create=True,
                                               origin=remote.path)

                self.assertFalse(remote.is_empty)
                self.assertFalse(quitRepo.is_empty)

                remoteHead = remote.revparse_single('HEAD').hex

                self.assertEqual(quitRepo.revision('HEAD').id, remoteHead)

                index = quitRepo.index(remoteHead)
                graph2Content = "<http://ex.org/x> <http://ex.org/y> <http://ex.org/y> <http://example.org/> .\n"
                index.add("graph2.nq", graph2Content)
                index.add("graph2.nq.graph", "http://example2.org/")
                author = Signature('QuitStoreTest', '*****@*****.**')
                localCommitid = index.commit("from local", author.name,
                                             author.email)
                quitRepo._repository.checkout_tree(
                    quitRepo._repository.get(localCommitid))

                self.assertEqual(
                    quitRepo.revision('HEAD').id, str(localCommitid))

                self.assertFalse(remote.is_empty)
                self.assertFalse(quitRepo.is_empty)

                remoteQuitRepo = quit.git.Repository(remote.workdir)
                index = remoteQuitRepo.index(remoteHead)
                graphContent += "<http://ex.org/x> <http://ex.org/z> <http://ex.org/z> <http://example.org/> .\n"
                index.add("graph.nq", graphContent)
                remoteCommitid = index.commit("from remote", author.name,
                                              author.email)
                remoteQuitRepo._repository.checkout_tree(
                    remoteQuitRepo._repository.get(remoteCommitid))

                quitRepo.pull()

                self.assertNotEqual(
                    quitRepo.revision('HEAD').id, str(localCommitid))
                self.assertNotEqual(
                    quitRepo.revision('HEAD').id, str(remoteCommitid))

                # check if head has local and remote commit id as ancestor
                self.assertListEqual([
                    parent.id for parent in quitRepo.revision('HEAD').parents
                ], [str(localCommitid),
                    str(remoteCommitid)])

                # check if the merged commit contains all file contents
                with open(path.join(localDirectory, "graph.nq")) as f:
                    self.assertEqual("".join(f.readlines()), graphContent)

                with open(path.join(localDirectory, "graph2.nq")) as f:
                    self.assertEqual("".join(f.readlines()), graph2Content)

    def testPullRepoClonedAndPullWithConflict(self):
        """Test clone, commit on remote and pull with conflict."""
        graphContent = "<http://ex.org/a> <http://ex.org/b> <http://ex.org/c> <http://example.org/> .\n"
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryDirectory() as localDirectory:
                quitRepo = quit.git.Repository(localDirectory,
                                               create=True,
                                               origin=remote.path)

                self.assertFalse(remote.is_empty)
                self.assertFalse(quitRepo.is_empty)

                remoteHead = remote.revparse_single('HEAD').hex

                self.assertEqual(quitRepo.revision('HEAD').id, remoteHead)

                index = quitRepo._repository.index
                with open(path.join(localDirectory, "graph.nq"),
                          "a") as graphFile:
                    graphFile.write(
                        "<http://ex.org/x> <http://ex.org/y> <http://ex.org/y> <http://example.org/> .\n"
                    )
                index.add("graph.nq")
                index.write()
                tree = index.write_tree()

                author = Signature('QuitStoreTest', '*****@*****.**')
                localCommitid = quitRepo._repository.create_commit(
                    'HEAD', author, author, "from local", tree, [remoteHead])

                self.assertFalse(remote.is_empty)
                self.assertFalse(quitRepo.is_empty)

                index = remote.index
                with open(path.join(remote.workdir, "graph.nq"),
                          "a") as graphFile:
                    graphFile.write(
                        "<http://ex.org/x> <http://ex.org/z> <http://ex.org/z> <http://example.org/> .\n"
                    )
                index.add("graph.nq")
                index.write()
                tree = index.write_tree()
                remoteCommitid = remote.create_commit('HEAD', author, author,
                                                      "from remote", tree,
                                                      [remoteHead])

                remoteQuitRepo = quit.git.Repository(remote.workdir)

                with self.assertRaises(QuitMergeConflict):
                    quitRepo.pull()

                self.assertEqual(
                    quitRepo.revision('HEAD').id, str(localCommitid))
                self.assertEqual(
                    remoteQuitRepo.revision('HEAD').id, str(remoteCommitid))

    def testPullRepoFromNamedRemote(self):
        """Test if it is possible to pull from a remote repository, which is not called origin."""
        graphContent = """
            <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryRepository(False) as local:
                local.remotes.create("upstream", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertFalse(remote.is_empty)
                self.assertTrue(local.is_empty)
                self.assertTrue(quitRepo.is_empty)

                remoteHead = remote.revparse_single('HEAD').hex

                with self.assertRaises(RevisionNotFound):
                    quitRepo.revision('HEAD')

                quitRepo.pull(remote_name='upstream', refspec="master")

                self.assertEqual(quitRepo.revision('HEAD').id, remoteHead)

                self.assertFalse(remote.is_empty)
                self.assertFalse(local.is_empty)
                self.assertFalse(quitRepo.is_empty)

    def testPullRepoFromNotConfiguredRemote(self):
        """Test if it is possible to pull from a remote repository, which is not called origin."""
        graphContent = """
            <http://ex.org/x> <http://ex.org/y> <http://ex.org/z> <http://example.org/> ."""
        with TemporaryRepositoryFactory().withGraph("http://example.org/",
                                                    graphContent) as remote:
            with TemporaryRepository(False) as local:
                local.remotes.create("origin", remote.path)
                quitRepo = quit.git.Repository(local.workdir)

                self.assertFalse(remote.is_empty)
                self.assertTrue(local.is_empty)
                self.assertTrue(quitRepo.is_empty)

                with self.assertRaises(RemoteNotFound):
                    quitRepo.pull(remote_name='upstream')

                self.assertFalse(remote.is_empty)
                self.assertTrue(local.is_empty)
                self.assertTrue(quitRepo.is_empty)

    def testPullRepoNoFFW(self):
        """TODO"""
        pass

    def testPullRepoFromMasterToDevelop(self):
        """TODO"""
        pass

    def testPullRepoFromDevelopToMaster(self):
        """TODO"""
        pass

    def testPullRepoFromRemoteTrackingBranch(self):
        """TODO"""
        pass

    def testRepositoryIsEmpty(self):
        """Test that adding data causes a new commit."""
        self.addfile()
        repo = quit.git.Repository(self.dir.name)
        self.assertTrue(repo.is_empty)

        self.createcommit()
        repo = quit.git.Repository(self.dir.name)
        self.assertFalse(repo.is_empty)

    def testRepositoryIsBare(self):
        """Test if is_bare is currently done in init/clone tests."""
        pass

    def testNoGCConfiguration(self):
        """Test Garbage Collection configuration."""
        quit.git.Repository(self.dir.name, garbageCollection=False)

        with subprocess.Popen(["git", "config", "gc.auto"],
                              stdout=subprocess.PIPE,
                              cwd=self.dir.name) as getGCAuto:
            stdout, stderr = getGCAuto.communicate()
            response = stdout.decode("UTF-8").strip()

        self.assertEqual(response, '')

    def testGCConfiguration(self):
        """Test Garbage Collection configuration."""
        quit.git.Repository(self.dir.name, garbageCollection=True)

        with subprocess.Popen(["git", "config", "gc.auto"],
                              stdout=subprocess.PIPE,
                              cwd=self.dir.name) as getGCAuto:
            stdout, stderr = getGCAuto.communicate()
            response = stdout.decode("UTF-8").strip()

        self.assertNotEqual(response, '')
        self.assertEqual(response, '256')
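
For reference, a minimal sketch (assuming pygit2; the helper name is hypothetical) of how a repository wrapper could write the gc.auto setting that the two tests above probe:

import pygit2

def init_repo_with_gc(path, garbage_collection=False):
    # Hypothetical helper: initialise a repository and, when requested,
    # configure git to pack loose objects automatically ("git gc --auto").
    repo = pygit2.init_repository(path)
    if garbage_collection:
        repo.config["gc.auto"] = 256  # the value testGCConfiguration expects
    return repo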
Example #53
0
class Editor(object):
    """
    This class is used to use an editor over the connection, and then return the edited file contents.  The
    files are temporary files, and are deleted when we've finished with them, and the editors are run in a
    restricted mode so they can only edit that file.

    To use:
    callback = self.handle_this_edited_value   # signature of void callback(str)
    editor can be "vim" or "nano", default of "nano"
    editorObj = Editor(connection, editor, callback)
    editorObj.launch(initial_contents)

    The calling tasklet can then yield as the user input loop will trigger the original handler on further input
    after the callback has been called to set the value of whatever variable being edited.
    """
    editors = {
        "nano": ["nano", "-R"],
        "vim": ["vim", "-Z"],
    }

    def __init__(self, connection, editor, callback=None):
        from HavokMud.startup import server_instance
        self.connection = connection
        self.server = server_instance
        self.editor = editor
        self.channel = stackless.channel()
        self.callback_channel = stackless.channel()
        self.file_ = None
        self.editor_callback = partial(self.editor_callback_wrapper, callback)

    def launch(self, initial_contents=None):
        command = self.editors.get(self.editor, None)
        if not command:
            raise NotImplementedError("Editor %s is not configured" %
                                      self.editor)
        self.file_ = NamedTemporaryFile("w+", delete=False)
        if initial_contents:
            self.file_.write(initial_contents)
            self.file_.flush()
            self.file_.seek(0)
        # Build a new list so the shared class-level command is not mutated;
        # += with a bare string would append each character separately.
        command = command + [self.file_.name]
        self.connection.handler = ExternalHandler(self.connection, command,
                                                  self.channel,
                                                  self.handler_callback,
                                                  self.editor_callback)

    def handler_callback(self):
        self.channel.receive()
        self.file_.seek(0)
        contents = self.file_.read()
        filename = self.file_.name
        self.file_.close()
        os.unlink(filename)
        self.callback_channel.send(contents)

    def default_editor_callback(self):
        contents = self.callback_channel.receive()
        return contents

    def editor_callback_wrapper(self, callback):
        if callback and hasattr(callback, "__call__"):
            callback(self.default_editor_callback())
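
A hedged usage sketch of the Editor class above; "connection" and the callback are placeholders, mirroring the class docstring:

def handle_edited_value(contents):
    # hypothetical callback: receives the edited file contents as a str
    print("user submitted %d characters" % len(contents))

editor = Editor(connection, "vim", callback=handle_edited_value)
editor.launch("Edit me\n")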
Example #54
0
class AbstractWrapper(object):
    '''
        abstract solver wrapper
    '''
    def __init__(self):
        '''
            Constructor
        '''
        #program_name = os.path.basename(sys.argv[0])
        program_version = "v%s" % __version__
        program_build_date = str(__updated__)
        program_version_message = "%%(prog)s %s (%s)" % (program_version,
                                                         program_build_date)
        program_shortdesc = __import__("__main__").__doc__.split("\n")[1]
        program_license = '''%s
    
          Created by %s on %s.
          Copyright 2014 - AClib. All rights reserved.
          
          Licensed under the GPLv2
          http://www.gnu.org/licenses/gpl-2.0.html
          
          Distributed on an "AS IS" basis without warranties
          or conditions of any kind, either express or implied.
        
          USAGE
        ''' % (program_shortdesc, str(__authors__), str(__date__))
        self.parser = ArgumentParser(
            description=program_license,
            formatter_class=RawDescriptionHelpFormatter,
            add_help=False)
        self.args = None

        self.RESULT_MAPPING = {'SUCCESS': "SAT"}
        self._watcher_file = None
        self._solver_file = None

        self._instance = ""
        self._specifics = ""
        self._cutoff = 0.0
        self._runlength = 0
        self._seed = 0

        self._exit_code = None

        self._runsolver = None
        self._mem_limit = 8192
        self._tmp_dir = None

        self._crashed_if_non_zero_status = True

        self._subprocesses = []

        self._DEBUG = True
        self._DELAY2KILL = 2

        self._ta_status = "EXTERNALKILL"
        self._ta_runtime = 999999999.0
        self._ta_runlength = -1
        self._ta_quality = -1
        self._ta_exit_code = None
        self._ta_misc = ""

    def print_d(self, str_):
        if self._DEBUG:
            print(str_)

    def main(self, argv=None):
        ''' parse command line'''
        if argv is None:
            argv = sys.argv
        else:
            sys.argv.extend(argv)

        try:
            signal.signal(signal.SIGTERM, signalHandler)
            signal.signal(signal.SIGQUIT, signalHandler)
            signal.signal(signal.SIGINT, signalHandler)

            # Setup argument parser

            run_group = self.parser.add_argument_group("Run")
            run_group.add_argument(
                "--runsolver-path",
                dest="runsolver",
                default=os.path.join(
                    os.path.join(os.path.dirname(__file__), "runsolver"),
                    "runsolver"),
                help=
                "path to runsolver binary (if None, the runsolver is deactivated)"
            )
            run_group.add_argument(
                "--temp-file-dir",
                dest="tmp_dir",
                default=".",
                help=
                "directory for temporary files (relative to -exec-dir in SMAC scenario)"
            )
            run_group.add_argument("--mem-limit",
                                   dest="mem_limit",
                                   default=self._mem_limit,
                                   type=int,
                                   help="memory limit in MB")
            run_group.add_argument(
                "--internal",
                dest="internal",
                default=False,
                action="store_true",
                help="skip calling an external target algorithm")

            run_group = self.parser.add_argument_group(
                "External Callstring Generation and Output Parsing")
            run_group.add_argument(
                "--ext-callstring",
                dest="ext_callstring",
                default=None,
                help="Command to get call string via external program;" +
                "your programm gets a file with" +
                "first line: instance name," + "second line: seed" +
                "further lines: paramter name, paramater value;" +
                "output: one line with callstring for target algorithm")
            run_group.add_argument(
                "--ext-parsing",
                dest="ext_parsing",
                default=None,
                help=
                "Command to use an external program to parse the output of your target algorithm; "
                "only parameter: name of output file; "
                "output of your program: "
                "status: SAT|UNSAT|TIMEOUT|CRASHED\n"
                "quality: <integer>\n"
                "misc: <string>")

            help_group = self.parser.add_argument_group("Help")
            help_group.add_argument("--help",
                                    dest="show_help",
                                    action="store_true",
                                    help="show this help message")

            # Process arguments
            self.args, target_args = self.parser.parse_known_args()
            args = self.args

            if args.show_help:
                self.parser.print_help()
                self._ta_status = "ABORT"
                self._ta_misc = "help was requested..."
                self._exit_code = 1
                sys.exit(1)

            if args.runsolver != "None" and not os.path.isfile(
                    args.runsolver) and not args.internal:
                self._ta_status = "ABORT"
                self._ta_misc = "runsolver is missing - should have been at %s." % (
                    args.runsolver)
                self._exit_code = 1
                sys.exit(1)
            else:
                self._runsolver = args.runsolver
                self._mem_limit = args.mem_limit

            if not os.path.isdir(args.tmp_dir):
                self._ta_status = "ABORT"
                self._ta_misc = "temp directory is missing - should have been at %s." % (
                    args.tmp_dir)
                self._exit_code = 1
                sys.exit(1)
            else:
                self._tmp_dir = args.tmp_dir

            if len(target_args) < 5:
                self._ta_status = "ABORT"
                self._ta_misc = "some required TA parameters (instance, specifics, cutoff, runlength, seed) missing - was [%s]." % (
                    " ".join(target_args))
                self._exit_code = 1
                sys.exit(1)

            config_dict = self.build_parameter_dict(target_args)
            runargs = {
                "instance": self._instance,
                "specifics": self._specifics,
                "cutoff": self._cutoff,
                "runlength": self._runlength,
                "seed": self._seed
            }
            if args.ext_callstring:
                target_cmd = self.get_command_line_args_ext(
                    runargs=runargs,
                    config=config_dict,
                    ext_call=args.ext_callstring).split(" ")
            else:
                target_cmd = self.get_command_line_args(
                    runargs=runargs, config=config_dict).split(" ")

            if not args.internal:
                self.call_target(target_cmd)
                self.read_runsolver_output()

            if args.ext_parsing:
                resultMap = self.process_results_ext(
                    self._solver_file, {"exit_code": self._ta_exit_code},
                    ext_call=args.ext_parsing)
            else:
                resultMap = self.process_results(
                    self._solver_file, {"exit_code": self._ta_exit_code})

            if ('status' in resultMap):
                self._ta_status = self.RESULT_MAPPING.get(
                    resultMap['status'], resultMap['status'])
            if ('runtime' in resultMap):
                self._ta_runtime = resultMap['runtime']
            if ('quality' in resultMap):
                self._ta_quality = resultMap['quality']
            if ('misc' in resultMap):
                self._ta_misc = resultMap['misc']

            # if still no status was determined, something went wrong and output files should be kept
            if self._ta_status is "EXTERNALKILL":
                self._ta_status = "CRASHED"
            sys.exit()
        except (KeyboardInterrupt, SystemExit):
            self.cleanup()
            self.print_result_string()
            if self._ta_exit_code:
                sys.exit(self._ta_exit_code)
            elif self._exit_code:
                sys.exit(self._exit_code)
            else:
                sys.exit(0)

    def build_parameter_dict(self, arg_list):
        '''
            Reads all arguments which were not parsed by ArgumentParser,
            extracts all meta information
            and builds a mapping: parameter name -> parameter value
            Format Assumption: <instance> <specifics> <runtime cutoff> <runlength> <seed> <solver parameters>
            Args:
                list of all options not parsed by ArgumentParser
        '''
        self._instance = arg_list[1]
        self._specifics = arg_list[2]
        self._cutoff = int(float(arg_list[3]) +
                           1)  # runsolver only rounds down to integer
        self._runlength = int(arg_list[4])
        self._seed = int(arg_list[5])

        params = arg_list[6:]
        if len(params) % 2 != 0:  # parameters must come as name/value pairs
            self._ta_status = "ABORT"
            self._ta_misc = "target algorithm parameter list MUST have even length - found %d arguments." % (
                len(params))
            self.print_d(" ".join(params))
            self._exit_code = 1
            sys.exit(1)

        return dict(
            (name, value) for name, value in zip(params[::2], params[1::2]))
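
    # Illustration of the pairing idiom above (hypothetical values): for
    # params = ["-alpha", "0.5", "-beta", "3"], zipping the even- and
    # odd-indexed slices yields {"-alpha": "0.5", "-beta": "3"}.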

    def call_target(self, target_cmd):
        '''
            extends the target algorithm command line call with the runsolver
            and executes it
            Args:
                list of target cmd (from getCommandLineArgs)
        '''
        random_id = random.randint(0, 1000000)
        self._watcher_file = NamedTemporaryFile(suffix=".log",
                                                prefix="watcher-%d-" %
                                                (random_id),
                                                dir=self._tmp_dir,
                                                delete=False)
        self._solver_file = NamedTemporaryFile(suffix=".log",
                                               prefix="solver-%d-" %
                                               (random_id),
                                               dir=self._tmp_dir,
                                               delete=False)

        runsolver_cmd = []
        if self._runsolver != "None":
            runsolver_cmd = [
                self._runsolver, "-M", self._mem_limit, "-C", self._cutoff,
                "-w", self._watcher_file.name, "-o", self._solver_file.name
            ]

        runsolver_cmd.extend(target_cmd)
        #for debugging
        self.print_d("Calling runsolver. Command-line:")
        self.print_d(" ".join(map(str, runsolver_cmd)))

        # run
        try:
            if self._runsolver != "None":
                io = Popen(map(str, runsolver_cmd),
                           shell=False,
                           preexec_fn=os.setpgrp)
            else:
                io = Popen(map(str, runsolver_cmd),
                           stdout=self._solver_file,
                           shell=False,
                           preexec_fn=os.setpgrp)
            self._subprocesses.append(io)
            io.wait()
            self._subprocesses.remove(io)
            if io.stdout:
                io.stdout.flush()
        except OSError:
            self._ta_status = "ABORT"
            self._ta_misc = "execution failed: %s" % (" ".join(
                map(str, runsolver_cmd)))
            self._exit_code = 1
            sys.exit(1)

        self._solver_file.seek(0)

    def float_regex(self):
        return r'[+-]?\d+(?:\.\d+)?(?:[eE][+-]\d+)?'
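
    # The pattern above is meant to match runsolver's numeric output, e.g.
    # "12", "-3.5" or "1.2e+03"; note the exponent must carry an explicit
    # sign, so "1e3" would only match as "1".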

    def read_runsolver_output(self):
        '''
            reads self._watcher_file,
            extracts the runtime
            and records whether a memout or timeout occurred
        '''
        if self._runsolver == "None":
            self._ta_exit_code = 0
            return

        self.print_d("Reading runsolver output from %s" %
                     (self._watcher_file.name))
        # the watcher file was opened in binary mode, so decode before
        # matching the string patterns below
        data = self._watcher_file.read().decode("utf-8", errors="replace")

        if (re.search('runsolver_max_cpu_time_exceeded', data)
                or re.search('Maximum CPU time exceeded', data)):
            self._ta_status = "TIMEOUT"

        if (re.search('runsolver_max_memory_limit_exceeded', data)):
            self._ta_status = "TIMEOUT"
            self._ta_misc = "memory limit was exceeded"

        cpu_pattern1 = re.compile('runsolver_cputime: (%s)' %
                                  (self.float_regex()))
        cpu_match1 = re.search(cpu_pattern1, data)

        cpu_pattern2 = re.compile('CPU time \\(s\\): (%s)' %
                                  (self.float_regex()))
        cpu_match2 = re.search(cpu_pattern2, data)

        if (cpu_match1):
            self._ta_runtime = float(cpu_match1.group(1))
        if (cpu_match2):
            self._ta_runtime = float(cpu_match2.group(1))

        exitcode_pattern = re.compile('Child status: ([0-9]+)')
        exitcode_match = re.search(exitcode_pattern, data)

        if (exitcode_match):
            self._ta_exit_code = int(exitcode_match.group(1))

    def print_result_string(self):
        sys.stdout.write(
            "Result for ParamILS: %s, %s, %s, %s, %s" %
            (self._ta_status, str(self._ta_runtime), str(
                self._ta_runlength), str(self._ta_quality), str(self._seed)))
        if (len(self._ta_misc) > 0):
            sys.stdout.write(", %s" % (self._ta_misc))

        print('')

    def cleanup(self):
        '''
            cleanup if error occurred or external signal handled
        '''
        if (len(self._subprocesses) > 0):
            print("killing the target run!")
            try:
                for sub in self._subprocesses:
                    #sub.terminate()
                    Popen(["pkill", "-TERM", "-P", str(sub.pid)])
                    self.print_d("Wait %d seconds ..." % (self._DELAY2KILL))
                    time.sleep(self._DELAY2KILL)
                    if sub.returncode is None:  # still running
                        sub.kill()

                self.print_d(
                    "done... If anything in the subprocess tree fork'd a new process group, we may not have caught everything..."
                )
                self._ta_misc = "forced to exit by signal or keyboard interrupt."
                self._ta_runtime = self._cutoff
            except (OSError, KeyboardInterrupt, SystemExit):
                self._ta_misc = "forced to exit by multiple signals/interrupts."
                self._ta_runtime = self._cutoff

        if (self._ta_status is "ABORT" or self._ta_status is "CRASHED"):
            if (len(self._ta_misc) == 0):
                self._ta_misc = 'Problem with run. Exit code was %d.' % (
                    self._ta_exit_code)

            if (self._watcher_file and self._solver_file):
                self._ta_misc = self._ta_misc + '; Preserving runsolver output at %s - preserving target algorithm output at %s' % (
                    self._watcher_file.name or "<none>", self._solver_file.name
                    or "<none>")

        try:
            if (self._watcher_file):
                self._watcher_file.close()
            if (self._solver_file):
                self._solver_file.close()

            if (self._ta_status is not "ABORT"
                    and self._ta_status is not "CRASHED"):
                os.remove(self._watcher_file.name)
                os.remove(self._solver_file.name)
        except (OSError, KeyboardInterrupt, SystemExit):
            self._ta_misc = "problems removing temporary files during cleanup."
        except AttributeError:
            pass  #in internal mode, these files are not generated

        if self._ta_status is "EXTERNALKILL":
            self._ta_status = "CRASHED"
            self._exit_code = 3

    def get_command_line_args(self, runargs, config):
        '''
        Returns the command call list containing arguments to execute the implementing subclass' solver.
        The default implementation delegates to get_command_line_args_ext. If this is not implemented, a
        NotImplementedError will be raised.
    
        Args:
            runargs: a map of any non-configuration arguments required for the execution of the solver.
            config: a mapping from parameter name (with prefix) to parameter value.
        Returns:
            A command call list to execute a target algorithm.
        '''
        raise NotImplementedError()

    def get_command_line_args_ext(self, runargs, config, ext_call):
        '''
        When production of the target algorithm is done from a source other than python,
        override this method to return a command call list to execute whatever you need to produce the command line.

        Args:
            runargs: a map of any non-configuration arguments required for the execution of the solver.
            config: a mapping from parameter name (with prefix) to parameter value.
            ext_call: string to call external program to get callstring of target algorithm
        Returns:
            A command call list to execute the command producing a single line of output containing the solver command string
        '''
        callstring_in = NamedTemporaryFile(suffix=".csv",
                                           prefix="callstring",
                                           dir=self._tmp_dir,
                                           delete=False)
        callstring_in.write("%s\n" % (runargs["instance"]))
        callstring_in.write("%d\n" % (runargs["seed"]))
        for name, value in config.items():
            callstring_in.write("%s,%s\n" % (name, value))
        callstring_in.flush()

        cmd = ext_call.split(" ")
        cmd.append(callstring_in.name)
        self.print_d(" ".join(cmd))
        try:
            io = Popen(cmd, shell=False, preexec_fn=os.setpgrp, stdout=PIPE)
            self._subprocesses.append(io)
            out_, _ = io.communicate()
            self._subprocesses.remove(io)
        except OSError:
            self._ta_misc = "failed to run external program for output parsing : %s" % (
                " ".join(cmd))
            self._ta_runtime = self._cutoff
            self._exit_code = 2
            sys.exit(2)
        if not out_:
            self._ta_misc = "external program for output parsing yielded empty output: %s" % (
                " ".join(cmd))
            self._ta_runtime = self._cutoff
            self._exit_code = 2
            sys.exit(2)
        callstring_in.close()
        os.remove(callstring_in.name)
        return out_.strip("\n")

    def process_results(self, filepointer, out_args):
        '''
        Parse a results file to extract the run's status (SUCCESS/CRASHED/etc) and other optional results.
    
        Args:
            filepointer: a pointer to the file containing the solver execution standard out.
            exit_code : exit code of target algorithm
        Returns:
            A map containing the standard AClib run results. The current standard result map as of AClib 2.06 is:
            {
                "status" : <"SAT"/"UNSAT"/"TIMEOUT"/"CRASHED"/"ABORT">,
                "runtime" : <runtime of target algrithm>,
                "quality" : <a domain specific measure of the quality of the solution [optional]>,
                "misc" : <a (comma-less) string that will be associated with the run [optional]>
            }
            ATTENTION: The return values will overwrite the measured results of the runsolver (if runsolver was used). 
        '''
        raise NotImplementedError()

    def process_results_ext(self, filepointer, out_args, ext_call):
        '''
        Args:
            filepointer: a pointer to the file containing the solver execution standard out.
            exit_code : exit code of target algorithm
        Returns:
            A map containing the standard AClib run results. The current standard result map as of AClib 2.06 is:
            {
                "status" : <"SAT"/"UNSAT"/"TIMEOUT"/"CRASHED"/"ABORT">,
                "quality" : <a domain specific measure of the quality of the solution [optional]>,
                "misc" : <a (comma-less) string that will be associated with the run [optional]>
            }
        '''

        cmd = ext_call.split(" ")
        cmd.append(filepointer.name)
        self.print_d(" ".join(cmd))
        try:
            io = Popen(cmd, shell=False, preexec_fn=os.setpgrp, stdout=PIPE)
            self._subprocesses.append(io)
            out_, _ = io.communicate()
            self._subprocesses.remove(io)
        except OSError:
            self._ta_misc = "failed to run external program for output parsing"
            self._ta_runtime = self._cutoff
            self._exit_code = 2
            sys.exit(2)

        result_map = {}
        for line in out_.split("\n"):
            if line.startswith("status:"):
                result_map["status"] = line.split(":")[1].strip(" ")
            elif line.startswith("quality:"):
                result_map["quality"] = line.split(":")[1].strip(" ")
            elif line.startswith("misc:"):
                result_map["misc"] = line.split(":")[1]

        return result_map
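
For illustration, the external parsing program invoked above is expected to print lines of the following form (values hypothetical), matching the startswith() checks in process_results_ext:

status: SAT
quality: 42
misc: solved by preprocessing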
Example #55
0
class CodePipelineJob(object):
    def __init__(self, event, context):
        super(CodePipelineJob, self).__init__()
        self.cp_job = event["CodePipeline.job"]
        self.cp_job_id = self.cp_job["id"]
        self.cp_data = self.cp_job["data"]
        self.cp_action_cfg = (self.cp_data.get("actionConfiguration",
                                               {}).get("configuration", {}))
        self.cp_userparam_str = self.cp_action_cfg.get("UserParameters")
        self.cp_input_artifacts = self.cp_data["inputArtifacts"]
        self.cp_output_artifacts = self.cp_data["outputArtifacts"]

        creds = self.cp_data["artifactCredentials"]

        # Create a Boto3 session using the credentials provided by
        # CodePipeline, not the Lambda job, for processing artifacts.
        self.boto_session = Boto3Session(
            aws_access_key_id=creds["accessKeyId"],
            aws_secret_access_key=creds["secretAccessKey"],
            aws_session_token=creds["sessionToken"])

        # CodePipeline itself should be called using the default client.
        # We can't run this during unit tests -- Moto doesn't support it yet.
        skip_codepipeline = (event.get("TestParameters",
                                       {}).get("SkipCodePipeline"))
        if skip_codepipeline:
            self.codepipeline = None
        else:  # pragma: no cover
            self.codepipeline = boto3.client("codepipeline")

        # Parameters for running the transclusion
        self.default_input_filename = "assemble.yml"
        self.template_document_name = None
        self.resource_document_names = []
        self.local_tags = True
        self.format = "yaml"

        # File objects for the template and resources
        self.template_document = None
        self.resource_documents = []

        # Create a named temporary file for the output.
        self.output_temp = NamedTemporaryFile(mode="w+")

        return

    def run(self):
        self.create_input_artifacts()
        self.extract_user_parameters()
        self.extract_artifacts()
        self.transclude()
        self.write_output()
        return

    def create_input_artifacts(self):
        # The input artifacts, in order declared.
        self.input_artifacts = [
            InputArtifact(ia, self.boto_session)
            for ia in self.cp_input_artifacts
        ]

        # And by name
        self.input_artifacts_by_name = dict([(ia.name, ia)
                                             for ia in self.input_artifacts])

        return

    def extract_user_parameters(self):
        # Decode the user parameters if specified.
        if self.cp_userparam_str:
            user_parameters = json_loads(self.cp_userparam_str)
            if not isinstance(user_parameters, dict):
                raise TypeError("Expected a JSON object for user parameters.")
        else:
            user_parameters = {}

        # What input artifacts have we seen?
        seen_artifacts = set()

        # Get the default input filename, if specified.
        self.default_input_filename = user_parameters.get(
            "DefaultInputFilename", "assemble.yml")

        # Get the template document name
        td = user_parameters.get("TemplateDocument")
        if td is not None:
            ia_name, _ = self.check_artifact_filename(td, "TemplateDocument")
            seen_artifacts.add(ia_name)
            self.template_document_name = td

        # And the resource document names
        rds = user_parameters.get("ResourceDocuments")
        if rds is not None:
            # Be lenient on input -- allow a single string instead of a list
            # of strings.
            if isinstance(rds, string_types):
                rds = [rds]

            for rd in rds:
                ia_name, _ = self.check_artifact_filename(
                    rd, "ResourceDocuments")
                seen_artifacts.add(ia_name)
                self.resource_document_names.append(rd)

        # Do we want local tag support?
        self.local_tags = user_parameters.get("LocalTags", True)

        # What format should we use for the output?
        self.format = user_parameters.get("Format", "yaml")
        if self.format not in (
                "json",
                "yaml",
        ):
            raise ValueError(
                "Invalid output format '%s': valid types are 'json' and "
                "'yaml'" % self.format)

        # Name of the output file
        self.output_filename = user_parameters.get("OutputFilename",
                                                   "assemble.yml")

        # If any input artifacts are untouched, use them as the template or
        # additional resource documents.
        for ia in self.input_artifacts:
            if ia.name not in seen_artifacts:
                doc_name = ia.name + "::" + self.default_input_filename

                if self.template_document_name is None:
                    self.template_document_name = doc_name
                else:
                    self.resource_document_names.append(doc_name)

        if self.template_document_name is None:
            raise ValueError("No input artifact was specified as the "
                             "template file.")

        return

    def check_artifact_filename(self, doc_name, param_type):
        """
        cpj.check_artifact_filename(doc_name) -> (ia_name, filename)

        Make sure doc_name is in "artifact_name::filename" format, and
        that artifact_name is valid.
        """
        try:
            ia_name, filename = split_artifact_filename(doc_name)
        except ValueError:
            raise ValueError(
                "Invalid value for %s: expected input_artifact::filename: %s" %
                (param_type, doc_name))

        if ia_name not in self.input_artifacts_by_name:
            raise ValueError(
                "Invalid value for %s: unknown input artifact %s" %
                (param_type, ia_name))

        return ia_name, filename
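
    # Example (hypothetical values): doc_name = "SourceOutput::templates/app.yml"
    # is expected to split into ("SourceOutput", "templates/app.yml"); a value
    # without the separator raises the ValueError handled above.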

    def extract_artifact(self, doc_name):
        ia_name, filename = split_artifact_filename(doc_name)
        ia = self.input_artifacts_by_name[ia_name]

        try:
            doc = ia.get_file(filename)
            doc.filename = doc_name
            return doc
        except Exception as e:
            raise RuntimeError(
                "While processing document %s::%s from %s: %s" %
                (ia_name, filename, ia.url, e))

    def extract_artifacts(self):
        """
        Extract all input artifacts.
        """
        self.template_document = self.extract_artifact(
            self.template_document_name)

        for rdn in self.resource_document_names:
            self.resource_documents.append(self.extract_artifact(rdn))

    def transclude(self):
        result = run(self.template_document, self.resource_documents,
                     self.output_temp, self.local_tags)
        if result != 0:
            raise ValueError("Transclusion error -- see above messages for "
                             "details.")

        return

    def write_output(self):
        # Create the output ZipFile
        output_binary = NamedTemporaryFile(mode="w+b")
        output_zip = ZipFile(output_binary, "a")
        self.output_temp.seek(0)
        content = self.output_temp.read()
        output_zip.writestr(self.output_filename, content)
        output_zip.close()

        # Write the output artifact
        oa = self.cp_output_artifacts[0]
        s3loc = oa["location"]["s3Location"]
        bucket = s3loc["bucketName"]
        key = s3loc["objectKey"]
        output_binary.seek(0)
        s3 = self.boto_session.client("s3",
                                      config=Config(signature_version="s3v4"))
        s3.put_object(Body=output_binary,
                      Bucket=bucket,
                      Key=key,
                      ServerSideEncryption="aws:kms")
        return

    def send_success(self):
        log.info("Notifying CodePipeline: put_job_success_result(%r)",
                 self.cp_job_id)
        if self.codepipeline:  # pragma: no cover
            self.codepipeline.put_job_success_result(jobId=self.cp_job_id)
        return

    def send_failure(self, message):
        log.info(
            "Notifying CodePipeline: put_job_failure_result("
            "%r, message=%r)", self.cp_job_id, message)
        if self.codepipeline:  # pragma: no cover
            self.codepipeline.put_job_failure_result(jobId=self.cp_job_id,
                                                     failureDetails={
                                                         "type": "JobFailed",
                                                         "message": message,
                                                     })
        return
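
A hedged sketch of a Lambda entry point wiring CodePipelineJob together; the handler name and the error handling are assumptions, not part of the source:

def lambda_handler(event, context):
    job = CodePipelineJob(event, context)
    try:
        job.run()
        job.send_success()
    except Exception as e:  # report any failure back to CodePipeline
        job.send_failure(str(e))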
Example #56
0
class AbstractWrapper(object):
    '''
        abstract algorithm wrapper
    '''
    def __init__(self):
        '''
            Constructor
        '''
        root = logging.getLogger()
        ch = logging.StreamHandler(sys.stdout)
        formatter = logging.Formatter('[%(name)s][%(levelname)s] %(message)s')
        ch.setFormatter(formatter)
        root.handlers = [ch]
        self.logger = logging.getLogger("GenericWrapper")

        #program_name = os.path.basename(sys.argv[0])
        program_version = "v%s" % __version__
        program_build_date = str(__updated__)
        program_version_message = "%%(prog)s %s (%s)" % (program_version,
                                                         program_build_date)

        #program_shortdesc = __import__("__main__").__doc__.split("\n")[1]
        program_license = '''GenericWrapper4AC
    
          Created by %s on %s.
          Copyright 2016 - AClib. All rights reserved.
          
          Licensed under the BSD
          
          Distributed on an "AS IS" basis without warranties
          or conditions of any kind, either express or implied.
        
          USAGE
        ''' % (str(__authors__), str(__date__))
        #self.parser = ArgumentParser(description=program_license, formatter_class=RawDescriptionHelpFormatter, add_help=False)
        self.parser = OArgumentParser()
        self.args = None

        self.RESULT_MAPPING = {'SUCCESS': "SAT"}
        self._watcher_file = None
        self._solver_file = None

        self._instance = ""
        self._specifics = ""
        self._cutoff = 0.0
        self._runlength = 0
        self._seed = 0
        self._config_dict = {}

        self._exit_code = None

        self._runsolver = None
        self._mem_limit = 2048
        self._tmp_dir = None
        self._tmp_dir_algo = None

        self._crashed_if_non_zero_status = True
        self._use_local_tmp = False

        self._subprocesses = []

        self._DEBUG = True
        if self._DEBUG:
            self.logger.setLevel(logging.DEBUG)

        self._DELAY2KILL = 2

        self._ta_status = "EXTERNALKILL"
        self._ta_runtime = 999999999.0
        self._ta_runlength = -1
        self._ta_quality = 999999999.0
        self._ta_exit_code = None
        self._ta_misc = ""

    def main(self, argv=None):
        ''' parse command line'''
        if argv is None:
            argv = sys.argv
        else:
            sys.argv.extend(argv)

        try:
            signal.signal(signal.SIGTERM, signalHandler)
            signal.signal(signal.SIGQUIT, signalHandler)
            signal.signal(signal.SIGINT, signalHandler)

            # Setup argument parser

            self.parser.add_argument(
                "--runsolver-path",
                dest="runsolver",
                default=os.path.join(genericWrapper4AC.__path__[0], "binaries",
                                     "runsolver"),
                help=
                "path to runsolver binary (if None, the runsolver is deactivated)"
            )
            self.parser.add_argument(
                "--temp-file-dir",
                dest="tmp_dir",
                default=None,
                help=
                "directory for temporary files (relative to -exec-dir in SMAC scenario)"
            )
            self.parser.add_argument(
                "--temp-file-dir-algo",
                dest="tmp_dir_algo",
                default=True,
                type=bool,
                help="create a directory for temporary files from target algo"
            )  #TODO: set default to False
            self.parser.add_argument("--mem-limit",
                                     dest="mem_limit",
                                     default=self._mem_limit,
                                     type=int,
                                     help="memory limit in MB")
            self.parser.add_argument(
                "--internal",
                dest="internal",
                default=False,
                type=bool,
                help="skip calling an external target algorithm")
            self.parser.add_argument(
                "--log",
                dest="log",
                default=False,
                type=bool,
                help=
                "logs all runs in \"target_algo_runs.csv\" in --temp-file-dir")
            self.parser.add_argument(
                "--max_quality",
                dest="max_quality",
                default=None,
                help=
                "maximal quality of unsuccessful runs with timeouts or crashes"
            )
            self.parser.add_argument("--help",
                                     dest="show_help",
                                     default=False,
                                     type=bool,
                                     help="shows help")

            # new format arguments
            self.parser.add_argument("--instance",
                                     dest="instance",
                                     default=None,
                                     help="path to instance")
            self.parser.add_argument("--cutoff",
                                     dest="cutoff",
                                     default=None,
                                     type=float,
                                     help="running time cutoff")
            self.parser.add_argument("--seed",
                                     dest="seed",
                                     default=None,
                                     type=int,
                                     help="random seed")

            # Process arguments
            self.args, target_args = self.parser.parse_cmd(sys.argv[1:])
            args = self.args

            if args.show_help:
                self.parser.print_help()
                self._ta_status = "ABORT"
                self._ta_misc = "help was requested..."
                self._exit_code = 1
                sys.exit(1)

            if args.runsolver != "None" and not os.path.isfile(
                    args.runsolver) and not args.internal:
                self._ta_status = "ABORT"
                self._ta_misc = "runsolver is missing - should have been at %s." % (
                    args.runsolver)
                self._exit_code = 1
                sys.exit(1)
            else:
                self._runsolver = args.runsolver
                self._mem_limit = args.mem_limit

            if args.tmp_dir is None:
                if "TMPDIR" in os.environ:
                    args.tmp_dir = os.environ["TMPDIR"]
                    self._use_local_tmp = True
                else:
                    args.tmp_dir = "."

            if not os.path.isdir(args.tmp_dir):
                self._ta_status = "ABORT"
                self._ta_misc = "temp directory is missing - should have been at %s." % (
                    args.tmp_dir)
                self._exit_code = 1
                sys.exit(1)
            else:
                self._tmp_dir = args.tmp_dir

            if args.max_quality:
                self._ta_quality = float(args.max_quality)

            self.new_format = "--config" in target_args

            if self.new_format and len(target_args) < 5:
                self._ta_status = "ABORT"
                self._ta_misc = "some required TA parameters (instance, specifics, cutoff, runlength, seed) missing - was [%s]." % (
                    " ".join(target_args))
                self._exit_code = 1
                sys.exit(1)

            self._config_dict = self.build_parameter_dict(args, target_args)

            if args.tmp_dir_algo:
                try:
                    self._tmp_dir_algo = mkdtemp(dir="/tmp/")
                except OSError:
                    self.logger.error(
                        "Creating directory for temporary files failed")
                    pass

            runargs = {
                "instance": self._instance,
                "specifics": self._specifics,
                "cutoff": self._cutoff,
                "runlength": self._runlength,
                "seed": self._seed,
                "tmp": self._tmp_dir_algo
            }

            target_cmd = self.get_command_line_args(runargs=runargs,
                                                    config=self._config_dict)

            if isinstance(target_cmd, list):
                target_cmd = " ".join(target_cmd)

            if not args.internal:
                start_time = time.time()
                self.call_target(target_cmd)
                self._ta_runtime = time.time() - start_time
                self.logger.debug("Measured wallclock time: %f" %
                                  (self._ta_runtime))
                self.read_runsolver_output()
                self.logger.debug("Measured time by runsolver: %f" %
                                  (self._ta_runtime))

            resultMap = self.process_results(self._solver_file, {
                "exit_code": self._ta_exit_code,
                "instance": self._instance
            })

            if ('status' in resultMap):
                self._ta_status = self.RESULT_MAPPING.get(
                    resultMap['status'], resultMap['status'])
            if ('runtime' in resultMap):
                self._ta_runtime = resultMap['runtime']
            if ('quality' in resultMap):
                self._ta_quality = resultMap['quality']
            if 'misc' in resultMap and not self._ta_misc:
                self._ta_misc = resultMap['misc']
            elif 'misc' in resultMap and self._ta_misc:
                self._ta_misc += " - " + resultMap['misc']

            # if still no status was determined, something went wrong and output files should be kept
            if self._ta_status is "EXTERNALKILL":
                self._ta_status = "CRASHED"
            sys.exit()
        except (KeyboardInterrupt, SystemExit):
            self.cleanup()
            self.print_result_string()
            if self._ta_exit_code:
                sys.exit(self._ta_exit_code)
            elif self._exit_code:
                sys.exit(self._exit_code)
            else:
                sys.exit(0)

    def build_parameter_dict(self, args, arg_list):
        '''
            Reads all arguments which were not parsed by ArgumentParser,
            extracts all meta information
            and builds a mapping: parameter name -> parameter value
            Format Assumption: <instance> <specifics> <runtime cutoff> <runlength> <seed> <solver parameters>
            
            Arguments
            ---------
            args: namedtuple
                command line parsed arguments
            arg_list: list
                list of all options not parsed by ArgumentParser
        '''

        if "--config" in arg_list:
            self._instance = args.instance
            self._specifics = None
            self._cutoff = int(float(args.cutoff) + 1 -
                               1e-10)  # runsolver only rounds down to integer
            self._cutoff = min(self._cutoff,
                               2**31 - 1)  # at most 32bit integer supported
            self._ta_runtime = self._cutoff
            self._runlength = None
            self._seed = int(args.seed)
            params = arg_list[arg_list.index("--config") + 1:]

        else:
            self._instance = arg_list[0]
            self._specifics = arg_list[1]
            self._cutoff = int(float(arg_list[2]) + 1 -
                               1e-10)  # runsolver only rounds down to integer
            self._cutoff = min(self._cutoff,
                               2**31 - 1)  # at most 32bit integer supported
            self._ta_runtime = self._cutoff
            self._runlength = int(arg_list[3])
            self._seed = int(arg_list[4])
            params = arg_list[5:]

        if len(params) % 2 != 0:  # parameters must come as name/value pairs
            self._ta_status = "ABORT"
            self._ta_misc = "target algorithm parameter list MUST have even length - found %d arguments." % (
                len(params))
            self.logger.debug(" ".join(params))
            self._exit_code = 1
            sys.exit(1)

        return dict((name, value.strip("'"))
                    for name, value in zip(params[::2], params[1::2]))

    def call_target(self, target_cmd):
        '''
            extends the target algorithm command line call with the runsolver
            and executes it
            Args:
                list of target cmd (from getCommandLineArgs)
        '''
        random_id = random.randint(0, 1000000)
        self._watcher_file = NamedTemporaryFile(suffix=".log",
                                                prefix="watcher-%d-" %
                                                (random_id),
                                                dir=self._tmp_dir,
                                                delete=False)
        self._solver_file = NamedTemporaryFile(suffix=".log",
                                               prefix="solver-%d-" %
                                               (random_id),
                                               dir=self._tmp_dir,
                                               delete=False)

        runsolver_cmd = []
        if self._runsolver != "None":
            runsolver_cmd = [
                self._runsolver, "-M", self._mem_limit, "-C", self._cutoff,
                "-w",
                "\"%s\"" % (self._watcher_file.name), "-o",
                "\"%s\"" % (self._solver_file.name)
            ]

        runsolver_cmd = " ".join(map(str, runsolver_cmd)) + " " + target_cmd
        #for debugging
        self.logger.debug("Calling runsolver. Command-line:")
        self.logger.debug(runsolver_cmd)

        # run
        try:
            if self._runsolver != "None":
                io = Popen(runsolver_cmd,
                           shell=True,
                           preexec_fn=os.setpgrp,
                           universal_newlines=True)
            else:
                # runsolver_cmd is already a single string at this point, so
                # pass it directly; map() over a string would iterate chars
                io = Popen(runsolver_cmd,
                           stdout=self._solver_file,
                           shell=True,
                           preexec_fn=os.setpgrp,
                           universal_newlines=True)
            self._subprocesses.append(io)
            io.wait()
            self._subprocesses.remove(io)
            if io.stdout:
                io.stdout.flush()
        except OSError:
            self._ta_status = "ABORT"
            self._ta_misc = "execution failed: %s" % (" ".join(
                map(str, runsolver_cmd)))
            self._exit_code = 1
            sys.exit(1)
        self._solver_file.seek(0)
        self._watcher_file.seek(0)

    def float_regex(self):
        return r'[+-]?\d+(?:\.\d+)?(?:[eE][+-]\d+)?'

    def read_runsolver_output(self):
        '''
            reads self._watcher_file,
            extracts the runtime
            and records whether a memout or timeout occurred
        '''
        if self._runsolver == "None":
            self._ta_exit_code = 0
            return

        self.logger.debug("Reading runsolver output from %s" %
                          (self._watcher_file.name))
        try:
            data = str(self._watcher_file.read().decode("utf8"))
        except Exception:
            # due to a rare runsolver bug, the watcher file can be
            # corrupted and fail to be read
            self._ta_exit_code = 0
            self.logger.warning(
                "Failed to read runsolver's watcher file---trusting our own wallclock-time measurement"
            )
            return

        if (re.search('runsolver_max_cpu_time_exceeded', data)
                or re.search('Maximum CPU time exceeded', data)):
            self._ta_status = "TIMEOUT"

        if (re.search('runsolver_max_memory_limit_exceeded', data)
                or re.search('Maximum VSize exceeded', data)):
            self._ta_status = "TIMEOUT"
            self._ta_misc = "memory limit was exceeded"

        cpu_pattern1 = re.compile(
            '^runsolver_cputime: (%s)' % (self.float_regex()), re.MULTILINE)
        cpu_match1 = re.search(cpu_pattern1, data)

        cpu_pattern2 = re.compile(
            '^CPU time \\(s\\): (%s)' % (self.float_regex()), re.MULTILINE)
        cpu_match2 = re.search(cpu_pattern2, data)

        if (cpu_match1):
            self._ta_runtime = float(cpu_match1.group(1))
        if (cpu_match2):
            self._ta_runtime = float(cpu_match2.group(1))

        exitcode_pattern = re.compile('Child status: ([0-9]+)')
        exitcode_match = re.search(exitcode_pattern, data)

        if (exitcode_match):
            self._ta_exit_code = int(exitcode_match.group(1))

    def print_result_string(self):

        # ensure a minimal runtime of 0.0005
        self._ta_runtime = max(0.0005, self._ta_runtime)

        if self.args and self.args.log:
            with open("target_algo_runs.json", "a") as fp:
                out_dict = {
                    "instance": self._instance,
                    "seed": self._seed,
                    "status": self._ta_status,
                    "time": self._ta_runtime,
                    "quality": self._ta_quality,
                    "config": self._config_dict,
                    "misc": self._ta_misc
                }
                json.dump(out_dict, fp)
                fp.write("\n")
                fp.flush()

        if self._ta_status in ["SAT", "UNSAT"]:
            aclib_status = "SUCCESS"
        else:
            aclib_status = self._ta_status

        if self.new_format:
            aclib2_out_dict = {
                "status": str(aclib_status),
                "cost": float(self._ta_quality),
                "runtime": float(self._ta_runtime),
                "misc": str(self._ta_misc)
            }
            print("Result of this algorithm run: %s" %
                  (json.dumps(aclib2_out_dict)))

        sys.stdout.write(
            "Result for ParamILS: %s, %.4f, %s, %s, %s" %
            (self._ta_status, self._ta_runtime, str(
                self._ta_runlength), str(self._ta_quality), str(self._seed)))
        if (len(self._ta_misc) > 0):
            sys.stdout.write(", %s" % (self._ta_misc))
        print('')
        sys.stdout.flush()

    def cleanup(self):
        '''
            cleanup if error occurred or external signal handled
        '''
        if (len(self._subprocesses) > 0):
            print("killing the target run!")
            try:
                for sub in self._subprocesses:
                    #sub.terminate()
                    Popen(["pkill", "-TERM", "-P",
                           str(sub.pid)],
                          universal_newlines=True)
                    self.logger.debug("Wait %d seconds ..." %
                                      (self._DELAY2KILL))
                    time.sleep(self._DELAY2KILL)
                    if sub.returncode is None:  # still running
                        sub.kill()

                self.logger.debug(
                    "done... If anything in the subprocess tree fork'd a new process group, we may not have caught everything..."
                )
                self._ta_misc = "forced to exit by signal or keyboard interrupt."
                self._ta_runtime = self._cutoff
            except (OSError, KeyboardInterrupt, SystemExit):
                self._ta_misc = "forced to exit by multiple signals/interrupts."
                self._ta_runtime = self._cutoff

        if (self._ta_status is "ABORT" or self._ta_status is "CRASHED"):
            if (len(self._ta_misc) == 0):
                if self._ta_exit_code:
                    self._ta_misc = 'Problem with run. Exit code was %d.' % (
                        self._ta_exit_code)
                else:
                    self._ta_misc = 'Problem with run. Exit code was N/A.'

            if (self._watcher_file and self._solver_file):
                self._ta_misc = self._ta_misc + '; Preserving runsolver output at %s - preserving target algorithm output at %s' % (
                    self._watcher_file.name or "<none>", self._solver_file.name
                    or "<none>")

        try:
            if (self._watcher_file):
                self._watcher_file.close()
            if (self._solver_file):
                self._solver_file.close()

            if (self._ta_status is not "ABORT"
                    and self._ta_status is not "CRASHED"):
                os.remove(self._watcher_file.name)
                os.remove(self._solver_file.name)
            elif self._use_local_tmp:
                shutil.copy(self._watcher_file.name, ".")
                shutil.copy(self._solver_file.name, ".")
                os.remove(self._watcher_file.name)
                os.remove(self._solver_file.name)

            if self._tmp_dir_algo:
                shutil.rmtree(self._tmp_dir_algo)

        except (OSError, KeyboardInterrupt, SystemExit):
            self._ta_misc = "problems removing temporary files during cleanup."
        except AttributeError:
            pass  #in internal mode, these files are not generated

        if self._ta_status == "EXTERNALKILL":
            self._ta_status = "CRASHED"
            self._exit_code = 3

    def get_command_line_args(self, runargs, config):
        '''
        Returns the command call list containing arguments to execute the implementing subclass' solver.
        The default implementation raises a NotImplementedError, so an
        implementing subclass must override this method.
    
        Args:
            runargs: a map of any non-configuration arguments required for the execution of the solver.
            config: a mapping from parameter name (with prefix) to parameter value.
        Returns:
            A command call list to execute a target algorithm.
        '''
        raise NotImplementedError()

    def process_results(self, filepointer, out_args):
        '''
        Parse a results file to extract the run's status (SUCCESS/CRASHED/etc) and other optional results.
    
        Args:
            filepointer: a pointer to the file containing the solver execution standard out.
            out_args: a map of additional run information, e.g. the exit code of the target algorithm.
        Returns:
            A map containing the standard AClib run results. The current standard result map as of AClib 2.06 is:
            {
                "status" : <"SAT"/"UNSAT"/"TIMEOUT"/"CRASHED"/"ABORT">,
                "runtime" : <runtime of target algrithm>,
                "quality" : <a domain specific measure of the quality of the solution [optional]>,
                "misc" : <a (comma-less) string that will be associated with the run [optional]>
            }
            ATTENTION: The return values will overwrite the measured results of the runsolver (if runsolver was used). 
        '''
        raise NotImplementedError()
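
A minimal sketch of an implementing subclass, to make the contract above concrete. The solver binary, the flag style, and the 's SATISFIABLE' output markers are illustrative assumptions, not part of the wrapper interface:

    import re

    class MySolverWrapper(object):
        # In practice this would subclass the generic wrapper shown above;
        # the base class name is not visible in this excerpt, so `object`
        # stands in for it here.

        def get_command_line_args(self, runargs, config):
            # runargs is assumed to carry 'instance', 'seed' and 'cutoff'
            cmd = "./mysolver --instance %s --seed %s --cutoff %s" % (
                runargs["instance"], runargs["seed"], runargs["cutoff"])
            # append each parameter as a '<name> <value>' pair of solver flags
            for name, value in config.items():
                cmd += " %s %s" % (name, value)
            return cmd

        def process_results(self, filepointer, out_args):
            # scan the solver's standard out for (hypothetical) result markers
            data = str(filepointer.read())
            if re.search("s SATISFIABLE", data):
                return {"status": "SAT"}
            if re.search("s UNSATISFIABLE", data):
                return {"status": "UNSAT"}
            return {"status": "CRASHED"}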
Example #57
    def create_from_file(self, stash, name=None, album=None, artist=None):
        if not isinstance(stash, file):
            filename = stash
            stash = open(stash)
        else:
            filename = getattr(stash, 'name', 'input_file')

        inspfp = NamedTemporaryFile(suffix=os.path.splitext(filename)[1])
        inspfp.write(stash.read())
        stash.seek(0)
        inspfp.seek(0)
        inspobj = Inspector(fileobj=inspfp)

        files = []

        if inspobj.mimetype == 'application/zip':
            myzip = zipfile.ZipFile(stash)
            count = 0
            for member in myzip.namelist():
                # We could just use ZipFile.open, but we need to
                # be able to seek.
                if member.endswith('/'):
                    continue
                mytarget = NamedTemporaryFile()
                mytarget.write(myzip.read(member))
                mytarget.seek(0)
                myinspfp = NamedTemporaryFile()
                myinspfp.write(mytarget.read())
                myinspfp.seek(0)
                myinspobj = Inspector(fileobj=myinspfp)
                mytarget.seek(0)
                files.append((mytarget, myinspobj))
                count += 1
        elif inspobj.mimetype.startswith('audio/'):
            stash.seek(0)
            files.append((stash, inspobj))
        else:
            raise Exception(
                'Could not figure out what to do with {0} of type {1}.'.format(
                    filename, inspobj.mimetype))

        results = []

        for f, i in files:
            if artist:
                i.artist = artist
            if album:
                i.album = album
            if name:
                i.name = name
            if i.album is None:
                i.album = 'Non-album tracks'
            mandatory = ['artist', 'album', 'name']
            proceed = True
            for attrib in mandatory:
                if not getattr(i, attrib, None):
                    proceed = False

            if not proceed:
                results.append(None)
                continue

            art, cre = Artist.objects.get_or_create(
                name__iexact=i.artist,
                defaults={
                    'name': i.artist,
                },
            )
            alb, cre = Album.objects.get_or_create(
                name__iexact=i.album,
                defaults={
                    'name': i.album,
                    'is_compilation': getattr(i, 'is_compilation', False),
                },
            )
            t, cre = self.get_or_create(
                name__iexact=i.name,
                album=alb,
                artist=art,
                defaults={
                    'name': i.name,
                    'track_number': getattr(i, 'track', None),
                    'disc_number': getattr(i, 'disc', None),
                    'length': getattr(i, 'length', None),
                },
            )
            af = AssetFile.objects.create(
                name=i.name,
                asset=t,
                contents=File(f),
                mimetype=i.mimetype,
            )

            t._inspect_files(qs=t.assetfile_set.filter(pk=af.pk))
            results.append(t)

        return results
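
A hypothetical call to the manager method above; the `Track` model name and the archive path are assumptions for illustration:

    # upload a zip of audio files; tracks with incomplete tags come back as None
    tracks = Track.objects.create_from_file('/tmp/album.zip', album='Demo Album')
    created = [t for t in tracks if t is not None]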
Example #58
    def export(self,
               out_f=None,
               format='mp3',
               codec=None,
               bitrate=None,
               parameters=None,
               tags=None,
               id3v2_version='4'):
        """
        Export an AudioSegment to a file with given options

        out_f (string):
            Path to destination audio file

        format (string)
            Format for destination audio file. ('mp3', 'wav', 'ogg' or other ffmpeg/avconv supported files)

        codec (string)
            Codec used to encoding for the destination.

        bitrate (string)
            Bitrate used when encoding destination file. (128k, 256k, 320k...)

        parameters (string)
            Additional ffmpeg/avconv parameters

        tags (dict)
            Set metadata information on the destination file, usually used as tags. ({title='Song Title', artist='Song Artist'})

        id3v2_version (string)
            Set ID3v2 version for tags. (default: '4')
        """
        id3v2_allowed_versions = ['3', '4']

        out_f = _fd_or_path_or_tempfile(out_f, 'wb+')
        out_f.seek(0)

        # for wav output we can just write the data directly to out_f
        if format == "wav":
            data = out_f
        else:
            data = NamedTemporaryFile(mode="wb", delete=False)

        wave_data = wave.open(data, 'wb')
        wave_data.setnchannels(self.channels)
        wave_data.setsampwidth(self.sample_width)
        wave_data.setframerate(self.frame_rate)
        # For some reason packing the wave header struct with a float in python 2
        # doesn't throw an exception
        wave_data.setnframes(int(self.frame_count()))
        wave_data.writeframesraw(self._data)
        wave_data.close()

        # for wav files, we're done (wav data is written directly to out_f)
        if format == 'wav':
            return out_f

        output = NamedTemporaryFile(mode="w+b", delete=False)

        # build converter command to export
        conversion_command = [
            self.converter,
            '-y',  # always overwrite existing files
            "-f",
            "wav",
            "-i",
            data.name,  # input options (filename last)
        ]

        if format == "ogg" and codec is None:
            convertion_command.extend(["-acodec", "libvorbis"])

        if codec is not None:
            # force audio encoder
            convertion_command.extend(["-acodec", codec])

        if bitrate is not None:
            convertion_command.extend(["-b:a", bitrate])

        if parameters is not None:
            # extend arguments with arbitrary set
            convertion_command.extend(parameters)

        if tags is not None:
            if not isinstance(tags, dict):
                raise InvalidTag("Tags must be a dictionary.")
            else:
                # Extend converter command with tags
                for key, value in tags.items():
                    conversion_command.extend(
                        ['-metadata', '{0}={1}'.format(key, value)])

                if format == 'mp3':
                    # set id3v2 tag version
                    if id3v2_version not in id3v2_allowed_versions:
                        raise InvalidID3TagVersion(
                            "id3v2_version not allowed, allowed versions: %s" %
                            id3v2_allowed_versions)
                    conversion_command.extend(
                        ["-id3v2_version", id3v2_version])

        conversion_command.extend([
            "-f",
            format,
            output.name,  # output options (filename last)
        ])

        # run the converter, silencing its stderr chatter
        # (os.devnull must be opened for writing, not reading)
        subprocess.call(
            conversion_command,
            stderr=open(os.devnull, 'wb'))

        output.seek(0)
        out_f.write(output.read())

        data.close()
        output.close()

        os.unlink(data.name)
        os.unlink(output.name)

        out_f.seek(0)
        return out_f
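
A typical call to the export method above, assuming `seg` is an instance of the surrounding AudioSegment-style class; the file name and tag values are illustrative:

    seg.export('song.mp3', format='mp3', bitrate='192k',
               tags={'title': 'Song Title', 'artist': 'Song Artist'},
               id3v2_version='3')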
Example #59
class UpdateFwTask(object):
    def __init__(self, stack, handler, length):
        self.stack = stack
        self.tmpfile = NamedTemporaryFile()
        self.padding_length = length
        handler.binary_mode = True

    def on_verified(self, watcher, revent):
        self.subwatcher = None
        handler, proc = watcher.data
        ret = proc.poll()
        logger.info("Firmware verify: %s", ret)

        if ret:
            handler.send_text("error %s" % FILE_BROKEN)
            self.stack.exit_task(self, True)
        else:
            shutil.copyfile(self.tmpfile.name, FIRMWARE_UPDATE_PATH)
            handler.send_text("ok")
            handler.close()
            os.system("fluxlauncher --update &")

    def on_exit(self):
        pass

    def on_text(self, message, handler):
        raise SystemError(PROTOCOL_ERROR, "UPLOADING_BINARY")

    def on_binary(self, buf, handler):
        try:
            l = len(buf)

            if self.padding_length > l:
                self.tmpfile.write(buf)
                self.padding_length -= l

            else:
                if self.padding_length == l:
                    self.tmpfile.write(buf)
                else:
                    self.tmpfile.write(buf[:self.padding_length])
                    logger.error("Recv data length error")

                handler.binary_mode = False

                logger.info("New firmware received")
                self.tmpfile.file.flush()
                self.tmpfile.seek(0)
                s = Storage("update_fw")
                with s.open("upload.fxfw", "wb") as f:
                    f.write(self.tmpfile.read())

                proc = Popen(["fxupdate.py", "--dryrun", self.tmpfile.name])
                self.subwatcher = self.stack.loop.child(
                    proc.pid, False, self.on_verified, (handler, proc))
                self.subwatcher.start()

        except RuntimeError as e:
            handler.send_text(("error %s" % e.args[0]).encode())
        except Exception:
            logger.exception("Unhandle Error")
            handler.send_text("error %s" % UNKNOWN_ERROR)

    def send_upload_request(self):
        try:
            s = socket.socket(socket.AF_UNIX)
            s.connect(HALCONTROL_ENDPOINT)
            s.send("update_fw")
        except socket.error:
            raise RuntimeError(SUBSYSTEM_ERROR)
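
The on_binary handler above is a bounded write: it accepts at most padding_length bytes, truncates any excess, and leaves binary mode once the expected length has arrived. The core pattern in isolation (a sketch; the names are illustrative):

    def feed(sink, buf, remaining):
        # accept at most `remaining` bytes of `buf` into `sink`
        take = min(len(buf), remaining)
        sink.write(buf[:take])
        return remaining - take  # 0 means the transfer is complete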
Example #60
class TestBooXtream(unittest.TestCase):
    def setUp(self):
        # get a small epub test file as a file-like object
        self.epub2file = NamedTemporaryFile(delete=False)
        test_file_content = urlopen(
            'http://www.hxa.name/articles/content/EpubGuide-hxa7241.epub')
        self.epub2file.write(test_file_content.read())
        self.epub2file.seek(0)
        self.textfile = NamedTemporaryFile(delete=False)
        self.textfile.write(b'bad text file')
        self.textfile.seek(0)

    def _makeOne(self):
        from . import BooXtream
        manager = BooXtream()
        return manager

    def test_booxtream_errors(self):
        if settings.LOCAL_TEST:
            return
        from .exceptions import BooXtreamError
        inst = self._makeOne()
        if not settings.BOOXTREAM_API_KEY:
            return
        with self.assertRaises(BooXtreamError) as cm:
            inst.platform()
        self.assertIn('expirydays not set', str(cm.exception))
        params = {
            'customername': 'Jane Test',
            'languagecode': '1043',
            'expirydays': 1,
            'downloadlimit': 3,
            'exlibris': 1,
            'chapterfooter': 1,
            'disclaimer': 1,
            'referenceid': 'bad_file_check'
        }
        with self.assertRaises(BooXtreamError) as cm:
            inst.platform(epubfile=self.textfile, **params)

    def test_booxtream_good(self):
        inst = self._makeOne()
        params = {
            'customeremailaddress': '*****@*****.**',
            'customername': 'Jane Test',
            'languagecode': '1043',
            'expirydays': 1,
            'downloadlimit': 3,
            'exlibris': 1,
            'chapterfooter': 1,
            'disclaimer': 1,
        }
        params['referenceid'] = 'order' + str(time.time())
        boox = inst.platform(epubfile=self.epub2file, **params)
        self.assertRegexpMatches(boox.download_link_epub,
                                 'download.booxtream.com/')
        self.assertFalse(boox.expired)
        self.assertEqual(boox.downloads_remaining, 3)

        # make sure it works with an in-memory file
        self.epub2file.seek(0)
        in_mem_epub = BytesIO()
        in_mem_epub.write(self.epub2file.read())
        in_mem_epub.seek(0)
        boox2 = inst.platform(epubfile=in_mem_epub, **params)
        self.assertRegexpMatches(boox2.download_link_epub,
                                 'download.booxtream.com/')