Example #1
 def generateExits(self, position):
     newRooms=[-1,-1,-1,-1]
     for i in range(4):
         if bool(random.randint(0,1)):
             if i==0:#north
                 newRooms[i]=(position[0],position[1]-1)
             elif i==1:#east
                 newRooms[i]=(position[0]+1,position[1])
             elif i==2:#south
                 newRooms[i]=(position[0],position[1]+1)
             elif i==3:#west
                 newRooms[i]=(position[0]-1,position[1])
     #time to check the new rooms to make sure that they are in range etc
     for i in range(3,-1,-1):
         if newRooms[i]!=-1:
             x=newRooms[i][0]
             y=newRooms[i][1]
             if x>=self.SIZE or x<0:
                 newRooms[i]=-1
                 continue
             if y>=self.SIZE or y<0:
                 newRooms[i]=-1
                 continue
             #now check to see if this room butts up against any existing room
             if self.levelGrid[x][y]!=-1:
                 #lets see if we want to connect them
                 if not bool(random.randint(0,1)):
                     newRooms[i]=-1
                     continue
     #print(newRooms)
     return newRooms
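The exit generation above hinges on bool(random.randint(0, 1)) as a coin flip for each direction. A minimal self-contained sketch of that idiom (the seed is only there to make the output reproducible):

import random

random.seed(42)  # only so the sketch prints the same thing every run
# randint(0, 1) yields 0 or 1; bool() maps that to False/True.
flips = [bool(random.randint(0, 1)) for _ in range(8)]
print(flips)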
Example #2
    def get_category(resource_type, name):
        # this will return one of the following:
        # 'Version', 'Shared', 'Routing' or 'None'

        versioned = [
            'OS::Nova::Server',
            'OS::Neutron::Port'
        ]
        shared = [
            'OS::Heat::RandomString',
            'OS::Neutron::Net',
            'OS::Neutron::Subnet',
            'OS::Neutron::SecurityGroup',
            'OS::Neutron::Router',
            'OS::Neutron::RouterInterface',
            'OS::Neutron::Port',
            'OS::Nova::Server'
        ]
        routing = [
            'OS::Neutron::FloatingIP'
        ]

        if resource_type in versioned:
            if bool(versionRe.search(name)):
                return 'Version'
        if resource_type in shared:
            if bool(sharedRe.search(name)):
                return 'Shared'
        if resource_type in routing:
            if not bool(versionRe.search(name)) and not bool(sharedRe.search(name)):
                return 'Routing'
        return 'None'
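versionRe and sharedRe are not shown in this snippet; the classification relies on bool(pattern.search(name)) to turn a match object (or None) into True/False. A self-contained sketch with hypothetical patterns:

import re

versionRe = re.compile(r'_v\d+$')    # hypothetical naming convention
sharedRe = re.compile(r'^shared_')   # hypothetical naming convention

print(bool(versionRe.search('app_server_v2')))   # True: search() returned a match
print(bool(sharedRe.search('shared_network')))   # True
print(bool(versionRe.search('shared_network')))  # False: search() returned None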
Example #3
def make_client(conf):
    """Creates a kazoo client given a configuration dictionary."""
    # See: http://kazoo.readthedocs.org/en/latest/api/client.html
    client_kwargs = {
        'read_only': bool(conf.get('read_only')),
        'randomize_hosts': bool(conf.get('randomize_hosts')),
    }
    # See: http://kazoo.readthedocs.org/en/latest/api/retry.html
    if 'command_retry' in conf:
        client_kwargs['command_retry'] = conf['command_retry']
    if 'connection_retry' in conf:
        client_kwargs['connection_retry'] = conf['connection_retry']
    hosts = _parse_hosts(conf.get("hosts", "localhost:2181"))
    if not hosts or not isinstance(hosts, six.string_types):
        raise TypeError("Invalid hosts format, expected "
                        "non-empty string/list, not %s" % type(hosts))
    client_kwargs['hosts'] = hosts
    if 'timeout' in conf:
        client_kwargs['timeout'] = float(conf['timeout'])
    # Kazoo supports various handlers, gevent, threading, eventlet...
    # allow the user of this client object to optionally specify one to be
    # used.
    if 'handler' in conf:
        client_kwargs['handler'] = conf['handler']
    return client.KazooClient(**client_kwargs)
 def _queryapi(self, method_url, get, post):
     c = pycurl.Curl()
     if bool(get):
         query_url = method_url + '?' + urlencode(get)
     else:
         query_url = method_url
     c.setopt(c.URL, query_url)
     if bool(post):
         # first clear all fields that are None
         post_cleared = {}
         for i in post:
             if post[i] is not None:
                 post_cleared[i] = post[i]
         postfields = urlencode(post_cleared)
         c.setopt(c.POSTFIELDS, postfields)
     buffer = StringIO()
     c.setopt(c.WRITEFUNCTION, buffer.write)
     c.setopt(c.HTTPHEADER, ['PddToken: ' + self.token])
     c.perform()
     http_response_code = c.getinfo(c.RESPONSE_CODE)
     http_response_data = json.loads(buffer.getvalue())
     c.close()
     if 200 != http_response_code:
         self.module.fail_json(msg='Error querying yandex pdd api, HTTP status=%s error=%s' % (http_response_code, http_response_data.get('error')))
     return (http_response_code, http_response_data)
 def check(rv, ans):
     assert bool(rv[1]) == bool(ans[1])
     if ans[1]:
         return s_check(rv, ans)
     e = rv[0].expand()
     a = ans[0].expand()
     return e in [a, -a] and rv[1] == ans[1]
Example #6
def _check_function(function, flag, run_args, data):
    object_name, function_name = _split_function_name(function)
    if not function_name:
        return None

    # Make sure function is present in either libvirt module or
    # object_name class
    flag_tuple = ()

    if not _has_command(function_name, objname=object_name):
        return False

    if flag:
        found_flag = _get_flag(flag)
        if not bool(found_flag):
            return False
        flag_tuple = (found_flag,)

    if run_args is None:
        return None

    # If function requires an object, make sure the passed obj
    # is of the correct type
    if object_name:
        classobj = _get_command(object_name)
        if not isinstance(data, classobj):
            raise ValueError(
                "Passed obj %s with args must be of type %s, was %s" %
                (data, str(classobj), type(data)))

    cmd = _get_command(function_name, obj=data)

    # Function with args specified is all the proof we need
    return _try_command(cmd, run_args + flag_tuple,
                        check_all_error=bool(flag_tuple))
        def relpath(path, start=os.path.curdir):
            """Return a relative version of a path"""
            from os.path import sep, curdir, join, abspath, commonprefix, \
                 pardir, splitunc

            if not path:
                raise ValueError("no path specified")
            start_list = abspath(start).split(sep)
            path_list = abspath(path).split(sep)
            if start_list[0].lower() != path_list[0].lower():
                unc_path, rest = splitunc(path)
                unc_start, rest = splitunc(start)
                if bool(unc_path) ^ bool(unc_start):
                    raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)"
                                                                        % (path, start))
                else:
                    raise ValueError("path is on drive %s, start on drive %s"
                                                        % (path_list[0], start_list[0]))
            # Work out how much of the filepath is shared by start and path.
            for i in range(min(len(start_list), len(path_list))):
                if start_list[i].lower() != path_list[i].lower():
                    break
            else:
                i += 1

            rel_list = [pardir] * (len(start_list)-i) + path_list[i:]
            if not rel_list:
                return curdir
            return join(*rel_list)
    def add_package(self, login, package_name,
                    summary=None,
                    license=None,
                    public=True,
                    publish=True,
                    license_url=None,
                    attrs=None):
        '''
        Add a new package to a users account

        :param login: the login of the package owner
        :param package_name: the name of the package to be created
        :param package_type: A type identifier for the package (eg. 'pypi' or 'conda', etc.)
        :param summary: A short summary about the package
        :param license: the name of the package license
        :param license_url: the url of the package license
        :param public: if true then the package will be hosted publicly
        :param attrs: A dictionary of extra attributes for this package
        :param host_publicly: TODO: describe
        '''
        url = '%s/package/%s/%s' % (self.domain, login, package_name)

        attrs = attrs or {}
        attrs['summary'] = summary
        attrs['license'] = {'name':license, 'url':license_url}

        payload = dict(public=bool(public),
                       publish=bool(publish),
                       public_attrs=dict(attrs or {})
                       )

        data = jencode(payload)
        res = self.session.post(url, verify=True, data=data)
        self._check_response(res)
        return res.json()
Example #9
    def get_visibility_errors(self, customer):
        if self.product.deleted:
            yield ValidationError(_('This product has been deleted.'), code="product_deleted")

        if customer and customer.is_all_seeing:  # None of the further conditions matter for omniscient customers.
            return

        if not self.visible:
            yield ValidationError(_('This product is not visible.'), code="product_not_visible")

        is_logged_in = (bool(customer) and not customer.is_anonymous)

        if not is_logged_in and self.visibility_limit != ProductVisibility.VISIBLE_TO_ALL:
            yield ValidationError(
                _('The Product is invisible to users not logged in.'),
                code="product_not_visible_to_anonymous")

        if is_logged_in and self.visibility_limit == ProductVisibility.VISIBLE_TO_GROUPS:
            # TODO: Optimization
            user_groups = set(customer.groups.all().values_list("pk", flat=True))
            my_groups = set(self.visibility_groups.values_list("pk", flat=True))
            if not bool(user_groups & my_groups):
                yield ValidationError(
                    _('This product is not visible to your group.'),
                    code="product_not_visible_to_group"
                )

        for receiver, response in get_visibility_errors.send(ShopProduct, shop_product=self, customer=customer):
            for error in response:
                yield error
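The group-visibility check above reduces to whether two sets of primary keys intersect; bool() on the intersection gives the answer. A standalone sketch with made-up ids:

user_groups = {1, 4, 7}   # hypothetical group pks for the customer
my_groups = {2, 7}        # hypothetical pks the product is restricted to
print(bool(user_groups & my_groups))  # True: group 7 is shared
print(bool(user_groups & {3, 5}))     # False: empty intersection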
    def __init__(self, isoform_filename, sam_filename, output_prefix,
                 min_aln_coverage, min_aln_identity, min_flnc_coverage,
                 max_fuzzy_junction, allow_extra_5exon, skip_5_exon_alt):
        """
        Parameters:
          isoform_filename -- input file containing isoforms, as fastq|fasta|contigset
          sam_filename -- input sam file produced by mapping fastq_filename to reference and sorted.
          #collapsed_isoform_filename -- file to output collapsed isoforms as fasta|fastq|contigset
          min_aln_coverage -- min coverage over reference to collapse a group of isoforms
          min_aln_identity -- min identity aligning to reference to collapse a group of isoforms
          min_flnc_coverage -- min supportive flnc reads to not ignore an isoform
                               Must be 1 when collapsing consensus isoforms, which is the case in production isoseq.
                               Can be >= 1 only when directly collapsing FLNC reads.
          max_fuzzy_junction -- max edit distance between fuzzy-matching exons
          allow_extra_5exon -- whether or not to allow shorter 5' exons
          skip_5_exon_alt -- whether or not to skip alternative 5' exons
        """
        self.suffix = parse_ds_filename(isoform_filename)[1]
        super(CollapseIsoformsRunner, self).__init__(prefix=output_prefix,
                                                     allow_extra_5exon=allow_extra_5exon)

        self.isoform_filename = isoform_filename # input, uncollapsed fa|fq|ds
        self.sam_filename = sam_filename # input, sorted, gmap sam
        #self.collapsed_isoform_filename = collapsed_isoform_filename # output, collapsed, fa|fq|ds

        self.min_aln_coverage = float(min_aln_coverage)
        self.min_aln_identity = float(min_aln_identity)
        self.min_flnc_coverage = int(min_flnc_coverage)
        self.max_fuzzy_junction = int(max_fuzzy_junction)
        self.allow_extra_5exon = bool(allow_extra_5exon)
        self.skip_5_exon_alt = bool(skip_5_exon_alt)
    def onOK(self, event): # wxGlade: PreferencesPanel.<event_handler>
        """Record all of the preferences and return to fitting mode."""

        # Record structure viewer stuff
        executable = str(self.textCtrlViewer.GetValue()).strip()
        argstr = str(self.textCtrlArgument.GetValue()).strip()
        fileformat = str(self.choiceFormat.GetStringSelection())
        config = {
                "executable" : executable,
                "argstr"     : argstr,
                "fileformat" : fileformat,
                }

        viewer = structureviewer.getStructureViewer()
        viewer.setConfig(config)

        # Structures path
        remember = bool(self.structureDirCheckBox.GetValue())
        if not self.cP.has_section("PHASE"):
            self.cP.add_section("PHASE")
        self.cP.set("PHASE", "remember", str(remember))

        # Data set path
        remember = bool(self.dataDirCheckBox.GetValue())
        if not self.cP.has_section("DATASET"):
            self.cP.add_section("DATASET")
        self.cP.set("DATASET", "remember", str(remember))

        # Get out of here
        self.onCancel(event)
        return
Example #12
 def __init__(self,
         tx=False,  # safe assumption
         agc=True,  # show useless controls > hide functionality
         dc_cancel=True,  # ditto
         dc_offset=True,  # safe assumption
         tune_delay=DEFAULT_DELAY,
         e4000=False):
     """
     All values are booleans.
     
     tx: The device supports transmitting (osmosdr.sink).
     agc: The device has a hardware AGC (set_gain_mode works).
     dc_cancel: The device supports DC offset auto cancellation
         (set_dc_offset_mode auto works).
     dc_offset: The output has a DC offset and tuning should
         avoid the area around DC.
     e4000: The device is an RTL2832U + E4000 tuner and can be
         confused into tuning to 0 Hz.
     """
     
     # TODO: If the user specifies an OsmoSDRProfile without a full set of explicit args, derive the rest from the device string instead of using defaults.
     self.tx = bool(tx)
     self.agc = bool(agc)
     self.dc_cancel = bool(dc_cancel)
     self.dc_offset = bool(dc_offset)
     self.tune_delay = float(tune_delay)
     self.e4000 = bool(e4000)
def IncBench(inc):

    NR_CYCLES = 201
      
    m = 8
    n = 2 ** m

    count = Signal(intbv(0)[m:])
    count_v = Signal(intbv(0)[m:])
    enable = Signal(bool(0))
    clock, reset = [Signal(bool(0)) for i in range(2)]


    inc_inst = inc(count, enable, clock, reset, n=n)

    @instance
    def clockgen():
        clock.next = 1
        for i in range(NR_CYCLES):
            yield delay(10)
            clock.next = not clock

    @instance
    def monitor():
        reset.next = 0
        enable.next = 1
        yield clock.negedge
        reset.next = 1
        yield clock.negedge
        while True:
            yield clock.negedge
            print count

    return inc_inst, clockgen, monitor
Example #14
 def _plugin_current_changed(self, current, previous):
     if current.isValid():
         actual_idx = self.proxy_model.mapToSource(current)
         display_plugin = self.model.display_plugins[actual_idx.row()]
         self.description.setText(display_plugin.description)
         self.forum_link = display_plugin.forum_link
         self.zip_url = display_plugin.zip_url
         self.forum_action.setEnabled(bool(self.forum_link))
         self.install_button.setEnabled(display_plugin.is_valid_to_install())
         self.install_action.setEnabled(self.install_button.isEnabled())
         self.uninstall_action.setEnabled(display_plugin.is_installed())
         self.history_action.setEnabled(display_plugin.has_changelog)
         self.configure_button.setEnabled(display_plugin.is_installed())
         self.configure_action.setEnabled(self.configure_button.isEnabled())
         self.toggle_enabled_action.setEnabled(display_plugin.is_installed())
         self.donate_enabled_action.setEnabled(bool(display_plugin.donation_link))
     else:
         self.description.setText('')
         self.forum_link = None
         self.zip_url = None
         self.forum_action.setEnabled(False)
         self.install_button.setEnabled(False)
         self.install_action.setEnabled(False)
         self.uninstall_action.setEnabled(False)
         self.history_action.setEnabled(False)
         self.configure_button.setEnabled(False)
         self.configure_action.setEnabled(False)
         self.toggle_enabled_action.setEnabled(False)
         self.donate_enabled_action.setEnabled(False)
     self.update_forum_label()
Example #15
    def _update(self, message, clean_ctrl_chars=True, commit=True, waitFlush=None, waitSearcher=None):
        """
        Posts the given xml message to http://<self.url>/update and
        returns the result.

        Passing `clean_ctrl_chars` as False will prevent the message from being cleaned
        of control characters (default True). This is done by default because
        these characters would cause Solr to fail to parse the XML. Only pass
        False if you're positive your data is clean.
        """
        path = 'update/'

        # Per http://wiki.apache.org/solr/UpdateXmlMessages, we can append a
        # ``commit=true`` to the URL and have the commit happen without a
        # second request.
        query_vars = []

        if commit is not None:
            query_vars.append('commit=%s' % str(bool(commit)).lower())

        if waitFlush is not None:
            query_vars.append('waitFlush=%s' % str(bool(waitFlush)).lower())

        if waitSearcher is not None:
            query_vars.append('waitSearcher=%s' % str(bool(waitSearcher)).lower())

        if query_vars:
            path = '%s?%s' % (path, '&'.join(query_vars))

        # Clean the message of ctrl characters.
        if clean_ctrl_chars:
            message = sanitize(message)

        return self._send_request('post', path, message, {'Content-type': 'text/xml; charset=utf-8'})
Example #16
def mark_provider_template(api, provider, template, tested=None, usable=None,
        diagnosis='', build_number=None, stream=None):
    """Mark a provider template as tested and/or usable

    Args:
        api: The trackerbot API to act on
        provider: The provider's key in cfme_data or a :py:class:`Provider` instance
        template: The name of the template to mark on this provider or a :py:class:`Template`
        tested: Whether or not this template has been tested on this provider
        usable: Whether or not this template is usable on this provider
        diagnosis: Optional reason for marking a template

    Returns the response of the API request

    """
    provider_template = _as_providertemplate(provider, template, group=stream)

    if tested is not None:
        provider_template['tested'] = bool(tested)

    if usable is not None:
        provider_template['usable'] = bool(usable)

    if diagnosis:
        provider_template['diagnosis'] = diagnosis

    if build_number:
        provider_template['build_number'] = int(build_number)

    return api.providertemplate.post(provider_template)
Example #17
def test_page(request):
    title = 'Pyramid Debugtoolbar'
    log.info(title)
    return {
        'title': title,
        'show_jinja2_link': bool(pyramid_jinja2),
        'show_sqla_link': bool(sqlalchemy)}
Example #18
    def __init__(self, PERMISSIVE=True, get_header=False,
                 structure_builder=None, QUIET=False):
        """Create a PDBParser object.

        The PDB parser calls a number of standard methods in an aggregated
        StructureBuilder object. Normally this object is instantiated by the
        PDBParser object itself, but if the user provides his/her own
        StructureBuilder object, the latter is used instead.

        Arguments:
         - PERMISSIVE - Evaluated as a Boolean. If false, exceptions in
           constructing the SMCRA data structure are fatal. If true (DEFAULT),
           the exceptions are caught, but some residues or atoms will be missing.
           THESE EXCEPTIONS ARE DUE TO PROBLEMS IN THE PDB FILE!.
         - structure_builder - an optional user implemented StructureBuilder class.
         - QUIET - Evaluated as a Boolean. If true, warnings issued in constructing
           the SMCRA data will be suppressed. If false (DEFAULT), they will be shown.
           These warnings might be indicative of problems in the PDB file!
        """
        if structure_builder is not None:
            self.structure_builder = structure_builder
        else:
            self.structure_builder = StructureBuilder()
        self.header = None
        self.trailer = None
        self.line_counter = 0
        self.PERMISSIVE = bool(PERMISSIVE)
        self.QUIET = bool(QUIET)
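A short usage sketch, assuming Biopython is installed; because the constructor wraps both flags in bool(), any truthy or falsy value works:

from Bio.PDB import PDBParser

parser = PDBParser(PERMISSIVE=1, QUIET=0)  # ints are coerced to real booleans
assert parser.PERMISSIVE is True
assert parser.QUIET is False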
Example #19
    def _check_for_list(self, location):
        # Standard LatLng list or tuple with 2 number values
        if len(location) == 2:
            lat = self._convert_float(location[0])
            lng = self._convert_float(location[1])
            condition_1 = isinstance(lat, float)
            condition_2 = isinstance(lng, float)

            # Check if input are Floats
            if bool(condition_1 and condition_2):
                condition_3 = lat <= 90 and lat >= -90
                condition_4 = lng <= 180 and lng >= -180

                # Check if inputs are within the World Geographical
                # boundary (90,180,-90,-180)
                if bool(condition_3 and condition_4):
                    self.lat = lat
                    self.lng = lng
                    return self.lat, self.lng
                else:
                    print("[ERROR] Coords are not within the world's geographical boundary\n"
                          'Latitudes must be within -90 to 90 degrees\n'
                          'Longitude must be within -180 to 180 degrees')
                    sys.exit()
            else:
                print("[ERROR] Coordinates must be numbers.\n"
                      '>>> g = geocoder.location("Ottawa ON")\n'
                      '>>> g = geocoder.location([45.23, -75.12])\n'
                      '>>> g = geocoder.location("45.23, -75.12")\n'
                      '>>> g = geocoder.location({"lat": 45.23, "lng": -75.12})')
                sys.exit()
Example #20
def _create_bootstrap(script_name, packages_to_install, paver_command_line,
                      install_paver=True, more_text="", dest_dir='.',
                      no_site_packages=None, system_site_packages=None,
                      unzip_setuptools=False, distribute=None, index_url=None,
                      find_links=None):
    # configure easy install template
    easy_install_options = []
    if index_url:
        easy_install_options.extend(["--index-url", index_url])
    if find_links:
        easy_install_options.extend(
            ["--find-links", " ".join(find_links)])
    easy_install_options = (
        easy_install_options
        and "'%s', " % "', '".join(easy_install_options) or '')
    confd_easy_install_tmpl = (_easy_install_tmpl %
                               ('bin_dir',  easy_install_options))
    if install_paver:
        paver_install = (confd_easy_install_tmpl %
                         ('paver==%s' % setup_meta['version']))
    else:
        paver_install = ""

    options = ""
    # if deprecated 'no_site_packages' was specified and 'system_site_packages'
    # wasn't, set it from that value
    if system_site_packages is None and no_site_packages is not None:
        system_site_packages = not no_site_packages
    if system_site_packages is not None:
        options += ("    options.system_site_packages = %s\n" %
                    bool(system_site_packages))
    if unzip_setuptools:
        options += "    options.unzip_setuptools = True\n"
    if distribute is not None:
        options += "    options.use_distribute = %s\n" % bool(distribute)
    options += "\n"

    extra_text = """def adjust_options(options, args):
    args[:] = ['%s']
%s
def after_install(options, home_dir):
    if sys.platform == 'win32':
        bin_dir = join(home_dir, 'Scripts')
    else:
        bin_dir = join(home_dir, 'bin')
%s""" % (dest_dir, options, paver_install)
    for package in packages_to_install:
        extra_text += confd_easy_install_tmpl % package
    if paver_command_line:
        command_list = list(paver_command_line.split())
        extra_text += "    subprocess.call([join(bin_dir, 'paver'),%s)" % repr(command_list)[1:]

    extra_text += more_text
    bootstrap_contents = venv.create_bootstrap_script(extra_text)
    fn = script_name

    debug("Bootstrap script extra text: " + extra_text)
    def write_script():
        open(fn, "w").write(bootstrap_contents)
    dry("Write bootstrap script %s" % fn, write_script)
Example #21
def create_cookie(name, value, **kwargs):
    """Make a cookie from underspecified parameters.
    By default, the pair of `name` and `value` will be set for the domain ''
    and sent on every request (this is sometimes called a "supercookie").
    """
    result = dict(
        version=0,
        name=name,
        value=value,
        port=None,
        domain='',
        path='/',
        secure=False,
        expires=None,
        discard=True,
        comment=None,
        comment_url=None,
        rest={"HttpOnly": None},
        rfc2109=False,
    )
    if kwargs.get("path") in ("", "/,"):
        del kwargs["path"]
    badargs = set(kwargs) - set(result)
    if badargs:
        err = "create_cookie() got unexpected keyword arguments: %s"
        raise TypeError(err % list(badargs))
    result.update(kwargs)
    result["port_specified"] = bool(result["port"])
    result["domain_specified"] = bool(result["domain"])
    result["domain_initial_dot"] = result["domain"].startswith(".")
    result["path_specified"] = bool(result["path"])
    return Cookie(**result)
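The last few lines derive the *_specified booleans that the Cookie constructor expects from whatever values ended up in the dict. A minimal standalone sketch of that step (plain dict, hypothetical values):

result = {"port": None, "domain": "example.com", "path": "/"}
result["port_specified"] = bool(result["port"])      # False: no port given
result["domain_specified"] = bool(result["domain"])  # True
result["path_specified"] = bool(result["path"])      # True
print(result)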
 def __init__(self, announce, piece_length=262144, **kw):
     self.piece_length = piece_length
     if not bool(urlparse.urlparse(announce).scheme):
         raise ValueError('No scheme present for url')
     self.tdict = {
         'announce': announce,
         'creation date': int(time()),
         'info': {
             'piece length': self.piece_length
         }
     }
     if kw.get('comment'):
         self.tdict.update({'comment': kw.get('comment')})
     if kw.get('httpseeds'):
         if not isinstance(kw.get('httpseeds'), list):
             raise TypeError('httpseeds must be a list')
         else:
             self.tdict.update({'httpseeds': kw.get('httpseeds')})
     if kw.get('announcelist'):
         if not isinstance(kw.get('announcelist'), list):
             raise TypeError('announcelist must be a list of lists')
         if False in [isinstance(l, list) for l in kw.get('announcelist')]:
             raise TypeError('announcelist must be a list of lists')
         if False in [bool(urlparse.urlparse(f[0]).scheme) for f in kw.get('announcelist')]:
             raise ValueError('No scheme present for url')
         else:
             self.tdict.update({'announce-list': kw.get('announcelist')})
Example #23
 def _process(self):
     self.user.settings.set('suggest_categories', True)
     tz = session.tzinfo
     hours, minutes = timedelta_split(tz.utcoffset(datetime.now()))[:2]
     categories = get_related_categories(self.user)
     categories_events = []
     if categories:
         category_ids = {c['categ'].id for c in categories.itervalues()}
         today = now_utc(False).astimezone(tz).date()
         query = (Event.query
                  .filter(~Event.is_deleted,
                          Event.category_chain_overlaps(category_ids),
                          Event.start_dt.astimezone(session.tzinfo) >= today)
                  .options(joinedload('category').load_only('id', 'title'),
                           joinedload('series'),
                           subqueryload('acl_entries'),
                           load_only('id', 'category_id', 'start_dt', 'end_dt', 'title', 'access_key',
                                     'protection_mode', 'series_id', 'series_pos', 'series_count'))
                  .order_by(Event.start_dt, Event.id))
         categories_events = get_n_matching(query, 10, lambda x: x.can_access(self.user))
     from_dt = now_utc(False) - relativedelta(weeks=1, hour=0, minute=0, second=0)
     linked_events = [(event, {'management': bool(roles & self.management_roles),
                               'reviewing': bool(roles & self.reviewer_roles),
                               'attendance': bool(roles & self.attendance_roles)})
                      for event, roles in get_linked_events(self.user, from_dt, 10).iteritems()]
     return WPUser.render_template('dashboard.html', 'dashboard',
                                   offset='{:+03d}:{:02d}'.format(hours, minutes), user=self.user,
                                   categories=categories,
                                   categories_events=categories_events,
                                   suggested_categories=get_suggested_categories(self.user),
                                   linked_events=linked_events)
Example #24
def GetDisplayModes():
    res = []
    displayDevice = DISPLAY_DEVICE()
    displayDevice.cb = sizeof(DISPLAY_DEVICE)
    devMode = DEVMODE()
    devMode.dmSize = sizeof(DEVMODE)
    iDevNum = 0
    while True:
        if EnumDisplayDevices(None, iDevNum, pointer(displayDevice), 0) == 0:
            break
        iDevNum += 1
        if displayDevice.StateFlags & DISPLAY_DEVICE_MIRRORING_DRIVER:
            continue
        EnumDisplaySettingsEx(
            displayDevice.DeviceName,
            ENUM_CURRENT_SETTINGS,
            pointer(devMode),
            0
        )
        displayMode = (
            displayDevice.DeviceName,
            devMode.dmPosition.x,
            devMode.dmPosition.y,
            devMode.dmPelsWidth,
            devMode.dmPelsHeight,
            devMode.dmDisplayFrequency,
            devMode.dmBitsPerPel,
            bool(
                displayDevice.StateFlags & DISPLAY_DEVICE_ATTACHED_TO_DESKTOP
            ),
            bool(displayDevice.StateFlags & DISPLAY_DEVICE_PRIMARY_DEVICE),
            devMode.dmDisplayFlags,
        )
        res.append(displayMode)
    return tuple(res)
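The two bool() calls above collapse bit-flag tests into plain True/False values for the result tuple. A standalone sketch with hypothetical constants standing in for the Win32 DISPLAY_DEVICE masks:

ATTACHED_TO_DESKTOP = 0x01  # hypothetical stand-in for DISPLAY_DEVICE_ATTACHED_TO_DESKTOP
PRIMARY_DEVICE = 0x04       # hypothetical stand-in for DISPLAY_DEVICE_PRIMARY_DEVICE

state_flags = ATTACHED_TO_DESKTOP | PRIMARY_DEVICE
print(bool(state_flags & ATTACHED_TO_DESKTOP))  # True: bit is set
print(bool(state_flags & 0x20))                 # False: that bit is not set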
Example #25
    def synonyms(self, tax_id=None, tax_name=None):
        if not bool(tax_id) ^ bool(tax_name):
            raise ValueError(
                'Exactly one of tax_id and tax_name may be provided.')

        names = self.names

        if tax_name:
            s1 = select([names.c.tax_id], names.c.tax_name == tax_name)
            res = s1.execute().fetchone()

            if res:
                tax_id = res[0]
            else:
                msg = '"{}" not found in names.tax_names'.format(tax_name)
                raise ValueError(msg)

        s = select([names.c.tax_name, names.c.is_primary],
                   names.c.tax_id == tax_id)
        output = s.execute().fetchall()

        if not output:
            raise ValueError('"{}" not found in names.tax_id'.format(tax_id))

        return output
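The guard at the top uses bool(a) ^ bool(b) to demand exactly one of the two arguments. A self-contained sketch of the idiom (names are illustrative):

def require_exactly_one(tax_id=None, tax_name=None):
    # XOR of truthiness: passes only when exactly one argument is truthy.
    if not bool(tax_id) ^ bool(tax_name):
        raise ValueError('Exactly one of tax_id and tax_name may be provided.')

require_exactly_one(tax_id=9606)              # ok
require_exactly_one(tax_name='Homo sapiens')  # ok
try:
    require_exactly_one()
except ValueError as exc:
    print(exc)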
Example #26
    def get_columns(self, connection, table_name, schema=None, **kw):
        table_id = self.get_table_id(connection, table_name, schema,
                                     info_cache=kw.get("info_cache"))

        COLUMN_SQL = text("""
          SELECT col.name AS name,
                 t.name AS type,
                 (col.status & 8) AS nullable,
                 (col.status & 128) AS autoincrement,
                 com.text AS 'default',
                 col.prec AS precision,
                 col.scale AS scale,
                 col.length AS length
          FROM systypes t, syscolumns col LEFT OUTER JOIN syscomments com ON
              col.cdefault = com.id
          WHERE col.usertype = t.usertype
              AND col.id = :table_id
          ORDER BY col.colid
        """)

        results = connection.execute(COLUMN_SQL, table_id=table_id)

        columns = []
        for (name, type_, nullable, autoincrement, default, precision, scale,
             length) in results:
            col_info = self._get_column_info(name, type_, bool(nullable),
                             bool(autoincrement), default, precision, scale,
                             length)
            columns.append(col_info)

        return columns
Example #27
    def _get_no_init( self ):
        if None is self._no_init and False == bool( self.indexing_suite ):
            #select all public constructors and exclude copy constructor
            cs = self.constructors( lambda c: not c.is_copy_constructor and c.access_type == 'public'
                                    , recursive=False, allow_empty=True )

            has_suitable_constructor = bool( cs )
            if cs and len(cs) == 1 and cs[0].is_trivial_constructor and self.find_noncopyable_vars():
                has_suitable_constructor = False

            has_nonpublic_destructor = declarations.has_destructor( self ) \
                                       and not declarations.has_public_destructor( self )

            trivial_constructor = self.find_trivial_constructor()

            if has_nonpublic_destructor \
               or ( self.is_abstract and not self.is_wrapper_needed() ) \
               or not has_suitable_constructor:
                self._no_init = True
            elif not trivial_constructor or trivial_constructor.access_type != 'public':
                exportable_cs = [c for c in cs if c.exportable and c.ignore == False]
                if not exportable_cs:
                    self._no_init = True
            else:
                pass
        if None is self._no_init:
            self._no_init = False
        return self._no_init
Example #28
def database_filename(subreddit=None, username=None):
    '''
    Given a subreddit name or username, return the appropriate database filename.
    '''
    if bool(subreddit) == bool(username):
        raise ValueError('Enter subreddit or username but not both')

    text = subreddit or username
    text = text.replace('/', os.sep)

    if os.sep in text:
        # If they've given us a full path, don't mess
        # with it
        return text

    text = text.replace('\\', os.sep)
    if not text.endswith('.db'):
        text += '.db'

    if subreddit:
        full_path = DATABASE_SUBREDDIT % text
    else:
        full_path = DATABASE_USER % text

    basename = os.path.basename(full_path)
    if os.path.exists(basename):
        # Prioritize existing local files of the same name before creating
        # the deeper, proper one.
        return basename

    return full_path
Example #29
    def __init__(
            self,
            host=None,
            port=6379,
            unix_sock=None,
            database=0,
            password=None,
            encoding=None,
            conn_timeout=2,
            read_timeout=2,
            sentinel=False):

        if not bool(host) != bool(unix_sock):
            raise PyRedisError('Either host or unix_sock has to be provided')
        self._closed = False
        self._conn_timeout = conn_timeout
        self._read_timeout = read_timeout
        self._encoding = encoding
        self._reader = None
        self._sentinel = sentinel
        self._writer = None
        self._sock = None
        self.host = host
        self.port = port
        self.unix_sock = unix_sock
        self.password = password
        self.database = database
Example #30
    def setSaveOptionsPNG(self, optimize=None, palette=None, palette256=None):
        """ Optional arguments are added to self.png_options for pickup when saving.
        
            Palette argument is a URL relative to the configuration file,
            and it implies bits and optional transparency options.
        
            More information about options:
                http://effbot.org/imagingbook/format-png.htm
        """
        if optimize is not None:
            self.png_options['optimize'] = bool(optimize)
        
        if palette is not None:
            palette = urljoin(self.config.dirpath, palette)
            palette, bits, t_index = load_palette(palette)
            
            self.bitmap_palette, self.png_options['bits'] = palette, bits
            
            if t_index is not None:
                self.png_options['transparency'] = t_index

        if palette256 is not None:
            self.palette256 = bool(palette256)
        else:
            self.palette256 = None
Example #31
def voted_post(user, post, value):
    return user.is_authenticated and bool(user.vote_set.filter(post=post, value=value))
Example #32
def bool_encoder(value, *args):
    return bool(value)
Example #33
 def has_sequence(self, connection, sequence_name):
     cursor = connection.execute(
         '''SELECT relname FROM pg_class WHERE relkind = 'S' AND relnamespace IN ( SELECT oid FROM pg_namespace WHERE nspname NOT LIKE 'pg_%%' AND nspname != 'information_schema' AND relname = %(seqname)s);''',
         {'seqname': sequence_name.encode(self.encoding)})
     return bool(cursor.rowcount)
Example #34
def main():
    parser = ArgumentParser()
    parser.add_argument('--pregenerated_data', type=Path, required=True)
    parser.add_argument('--output_dir', type=Path, required=True)
    parser.add_argument("--bert_model", type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--do_lower_case", action="store_true")
    parser.add_argument("--reduce_memory", action="store_true",
                        help="Store training data as on-disc memmaps to massively reduce memory usage")

    parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                        "0 (default value): dynamic loss scaling.\n"
                        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--warmup_steps", 
                        default=0, 
                        type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--adam_epsilon", 
                        default=1e-8, 
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    args = parser.parse_args()

    assert args.pregenerated_data.is_dir(), \
        "--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!"

    samples_per_epoch = []
    for i in range(args.epochs):
        epoch_file = args.pregenerated_data / f"epoch_0.json" #f"epoch_{i}.json"
        metrics_file = args.pregenerated_data / f"epoch_0_metrics.json" #f"epoch_{i}_metrics.json"
        if epoch_file.is_file() and metrics_file.is_file():
            metrics = json.loads(metrics_file.read_text())
            samples_per_epoch.append(metrics['num_training_examples'])
        else:
            if i == 0:
                exit("No training data was found!")
            print(f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).")
            print("This script will loop over the available data, but training diversity may be negatively impacted.")
            num_data_epochs = i
            break
    else:
        num_data_epochs = args.epochs

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logging.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
    args.output_dir.mkdir(parents=True, exist_ok=True)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    total_train_examples = 0
    for i in range(args.epochs):
        # The modulo takes into account the fact that we may loop over limited epochs of data
        total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]

    num_train_optimization_steps = int(
        total_train_examples / args.train_batch_size / args.gradient_accumulation_steps)
    if args.local_rank != -1:
        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    model = BertForPreTraining.from_pretrained(args.bert_model)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]

    if args.fp16:
        try:
            from apex.fp16_utils import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False)
        # get_linear_schedule_with_warmup only
        # accepts unwrapped optimizer
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_train_optimization_steps)        
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_train_optimization_steps)

    
    global_step = 0
    logging.info("***** Running training *****")
    logging.info(f"  Num examples = {total_train_examples}")
    logging.info("  Batch size = %d", args.train_batch_size)
    logging.info("  Num steps = %d", num_train_optimization_steps)
    model.train()
    for epoch in range(args.epochs):
        epoch_dataset = PregeneratedDataset(epoch=epoch, training_path=args.pregenerated_data, tokenizer=tokenizer,
                                            num_data_epochs=num_data_epochs, reduce_memory=args.reduce_memory)
        if args.local_rank == -1:
            train_sampler = RandomSampler(epoch_dataset)
        else:
            train_sampler = DistributedSampler(epoch_dataset)
        train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        with tqdm(total=len(train_dataloader), desc=f"Epoch {epoch}") as pbar: # tqdm: progress bar
            for step, batch in enumerate(train_dataloader):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
                outputs = model(input_ids, masked_lm_labels=lm_label_ids, next_sentence_label=is_next)
                loss = outputs[0]
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                pbar.update(1)
                mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps
                pbar.set_postfix_str(f"Loss: {mean_loss:.5f}")
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    scheduler.step()  # Update learning rate schedule
                    optimizer.zero_grad()
                    global_step += 1

    # Save a trained model
    if (n_gpu > 1 and torch.distributed.get_rank() == 0) or n_gpu <= 1:
        logging.info("** ** * Saving fine-tuned model ** ** * ")
        model.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
Example #35
 def play(self):
     for i in range(9):
         self.__innings.append(str(i + 1))
         for j in range(2):
             self.__is_top_of_inning = bool(j)
             self.__play_inning()
Example #36
 def __defensing_team(self):
     return self.__teams[bool(not self.__is_top_of_inning)]
 def canAddNote(self, note):
     try:
         return bool(self.createNote(note))
     except:
         return False
 def ptz_stop(self, cha):
     self.nvrDll.CLIENT_DHPTZControlEx2(self.userSession, cha - 1, 34, 0, 4,
                                        0, bool(1), None)
 def check_cmd(self, cmd):
     return bool(
         self.run_command(['which', cmd], check_exit_code=False).strip())
Example #40
def voted(user, comment, value):
    return user.is_authenticated and bool(user.vote_set.filter(comment=comment, value=value))
 def test_restart_stream(self):
     self.assertFalse(bool(self.ubwa._restart_stream(self.ubwa.get_new_uuid_id())))
 def goPtz(self, cha, ptz):
     self.nvrDll.CLIENT_DHPTZControlEx2(self.userSession, cha - 1, 10, 0,
                                        ptz, 0, bool(0), None)
Example #43
 def __bool__(self):
     return bool(self.content)
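A self-contained sketch of the same pattern, with a hypothetical Container class whose truthiness simply mirrors its content:

class Container:
    def __init__(self, content=None):
        self.content = content

    def __bool__(self):
        # Delegate truthiness to whatever the container holds.
        return bool(self.content)

assert not Container()         # empty -> falsy
assert Container([1, 2, 3])    # non-empty -> truthy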
Example #44
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument(
        "--data_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The input data dir. Should contain the .tsv files (or other data files) for the task."
    )
    parser.add_argument(
        "--bert_model",
        default=None,
        type=str,
        required=True,
        help="Bert pre-trained model selected in the list: bert-base-uncased, "
        "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese."
    )
    parser.add_argument("--task_name",
                        default=None,
                        type=str,
                        required=True,
                        help="The name of the task to train.")
    parser.add_argument(
        "--output_dir",
        default=None,
        type=str,
        required=True,
        help=
        "The output directory where the model predictions and checkpoints will be written."
    )

    ## Other parameters
    parser.add_argument(
        "--max_seq_length",
        default=128,
        type=int,
        help=
        "The maximum total input sequence length after WordPiece tokenization. \n"
        "Sequences longer than this will be truncated, and sequences shorter \n"
        "than this will be padded.")
    parser.add_argument("--do_train",
                        default=False,
                        action='store_true',
                        help="Whether to run training.")
    parser.add_argument("--do_eval",
                        default=False,
                        action='store_true',
                        help="Whether to run eval on the dev set.")
    parser.add_argument(
        "--do_lower_case",
        default=False,
        action='store_true',
        help="Set this flag if you are using an uncased model.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument("--eval_batch_size",
                        default=8,
                        type=int,
                        help="Total batch size for eval.")
    parser.add_argument("--learning_rate",
                        default=5e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument("--num_train_epochs",
                        default=3.0,
                        type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument(
        "--warmup_proportion",
        default=0.1,
        type=float,
        help=
        "Proportion of training to perform linear learning rate warmup for. "
        "E.g., 0.1 = 10%% of training.")
    parser.add_argument("--no_cuda",
                        default=False,
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    parser.add_argument(
        '--gradient_accumulation_steps',
        type=int,
        default=1,
        help=
        "Number of updates steps to accumulate before performing a backward/update pass."
    )
    parser.add_argument(
        '--fp16',
        default=False,
        action='store_true',
        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument(
        '--loss_scale',
        type=float,
        default=0,
        help=
        "Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
        "0 (default value): dynamic loss scaling.\n"
        "Positive power of 2: static loss scaling value.\n")

    args = parser.parse_args()

    processors = {
        "cola": ColaProcessor,
        "mnli": MnliProcessor,
        "mrpc": MrpcProcessor,
    }

    num_labels_task = {
        "cola": 2,
        "mnli": 3,
        "mrpc": 2,
    }

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available()
                              and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logger.info(
        "device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".
        format(device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError(
            "Invalid gradient_accumulation_steps parameter: {}, should be >= 1"
            .format(args.gradient_accumulation_steps))

    args.train_batch_size = int(args.train_batch_size /
                                args.gradient_accumulation_steps)

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if not args.do_train and not args.do_eval:
        raise ValueError(
            "At least one of `do_train` or `do_eval` must be True.")

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        raise ValueError(
            "Output directory ({}) already exists and is not empty.".format(
                args.output_dir))
    os.makedirs(args.output_dir, exist_ok=True)

    task_name = args.task_name.lower()

    if task_name not in processors:
        raise ValueError("Task not found: %s" % (task_name))

    processor = processors[task_name]()
    num_labels = num_labels_task[task_name]
    label_list = processor.get_labels()

    tokenizer = BertTokenizer.from_pretrained(args.bert_model,
                                              do_lower_case=args.do_lower_case)

    train_examples = None
    num_train_steps = None
    if args.do_train:
        train_examples = processor.get_train_examples(args.data_dir)
        num_train_steps = int(
            len(train_examples) / args.train_batch_size /
            args.gradient_accumulation_steps * args.num_train_epochs)

    # Prepare model
    model = BertForSequenceClassification.from_pretrained(
        args.bert_model,
        cache_dir=PYTORCH_PRETRAINED_BERT_CACHE /
        'distributed_{}'.format(args.local_rank),
        num_labels=num_labels)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [{
        'params':
        [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
        'weight_decay':
        0.01
    }, {
        'params':
        [p for n, p in param_optimizer if any(nd in n for nd in no_decay)],
        'weight_decay':
        0.0
    }]
    t_total = num_train_steps
    if args.local_rank != -1:
        t_total = t_total // torch.distributed.get_world_size()
    if args.fp16:
        try:
            from apex.optimizers import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training."
            )

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False,
                              max_grad_norm=1.0)
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer,
                                       static_loss_scale=args.loss_scale)

    else:
        optimizer = BertAdam(optimizer_grouped_parameters,
                             lr=args.learning_rate,
                             warmup=args.warmup_proportion,
                             t_total=t_total)

    global_step = 0
    if args.do_train:
        train_features = convert_examples_to_features(train_examples,
                                                      label_list,
                                                      args.max_seq_length,
                                                      tokenizer)
        logger.info("***** Running training *****")
        logger.info("  Num examples = %d", len(train_examples))
        logger.info("  Batch size = %d", args.train_batch_size)
        logger.info("  Num steps = %d", num_train_steps)
        all_input_ids = torch.tensor([f.input_ids for f in train_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in train_features],
                                     dtype=torch.long)
        train_data = TensorDataset(all_input_ids, all_input_mask,
                                   all_segment_ids, all_label_ids)
        if args.local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data,
                                      sampler=train_sampler,
                                      batch_size=args.train_batch_size)

        model.train()
        for _ in trange(int(args.num_train_epochs), desc="Epoch"):
            tr_loss = 0
            nb_tr_examples, nb_tr_steps = 0, 0
            for step, batch in enumerate(
                    tqdm(train_dataloader, desc="Iteration")):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, label_ids = batch
                loss = model(input_ids, segment_ids, input_mask, label_ids)
                if n_gpu > 1:
                    loss = loss.mean()  # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps

                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()

                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    # modify learning rate with special warm up BERT uses
                    lr_this_step = args.learning_rate * warmup_linear(
                        global_step / t_total, args.warmup_proportion)
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = lr_this_step
                    optimizer.step()
                    optimizer.zero_grad()
                    global_step += 1

    # Save a trained model
    model_to_save = model.module if hasattr(
        model, 'module') else model  # Only save the model itself
    output_model_file = os.path.join(args.output_dir, "pytorch_model.bin")
    torch.save(model_to_save.state_dict(), output_model_file)

    # Load a trained model that you have fine-tuned
    model_state_dict = torch.load(output_model_file)
    model = BertForSequenceClassification.from_pretrained(
        args.bert_model, state_dict=model_state_dict)
    model.to(device)

    if args.do_eval and (args.local_rank == -1
                         or torch.distributed.get_rank() == 0):
        eval_examples = processor.get_dev_examples(args.data_dir)
        eval_features = convert_examples_to_features(eval_examples, label_list,
                                                     args.max_seq_length,
                                                     tokenizer)
        logger.info("***** Running evaluation *****")
        logger.info("  Num examples = %d", len(eval_examples))
        logger.info("  Batch size = %d", args.eval_batch_size)
        all_input_ids = torch.tensor([f.input_ids for f in eval_features],
                                     dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in eval_features],
                                      dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in eval_features],
                                       dtype=torch.long)
        all_label_ids = torch.tensor([f.label_id for f in eval_features],
                                     dtype=torch.long)
        eval_data = TensorDataset(all_input_ids, all_input_mask,
                                  all_segment_ids, all_label_ids)
        # Run prediction for full data
        eval_sampler = SequentialSampler(eval_data)
        eval_dataloader = DataLoader(eval_data,
                                     sampler=eval_sampler,
                                     batch_size=args.eval_batch_size)

        model.eval()
        eval_loss, eval_accuracy = 0, 0
        nb_eval_steps, nb_eval_examples = 0, 0
        for input_ids, input_mask, segment_ids, label_ids in eval_dataloader:
            input_ids = input_ids.to(device)
            input_mask = input_mask.to(device)
            segment_ids = segment_ids.to(device)
            label_ids = label_ids.to(device)

            with torch.no_grad():
                tmp_eval_loss = model(input_ids, segment_ids, input_mask,
                                      label_ids)
                logits = model(input_ids, segment_ids, input_mask)

            logits = logits.detach().cpu().numpy()
            label_ids = label_ids.to('cpu').numpy()
            tmp_eval_accuracy = accuracy(logits, label_ids)

            eval_loss += tmp_eval_loss.mean().item()
            eval_accuracy += tmp_eval_accuracy

            nb_eval_examples += input_ids.size(0)
            nb_eval_steps += 1

        eval_loss = eval_loss / nb_eval_steps
        eval_accuracy = eval_accuracy / nb_eval_examples

        result = {
            'eval_loss': eval_loss,
            'eval_accuracy': eval_accuracy,
            'global_step': global_step,
            'loss': tr_loss / nb_tr_steps if args.do_train else None
        }

        output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
        with open(output_eval_file, "w") as writer:
            logger.info("***** Eval results *****")
            for key in sorted(result.keys()):
                logger.info("  %s = %s", key, str(result[key]))
                writer.write("%s = %s\n" % (key, str(result[key])))
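The learning-rate adjustment in the accumulation step above relies on warmup_linear, which is defined elsewhere in this script. As a reference, here is a minimal sketch of the linear warmup / linear decay schedule that early pytorch-pretrained-bert releases used (an assumption about that helper, not code taken from this example):

def warmup_linear(x, warmup=0.002):
    # x is the fraction of training completed so far (global_step / t_total).
    if x < warmup:
        # Ramp the multiplier linearly from 0 to 1 during the warmup phase.
        return x / warmup
    # Afterwards decay the multiplier linearly towards 0 for the rest of training.
    return 1.0 - x

The base learning rate is then scaled by this multiplier before every optimizer.step(), which is what the loop above does with lr_this_step.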
Exemple #45
0
    def __search(self, titles, year, season='0'):
        try:
            url = urlparse.urljoin(self.base_link, self.tvsearch_link) % (
                urllib.quote_plus(titles[0]))
            url2 = urlparse.urljoin(self.base_link, self.tvsearch_link_2) % (
                urllib.quote_plus(titles[0]))
            cookie = '; approve_search=yes'
            result = client.request(url, cookie=cookie)
            if season != '0':
                try:
                    id = [
                        j['id'] for j in json.loads(result)
                        if str.upper(str(j['title'])) == str.upper(
                            titles[0] + ' - Season ' + season)
                    ]
                    page = '%s-season-%s-stream-%s.html' % (str.replace(
                        titles[0], ' ', '-'), season, id[0])
                except:
                    result = client.request(url2, cookie=cookie)
                    result = client.parseDOM(result,
                                             'div',
                                             attrs={'class': 'recent-item'})
                    page = [
                        re.findall(
                            r'class=\'title_new\'>.*?Season\s+%s.*?<.*?href="([^"]+)">'
                            % season, r, re.DOTALL) for r in result if bool(
                                re.search(
                                    r'class=\'title_new\'>.*?Season\s+%s.*?<' %
                                    season, r, re.DOTALL))
                    ][0][0]

            else:
                try:
                    id = [
                        j['id'] for j in json.loads(result)
                        if str.upper(str(j['title'])) == str.upper(titles[0])
                        and j['year'] == year
                    ]
                    page = '%s-stream-%s.html' % (str.replace(
                        titles[0], ' ', '-'), id[0])
                except:
                    result = client.request(url2, cookie=cookie)
                    result = client.parseDOM(result,
                                             'div',
                                             attrs={'class': 'recent-item'})
                    page = [
                        re.findall(
                            r'class=\'title_new\'>.*?\(%s\).*?href="([^"]+)"' %
                            year, r, re.DOTALL) for r in result
                        if bool(
                            re.search(r'class=\'title_new\'>.*?\(%s\)' %
                                      year, r, re.DOTALL))
                    ][0][0]

            url = page if 'http' in page else urlparse.urljoin(
                self.base_link, page)
            result = client.request(url)
            url = re.findall(u'<center><iframe\s+id="myiframe".*?src="([^"]+)',
                             result)[0]

            return url
        except:
            return
Exemple #46
0
def build_release(dir_src: os.PathLike,
                  dir_dst: os.PathLike = os.getcwd(),
                  dir_ver: os.PathLike = None,
                  temp_alt: os.PathLike = None,
                  arch_exe: os.PathLike = None,
                  arch_flags: ArchiveFlags = ArchiveFlags(),
                  bsa_exclude: list = list(),
                  trim_fomod: bool = False,
                  warn_modgroups: bool = True,
                  warn_readmes: bool = True,
                  warn_version: bool = True,
                  warn_mult_plugins: bool = True):
    """Build a release archive.

    Args:
        dir_src: Source directory for which the archive is built.
            It must contain a Fomod subdirectory with Info.xml and
            ModuleConfig.xml. Furthermore it must contain all files specified
            in ModuleConfig.xml.
        dir_dst: Target directory for the release archive. Defaults to the
            current working directory.
        dir_ver: Plugins are temporarily moved to this directory to manually
            add a version number to their description.
            If omitted, no version number is added.
        temp_alt: A directory whose path does not contain a directory that
            ends with "Data". Will be used to store temporary files during
            creation of the bsa.
            If omitted, no bsa is created.
        arch_exe: Path to Archive.exe, the executable that creates the bsa.
            If omitted, no bsa is created.
        arch_flags: Check the corresponding options in Archive.exe.
            If omitted, no flags are set.
        bsa_exclude: No bsa is created for these subdirectories.
        trim_fomod: If True the release archive contains no fomod installer if
            ModuleConfig.xml specifies a single directory and no loose files.
            Defaults to False.
        warn_modgroups: If True warn of plugins without a modgroups file.
            Defaults to True.
        warn_readmes: If True warn of plugins with a readme. A readme is
            expected to have the same name as the plugin. Defaults to True.
        warn_version: If True warn of plugins that don't have a version stamp.
            Defaults to True.
        warn_mult_plugins: If True warn of multiple plugins inside a
            subdirectory of the fomod installation. Defaults to True.
    """
    logger.info("------------------------------------------------------------")
    logger.info("Building release")
    logger.info("Source directory: {}".format(dir_src))
    logger.info("Target directory: {}".format(dir_dst))
    logger.info("Add version number: {}".format(bool(dir_ver)))
    if dir_ver:
        logger.info("Versioning directory: {}".format(dir_ver))
    if temp_alt:
        logger.info("Alternate temporary directory: {}".format(temp_alt))
    logger.info("Build bsa: {}".format(bool(arch_exe) and bool(temp_alt)))
    if arch_exe:
        logger.info("Archive.exe path: {}".format(arch_exe))
    if bsa_exclude:
        logger.info("Subdirectories excluded from bsa creation:")
        for bsa in bsa_exclude:
            logger.info("    {}".format(bsa))
    logger.info("Check modgroups: {}".format(bool(warn_modgroups)))
    logger.info("Check readmes: {}".format(bool(warn_readmes)))
    logger.info("Check version number: {}".format(bool(warn_version)))
    logger.info("Check multiple plugins: {}".format(bool(warn_version)))
    # Validate arguments
    dir_src_fomod = os.path.join(dir_src, "Fomod")
    if not os.path.isdir(dir_src):
        logger.error("Source directory does not exist")
        exit()
    if not os.path.isdir(dir_dst):
        logger.error("Target directory does not exist")
        exit()
    if dir_ver and not os.path.isdir(dir_ver):
        logger.error("Versioning directory does not exist")
        exit()
    if temp_alt and not os.path.isdir(temp_alt):
        logger.error("Alternate temporary directory does not exist")
        exit()
    if not os.path.isfile(os.path.join(dir_src_fomod, "Info.xml")):
        logger.error("Info.xml is missing in {}".format(dir_src_fomod))
        exit()
    if not os.path.isfile(os.path.join(dir_src_fomod, "ModuleConfig.xml")):
        logger.error("ModuleConfig.xml is missing in {}".format(dir_src_fomod))
        exit()
    if arch_exe and not os.path.isfile(arch_exe):
        logger.error("Archive.exe path does not exist")
        exit()
    if arch_exe and os.path.basename(arch_exe) != "Archive.exe":
        logger.error("Archive.exe path does not point to Archive.exe")
        exit()
    # Extract relevant information from fomod installation files
    name_release, version, sub_dirs, loose_files = parse_fomod(dir_src_fomod)
    plugins = [file for file in loose_files if is_plugin(file)]
    for sub_dir in sub_dirs:
        for plugin in find_plugins(os.path.join(dir_src, sub_dir)):
            plugins.append(os.path.join(sub_dir, plugin))
    # Validate subdirectories
    logger.info("Subdirectories required by the Fomod installer:")
    for sub_dir in sub_dirs:
        logger.info("   {}".format(sub_dir))
        if not os.path.isdir(os.path.join(dir_src, sub_dir)):
            logger.error("Subdirectory {} is missing".format(sub_dir))
            exit()
        if warn_mult_plugins:
            if len(find_plugins(os.path.join(dir_src, sub_dir))) > 1:
                logger.warning(
                    "Subdirectory {} contains multiple plugins".format(
                        sub_dir))
    # Validate loose files
    logger.info("Loose files required by the Fomod installer:")
    for file in loose_files:
        logger.info("   {}".format(file))
        if not os.path.isfile(os.path.join(dir_src, file)):
            logger.error("Loose file {} is missing".format(file))
            exit()
    # Check if all plugins have modgroups and readmes
    if warn_modgroups:
        check_modgroups(plugins, sub_dirs, loose_files, dir_src)
    if warn_readmes:
        check_readmes(plugins, sub_dirs, loose_files, dir_src)
    # Build fomod tree in a temporary directory
    with tempfile.TemporaryDirectory() as dir_temp:
        # Copy fomod files to the fomod tree
        src = os.path.join(dir_src, "Fomod")
        dst = os.path.join(dir_temp, "Fomod")
        shutil.copytree(src, dst)
        # Copy subdirectories to the fomod tree
        for sub_dir in sub_dirs:
            # Find a possible bsa name
            bsa = find_bsa_name(os.path.join(dir_src, sub_dir))
            if bsa and temp_alt and arch_exe and sub_dir not in bsa_exclude:
                os.mkdir(os.path.join(dir_temp, sub_dir))
                # Build the bsa
                src = os.path.join(dir_src, sub_dir)
                dst = os.path.join(dir_temp, sub_dir, bsa)
                build_bsa(src, dst, temp_alt, arch_exe, arch_flags)
                # Copy all files that aren't packed in the bsa
                for path in os.listdir(os.path.join(dir_src, sub_dir)):
                    src = os.path.join(dir_src, sub_dir, path)
                    dst = os.path.join(dir_temp, sub_dir, path)
                    if os.path.isfile(src):
                        shutil.copy2(src, dst)
                    elif os.path.isdir(src):
                        if path.lower() not in BSA_INCLUDE_DIRS:
                            shutil.copytree(src, dst)
            else:
                # Copy everything
                src = os.path.join(dir_src, sub_dir)
                dst = os.path.join(dir_temp, sub_dir)
                shutil.copytree(src, dst)
        # Copy loose files to the fomod tree
        for file in loose_files:
            src = os.path.join(dir_src, file)
            dst = os.path.join(dir_temp, file)
            os.makedirs(os.path.dirname(dst), exist_ok=True)
            shutil.copy2(src, dst)
        # Add version number to plugins
        plugins_fomod = [os.path.join(dir_temp, p) for p in plugins]
        if dir_ver:
            version_plugins(plugins_fomod, dir_ver, version)
        if warn_version:
            check_version(plugins_fomod, version)
        # Pack fomod tree into a zip archive
        file_archive = "{} {}".format(name_release, version)
        # Remove whitespaces from archive name because GitHub doesn't like them
        file_archive = "_".join(file_archive.split())
        dst = os.path.join(dir_dst, file_archive)
        if os.path.isfile(dst):
            os.remove(dst)
        if trim_fomod and len(sub_dirs) == 1 and len(loose_files) == 0:
            src = os.path.join(dir_temp, sub_dirs[0])
        else:
            src = dir_temp
        shutil.make_archive(dst, "zip", src)
        logger.info("Succesfully built release archive for {} of {}".format(
            version, name_release))
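A hypothetical invocation of build_release; every path below is a placeholder chosen for illustration and is not part of the original source. It builds a versioned release with a bsa for each subdirectory except "Textures":

if __name__ == "__main__":
    # Placeholder paths; adjust to the actual mod layout.
    build_release(dir_src=r"C:\Mods\MyMod",
                  dir_dst=r"C:\Mods\Releases",
                  dir_ver=r"C:\Mods\Versioning",
                  temp_alt=r"C:\Temp\bsa_work",
                  arch_exe=r"C:\Games\Skyrim\Tools\Archive\Archive.exe",
                  bsa_exclude=["Textures"],
                  trim_fomod=True)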
Exemple #47
0
def sr_image(file_path, backend, tiling, tile_size, overlap, stitch_type, adjust_brightness, use_hsv, use_init):
    '''Function that computes a super-resolution (SR) version of a given image.

    file_path           -- path to the image
    backend             -- integer selecting the backend model
    tiling              -- bool, should tiling be used
    tile_size           -- integer, tile edge length if tiling is used
    overlap             -- bool, should overlapping tiles be used
    stitch_type         -- integer, if tiling is used, how the sr_tiles are stitched together
    adjust_brightness   -- bool, should the brightness be equalized
    use_hsv             -- bool, should the hsv colors of the lr image be used
    use_init            -- int, for backend 2: should initialization be used

    returns             -- path to the sr_image
    '''
    # clear any previous model
    keras.backend.clear_session()

    models = {0: os.path.join("models", "SRDense-Type-3_ep80.h5"),
              1: os.path.join("models", "srresnet85.h5"),
              2: (os.path.join("models", "gen_model90.h5"), os.path.join("models", "srgan60.h5"))}

    # first step: load the image
    #img = Utils.crop_into_lr_shape( cv2.cvtColor( cv2.imread(file_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB ) )
    img = cv2.cvtColor( cv2.imread(file_path, cv2.IMREAD_COLOR), cv2.COLOR_BGR2RGB )

    # second step: load the model
    model = load_backend_model(models, backend, tiling, img.shape, tile_size, use_init)

    # third step sr the image
    # check for tiling
    if tiling:
        if overlap:
            tiles = Utils.tile_image(img, shape=(tile_size,tile_size), overlap=True)
        else:
            tiles = Utils.tile_image(img, shape=(tile_size,tile_size))

        x_dim = img.shape[1] // tile_size
        y_dim = img.shape[0] // tile_size

        sr_tiles = []
        for tile in tiles:
            if backend == 0:
                tmp = np.squeeze(model.predict(np.expand_dims(tile, axis=0)), axis=0)
                tmp[tmp < 0] = 0
                tmp[tmp > 255] = 255
                sr_tiles.append( tmp.astype(np.uint8) )
            else:
                sr_tiles.append( Utils.denormalize(np.squeeze(model.predict(np.expand_dims(Utils.rescale_imgs_to_neg1_1(tile), axis=0)), axis=0)))

        if stitch_type == 0:
            if overlap:
                sr = ImageStitching.stitching(sr_tiles, LR=None, image_size=(y_dim*sr_tiles[0].shape[0], x_dim*sr_tiles[0].shape[1]), overlap = True, adjustRGB = False)
            else:
                sr = ImageStitching.stitch_images(sr_tiles, x_dim*sr_tiles[0].shape[1], y_dim*sr_tiles[0].shape[0],
                                                  sr_tiles[0].shape[1], sr_tiles[0].shape[0], x_dim, y_dim)
        elif stitch_type == 1:
            if adjust_brightness and use_hsv:
                sr = ImageStitching.stitching(sr_tiles, LR = img, image_size=(y_dim*sr_tiles[0].shape[0], x_dim*sr_tiles[0].shape[1]), overlap = bool(overlap), adjustRGB = True)
            elif adjust_brightness:
                sr = ImageStitching.stitching(sr_tiles, LR = None, image_size=(y_dim*sr_tiles[0].shape[0], x_dim*sr_tiles[0].shape[1]), overlap = bool(overlap), adjustRGB = True)
            else:
                if overlap:
                    sr = ImageStitching.stitching(sr_tiles, LR=None, image_size=(y_dim*sr_tiles[0].shape[0], x_dim*sr_tiles[0].shape[1]), overlap = True, adjustRGB = False)
                else:
                    sr = ImageStitching.stitch_images(sr_tiles, x_dim*sr_tiles[0].shape[1], y_dim*sr_tiles[0].shape[0],
                                                      sr_tiles[0].shape[1], sr_tiles[0].shape[0], x_dim, y_dim)
    else:
        if backend == 0:
            sr = np.squeeze(model.predict(np.expand_dims(img, axis=0)), axis=0)
            sr[sr < 0] = 0
            sr[sr > 255] = 255
            sr = sr.astype(np.uint8)
        else:
            sr = Utils.denormalize(np.squeeze(model.predict(np.expand_dims(Utils.rescale_imgs_to_neg1_1(img), axis=0)), axis=0))

    # save the sr image
    if backend == 0:
        file_name = os.path.split(file_path)[1].split(".")[0] + "-srdense.jpg"
    elif backend == 1:
        file_name = os.path.split(file_path)[1].split(".")[0] + "-srresnet.jpg"
    elif backend == 2:
        file_name = os.path.split(file_path)[1].split(".")[0] + "-srgan.jpg"

    cv2.imwrite(os.path.join(os.path.split(file_path)[0], file_name), cv2.cvtColor(sr, cv2.COLOR_RGB2BGR))

    # clear the model
    keras.backend.clear_session()
    # return the name of the saved sr image
    return file_name
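A hypothetical call to sr_image; the file name and parameter choices are illustrative rather than taken from the source. Backend 1 selects the srresnet model, tiles of 128 px are super-resolved with overlap, and brightness is equalized during stitching:

sr_name = sr_image("example_photo.jpg",  # placeholder input path
                   backend=1,            # use srresnet85.h5
                   tiling=True,
                   tile_size=128,
                   overlap=True,
                   stitch_type=1,
                   adjust_brightness=True,
                   use_hsv=False,
                   use_init=0)
print("super-resolved image written as", sr_name)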
Exemple #48
0
 def has_latex_package(self, name: str) -> bool:
     packages = self.latex_packages + self.latex_packages_after_hyperref
     return bool([x for x in packages if x[0] == name])
Exemple #49
0
        print(__doc__)
        sys.exit(0)

    # set command variable
    monName = sys.argv[1]
    monPath = sys.argv[2]
    monFile = {
        "map": os.path.join(monPath, "map.ppm"),
        "cmd": os.path.join(monPath, "cmd"),
        "env": os.path.join(monPath, "env"),
    }

    # monitor size
    monSize = (int(sys.argv[3]), int(sys.argv[4]))

    monDecor = not bool(int(sys.argv[5]))
    grass.verbose(_("Starting map display <%s>...") % (monName))

    # create pid file
    pidFile = os.path.join(monPath, "pid")
    fd = open(pidFile, "w")
    if not fd:
        grass.fatal(_("Unable to create file <%s>") % pidFile)
    fd.write("%s\n" % os.getpid())
    fd.close()

    RunCommand("g.gisenv", set="MONITOR_%s_PID=%d" % (monName.upper(), os.getpid()))

    start = time.time()
    gmMap = MapApp(0)
    mapFrame = gmMap.CreateMapFrame(monName, monDecor)
Exemple #50
0
    def __init__(self):
        gr.top_block.__init__(self, "Top Block")
        Qt.QWidget.__init__(self)
        self.setWindowTitle("Top Block")
        qtgui.util.check_set_qss()
        try:
            self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))
        except:
            pass
        self.top_scroll_layout = Qt.QVBoxLayout()
        self.setLayout(self.top_scroll_layout)
        self.top_scroll = Qt.QScrollArea()
        self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)
        self.top_scroll_layout.addWidget(self.top_scroll)
        self.top_scroll.setWidgetResizable(True)
        self.top_widget = Qt.QWidget()
        self.top_scroll.setWidget(self.top_widget)
        self.top_layout = Qt.QVBoxLayout(self.top_widget)
        self.top_grid_layout = Qt.QGridLayout()
        self.top_layout.addLayout(self.top_grid_layout)

        self.settings = Qt.QSettings("GNU Radio", "top_block")
        self.restoreGeometry(self.settings.value("geometry").toByteArray())

        ##################################################
        # Variables
        ##################################################
        self.mod_index0 = mod_index0 = 0.5
        self.fs0 = fs0 = 100000
        self.freq0 = freq0 = 2500
        self.fpor0 = fpor0 = 30000
        self.fmod0 = fmod0 = 2000
        self.dutycycle0 = dutycycle0 = 0.5
        self.amplitud0 = amplitud0 = 1
        self.syh_on = syh_on = True
        self.switch_on = switch_on = True
        self.seniales_control_iguales = seniales_control_iguales = 0
        self.samp_rate = samp_rate = 300000
        self.mod_index = mod_index = mod_index0
        self.input_signal = input_signal = 0
        self.input_freq = input_freq = freq0
        self.fs = fs = fs0
        self.fpor = fpor = fpor0
        self.fmod = fmod = fmod0
        self.filtro_recuperador_on = filtro_recuperador_on = True
        self.filtro_antialiasing_on = filtro_antialiasing_on = True
        self.dutycycle = dutycycle = dutycycle0
        self.amplitud = amplitud = amplitud0

        ##################################################
        # Blocks
        ##################################################
        self.main_tab = Qt.QTabWidget()
        self.main_tab_widget_0 = Qt.QWidget()
        self.main_tab_layout_0 = Qt.QBoxLayout(Qt.QBoxLayout.TopToBottom,
                                               self.main_tab_widget_0)
        self.main_tab_grid_layout_0 = Qt.QGridLayout()
        self.main_tab_layout_0.addLayout(self.main_tab_grid_layout_0)
        self.main_tab.addTab(self.main_tab_widget_0, '')
        self.top_grid_layout.addWidget(self.main_tab)
        self.gui_tab_configuracion = Qt.QTabWidget()
        self.gui_tab_configuracion_widget_0 = Qt.QWidget()
        self.gui_tab_configuracion_layout_0 = Qt.QBoxLayout(
            Qt.QBoxLayout.TopToBottom, self.gui_tab_configuracion_widget_0)
        self.gui_tab_configuracion_grid_layout_0 = Qt.QGridLayout()
        self.gui_tab_configuracion_layout_0.addLayout(
            self.gui_tab_configuracion_grid_layout_0)
        self.gui_tab_configuracion.addTab(self.gui_tab_configuracion_widget_0,
                                          'Streamline')
        self.gui_tab_configuracion_widget_1 = Qt.QWidget()
        self.gui_tab_configuracion_layout_1 = Qt.QBoxLayout(
            Qt.QBoxLayout.TopToBottom, self.gui_tab_configuracion_widget_1)
        self.gui_tab_configuracion_grid_layout_1 = Qt.QGridLayout()
        self.gui_tab_configuracion_layout_1.addLayout(
            self.gui_tab_configuracion_grid_layout_1)
        self.gui_tab_configuracion.addTab(self.gui_tab_configuracion_widget_1,
                                          'Inputs')
        self.main_tab_grid_layout_0.addWidget(self.gui_tab_configuracion, 0, 0,
                                              1, 1)
        for r in range(0, 1):
            self.main_tab_grid_layout_0.setRowStretch(r, 1)
        for c in range(0, 1):
            self.main_tab_grid_layout_0.setColumnStretch(c, 1)
        _syh_on_check_box = Qt.QCheckBox("Sample and hold on")
        self._syh_on_choices = {True: 1, False: 0}
        self._syh_on_choices_inv = dict(
            (v, k) for k, v in self._syh_on_choices.iteritems())
        self._syh_on_callback = lambda i: Qt.QMetaObject.invokeMethod(
            _syh_on_check_box, "setChecked",
            Qt.Q_ARG("bool", self._syh_on_choices_inv[i]))
        self._syh_on_callback(self.syh_on)
        _syh_on_check_box.stateChanged.connect(
            lambda i: self.set_syh_on(self._syh_on_choices[bool(i)]))
        self.gui_tab_configuracion_grid_layout_0.addWidget(
            _syh_on_check_box, 0, 0, 1, 1)
        for r in range(0, 1):
            self.gui_tab_configuracion_grid_layout_0.setRowStretch(r, 1)
        for c in range(0, 1):
            self.gui_tab_configuracion_grid_layout_0.setColumnStretch(c, 1)
        _switch_on_check_box = Qt.QCheckBox("Llave analogica on")
        self._switch_on_choices = {True: 1, False: 0}
        self._switch_on_choices_inv = dict(
            (v, k) for k, v in self._switch_on_choices.iteritems())
        self._switch_on_callback = lambda i: Qt.QMetaObject.invokeMethod(
            _switch_on_check_box, "setChecked",
            Qt.Q_ARG("bool", self._switch_on_choices_inv[i]))
        self._switch_on_callback(self.switch_on)
        _switch_on_check_box.stateChanged.connect(
            lambda i: self.set_switch_on(self._switch_on_choices[bool(i)]))
        self.gui_tab_configuracion_grid_layout_0.addWidget(
            _switch_on_check_box, 0, 1, 1, 1)
        for r in range(0, 1):
            self.gui_tab_configuracion_grid_layout_0.setRowStretch(r, 1)
        for c in range(1, 2):
            self.gui_tab_configuracion_grid_layout_0.setColumnStretch(c, 1)
        self._seniales_control_iguales_options = (
            0,
            1,
        )
        self._seniales_control_iguales_labels = (
            'Iguales',
            'Invertidas',
        )
        self._seniales_control_iguales_tool_bar = Qt.QToolBar(self)
        self._seniales_control_iguales_tool_bar.addWidget(
            Qt.QLabel('Relacion seniales control' + ": "))
        self._seniales_control_iguales_combo_box = Qt.QComboBox()
        self._seniales_control_iguales_tool_bar.addWidget(
            self._seniales_control_iguales_combo_box)
        for label in self._seniales_control_iguales_labels:
            self._seniales_control_iguales_combo_box.addItem(label)
        self._seniales_control_iguales_callback = lambda i: Qt.QMetaObject.invokeMethod(
            self._seniales_control_iguales_combo_box, "setCurrentIndex",
            Qt.Q_ARG("int", self._seniales_control_iguales_options.index(i)))
        self._seniales_control_iguales_callback(self.seniales_control_iguales)
        self._seniales_control_iguales_combo_box.currentIndexChanged.connect(
            lambda i: self.set_seniales_control_iguales(
                self._seniales_control_iguales_options[i]))
        self.gui_tab_configuracion_grid_layout_1.addWidget(
            self._seniales_control_iguales_tool_bar, 0, 1, 1, 1)
        for r in range(0, 1):
            self.gui_tab_configuracion_grid_layout_1.setRowStretch(r, 1)
        for c in range(1, 2):
            self.gui_tab_configuracion_grid_layout_1.setColumnStretch(c, 1)
        self._mod_index_range = Range(0, 1, 0.05, mod_index0, 200)
        self._mod_index_win = RangeWidget(self._mod_index_range,
                                          self.set_mod_index,
                                          'Indice de modulacion (solo AM)',
                                          "counter_slider", float)
        self.gui_tab_configuracion_grid_layout_1.addWidget(
            self._mod_index_win, 3, 1, 1, 1)
        for r in range(3, 4):
            self.gui_tab_configuracion_grid_layout_1.setRowStretch(r, 1)
        for c in range(1, 2):
            self.gui_tab_configuracion_grid_layout_1.setColumnStretch(c, 1)
        self._input_signal_options = (
            0,
            1,
            2,
            3,
        )
        self._input_signal_labels = (
            'Seno',
            'Diente de sierra',
            'Seno 3/2',
            'Senial AM',
        )
        self._input_signal_tool_bar = Qt.QToolBar(self)
        self._input_signal_tool_bar.addWidget(Qt.QLabel("input_signal" + ": "))
        self._input_signal_combo_box = Qt.QComboBox()
        self._input_signal_tool_bar.addWidget(self._input_signal_combo_box)
        for label in self._input_signal_labels:
            self._input_signal_combo_box.addItem(label)
        self._input_signal_callback = lambda i: Qt.QMetaObject.invokeMethod(
            self._input_signal_combo_box, "setCurrentIndex",
            Qt.Q_ARG("int", self._input_signal_options.index(i)))
        self._input_signal_callback(self.input_signal)
        self._input_signal_combo_box.currentIndexChanged.connect(
            lambda i: self.set_input_signal(self._input_signal_options[i]))
        self.gui_tab_configuracion_grid_layout_1.addWidget(
            self._input_signal_tool_bar, 0, 0, 1, 1)
        for r in range(0, 1):
            self.gui_tab_configuracion_grid_layout_1.setRowStretch(r, 1)
        for c in range(0, 1):
            self.gui_tab_configuracion_grid_layout_1.setColumnStretch(c, 1)
        self._input_freq_range = Range(freq0 / 10, freq0 * 10, freq0 / 100,
                                       freq0, 200)
        self._input_freq_win = RangeWidget(
            self._input_freq_range, self.set_input_freq,
            'Frecuencia senial de entrada (no AM ni sen32)', "counter_slider",
            float)
        self.gui_tab_configuracion_grid_layout_1.addWidget(
            self._input_freq_win, 1, 0, 1, 1)
        for r in range(1, 2):
            self.gui_tab_configuracion_grid_layout_1.setRowStretch(r, 1)
        for c in range(0, 1):
            self.gui_tab_configuracion_grid_layout_1.setColumnStretch(c, 1)
        self._fs_range = Range(400, 130000, 100, fs0, 200)
        self._fs_win = RangeWidget(self._fs_range, self.set_fs,
                                   'Frecuencia de muestreo ', "counter_slider",
                                   float)
        self.gui_tab_configuracion_grid_layout_1.addWidget(
            self._fs_win, 3, 0, 1, 1)
        for r in range(3, 4):
            self.gui_tab_configuracion_grid_layout_1.setRowStretch(r, 1)
        for c in range(0, 1):
            self.gui_tab_configuracion_grid_layout_1.setColumnStretch(c, 1)
        self._fpor_range = Range(100, 130000, 100, fpor0, 200)
        self._fpor_win = RangeWidget(self._fpor_range, self.set_fpor,
                                     'Frecuencia portadora (solo AM)',
                                     "counter_slider", float)
        self.gui_tab_configuracion_grid_layout_1.addWidget(
            self._fpor_win, 2, 1, 1, 1)
        for r in range(2, 3):
            self.gui_tab_configuracion_grid_layout_1.setRowStretch(r, 1)
        for c in range(1, 2):
            self.gui_tab_configuracion_grid_layout_1.setColumnStretch(c, 1)
        self._fmod_range = Range(100, 130000, 100, fmod0, 200)
        self._fmod_win = RangeWidget(self._fmod_range, self.set_fmod,
                                     'Frecuencia moduladora (solo AM)',
                                     "counter_slider", float)
        self.gui_tab_configuracion_grid_layout_1.addWidget(
            self._fmod_win, 1, 1, 1, 1)
        for r in range(1, 2):
            self.gui_tab_configuracion_grid_layout_1.setRowStretch(r, 1)
        for c in range(1, 2):
            self.gui_tab_configuracion_grid_layout_1.setColumnStretch(c, 1)
        _filtro_recuperador_on_check_box = Qt.QCheckBox(
            "Filtro recuperador on")
        self._filtro_recuperador_on_choices = {True: 1, False: 0}
        self._filtro_recuperador_on_choices_inv = dict(
            (v, k) for k, v in self._filtro_recuperador_on_choices.iteritems())
        self._filtro_recuperador_on_callback = lambda i: Qt.QMetaObject.invokeMethod(
            _filtro_recuperador_on_check_box, "setChecked",
            Qt.Q_ARG("bool", self._filtro_recuperador_on_choices_inv[i]))
        self._filtro_recuperador_on_callback(self.filtro_recuperador_on)
        _filtro_recuperador_on_check_box.stateChanged.connect(
            lambda i: self.set_filtro_recuperador_on(
                self._filtro_recuperador_on_choices[bool(i)]))
        self.gui_tab_configuracion_grid_layout_0.addWidget(
            _filtro_recuperador_on_check_box, 0, 2, 1, 1)
        for r in range(0, 1):
            self.gui_tab_configuracion_grid_layout_0.setRowStretch(r, 1)
        for c in range(2, 3):
            self.gui_tab_configuracion_grid_layout_0.setColumnStretch(c, 1)
        _filtro_antialiasing_on_check_box = Qt.QCheckBox(
            "Filtro antialiasing on")
        self._filtro_antialiasing_on_choices = {True: 1, False: 0}
        self._filtro_antialiasing_on_choices_inv = dict(
            (v, k)
            for k, v in self._filtro_antialiasing_on_choices.iteritems())
        self._filtro_antialiasing_on_callback = lambda i: Qt.QMetaObject.invokeMethod(
            _filtro_antialiasing_on_check_box, "setChecked",
            Qt.Q_ARG("bool", self._filtro_antialiasing_on_choices_inv[i]))
        self._filtro_antialiasing_on_callback(self.filtro_antialiasing_on)
        _filtro_antialiasing_on_check_box.stateChanged.connect(
            lambda i: self.set_filtro_antialiasing_on(
                self._filtro_antialiasing_on_choices[bool(i)]))
        self.gui_tab_configuracion_grid_layout_0.addWidget(
            _filtro_antialiasing_on_check_box, 0, 3, 1, 1)
        for r in range(0, 1):
            self.gui_tab_configuracion_grid_layout_0.setRowStretch(r, 1)
        for c in range(3, 4):
            self.gui_tab_configuracion_grid_layout_0.setColumnStretch(c, 1)
        self._dutycycle_range = Range(0.05, 0.95, 0.05, dutycycle0, 200)
        self._dutycycle_win = RangeWidget(self._dutycycle_range,
                                          self.set_dutycycle, 'Duty cycle',
                                          "counter_slider", float)
        self.gui_tab_configuracion_grid_layout_1.addWidget(
            self._dutycycle_win, 2, 0, 1, 1)
        for r in range(2, 3):
            self.gui_tab_configuracion_grid_layout_1.setRowStretch(r, 1)
        for c in range(0, 1):
            self.gui_tab_configuracion_grid_layout_1.setColumnStretch(c, 1)
        self._amplitud_range = Range(0, 5, 0.2, amplitud0, 200)
        self._amplitud_win = RangeWidget(self._amplitud_range,
                                         self.set_amplitud, 'Amplitud',
                                         "counter_slider", float)
        self.gui_tab_configuracion_grid_layout_1.addWidget(
            self._amplitud_win, 4, 1, 1, 1)
        for r in range(4, 5):
            self.gui_tab_configuracion_grid_layout_1.setRowStretch(r, 1)
        for c in range(1, 2):
            self.gui_tab_configuracion_grid_layout_1.setColumnStretch(c, 1)
        self.senoidal = analog.sig_source_f(samp_rate, analog.GR_SIN_WAVE,
                                            input_freq, 1, 0)
        self.sen32 = blocks.wavfile_source('sen32.wav', True)
        self.qtgui_time_sink_x_0 = qtgui.time_sink_f(
            1024,  #size
            samp_rate,  #samp_rate
            "Output",  #name
            6  #number of inputs
        )
        self.qtgui_time_sink_x_0.set_update_time(0.10)
        self.qtgui_time_sink_x_0.set_y_axis(-1, 1)

        self.qtgui_time_sink_x_0.set_y_label('Amplitude', "")

        self.qtgui_time_sink_x_0.enable_tags(-1, True)
        self.qtgui_time_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE,
                                                  qtgui.TRIG_SLOPE_POS, 0.1, 0,
                                                  0, "")
        self.qtgui_time_sink_x_0.enable_autoscale(False)
        self.qtgui_time_sink_x_0.enable_grid(True)
        self.qtgui_time_sink_x_0.enable_axis_labels(True)
        self.qtgui_time_sink_x_0.enable_control_panel(True)
        self.qtgui_time_sink_x_0.enable_stem_plot(False)

        if not True:
            self.qtgui_time_sink_x_0.disable_legend()

        labels = [
            'Salida filtro antialias', 'Salida sample and hold',
            'Salida llave analogica', 'Salida filtro recuperador', 'Entrada',
            'Output', '', '', '', ''
        ]
        widths = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        colors = [
            "blue", "red", "green", "magenta", "cyan", "magenta", "yellow",
            "dark red", "dark green", "blue"
        ]
        styles = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        markers = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]

        for i in xrange(6):
            if len(labels[i]) == 0:
                self.qtgui_time_sink_x_0.set_line_label(
                    i, "Data {0}".format(i))
            else:
                self.qtgui_time_sink_x_0.set_line_label(i, labels[i])
            self.qtgui_time_sink_x_0.set_line_width(i, widths[i])
            self.qtgui_time_sink_x_0.set_line_color(i, colors[i])
            self.qtgui_time_sink_x_0.set_line_style(i, styles[i])
            self.qtgui_time_sink_x_0.set_line_marker(i, markers[i])
            self.qtgui_time_sink_x_0.set_line_alpha(i, alphas[i])

        self._qtgui_time_sink_x_0_win = sip.wrapinstance(
            self.qtgui_time_sink_x_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_0.addWidget(self._qtgui_time_sink_x_0_win, 1,
                                              0, 1, 1)
        for r in range(1, 2):
            self.main_tab_grid_layout_0.setRowStretch(r, 1)
        for c in range(0, 1):
            self.main_tab_grid_layout_0.setColumnStretch(c, 1)
        self.qtgui_freq_sink_x_0 = qtgui.freq_sink_f(
            1024,  #size
            firdes.WIN_BLACKMAN_hARRIS,  #wintype
            0,  #fc
            samp_rate,  #bw
            "",  #name
            6  #number of inputs
        )
        self.qtgui_freq_sink_x_0.set_update_time(0.10)
        self.qtgui_freq_sink_x_0.set_y_axis(-140, 10)
        self.qtgui_freq_sink_x_0.set_y_label('Relative Gain', 'dB')
        self.qtgui_freq_sink_x_0.set_trigger_mode(qtgui.TRIG_MODE_FREE, 0.0, 0,
                                                  "")
        self.qtgui_freq_sink_x_0.enable_autoscale(True)
        self.qtgui_freq_sink_x_0.enable_grid(True)
        self.qtgui_freq_sink_x_0.set_fft_average(1.0)
        self.qtgui_freq_sink_x_0.enable_axis_labels(True)
        self.qtgui_freq_sink_x_0.enable_control_panel(True)

        if not True:
            self.qtgui_freq_sink_x_0.disable_legend()

        if "float" == "float" or "float" == "msg_float":
            self.qtgui_freq_sink_x_0.set_plot_pos_half(not False)

        labels = [
            'Salida filtro antialias', 'Salida sample and hold',
            'Salida llave analogica', 'Salida filtro recuperador', 'Entrada',
            'Output', '', '', '', ''
        ]
        widths = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
        colors = [
            "blue", "red", "green", "magenta", "cyan", "magenta", "yellow",
            "dark red", "dark green", "dark blue"
        ]
        alphas = [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
        for i in xrange(6):
            if len(labels[i]) == 0:
                self.qtgui_freq_sink_x_0.set_line_label(
                    i, "Data {0}".format(i))
            else:
                self.qtgui_freq_sink_x_0.set_line_label(i, labels[i])
            self.qtgui_freq_sink_x_0.set_line_width(i, widths[i])
            self.qtgui_freq_sink_x_0.set_line_color(i, colors[i])
            self.qtgui_freq_sink_x_0.set_line_alpha(i, alphas[i])

        self._qtgui_freq_sink_x_0_win = sip.wrapinstance(
            self.qtgui_freq_sink_x_0.pyqwidget(), Qt.QWidget)
        self.main_tab_grid_layout_0.addWidget(self._qtgui_freq_sink_x_0_win, 2,
                                              0, 1, 1)
        for r in range(2, 3):
            self.main_tab_grid_layout_0.setRowStretch(r, 1)
        for c in range(0, 1):
            self.main_tab_grid_layout_0.setColumnStretch(c, 1)
        self.portadora = analog.sig_source_f(samp_rate, analog.GR_SIN_WAVE,
                                             fpor, 1, 0)
        self.moduladora = analog.sig_source_f(samp_rate, analog.GR_SIN_WAVE,
                                              fmod, 1, 0)
        self.filtro_recuperador = filter.fir_filter_fff(
            1,
            firdes.low_pass(1, samp_rate, freq0 * 18, freq0 * 18 * 0.5,
                            firdes.WIN_HAMMING, 6.76))
        self.filtro_antialiasing = filter.fir_filter_fff(
            1,
            firdes.low_pass(1, samp_rate, freq0 * 18, freq0 * 18 * 0.5,
                            firdes.WIN_HAMMING, 6.76))
        self.diente_de_sierra = analog.sig_source_f(samp_rate,
                                                    analog.GR_SAW_WAVE,
                                                    input_freq, 1, 0)
        self.blocks_throttle_0 = blocks.throttle(gr.sizeof_float * 1,
                                                 samp_rate, True)
        self.blocks_threshold_ff_0 = blocks.threshold_ff(
            dutycycle, dutycycle, 0)
        self.blocks_sample_and_hold_xx_0 = blocks.sample_and_hold_ff()
        self.blocks_multiply_xx_1 = blocks.multiply_vff(1)
        self.blocks_multiply_xx_0 = blocks.multiply_vff(1)
        self.blocks_multiply_const_vxx_0_0_0 = blocks.multiply_const_vff(
            (amplitud, ))
        self.blocks_multiply_const_vxx_0_0 = blocks.multiply_const_vff(
            (mod_index, ))
        self.blocks_multiply_const_vxx_0 = blocks.multiply_const_vff(
            (seniales_control_iguales, ))
        self.blocks_float_to_char_0 = blocks.float_to_char(1, 1)
        self.blocks_add_const_vxx_1 = blocks.add_const_vff((1, ))
        self.blocks_add_const_vxx_0 = blocks.add_const_vff((-1, ))
        self.blks2_selector_1 = grc_blks2.selector(
            item_size=gr.sizeof_float * 1,
            num_inputs=2,
            num_outputs=1,
            input_index=seniales_control_iguales,
            output_index=0,
        )
        self.blks2_selector_0_1 = grc_blks2.selector(
            item_size=gr.sizeof_float * 1,
            num_inputs=4,
            num_outputs=1,
            input_index=input_signal,
            output_index=0,
        )
        self.blks2_selector_0_0_0_0 = grc_blks2.selector(
            item_size=gr.sizeof_float * 1,
            num_inputs=2,
            num_outputs=1,
            input_index=filtro_antialiasing_on,
            output_index=0,
        )
        self.blks2_selector_0_0_0 = grc_blks2.selector(
            item_size=gr.sizeof_float * 1,
            num_inputs=2,
            num_outputs=1,
            input_index=filtro_recuperador_on,
            output_index=0,
        )
        self.blks2_selector_0_0 = grc_blks2.selector(
            item_size=gr.sizeof_float * 1,
            num_inputs=2,
            num_outputs=1,
            input_index=switch_on,
            output_index=0,
        )
        self.blks2_selector_0 = grc_blks2.selector(
            item_size=gr.sizeof_float * 1,
            num_inputs=2,
            num_outputs=1,
            input_index=syh_on,
            output_index=0,
        )
        self.analog_sig_source_x_0_0 = analog.sig_source_f(
            samp_rate, analog.GR_SAW_WAVE, fs, 1, 0)

        ##################################################
        # Connections
        ##################################################
        self.connect((self.analog_sig_source_x_0_0, 0),
                     (self.blocks_threshold_ff_0, 0))
        self.connect((self.blks2_selector_0, 0), (self.blks2_selector_0_0, 0))
        self.connect((self.blks2_selector_0, 0),
                     (self.blocks_multiply_xx_0, 0))
        self.connect((self.blks2_selector_0_0, 0),
                     (self.blks2_selector_0_0_0, 0))
        self.connect((self.blks2_selector_0_0, 0),
                     (self.filtro_recuperador, 0))
        self.connect((self.blks2_selector_0_0_0, 0),
                     (self.qtgui_freq_sink_x_0, 5))
        self.connect((self.blks2_selector_0_0_0, 0),
                     (self.qtgui_time_sink_x_0, 5))
        self.connect((self.blks2_selector_0_0_0_0, 0),
                     (self.blks2_selector_0, 0))
        self.connect((self.blks2_selector_0_0_0_0, 0),
                     (self.blocks_sample_and_hold_xx_0, 0))
        self.connect((self.blks2_selector_0_1, 0),
                     (self.blocks_multiply_const_vxx_0_0_0, 0))
        self.connect((self.blks2_selector_1, 0),
                     (self.blocks_float_to_char_0, 0))
        self.connect((self.blocks_add_const_vxx_0, 0),
                     (self.blocks_multiply_const_vxx_0, 0))
        self.connect((self.blocks_add_const_vxx_1, 0),
                     (self.blocks_multiply_xx_1, 1))
        self.connect((self.blocks_float_to_char_0, 0),
                     (self.blocks_sample_and_hold_xx_0, 1))
        self.connect((self.blocks_multiply_const_vxx_0, 0),
                     (self.blks2_selector_1, 1))
        self.connect((self.blocks_multiply_const_vxx_0_0, 0),
                     (self.blocks_add_const_vxx_1, 0))
        self.connect((self.blocks_multiply_const_vxx_0_0_0, 0),
                     (self.blocks_throttle_0, 0))
        self.connect((self.blocks_multiply_xx_0, 0),
                     (self.blks2_selector_0_0, 1))
        self.connect((self.blocks_multiply_xx_0, 0),
                     (self.qtgui_freq_sink_x_0, 2))
        self.connect((self.blocks_multiply_xx_0, 0),
                     (self.qtgui_time_sink_x_0, 2))
        self.connect((self.blocks_multiply_xx_1, 0),
                     (self.blks2_selector_0_1, 3))
        self.connect((self.blocks_sample_and_hold_xx_0, 0),
                     (self.blks2_selector_0, 1))
        self.connect((self.blocks_sample_and_hold_xx_0, 0),
                     (self.qtgui_freq_sink_x_0, 1))
        self.connect((self.blocks_sample_and_hold_xx_0, 0),
                     (self.qtgui_time_sink_x_0, 1))
        self.connect((self.blocks_threshold_ff_0, 0),
                     (self.blks2_selector_1, 0))
        self.connect((self.blocks_threshold_ff_0, 0),
                     (self.blocks_add_const_vxx_0, 0))
        self.connect((self.blocks_threshold_ff_0, 0),
                     (self.blocks_multiply_xx_0, 1))
        self.connect((self.blocks_throttle_0, 0),
                     (self.blks2_selector_0_0_0_0, 0))
        self.connect((self.blocks_throttle_0, 0),
                     (self.filtro_antialiasing, 0))
        self.connect((self.blocks_throttle_0, 0),
                     (self.qtgui_freq_sink_x_0, 4))
        self.connect((self.blocks_throttle_0, 0),
                     (self.qtgui_time_sink_x_0, 4))
        self.connect((self.diente_de_sierra, 0), (self.blks2_selector_0_1, 1))
        self.connect((self.filtro_antialiasing, 0),
                     (self.blks2_selector_0_0_0_0, 1))
        self.connect((self.filtro_antialiasing, 0),
                     (self.qtgui_freq_sink_x_0, 0))
        self.connect((self.filtro_antialiasing, 0),
                     (self.qtgui_time_sink_x_0, 0))
        self.connect((self.filtro_recuperador, 0),
                     (self.blks2_selector_0_0_0, 1))
        self.connect((self.filtro_recuperador, 0),
                     (self.qtgui_freq_sink_x_0, 3))
        self.connect((self.filtro_recuperador, 0),
                     (self.qtgui_time_sink_x_0, 3))
        self.connect((self.moduladora, 0),
                     (self.blocks_multiply_const_vxx_0_0, 0))
        self.connect((self.portadora, 0), (self.blocks_multiply_xx_1, 0))
        self.connect((self.sen32, 0), (self.blks2_selector_0_1, 2))
        self.connect((self.senoidal, 0), (self.blks2_selector_0_1, 0))
Exemple #51
0
 def revertFsRedir():
     return bool(libclamav.cw_revertfsredir())
Exemple #52
0
def csAlphanumericRestriction(input_str):
    return bool(re.fullmatch("[a-zA-Z]+|[\d]+", input_str))
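The regular expression accepts strings that consist entirely of letters or entirely of digits and rejects mixed or empty input; a few illustrative checks:

assert csAlphanumericRestriction("hello")        # all letters  -> True
assert csAlphanumericRestriction("12345")        # all digits   -> True
assert not csAlphanumericRestriction("abc123")   # mixed        -> False
assert not csAlphanumericRestriction("")         # empty string -> False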
Exemple #53
0
 def isWow64():
     is_wow64 = c_int()
     IsWow64Process(GetCurrentProcess(), byref(is_wow64))
     return bool(is_wow64)
Exemple #54
0
    def load_waveform(self, load_dict):
        """ Loads a waveform to the specified channel of the pulsing device.

        @param dict|list load_dict: a dictionary with keys being one of the available channel
                                    indices and values being the name of the already written
                                    waveform to load into the channel. Examples:

                                        {1: rabi_ch1, 2: rabi_ch2}
                                    or
                                        {1: rabi_ch2, 2: rabi_ch1}

                                    If just a list of waveform names is given,
                                    the channel association will be invoked from
                                    the channel suffix '_ch1', '_ch2' etc. A
                                    possible configuration can be e.g.

                                        ['rabi_ch1', 'rabi_ch2', 'rabi_ch3']

        @return dict: Dictionary containing the actually loaded waveforms per
                      channel.

        For devices that have a workspace (i.e. AWG) this will load the waveform
        from the device workspace into the channel. For a device without mass
        memory, this will make the waveform/pattern that has been previously
        written with self.write_waveform ready to play.

        Please note that the channel index used here is not to be confused with the number suffix
        in the generic channel descriptors (i.e. 'd_ch1', 'a_ch1'). The channel index used here is
        highly hardware specific and corresponds to a collection of digital and analog channels
        being associated to a SINGLE waveform asset.
        """

        # create new dictionary with keys = num_of_ch and item = waveform

        max_chunk_size = self.instance.max_chunk_size
        empty_chunk_array_factor = self.instance.empty_chunk_array_factor

        if isinstance(load_dict, list):
            new_dict = dict()
            for waveform in load_dict:
                wave_name = waveform.rsplit('.pkl')[0]
                channel_num = int(wave_name.rsplit('_ch', 1)[1])
                if not '_a_ch' in waveform:
                    channel = channel_num + 4
                else:
                    channel = channel_num
                new_dict[channel] = wave_name
            load_dict = new_dict

        if not bool(load_dict):
            print('No data to send to AWG')
            return -1

        # load possible sequences
        path = self.waveform_folder
        wave_form_files = self.get_waveform_names()
        wave_form_list = [file.rsplit('.pkl')[0] for file in wave_form_files]
        # with open(path,'w') as json_file:
        #     wave_form_dict  = json.load(json_file)

        # dict_path = os.path.join('awg', 'WaveFormDict.pkl')
        # pkl_file = open(dict_path, 'rb')
        # wave_form_dict = pickle.load(pkl_file)
        # pkl_file.close()

        data_list = list()
        # this looks like 4 analog channels and 6 digital
        for i in range(4):
            data_list.append(
                np.zeros(int(max_chunk_size * empty_chunk_array_factor),
                         np.int16))
        for i in range(6):
            data_list.append(
                np.zeros(int(max_chunk_size * empty_chunk_array_factor),
                         bool))

        for ch, value in load_dict.items():
            if value in wave_form_list:
                wavefile = '{0}.pkl'.format(value)
                filepath = os.path.join(path, wavefile)
                data = self.my_load_dict(filepath)
                data_list[ch][0:len(data)] = data
                data_size = len(data)
                if '_a_ch' in value:
                    chan_name = 'a_ch{0}'.format(value.rsplit('a_ch')[1])
                    self.loaded_assets[chan_name] = value
                else:
                    chan_name = 'd_ch{0}'.format(value.rsplit('d_ch')[1])
                    self.loaded_assets[chan_name] = value
            else:
                print('waveform not found')
                data_size = 0

        # key_list = list()

        # for key in wave_form_dict.keys():
        # key_list.append(key.rsplit('_a',1)[0])

        # find the given sequence in the dictionary and load to the wanted channel
        # for chan, wave_name in load_dict.items():
        #     wave_form = wave_form_dict.get(wave_name)
        #     if wave_form is not None:
        #         # prepare_ch(name=)
        #         # self.instance.upload_wave_from_list(wave_form)
        #         data = np.asarray(wave_form*(2**15-1), dtype=np.int16)
        #         data_list[chan][0:len(wave_form)] = data
        #         # plt.plot(data)
        #         # plt.show()
        #         data_size = len(wave_form)
        #     else:
        #         self.log.error(wave_name + ' not in dictionary')
        new_list = list()
        if data_size < len(data_list[0]):
            for row in data_list:
                new_row = row[0:data_size]
                new_list.append(new_row)
            data_list = new_list

        # this is done in the spectrumAWG file, now both QUEST_AWG and spectrumAWG have the same output, see pg 80 in manual
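        # Illustrative example (not in the original code): a waveform of 100
        # samples would be padded with 28 zero samples so that data_size
        # becomes 128, the next multiple of 32.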
        count = 0
        while data_size % 32 != 0:
            data_size += 1
            count += 1
        if count != 0:
            extra = np.zeros(count, np.int16)
            new_list = list()
            for row in data_list:
                new_row = np.concatenate((row, extra), axis=0)
                new_list.append(new_row)
            data_list = new_list

        self.instance.set_memory_size(int(data_size))
        self.log.info('Waveform sent to AWG')

        if data_size != 0:
            self.instance.upload(data_list, data_size, mem_offset=0)
            self.typeloaded = 'waveform'
            print('data sent to awg')
            # print(data_list[0][0:5])
        return load_dict
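
A minimal usage sketch (not part of the original snippet; the instance name awg and the waveform names are illustrative): the channel association can be given explicitly as a dict, or inferred from the '_ch<N>' suffix when a list of file names is passed.

# assuming 'rabi_a_ch1.pkl' and 'rabi_d_ch1.pkl' were written beforehand
loaded = awg.load_waveform({1: 'rabi_a_ch1', 5: 'rabi_d_ch1'})    # analog ch1, digital ch1 (index 5)
loaded = awg.load_waveform(['rabi_a_ch1.pkl', 'rabi_d_ch1.pkl'])  # same mapping, inferred from suffix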
Exemple #55
0
 def enabled(self):
     return bool(self._trace_observer_host)
Exemple #56
0
 def disableFsRedir():
     return bool(libclamav.cw_disablefsredir())
Exemple #57
0
def _is_database_under_alembic_control(engine):
    with engine.connect() as conn:
        context = alembic_migration.MigrationContext.configure(conn)
        return bool(context.get_current_revision())
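
A hedged usage sketch for the helper above (the SQLite URL is illustrative, and SQLAlchemy is assumed to be installed alongside alembic):

from sqlalchemy import create_engine

engine = create_engine("sqlite:///example.db")
if _is_database_under_alembic_control(engine):
    print("database has a recorded Alembic revision")
else:
    print("database is not under Alembic control")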
Exemple #58
0
def main():
    parser = argparse.ArgumentParser()

    ## Required parameters
    parser.add_argument("--data_dir", default='dataset/car_data', type=str, required=False,
                        help="输入数据文件地址")
    parser.add_argument("--model_type", default='albert', type=str, required=False,
                        help="模型种类")
    parser.add_argument("--model_name_or_path", default='prev_trained_model/albert_chinese_small', type=str,
                        required=False,
                        help="模型参数文件地址")
    parser.add_argument("--task_name", default='car', type=str, required=False,
                        help="那个种类数据" + ", ".join(processors.keys()))
    parser.add_argument("--output_dir", default='outputs', type=str, required=False,
                        help="输出文件地址")
    parser.add_argument("--vocab_file", default='prev_trained_model/albert_chinese_small/vocab.txt', type=str)

    ## Other parameters
    parser.add_argument("--config_name", default="", type=str,
                        help="配置文件地址")
    parser.add_argument("--tokenizer_name", default="", type=str,
                        help="Pretrained tokenizer name or path if not the same as model_name")
    parser.add_argument("--cache_dir", default="", type=str,
                        help="Where do you want to store the pre-trained models downloaded from s3")
    parser.add_argument("--max_seq_length", default=512, type=int,
                        help="句子最大长度")
    parser.add_argument("--do_train", action='store_true',
                        help="训练")
    parser.add_argument("--do_eval", action='store_true',
                        help="验证")
    parser.add_argument("--do_predict", action='store_true',
                        help="预测")
    parser.add_argument("--do_lower_case", action='store_true',
                        help="Set this flag if you are using an uncased model.")

    parser.add_argument("--per_gpu_train_batch_size", default=8, type=int,
                        help="批量大小")
    parser.add_argument("--per_gpu_eval_batch_size", default=8, type=int,
                        help="验证批量大小")
    parser.add_argument('--gradient_accumulation_steps', type=int, default=1,
                        help="Number of update steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--learning_rate", default=5e-5, type=float,
                        help="Learning rate for the Adam optimizer")
    parser.add_argument("--weight_decay", default=0.0, type=float,
                        help="Weight decay if we apply some.")
    parser.add_argument("--adam_epsilon", default=1e-6, type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--max_grad_norm", default=1.0, type=float,
                        help="Max gradient norm.")
    parser.add_argument("--num_train_epochs", default=3.0, type=float,
                        help="Total number of training epochs to perform.")
    parser.add_argument("--max_steps", default=-1, type=int,
                        help="If > 0: set total number of training steps to perform. Overrides num_train_epochs.")
    parser.add_argument("--warmup_proportion", default=0.1, type=float,
                        help="Proportion of training to perform linear learning rate warmup for. E.g., 0.1 = 10 percent of training.")

    parser.add_argument('--logging_steps', type=int, default=10,
                        help="Log every X update steps.")
    parser.add_argument('--save_steps', type=int, default=1000,
                        help="Save a checkpoint every X update steps")
    parser.add_argument("--eval_all_checkpoints", type=str, default='do',  # action='store_true',
                        help="Evaluate all checkpoints starting with the same prefix as model_name and ending with a step number")
    parser.add_argument("--no_cuda", type=int, default=0,  # action='store_true',
                        help="Avoid using the GPU even when CUDA is available")
    parser.add_argument('--overwrite_output_dir', action='store_true',
                        help="Overwrite the content of the output directory")
    parser.add_argument('--overwrite_cache', action='store_true',
                        help="Overwrite the cached training and evaluation sets")
    parser.add_argument('--seed', type=int, default=42,
                        help="Random seed")

    parser.add_argument('--fp16', action='store_true',
                        help="Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit")
    parser.add_argument('--fp16_opt_level', type=str, default='O1',
                        help="For fp16: Apex AMP optimization level selected in ['O0', 'O1', 'O2', and 'O3']."
                             "See details at https://nvidia.github.io/apex/amp.html")
    parser.add_argument("--local_rank", type=int, default=0,
                        help="For distributed training: local_rank")

    args = parser.parse_args()

    if not os.path.exists(args.output_dir):
        os.mkdir(args.output_dir)
    type_task = args.model_type + '_' + '{}'.format(args.task_name)
    if not os.path.exists(os.path.join(args.output_dir, type_task)):
        os.mkdir(os.path.join(args.output_dir, type_task))
    init_logger(log_file=args.output_dir + '/{}-{}.log'.format(args.model_type, args.task_name))

    # Setup CUDA, GPU & distributed training
    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        args.n_gpu = torch.cuda.device_count()
    else:  # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        # torch.distributed.init_process_group(backend='nccl')
        args.n_gpu = 1
    args.device = device

    # Setup logging
    logger.warning("Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
                   args.local_rank, device, args.n_gpu, bool(args.local_rank != -1), args.fp16)
    # Set seed
    seed_everything(args.seed)
    # Prepare GLUE task
    args.task_name = args.task_name.lower()
    if args.task_name not in processors:
        raise ValueError("Task not found: %s" % (args.task_name))
    processor = processors[args.task_name]()
    args.output_mode = output_modes[args.task_name]
    label_list = processor.get_labels()
    num_labels = len(label_list)

    args.model_type = args.model_type.lower()
    config = AlbertConfig.from_pretrained(args.config_name if args.config_name else args.model_name_or_path,
                                          num_labels=num_labels,
                                          finetuning_task=args.task_name)
    tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file,
                                                  do_lower_case=args.do_lower_case)
    model = AlbertForSequenceClassification.from_pretrained(args.model_name_or_path, config=config)
    #if args.local_rank == 0:
    #    torch.distributed.barrier()  # Make sure only the first process in distributed training will download model & vocab
    model.to(args.device)
    logger.info("Training/evaluation parameters %s", args)

    # Training
    # args.do_train = True
    if args.do_train:
        train_dataset = load_and_cache_examples(args, args.task_name, tokenizer, data_type='train')
        
        global_step, tr_loss = train(args, train_dataset, model, tokenizer)
        logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)

    # Saving best-practices: if you use defaults names for the model, you can reload it using from_pretrained()
    if args.do_train:# and (args.local_rank == -1 or torch.distributed.get_rank() == 0):
        # Create output directory if needed
        if not os.path.exists(args.output_dir) and args.local_rank in [-1, 0]:
            os.makedirs(args.output_dir)

        logger.info("Saving model checkpoint to %s", args.output_dir)
        # Save a trained model, configuration and tokenizer using `save_pretrained()`.
        # They can then be reloaded using `from_pretrained()`
        model_to_save = model.module if hasattr(model,
                                                'module') else model  # Take care of distributed/parallel training
        model_to_save.save_pretrained(args.output_dir)
        # Good practice: save your training arguments together with the trained model
        torch.save(args, os.path.join(args.output_dir, 'training_args.bin'))

    # Evaluation
    # args.do_eval = True
    results = []
    if args.do_eval and args.local_rank in [-1, 0]:
        tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file,
                                                      do_lower_case=args.do_lower_case,
                                                      )
        checkpoints = [(0,args.output_dir)]
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            checkpoints = [(int(checkpoint.split('-')[-1]),checkpoint) for checkpoint in checkpoints if checkpoint.find('checkpoint') != -1]
            checkpoints = sorted(checkpoints,key =lambda x:x[0])
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        for _,checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""

            model =AlbertForSequenceClassification.from_pretrained(checkpoint)
            model.to(args.device)
            result = evaluate(args, model, tokenizer, prefix=prefix)
            results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()])
        output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key,value in results:
                writer.write("%s = %s\n" % (key, str(value)))

    # args.do_predict = True
    predict_results = []
    if args.do_predict and args.local_rank in [-1, 0]:
        tokenizer = tokenization_albert.FullTokenizer(vocab_file=args.vocab_file,
                                                      do_lower_case=args.do_lower_case,
                                                      )
        # checkpoints_path = os.path.join(args.output_dir, 'checkpoint-4000')
        checkpoints = [(0, args.output_dir)]
        
        if args.eval_all_checkpoints:
            checkpoints = list(
                os.path.dirname(c) for c in sorted(glob.glob(args.output_dir + '/**/' + WEIGHTS_NAME, recursive=True)))
            checkpoints = [(int(checkpoint.split('-')[-1]), checkpoint) for checkpoint in checkpoints if
                           checkpoint.find('checkpoint') != -1]
            checkpoints = sorted(checkpoints, key=lambda x: x[0])
        logger.info("Evaluate the following checkpoints: %s", checkpoints)
        checkpoints = [checkpoints[-1]]

        for _, checkpoint in checkpoints:
            global_step = checkpoint.split('-')[-1] if len(checkpoints) > 1 else ""
            prefix = checkpoint.split('/')[-1] if checkpoint.find('checkpoint') != -1 else ""

            model = AlbertForSequenceClassification.from_pretrained(checkpoint)
            model.to(args.device)
            result = predict(args, model, tokenizer, prefix=prefix)
            predict_results.extend([(k + '_{}'.format(global_step), v) for k, v in result.items()])
        output_eval_file = os.path.join(args.output_dir, "checkpoint_eval_results.txt")
        with open(output_eval_file, "w") as writer:
            for key, value in predict_results:
                writer.write("%s = %s\n" % (key, str(value)))
Exemple #59
0
 def is_annonymous(self):
     """
     Is this object anonymous.
     """
     value = self.get_values("is_annonymous")
     return bool(value)
# the logging things
import logging
logging.basicConfig(level=logging.DEBUG,
                    format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

import asyncio
import json
import math
import os
import shutil
import time
from datetime import datetime

# the secret configuration specific things
if bool(os.environ.get("WEBHOOK", False)):
    from sample_config import Config
else:
    from config import Config

# the Strings used for this "thing"
from translation import Translation

import pyrogram
logging.getLogger("pyrogram").setLevel(logging.WARNING)

from helper_funcs.chat_base import TRChatBase
from helper_funcs.display_progress import progress_for_pyrogram, humanbytes
from hachoir.metadata import extractMetadata
from hachoir.parser import createParser
# https://stackoverflow.com/a/37631799/4723940