Example #1
  def _FindPsOutputFormat(cls, cmd, args):
    """Return our best guess the formating of the "ps" output."""
    output_format = []
    for arg in args:
      # If the "ps" arg contains a comma, it's probably an output format defn.
      if "," in arg:
        output_format.extend(arg.split(","))
    if not output_format:
      # Assume a default format for the "-f" style formatting.
      output_format = ["user", "pid", "ppid", "pcpu", "not_implemented", "tty",
                       "not_implemented", "cmd"]
    # Do some sanity checking for the cmd/cmdline if present.
    for option in ["cmd", "command", "args"]:
      if option in output_format:
        if output_format.count(option) > 1:
          logging.warn("Multiple commandline outputs expected in '%s %s' "
                       "output. Skipping parsing.", cmd, " ".join(args))
          return []
        if output_format[-1] != option:
          logging.warn("'ps's output has the commandline not as the last "
                       "column. We can't safely parse output of '%s %s'."
                       "Skipping parsing.", cmd, " ".join(args))
          return []

    # If we made it here, we should be able to parse the output and we have a
    # good idea of its format.
    return output_format
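
A hypothetical companion helper (not part of the original module) shows how the column list returned above could be paired with a split "ps" output line; the command column is kept last so a capped split preserves its embedded spaces.

def ParsePsLine(line, output_format):
  # Split into at most len(output_format) fields so the trailing command
  # column keeps any spaces it contains.
  parts = line.split(None, len(output_format) - 1)
  return dict(zip(output_format, parts))

# e.g. ParsePsLine("root 1 0 0.0 x tty1 y /sbin/init --arg",
#                  ["user", "pid", "ppid", "pcpu", "not_implemented",
#                   "tty", "not_implemented", "cmd"])
# -> {..., "cmd": "/sbin/init --arg"}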
Example #2
    def read(self, request):
        logging.info("__INIT__read[VappVm]")
        res = vapp_vm_pb2.ReadVappVmResult()
        res.present = False
        org_resource = self.client.get_org()
        org = Org(self.client, resource=org_resource)
        try:
            vdc_resource = org.get_vdc(request.target_vdc)
            vdc = VDC(
                self.client, name=request.target_vdc, resource=vdc_resource)

            vapp_resource = vdc.get_vapp(request.target_vapp)
            vapp = VApp(
                self.client, name=request.target_vapp, resource=vapp_resource)
            read_vapp_vm_resp = vapp.get_vm(request.target_vm_name)
            vm = VM(client=self.client, href=None, resource=read_vapp_vm_resp)

            res.present = True
        except Exception as e:
            errmsg = '__ERROR_read[VappVm] failed for VappVm {0}. __ErrorMessage__ {1}'
            logging.warn(errmsg.format(request.target_vm_name, str(e)))

            return res

        logging.info("__DONE__read[VappVm]")

        return res
Example #3
    def post_config_change(self, method):
        route = CsRoute()
        if method == "add":
            route.add_table(self.dev)
            route.add_route(self.dev, str(self.address["network"]))
        elif method == "delete":
            logging.warn("delete route not implemented")

        self.fw_router()
        self.fw_vpcrouter()

        # On deletion nw_type will no longer be known
        if self.get_type() in ["guest"] and self.config.is_vpc():

            CsDevice(self.dev, self.config).configure_rp()

            logging.error(
                "Not able to setup source-nat for a regular router yet")
            dns = CsDnsmasq(self)
            dns.add_firewall_rules()
            app = CsApache(self)
            app.setup()

        cmdline = self.config.cmdline()
        # If redundant then this is dealt with by the master backup functions
        if self.get_type() in ["guest"] and not cmdline.is_redundant():
            pwdsvc = CsPasswdSvc(self.address['public_ip']).start()

        if self.get_type() == "public" and self.config.is_vpc():
            if self.address["source_nat"]:
                vpccidr = cmdline.get_vpccidr()
                self.fw.append(
                    ["filter", "", "-A FORWARD -s %s ! -d %s -j ACCEPT" % (vpccidr, vpccidr)])
                self.fw.append(
                    ["nat", "", "-A POSTROUTING -j SNAT -o %s --to-source %s" % (self.dev, self.address['public_ip'])])
Example #4
 def _ota_chunk_data(self, data, data_cb):
     # spec https://shadowsocks.org/en/spec/one-time-auth.html
     unchunk_data = b''
     while len(data) > 0:
         if self._ota_len == 0:
             # get DATA.LEN + HMAC-SHA1
             length = ONETIMEAUTH_CHUNK_BYTES - len(self._ota_buff_head)
             self._ota_buff_head += data[:length]
             data = data[length:]
             if len(self._ota_buff_head) < ONETIMEAUTH_CHUNK_BYTES:
                 # wait more data
                 return
             data_len = self._ota_buff_head[:ONETIMEAUTH_CHUNK_DATA_LEN]
             self._ota_len = struct.unpack('>H', data_len)[0]
         length = min(self._ota_len, len(data))
         self._ota_buff_data += data[:length]
         data = data[length:]
         if len(self._ota_buff_data) == self._ota_len:
             # get a chunk data
             _hash = self._ota_buff_head[ONETIMEAUTH_CHUNK_DATA_LEN:]
             _data = self._ota_buff_data
             index = struct.pack('>I', self._ota_chunk_idx)
             key = self._encryptor.decipher_iv + index
             if onetimeauth_verify(_hash, _data, key) is False:
                 logging.warn('one time auth fail, drop chunk !')
             else:
                 unchunk_data += _data
                 self._ota_chunk_idx += 1
             self._ota_buff_head = b''
             self._ota_buff_data = b''
             self._ota_len = 0
     data_cb(unchunk_data)
     return
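
A minimal sketch of the verification helper and constants the chunk parser above relies on, assuming the HMAC-SHA1 scheme from the referenced one-time-auth spec (the tag is the first 10 bytes of HMAC-SHA1 over the chunk data, keyed by decipher IV plus chunk index); the names and values here are assumptions, not taken from the original module.

import hashlib
import hmac

ONETIMEAUTH_BYTES = 10                      # truncated HMAC-SHA1 tag length (assumed)
ONETIMEAUTH_CHUNK_DATA_LEN = 2              # 2-byte big-endian DATA.LEN field (assumed)
ONETIMEAUTH_CHUNK_BYTES = ONETIMEAUTH_CHUNK_DATA_LEN + ONETIMEAUTH_BYTES

def onetimeauth_verify(tag, data, key):
    # Recompute the truncated HMAC-SHA1 tag and compare in constant time.
    computed = hmac.new(key, data, hashlib.sha1).digest()[:ONETIMEAUTH_BYTES]
    return hmac.compare_digest(computed, tag)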
Example #5
    def create_from_vapp(self, request):
        logging.info("__INIT__create[VappVm] source_catalog_name[%s]",
                     request.source_vapp)
        res = vapp_vm_pb2.CreateVappVmResult()
        res.created = False
        source_vapp_resource = self.get_vapp_resource(
            request.target_vdc, vapp_name=request.source_vapp)
        target_vapp_resource = self.get_vapp_resource(
            request.target_vdc, vapp_name=request.target_vapp)

        specs = [{
            'vapp': source_vapp_resource,
            'source_vm_name': request.source_vm_name,
            'target_vm_name': request.target_vm_name,
            'hostname': request.hostname,
            'password': request.password,
            'password_auto': request.password_auto,
            'password_reset': request.password_reset,
            'cust_script': request.cust_script,
            'network': request.network,
            # 'storage_profile': request.storage_profile
        }]

        try:
            vapp = VApp(self.client, resource=target_vapp_resource)
            create_vapp_vm_resp = vapp.add_vms(
                specs,
                power_on=request.power_on,
                all_eulas_accepted=request.all_eulas_accepted)
            task_monitor = self.client.get_task_monitor()
            task = task_monitor.wait_for_status(
                task=create_vapp_vm_resp,
                timeout=60,
                poll_frequency=2,
                fail_on_statuses=None,
                expected_target_statuses=[
                    TaskStatus.SUCCESS, TaskStatus.ABORTED, TaskStatus.ERROR,
                    TaskStatus.CANCELED
                ],
                callback=None)

            st = task.get('status')
            if st != TaskStatus.SUCCESS.value:
                raise errors.VappVmCreateError(
                    etree.tostring(task, pretty_print=True))

            message = 'status : {0} '.format(st)
            logging.info(message)
            res.created = True

        except Exception as e:
            errmsg = '''__ERROR_create[VappVm] failed for vm {0}. __ErrorMessage__ {1}'''
            logging.warn(errmsg.format(request.target_vm_name, str(e)))
            self.context.set_code(grpc.StatusCode.INVALID_ARGUMENT)
            self.context.set_details(errmsg)

            return res

        logging.info("__DONE__create[VappVm]")
        return res
Example #6
    def __init__(self, **names_vals):
        for name, val in names_vals.iteritems():
            if name.startswith(('_', 'is_valid_')):
                raise NameError('The parameter name %s is not acceptable'
                                % name)
            try:
                convert = self.__class__.params[name]
            except KeyError:
                logging.warn('The parameter %r is unknown, ignoring' % name)
                continue
            try:
                value = convert(val)
            except:
                raise ValueError('Could not convert to %s: %s=%s'
                                 % (convert.__name__, name, val))
            setattr(self, name, value)

        valids = sorted(getattr(self, valid)
                        for valid in dir(self.__class__)
                        if valid.startswith('is_valid_'))
        for is_valid in valids:
            if not is_valid():
                dump = '\n'.join('%s=%s' % (n, v)
                                 for n, v in sorted(self.__dict__.items()))
                raise ValueError(is_valid.__doc__ + 'Got:\n' + dump)
Example #7
def gather(suffix, options):
    url = options.get("url")
    if url is None:
        logging.warn("A --url is required. (Can be a local path.)")
        exit(1)

    # remote URL
    if url.startswith("http:") or url.startswith("https:"):
        # Though it's saved in cache/, it will be downloaded every time.
        remote_path = os.path.join(utils.cache_dir(), "url.csv")

        try:
            response = requests.get(url)
            utils.write(response.text, remote_path)
        except:
            logging.error("Remote URL not downloaded successfully.")
            print(utils.format_last_exception())
            exit(1)

    # local path
    else:
        remote_path = url

    for domain in utils.load_domains(remote_path):
        yield domain
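
A hedged sketch of what a load_domains helper of the kind used above might do, assuming the CSV's first column holds one domain per row; this is an illustration, not the project's actual utils module.

import csv

def load_domains(path):
    # Yield the first column of each row, skipping blanks and comment lines.
    with open(path, newline='') as f:
        for row in csv.reader(f):
            if row and row[0].strip() and not row[0].startswith("#"):
                yield row[0].strip().lower()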
Example #8
    def _stage_final_image(self):
        try:
            makedirs(self.__ensure_isodir() + "/LiveOS")

            self._resparse()

            if not self.skip_minimize:
                create_image_minimizer(self.__isodir + "/LiveOS/osmin.img", self._image, self.compress_type)

            if self.skip_compression:
                shutil.move(self._image, self.__isodir + "/LiveOS/ext3fs.img")
                if os.stat(self.__isodir + "/LiveOS/ext3fs.img").st_size >= 4*1024*1024*1024:
                    self._isofstype = "udf"
                    logging.warn("Switching to UDF due to size of LiveOS/ext3fs.img")
            else:
                makedirs(os.path.join(os.path.dirname(self._image), "LiveOS"))
                shutil.move(self._image,
                            os.path.join(os.path.dirname(self._image),
                                         "LiveOS", "ext3fs.img"))
                mksquashfs(os.path.dirname(self._image),
                           self.__isodir + "/LiveOS/squashfs.img",
                           self.compress_type)
                if os.stat(self.__isodir + "/LiveOS/squashfs.img").st_size >= 4*1024*1024*1024:
                    self._isofstype = "udf"
                    logging.warn("Switching to UDF due to size of LiveOS/squashfs.img")


            self.__create_iso(self.__isodir)
        finally:
            shutil.rmtree(self.__isodir, ignore_errors = True)
            self.__isodir = None
Example #9
    def _launchSlaves(self):
        """
        Launch a group of worker processes (self._slaves), the queue
        (self._workQueue) that will be used to send them chunks of
        work, and the queue that will be used to receive back the
        results (self._resultsQueue).

        Additionally, launch the result collector process.
        """
        availableCpus = multiprocessing.cpu_count()
        logging.info("Available CPUs: %d" % (availableCpus,))
        logging.info("Requested workers: %d" % (options.numWorkers,))
        logging.info("Parallel Mode: %s" % ("Threaded" if options.threaded else "Process",))
        if (options.numWorkers > availableCpus):
            logging.warn("More workers requested (%d) than CPUs available (%d);"
                         " may result in suboptimal performance."
                         % (options.numWorkers, availableCpus))
        self._initQueues()

        WorkerType, ResultCollectorType = self._algorithm.slaveFactories(options.threaded)
        self._slaves = []
        for i in xrange(options.numWorkers):
            p = WorkerType(self._workQueue, self._resultsQueue, self._algorithmConfiguration)
            self._slaves.append(p)
            p.start()
        logging.info("Launched compute slaves.")

        rcp = ResultCollectorType(self._resultsQueue, self._algorithmConfiguration)
        rcp.start()
        self._slaves.append(rcp)
        logging.info("Launched collector slave.")
Example #10
    def search(self, search_query):
        """
        Search in the store for tiddlers that match search_query.
        This is intentionally simple, slow and broken to encourage overriding.
        """
        bag_filenames = self._bag_filenames()

        query = search_query.lower()

        for bagname in bag_filenames:
            bagname = urllib.unquote(bagname).decode('utf-8')
            tiddler_dir = self._tiddlers_dir(bagname)
            tiddler_files = self._files_in_dir(tiddler_dir)
            for tiddler_name in tiddler_files:
                tiddler = Tiddler(
                        title=urllib.unquote(tiddler_name).decode('utf-8'),
                        bag=bagname)
                try:
                    revision_id = self.list_tiddler_revisions(tiddler)[0]
                    if query in tiddler.title.lower():
                        yield tiddler
                        continue
                    tiddler_file = codecs.open(
                        self._tiddler_full_filename(tiddler, revision_id),
                        encoding='utf-8')
                    for line in tiddler_file:
                        if query in line.lower():
                            yield tiddler
                            break
                except (OSError, NoTiddlerError), exc:
                    logging.warn('malformed tiddler during search: %s:%s',
                            bagname, tiddler_name)
Example #11
 def dataReadsBlocks2(self, addrThreshold = 16):
     lastAddr = 0
     lastInstruction = None
     
     blocks = []
     currentBlock = []
     
     for i in self:
         datas = []
         for data in i.data:
             if data.mode == data.modeRead:
                 datas.append(data)
         if len(datas) == 1:
             data = datas[0]
             
             if abs(lastAddr-data.addr) <= addrThreshold:
                 if len(currentBlock) == 0 and lastInstruction != None:
                     currentBlock.append(lastInstruction)
                 currentBlock.append(i)
             elif len(currentBlock) == 0:
                 blocks.append([lastInstruction])
             else:
                 blocks.append(currentBlock)
                 currentBlock = []
             lastAddr = data.addr
             lastInstruction = i
             
         if len(datas) == 2:
             pass
             # todo
         if len(datas) > 2:
             logging.warn('ouch len(datas) > 2')
     return blocks
Example #12
 def handle(self, input, addr):
     """  handle an incoming udp packet. """
     if cfg['udpseed']:
         data = ""
         for i in range(len(input)/16):
             try: data += crypt.decrypt(input[i*16:i*16+16])
             except Exception as ex:
                 logging.warn("udp - can't decrypt: %s" % str(ex))
                 data = input
                 break
     else: data = input
     if cfg['udpstrip']: data = strippedtxt(data)
     # check if udp is enabled and source ip is in udpallow list
     if cfg['udp'] and (addr[0] in cfg['udpallow'] or _inmask(addr[0])):
         # get printto and passwd data
         header = re.search('(\S+) (\S+) (.*)', data)
         if header:
             # check password
             if header.group(1) == cfg['udppassword']:
                 printto = header.group(2)    # is the nick/channel
                 # check if printto is in allowednicks
                 
                  if cfg['udpallowednicks'] and printto not in cfg['udpallowednicks']:
                     logging.warn("udp - udp denied %s" % printto )
                     return
                 logging.debug('udp - ' + str(addr[0]) +  " - udp allowed")
                 text = header.group(3)    # is the text
                 self.say(printto, text)
             else: logging.warn("udp - can't match udppasswd from " + str(addr))
         else: logging.warn("udp - can't match udp from " + str(addr[0]))
     else: logging.warn('udp - denied udp from ' + str(addr[0]))
Example #13
def OpenClient(client_id=None):
  """Opens the client, getting potential approval tokens.

  Args:
    client_id: The client id to open and look up approval tokens for.

  Returns:
    Tuple containing (client, token) objects, or (None, None) if no
    appropriate approval tokens were found.
  """
  token = access_control.ACLToken(username="******")
  try:
    token = ApprovalFind(client_id, token=token)
  except access_control.UnauthorizedAccess as e:
    logging.warn("No authorization found for access to client: %s", e)

  try:
    # Try and open with the token we managed to retrieve or the default.
    client = aff4.FACTORY.Open(rdfvalue.RDFURN(client_id), mode="r",
                               token=token)
    return client, token
  except access_control.UnauthorizedAccess:
    logging.warning("Unable to find a valid reason for client %s. You may need "
                    "to request approval.", client_id)
    return None, None
Example #14
    def get(self, section, key, **kwargs):
        section = str(section).lower()
        key = str(key).lower()

        d = self.defaults

        # first check environment variables
        option = self._get_env_var_option(section, key)
        if option:
            return option

        # ...then the config file
        if self.has_option(section, key):
            return expand_env_var(
                ConfigParser.get(self, section, key, **kwargs))

        # ...then commands
        option = self._get_cmd_option(section, key)
        if option:
            return option

        # ...then the defaults
        if section in d and key in d[section]:
            return expand_env_var(d[section][key])

        else:
            logging.warn("section/key [{section}/{key}] not found "
                         "in config".format(**locals()))

            raise AirflowConfigException(
                "section/key [{section}/{key}] not found "
                "in config".format(**locals()))
Example #15
 def _listen(self):
     """ listen for udp messages .. /msg via bot"""
     if not cfg['udp']: return
     fleet = getfleet()
     for botname in cfg['udpbots']:
         if not fleet.byname(botname): logging.info("udp - can't find %s bot" % botname)
     try:
         fleet.startok.wait(5)
         logging.warn('udp listening on %s %s' % (cfg['udphost'], cfg['udpport']))
         self.sock.bind((cfg['udphost'], cfg['udpport']))
         self.stop = 0
     except IOError:
         handle_exception()
         self.sock = None
         self.stop = 1
         return
     # loop on listening udp socket
     while not self.stop:
         try: input, addr = self.sock.recvfrom(64000)
         except socket.timeout: continue
         except Exception as ex:
             try: (errno, errstr) = ex
             except ValueError: errno = 0 ; errstr = str(ex)
             if errno == 4: logging.warn("udp - %s - %s" % (self.name, str(ex))) ; break
             if errno == 35: continue
             else: handle_exception() ; break
         if self.stop: break
         self.queue.put((input, addr))
     logging.info('udp - shutting down main loop')
Example #16
def refresh():
    'Load or refresh the library.'
    objs={}
    for root,dirs,files in os.walk(libDir):
        for f in files:
            ext=os.path.splitext(f)[-1].lower()
            ff=os.path.join(root,f)
            assert ff.startswith(libDir+'/')
            libPath=ff[len(libDir)+1:]
            if ext=='.xls':
                raise NotImplementedError('Reading library objects from XLS not yet implemented.')
                # load multiple objects from the XLS file
                for key,val in loadFromXLS(ff):
                    objs[tuple((libPath+'/'+key).split('/'))]=val
            else:
                try:
                    obj=woo._monkey.io.Object_load(None,ff)
                    objs[tuple(libPath.split('/'))]=obj
                except:
                    logging.warn('Loading library object from %s failed (skipped):\n\n'%ff)
                    import traceback
                    traceback.print_exc()
                    logging.warn('--------------------------------------------------------')
    global libObjs
    libObjs=objs
Example #17
def runCmd(cmd):
    logging.debug("Running %s", cmd)
    rv=os.system(cmd)
    if rv != 0:
        ce=CmdError(cmd, rv)
        logging.warn("Command failed:  %s", ce)
        raise ce
Example #18
        def inhibit(self, *a):
            """inhibit a gluster filesystem

            Mount glusterfs over a temporary mountpoint,
            change into the mount, and lazy unmount the
            filesystem.
            """
            mounted = False
            try:
                po = Popen(self.make_mount_argv(*a), **self.mountkw)
                self.handle_mounter(po)
                po.terminate_geterr()
                d = self.mntpt
                mounted = True
                logging.debug("auxiliary glusterfs mount in place")
                os.chdir(d)
                self.umount_l(d).terminate_geterr()
                mounted = False
            finally:
                try:
                    if mounted:
                        self.umount_l(d).terminate_geterr(fail_on_err=False)
                    self.cleanup_mntpt()
                except:
                    logging.warn("stale mount possibly left behind on " + d)
            logging.debug("auxiliary glusterfs mount prepared")
Example #19
def measure_timeslice_used(tree, device, timevals):
    """Measures the actual timeslice that was charged to the group. This
       is done because we don't charge the first seek to the group and so
       the service_time and actual charged time can diverge outside the
       allowed margin of error for some pathological workloads, masking
       any fair scheduling errors.

    """
    for container in tree:
        found_data = False
        for line in container['blkio_cgroup'].get_attr('timeslice_used'):
            parts = line.split()
            if parts[0] == device:
                timevals[container['name']] = int(parts[-1])
                found_data = True
        if not found_data:
            timevals[container['name']] = 0
            logging.warn('No data for container %s.' % container['name'])

        for line in container['blkio_cgroup'].get_attr('unaccounted_time'):
            parts = line.split()
            if parts[0] == device:
                timevals[container['name']] -= int(parts[-1])

        # Recurse to nested containers.
        measure_timeslice_used(container['nest'], device, timevals)
Example #20
	def update(self):
		args = list(map(self.getValue, self.__fields__))
		args.append(self.getValue(self.__primary_key__))

		rows = yield from execute(self.__update__, args)
		if rows != 1:
			logging.warn('failed to update by primary key: affected rows: %s' % rows)
Example #21
def get_compressor(ctools=COMPRESSING_TOOLS):
    global UPTO

    am_dir_pattern = "/usr/share/automake-*"
    am_files_pattern = am_dir_pattern + "/am/*.am"

    if len(glob.glob(am_dir_pattern)) == 0:  # automake is not installed.
        UPTO = STEP_PRECONFIGURE

        logging.warn("Automake not found. The process will go up to the step: " + UPTO)

        return ctools[-1]  # fallback to the default (gzip).

    for ct in ctools:
        # NOTE: bzip2 tries compressing stdin even when it is invoked with
        # the --version option, so give it null input (/dev/null).
        c_exists = 0 == subprocess.check_call(
            "%(command)s --version > /dev/null 2> /dev/null < /dev/null" % ct, shell=True
        )

        if c_exists:
            am_support_c = 0 == subprocess.check_call(
                "grep -q -e '^dist-%s:' %s 2> /dev/null" % (ct.command, am_files_pattern), shell=True
            )
            if am_support_c:
                return ct

    # gzip at least should exist, so we should never reach this point:
    raise RuntimeError("No compressor found! Aborting...")
Example #22
def _request(sock, headers, payload, bufsize=8192):
    request_data = 'POST /_gh/ HTTP/1.1\r\n'
    request_data += ''.join('%s: %s\r\n' % (k, v) for k, v in headers.items() if k not in skip_headers)
    request_data += '\r\n'

    if isinstance(payload, bytes):
        sock.send(request_data.encode() + payload)
    elif hasattr(payload, 'read'):
        sock.send(request_data)
        while True:
            data = payload.read(bufsize)
            if not data:
                break
            sock.send(data)
    else:
        raise TypeError('_request(payload) must be a string or buffer, not %r' % type(payload))

    response = httplib.HTTPResponse(sock, buffering=True)
    try:
        orig_timeout = sock.gettimeout()
        sock.settimeout(90)
        response.begin()
        sock.settimeout(orig_timeout)
    except httplib.BadStatusLine as e:
        logging.warn("_request bad status line:%r", e)
        response = None
    except Exception as e:
        logging.exception("_request:%r", e)
    return response
Example #23
    def setup(self):
        exp = self.exp
        data_obj = exp.create_data_object('in_data', 'tomo')

        data_obj.backing_file = \
            h5py.File(self.exp.meta_data.get_meta_data("data_file"), 'r')

        data_obj.data = data_obj.backing_file[self.parameters['data_path']]

        self.__set_dark_and_flat(data_obj)

        if self.parameters['3d_to_4d']:
            if not self.parameters['angles']:
                raise Exception('Angles are required in the loader.')
            self.__setup_4d(data_obj)
            n_angles = self.__set_rotation_angles(data_obj)
            shape = self.__setup_3d_to_4d(data_obj, n_angles)
        else:
            if len(data_obj.data.shape) == 3:
                shape = self.__setup_3d(data_obj)
            else:
                shape = self.__setup_4d(data_obj)
            self.__set_rotation_angles(data_obj)

        try:
            control = data_obj.backing_file['entry1/tomo_entry/control/data']
            data_obj.meta_data.set_meta_data("control", control[...])
        except:
            logging.warn("No Control information available")

        self.__check_angles
        data_obj.set_original_shape(shape)
        self.set_data_reduction_params(data_obj)
Example #24
 def regjob(e, xte, pb):
     if pb.wait():
         logging.debug("synced " + e)
         self.sendmark_regular(e, xte)
         return True
     else:
         logging.warn("failed to sync " + e)
Example #25
def import_key(stream):
	"""Run C{gpg --import} with this stream as stdin."""
	errors = tempfile.TemporaryFile()

	child = _run_gpg(['--quiet', '--import', '--batch'],
				stdin = stream, stderr = errors)

	status = child.wait()

	errors.seek(0)
	error_messages = errors.read().strip()
	errors.close()

	if error_messages:
		import codecs
		decoder = codecs.lookup('utf-8')
		error_messages = decoder.decode(error_messages, errors = 'replace')[0]

	if status != 0:
		if error_messages:
			raise SafeException(_("Errors from 'gpg --import':\n%s") % error_messages)
		else:
			raise SafeException(_("Non-zero exit code %d from 'gpg --import'") % status)
	elif error_messages:
		warn(_("Warnings from 'gpg --import':\n%s") % error_messages)
Example #26
 def _sweep_timeout(self):
     # tornado's timeout memory management is more flexible than we need;
     # we just need a queue sorted by last_activity, and it's faster than
     # heapq: in fact we can do O(1) insertion/removal, so we invent our own
     if self._timeouts:
         logging.log(shell.VERBOSE_LEVEL, 'sweeping timeouts')
         now = time.time()
         length = len(self._timeouts)
         pos = self._timeout_offset
         while pos < length:
             handler = self._timeouts[pos]
             if handler:
                 if now - handler.last_activity < self._timeout:
                     break
                 else:
                     if handler.remote_address:
                         logging.warn('timed out: %s:%d' %
                                      handler.remote_address)
                     else:
                         logging.warn('timed out')
                     handler.destroy()
                     self._timeouts[pos] = None  # free memory
                     pos += 1
             else:
                 pos += 1
         if pos > TIMEOUTS_CLEAN_SIZE and pos > length >> 1:
              # compact the timeout queue once the consumed prefix grows
              # past half of its length
             self._timeouts = self._timeouts[pos:]
             for key in self._handler_to_timeouts:
                 self._handler_to_timeouts[key] -= pos
             pos = 0
         self._timeout_offset = pos
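
A hedged sketch of the bookkeeping that makes the sweep above work: a refreshed handler is appended to the end of the activity-ordered list and its index remembered, so removal is O(1) (the stale slot is set to None, which the sweep skips). The method body is an assumption about the surrounding class (and assumes time is imported), not copied from it.

 def update_activity(self, handler):
     handler.last_activity = time.time()
     index = self._handler_to_timeouts.get(hash(handler), -1)
     if index >= 0:
         self._timeouts[index] = None        # free the old slot; the sweep skips None
     self._timeouts.append(handler)          # list stays ordered by last_activity
     self._handler_to_timeouts[hash(handler)] = len(self._timeouts) - 1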
Example #27
 def handle_event(self, sock, fd, event):
     # handle events and dispatch to handlers
     if sock:
         logging.log(shell.VERBOSE_LEVEL, 'fd %d %s', fd,
                     eventloop.EVENT_NAMES.get(event, event))
     if sock == self._server_socket:
         if event & eventloop.POLL_ERR:
             # TODO
             raise Exception('server_socket error')
         try:
             logging.debug('accept')
             conn = self._server_socket.accept()
             TCPRelayHandler(self, self._fd_to_handlers,
                             self._eventloop, conn[0], self._config,
                             self._dns_resolver, self._is_local)
         except (OSError, IOError) as e:
             error_no = eventloop.errno_from_exception(e)
             if error_no in (errno.EAGAIN, errno.EINPROGRESS,
                             errno.EWOULDBLOCK):
                 return
             else:
                 shell.print_exception(e)
                 if self._config['verbose']:
                     traceback.print_exc()
     else:
         if sock:
             handler = self._fd_to_handlers.get(fd, None)
             if handler:
                 handler.handle_event(sock, event)
         else:
             logging.warn('poll removed fd')
Example #28
def request(headers={}, payload=None):
    max_retry = 3
    for i in range(max_retry):
        ssl_sock = None
        try:
            ssl_sock = https_manager.create_ssl_connection()
            if not ssl_sock:
                logging.debug('create_ssl_connection fail')
                continue

            if ssl_sock.host == '':
                ssl_sock.appid = appid_manager.get_appid()
                if not ssl_sock.appid:
                    raise GAE_Exception(1, "no appid can use")
                headers['Host'] = ssl_sock.appid + ".appspot.com"
                ssl_sock.host = headers['Host']
            else:
                headers['Host'] = ssl_sock.host


            response = _request(ssl_sock, headers, payload)
            if not response:
                continue

            response.ssl_sock = ssl_sock
            return response

        except Exception as e:
            logging.warn('request failed:%s', e)
            if ssl_sock:
                ssl_sock.close()
    raise GAE_Exception(2, "try max times")
Example #29
 def handle_event(self, sock, event):
     # handle all events in this handler and dispatch them to methods
     if self._stage == STAGE_DESTROYED:
         logging.debug('ignore handle_event: destroyed')
         return
     # order is important
     if sock == self._remote_sock:
         if event & eventloop.POLL_ERR:
             self._on_remote_error()
             if self._stage == STAGE_DESTROYED:
                 return
         if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
             self._on_remote_read()
             if self._stage == STAGE_DESTROYED:
                 return
         if event & eventloop.POLL_OUT:
             self._on_remote_write()
     elif sock == self._local_sock:
         if event & eventloop.POLL_ERR:
             self._on_local_error()
             if self._stage == STAGE_DESTROYED:
                 return
         if event & (eventloop.POLL_IN | eventloop.POLL_HUP):
             self._on_local_read()
             if self._stage == STAGE_DESTROYED:
                 return
         if event & eventloop.POLL_OUT:
             self._on_local_write()
     else:
         logging.warn('unknown socket')
Example #30
	def save(self):
		args = list(map(self.getValueOrDefault, self.__fields__))
		args.append(self.getValueOrDefault(self.__primary_key__))

		rows = yield from execute(self.__insert__, args)
		if rows != 1:
			logging.warn('failed to insert record: affected rows: %s' % rows)
Example #31
 async def __call__(self, request):
     kw = None  # kw will hold the parameters extracted from the request
     if self._has_named_kw_arg or self._has_var_kw_arg:  # the view function takes named keyword args or **kwargs
         if request.method == 'POST':
             # choose a parser based on the request's content_type:
             if request.content_type is None:  # no Content-Type, return a 400 error
                 return web.HTTPBadRequest(text='Missing Content_Type.')
             ct = request.content_type.lower()  # lower-case for easier checks
             if ct.startswith('application/json'):  # JSON payload
                 params = await request.json()  # parse only the JSON body
                 if not isinstance(params, dict):  # request.json() should return a dict
                     return web.HTTPBadRequest(text='JSON body must be object.')
                 kw = params
             # form-encoded request bodies
             elif ct.startswith('application/x-www-form-urlencoded') or ct.startswith('multipart/form-data'):
                 params = await request.post()  # parsed POST content, a dict-like object
                 kw = dict(**params)  # normalize into a plain dict
             else:
                 return web.HTTPBadRequest(text='Unsupported Content-Type: %s' % request.content_type)
         if request.method == 'GET':
             qs = request.query_string  # the query string after '?', as a string
             if qs:
                 kw = dict()
                 # Parse the key/value pairs after '?' in the URL, e.g.:
                 #   qs = 'first=f,s&second=s'
                 #   parse.parse_qs(qs, True).items()
                 #   >>> dict([('first', ['f,s']), ('second', ['s'])])
                 for k, v in parse.parse_qs(qs, True).items():  # mapping of query variables to values; True keeps blank values
                     kw[k] = v[0]
     if kw is None:  # the request carried no parameters
         # request.match_info is a dict: each {variable} field of a dynamic
         # route maps to the value matched from the request path.
         # With a route /a/{name}/c, a request for /a/jack/c gives
         # request.match_info == {'name': 'jack'}
         kw = dict(**request.match_info)
     else:  # the request has parameters
         if self._has_named_kw_arg and (not self._has_var_kw_arg):  # the view function has named keyword args but no **kwargs
             copy = dict()
             # keep only the named keyword arguments
             for name in self._named_kw_arg:
                 if name in kw:
                     copy[name] = kw[name]
             kw = copy  # kw now holds only the named keyword arguments
         # merge the parameters from request.match_info into kw
         for k, v in request.match_info.items():
             # warn when a kw parameter duplicates one from match_info
             if k in kw:
                 logging.warn('Duplicate arg name in named arg and kw args: %s' % k)
             kw[k] = v
     if self._has_request_arg:  # the view function takes a request parameter
         kw['request'] = request
     if self._required_kw_args:  # the view function has named keyword args without default values
         for name in self._required_kw_args:
             if name not in kw:  # a required argument was not supplied, report an error
                 return web.HTTPBadRequest(text='Missing argument: %s' % name)
     # at this point kw holds exactly the arguments the view function fn can be called with;
     # the parameters from the request have finally been handed to the view function
     logging.info('call with args: %s' % str(kw))
     #try:
     r = await self._func(**kw)
     return r
Example #32
def main(**kwargs):
    """
    Entry point for dx-build-app(let).

    Don't call this function as a subroutine in your program! It is liable to
    sys.exit your program when it detects certain error conditions, so you
    can't recover from those as you could if it raised exceptions. Instead,
    call dx_build_app.build_and_upload_locally which provides the real
    implementation for dx-build-app(let) but is easier to use in your program.
    """

    if len(sys.argv) > 0:
        if sys.argv[0].endswith('dx-build-app'):
            logging.warn('Warning: dx-build-app has been replaced with "dx build --create-app". Please update your scripts.')
        elif sys.argv[0].endswith('dx-build-applet'):
            logging.warn('Warning: dx-build-applet has been replaced with "dx build". Please update your scripts.')

    if len(kwargs) == 0:
        args = parser.parse_args()
    else:
        args = parser.parse_args(**kwargs)

    if dxpy.AUTH_HELPER is None and not args.dry_run:
        parser.error('Authentication required to build an executable on the platform; please run "dx login" first')

    if args.src_dir is None:
        args.src_dir = os.getcwd()
        if USING_PYTHON2:
            args.src_dir = args.src_dir.decode(sys.getfilesystemencoding())

    if args.mode == "app" and args.destination != '.':
        parser.error("--destination cannot be used when creating an app (only an applet)")

    if args.dx_toolkit_autodep in ['beta', 'unstable']:
        logging.warn('The --dx-toolkit-beta-autodep and --dx-toolkit-unstable-autodep flags have no effect and will be removed at some date in the future.')

    if args.overwrite and args.archive:
        parser.error("Options -f/--overwrite and -a/--archive cannot be specified together")

    if args.run is not None and args.dry_run:
        parser.error("Options --dry-run and --run cannot be specified together")

    if args.run and args.remote and args.mode == 'app':
        parser.error("Options --remote, --app, and --run cannot all be specified together. Try removing --run and then separately invoking dx run.")

    executable_id = _build_app(args,
                               json.loads(args.extra_args) if args.extra_args else {})

    if args.run is not None:

        if executable_id is None:
            raise AssertionError('Expected executable_id to be set here')

        try:
            subprocess.check_call(['dx', 'run', executable_id, '--priority', 'high'] + args.run)
        except subprocess.CalledProcessError as e:
            sys.exit(e.returncode)
        except:
            err_exit()

    return
Example #33
def _verify_app_source_dir_impl(src_dir, temp_dir, mode, enforce=True):
    """Performs syntax and lint checks on the app source.

    Precondition: the dxapp.json file exists and can be parsed.
    """
    _lint(os.path.join(src_dir, "dxapp.json"), mode)

    # Check that the entry point file parses as the type it is going to
    # be interpreted as. The extension is irrelevant.
    manifest = json.load(open(os.path.join(src_dir, "dxapp.json")))
    if "runSpec" in manifest:
        if "interpreter" not in manifest['runSpec']:
            raise dxpy.app_builder.AppBuilderException('runSpec.interpreter field was not present')

        if manifest['runSpec']['interpreter'] in ["python2.7", "bash"]:
            if "file" in manifest['runSpec']:
                entry_point_file = os.path.abspath(os.path.join(src_dir, manifest['runSpec']['file']))
                try:
                    _check_file_syntax(entry_point_file, temp_dir, override_lang=manifest['runSpec']['interpreter'], enforce=enforce)
                except IOError as e:
                    raise dxpy.app_builder.AppBuilderException(
                        'Could not open runSpec.file=%r. The problem was: %s' % (entry_point_file, e))
                except DXSyntaxError:
                    raise dxpy.app_builder.AppBuilderException('Entry point file %s has syntax errors, see above for details. Rerun with --no-check-syntax to proceed anyway.' % (entry_point_file,))
            elif "code" in manifest['runSpec']:
                try:
                    _check_syntax(manifest['runSpec']['code'], manifest['runSpec']['interpreter'], temp_dir, enforce=enforce)
                except DXSyntaxError:
                    raise dxpy.app_builder.AppBuilderException('Code in runSpec.code has syntax errors, see above for details. Rerun with --no-check-syntax to proceed anyway.')

        if 'execDepends' in manifest['runSpec']:
            if not isinstance(manifest['runSpec']['execDepends'], list):
                raise dxpy.app_builder.AppBuilderException('Expected runSpec.execDepends to be an array. Rerun with --no-check-syntax to proceed anyway.')
            if not all(isinstance(dep, dict) for dep in manifest['runSpec']['execDepends']):
                raise dxpy.app_builder.AppBuilderException('Expected runSpec.execDepends to be an array of hashes. Rerun with --no-check-syntax to proceed anyway.')
            if any(dep.get('package_manager', 'apt') != 'apt' for dep in manifest['runSpec']['execDepends']):
                if not isinstance(manifest.get('access'), dict) or 'network' not in manifest['access']:
                    msg = '\n'.join(['runSpec.execDepends specifies non-APT dependencies, but no network access spec is given.',
                    'Add {"access": {"network": ["*"]}} to allow dependencies to install.',
                    'See https://wiki.dnanexus.com/Developer-Tutorials/Request-Additional-App-Resources#Network-Access.',
                    'Rerun with --no-check-syntax to proceed anyway.'])
                    raise dxpy.app_builder.AppBuilderException(msg)

    if 'authorizedUsers' in manifest:
        if not isinstance(manifest['authorizedUsers'], list) or isinstance(manifest['authorizedUsers'], basestring):
            raise dxpy.app_builder.AppBuilderException('Expected authorizedUsers to be a list of strings')
        for thing in manifest['authorizedUsers']:
            if thing != 'PUBLIC' and (not isinstance(thing, basestring) or not re.match("^(org-|user-)", thing)):
                raise dxpy.app_builder.AppBuilderException('authorizedUsers field contains an entry which is not either the string "PUBLIC" or a user or org ID')

    # Check all other files that are going to be in the resources tree.
    # For these we detect the language based on the filename extension.
    # Obviously this check can have false positives, since the app can
    # execute (or not execute!) all these files in whatever way it
    # wishes, e.g. it could use Python != 2.7 or some non-bash shell.
    # Consequently errors here are non-fatal.
    files_with_problems = []
    for dirpath, dirnames, filenames in os.walk(os.path.abspath(os.path.join(src_dir, "resources"))):
        for filename in filenames:
            # On Mac OS, the resource fork for "FILE.EXT" gets tarred up
            # as a file named "._FILE.EXT". To a naive check this
            # appears to be a file of the same extension. Therefore, we
            # exclude these from syntax checking since they are likely
            # to not parse as whatever language they appear to be.
            if not filename.startswith("._"):
                try:
                    _check_file_syntax(os.path.join(dirpath, filename), temp_dir, enforce=True)
                except IOError as e:
                    raise dxpy.app_builder.AppBuilderException(
                        'Could not open file in resources directory %r. The problem was: %s' %
                        (os.path.join(dirpath, filename), e)
                    )
                except DXSyntaxError:
                    # Suppresses errors from _check_file_syntax so we
                    # only print a nice error message
                    files_with_problems.append(os.path.join(dirpath, filename))

    if files_with_problems:
        # Make a message of the form:
        #    "/path/to/my/app.py"
        # OR "/path/to/my/app.py and 3 other files"
        files_str = files_with_problems[0] if len(files_with_problems) == 1 else (files_with_problems[0] + " and " + str(len(files_with_problems) - 1) + " other file" + ("s" if len(files_with_problems) > 2 else ""))
        logging.warn('%s contained syntax errors, see above for details' % (files_str,))
Example #34
def gen_audio_features(item, config):
    """Generate audio features and transformations
    Args:
        item (Dict): dictionary containing the attributes to encode.
        config (Dict): configuration dictionary.
    Returns:
        (bool): keep this sample or not.
        mel (ndarray): mel matrix in np.float32.
        energy (ndarray): energy audio profile.
        f0 (ndarray): fundamental frequency.
        item (Dict): dictionary containing the updated attributes.
    """
    # get info from sample.
    audio = item["audio"]
    utt_id = item["utt_id"]
    rate = item["rate"]

    # check audio properties
    assert len(audio.shape) == 1, f"{utt_id} seems to be multi-channel signal."
    assert np.abs(
        audio).max() <= 1.0, f"{utt_id} is different from 16 bit PCM."

    # check sample rate
    if rate != config["sampling_rate"]:
        audio = librosa.resample(audio, rate, config["sampling_rate"])
        logging.info(
            f"{utt_id} sampling rate is {rate}, not {config['sampling_rate']}, we resample it."
        )

    # trim silence
    if config["trim_silence"]:
        if "trim_mfa" in config and config["trim_mfa"]:
            _, item["text_ids"], audio = ph_based_trim(
                config,
                utt_id,
                item["text_ids"],
                item["raw_text"],
                audio,
                config["hop_size"],
            )
            if (
                    len(audio) < 1
            ):  # very short files can get trimmed fully if MFA didn't extract any tokens; for LibriTTS, maybe keep only longer files?
                logging.warning(
                    f"File {utt_id} has only silence or MFA didn't extract any tokens"
                )
                return False, None, None, None, item
        else:
            audio, _ = librosa.effects.trim(
                audio,
                top_db=config["trim_threshold_in_db"],
                frame_length=config["trim_frame_size"],
                hop_length=config["trim_hop_size"],
            )

    # resample audio if necessary
    if "sampling_rate_for_feats" in config:
        audio = librosa.resample(audio, rate,
                                 config["sampling_rate_for_feats"])
        sampling_rate = config["sampling_rate_for_feats"]
        assert (
            config["hop_size"] * config["sampling_rate_for_feats"] % rate == 0
        ), "'hop_size' must be 'int' value. Please check if 'sampling_rate_for_feats' is correct."
        hop_size = config["hop_size"] * config[
            "sampling_rate_for_feats"] // rate
    else:
        sampling_rate = config["sampling_rate"]
        hop_size = config["hop_size"]

    # get spectrogram
    D = librosa.stft(
        audio,
        n_fft=config["fft_size"],
        hop_length=hop_size,
        win_length=config["win_length"],
        window=config["window"],
        pad_mode="reflect",
    )
    S, _ = librosa.magphase(D)  # (#bins, #frames)

    # get mel basis
    fmin = 0 if config["fmin"] is None else config["fmin"]
    fmax = sampling_rate // 2 if config["fmax"] is None else config["fmax"]
    mel_basis = librosa.filters.mel(
        sr=sampling_rate,
        n_fft=config["fft_size"],
        n_mels=config["num_mels"],
        fmin=fmin,
        fmax=fmax,
    )
    mel = np.log10(np.maximum(np.dot(mel_basis, S),
                              1e-10)).T  # (#frames, #bins)

    # check audio and feature length
    audio = np.pad(audio, (0, config["fft_size"]), mode="edge")
    audio = audio[:len(mel) * hop_size]
    assert len(mel) * hop_size == len(audio)

    # extract raw pitch
    _f0, t = pw.dio(
        audio.astype(np.double),
        fs=sampling_rate,
        f0_ceil=fmax,
        frame_period=1000 * hop_size / sampling_rate,
    )
    f0 = pw.stonemask(audio.astype(np.double), _f0, t, sampling_rate)
    if len(f0) >= len(mel):
        f0 = f0[:len(mel)]
    else:
        f0 = np.pad(f0, (0, len(mel) - len(f0)))

    # extract energy
    energy = np.sqrt(np.sum(S**2, axis=0))
    assert len(mel) == len(f0) == len(energy)

    # remove outlier f0/energy
    f0 = remove_outlier(f0)
    energy = remove_outlier(energy)

    # apply global gain
    if config["global_gain_scale"] > 0.0:
        audio *= config["global_gain_scale"]
    if np.abs(audio).max() >= 1.0:
        logging.warn(
            f"{utt_id} causes clipping. It is better to reconsider global gain scale value."
        )
    item["audio"] = audio
    item["mel"] = mel
    item["f0"] = f0
    item["energy"] = energy
    return True, mel, energy, f0, item
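
A hedged usage sketch for the extractor above: the item and config keys mirror the ones the function reads, but the loader (soundfile), the file name, and the concrete values are illustrative assumptions.

import soundfile as sf  # assumed audio loader, not from the original module

audio, rate = sf.read("sample.wav")  # mono float signal in [-1.0, 1.0]
item = {"audio": audio, "utt_id": "sample", "rate": rate}
config = {
    "sampling_rate": 22050, "trim_silence": False, "hop_size": 256,
    "fft_size": 1024, "win_length": None, "window": "hann",
    "num_mels": 80, "fmin": 0, "fmax": None, "global_gain_scale": 0.0,
}
keep, mel, energy, f0, item = gen_audio_features(item, config)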
Example #35
def error(bot, update, error):
    logging.warn('Update:"%s" caused Error:"%s"' % (update, error))
Example #36
from __future__ import (absolute_import,)

import os
import logging
import re

from setuptools import setup, find_packages

readme_dir = os.path.dirname(__file__)
readme_path = os.path.join(readme_dir, 'README.md')

try:
    with open(readme_path, 'r') as f:
        readme_markdown = f.read()
except:
    logging.warn("Failed to load %s" % readme_path)
    readme_markdown = ""

try:
    import pypandoc
    readme_restructured = pypandoc.convert(readme_markdown, to='rst', format='md')
except:
    readme_restructured = readme_markdown
    logging.warn("Conversion of long_description from MD to RST failed")
    pass


with open('isovar/__init__.py', 'r') as f:
    version = re.search(
        r'^__version__\s*=\s*[\'"]([^\'"]*)[\'"]',
        f.read(),
Example #37
 def testCaseOutputV2(self):
     if vobject is None:
         logging.warn(
             "QTI v1 to v2 migration tests skipped: vobject required")
         return
     self.cp.manifest.root.SetID('outputv2')
     dPath = os.path.join(self.dataPath, 'input')
     fList = []
     for f in os.listdir(dPath):
         if self.cp.IgnoreFile(f):
             continue
         stem, ext = os.path.splitext(f)
         if ext.lower() == '.xml':
             fList.append(f)
     fList.sort()
     for f in fList:
         doc = QTIDocument(
             baseURI=str(uri.URI.from_path(os.path.join(dPath, f))))
         doc.Read()
         doc.MigrateV2(self.cp)
     # Having migrated everything in the input folder, we now check our CP
     # against the output
     cp2 = imscp.ContentPackage(os.path.join(self.dataPath, 'outputv2'))
     # To do....
     # Compare the manifests
     # Compare each file
     fList1 = self.cp.fileTable.keys()
     fList1.sort()
     fList2 = cp2.fileTable.keys()
     fList2.sort()
     if fList1 != fList2:
         diagnosis = []
         for f in fList1:
             if f not in fList2:
                 diagnosis.append("Extra file found: %s" % f)
         for f in fList2:
             if f not in fList1:
                 diagnosis.append("Missing file: %s" % f)
         self.fail("File lists:\n  %s" % string.join(diagnosis, '\n  '))
     logging.debug(str(self.cp.manifest))
     logging.debug(str(cp2.manifest))
     output = self.cp.manifest.DiffString(cp2.manifest)
     self.assertTrue(self.cp.manifest.root == cp2.manifest.root,
                     "Manifests differ:\n%s" % output)
     checkFiles = {}
     for r in cp2.manifest.root.Resources.Resource:
         # Check the entry-point of each resource
         f = r.GetEntryPoint()
         if f:
             fPath = f.PackagePath(cp2)
             qtiDoc = qtiv2.core.QTIDocument(baseURI=str(
                 uri.URI.from_virtual_path(self.cp.dPath.join(fPath))))
             qtiDoc.Read()
             # print str(qtiDoc)
             qtiDoc2 = qtiv2.core.QTIDocument(baseURI=str(
                 uri.URI.from_virtual_path(cp2.dPath.join(fPath))))
             qtiDoc2.Read()
             # print str(qtiDoc2)
             output = qtiDoc.DiffString(qtiDoc2)
             result = (qtiDoc.root == qtiDoc2.root)
             if not result and output is None:
                 # This should not happen
                 self.PrintPrettyWeird(qtiDoc.root, qtiDoc2.root)
             self.assertTrue(
                 qtiDoc.root == qtiDoc2.root,
                 "QTI Files differ at %s (actual output shown first)\n%s" %
                 (fPath, output))
         for f in r.File:
             if f.href is None or f.href.is_absolute():
                 continue
             fPath = f.PackagePath(cp2)
             fAbsPath = self.cp.dPath.join(fPath)
             fAbsPath2 = cp2.dPath.join(fPath)
             baseURI = str(uri.URI.from_virtual_path(fAbsPath))
             baseURI2 = str(uri.URI.from_virtual_path(fAbsPath2))
             if fAbsPath.splitext()[1].lower() == '.xml':
                 # Two xml files, compare with simple XMLElement
                 doc = xml.Document(baseURI=baseURI)
                 doc.Read()
                 doc2 = xml.Document(baseURI=baseURI2)
                 doc2.Read()
                 output = doc.DiffString(doc2)
                 result = (doc.root == doc2.root)
                 if not result and output is None:
                     # This should not happen
                     self.PrintPrettyWeird(doc.root, doc2.root)
                 self.assertTrue(
                     doc.root == doc2.root,
                     "XML Files differ at %s (actual output shown first)\n%s"
                     % (fPath, output))
             else:
                 # Binary compare the two files.
                 f = fAbsPath.open('rb')
                 f2 = fAbsPath2.open('rb')
                 while True:
                     fData = f.read(1024)
                     fData2 = f2.read(1024)
                     self.assertTrue(fData == fData2,
                                     "Binary files don't match: %s" % fPath)
                     if not fData:
                         break
Example #38
 async def remove(self):
     args = [self.getValue(self.__primary_key__)]
     rows = await execute(self.__delete__, args)
     if rows != 1:
         logging.warn('failed to remove by primary key: affected rows: %s' %
                      rows)
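
A hedged sketch of the execute() coroutine this method (and the earlier save/update methods) awaits, assuming an aiomysql-style connection pool held in a module-level __pool and the usual '?' to '%s' placeholder translation; none of this is taken from the original module.

async def execute(sql, args):
    async with __pool.acquire() as conn:            # assumed aiomysql pool
        async with conn.cursor() as cur:
            await cur.execute(sql.replace('?', '%s'), args or ())
            affected = cur.rowcount                 # rows touched by the statement
        await conn.commit()
    return affected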
Example #39
 def post(self):
     logging.warn("error response")
     self.set_status(500)
Example #40
    def do_generate(self, lane):
        feeds = []
        annotator = self.app.manager.annotator(lane)
        if isinstance(lane, Lane) and lane.parent:
            languages = lane.language_key
            lane_name = lane.name
        else:
            languages = None
            lane_name = None

        library = lane.library
        url = self.app.manager.cdn_url_for(
            "feed",
            languages=languages,
            lane_name=lane_name,
            library_short_name=library.short_name)

        default_order = library.default_facet(Facets.ORDER_FACET_GROUP_NAME)
        allowed_orders = library.enabled_facets(Facets.ORDER_FACET_GROUP_NAME)
        chosen_orders = self.orders or [default_order]

        default_availability = library.default_facet(
            Facets.AVAILABILITY_FACET_GROUP_NAME)
        allowed_availabilities = library.enabled_facets(
            Facets.AVAILABILITY_FACET_GROUP_NAME)
        chosen_availabilities = self.availabilities or [default_availability]

        default_collection = library.default_facet(
            Facets.COLLECTION_FACET_GROUP_NAME)
        allowed_collections = library.enabled_facets(
            Facets.COLLECTION_FACET_GROUP_NAME)
        chosen_collections = self.collections or [default_collection]

        for order in chosen_orders:
            if order not in allowed_orders:
                logging.warn("Ignoring unsupported ordering %s" % order)
                continue
            for availability in chosen_availabilities:
                if availability not in allowed_availabilities:
                    logging.warn("Ignoring unsupported availability %s" %
                                 availability)
                    continue
                for collection in chosen_collections:
                    if collection not in allowed_collections:
                        logging.warn("Ignoring unsupported collection %s" %
                                     collection)
                        continue
                    pagination = Pagination.default()
                    facets = Facets(library=library,
                                    collection=collection,
                                    availability=availability,
                                    order=order,
                                    order_ascending=True)
                    title = lane.display_name
                    for pagenum in range(0, self.pages):
                        yield AcquisitionFeed.page(self._db,
                                                   title,
                                                   url,
                                                   lane,
                                                   annotator,
                                                   facets=facets,
                                                   pagination=pagination,
                                                   force_refresh=True)
                        pagination = pagination.next_page
Example #41
0
def get_source_models(oqparam, gsim_lt, source_model_lt, in_memory=True):
    """
    Build all the source models generated by the logic tree.

    :param oqparam:
        an :class:`openquake.commonlib.oqvalidation.OqParam` instance
    :param gsim_lt:
        a :class:`openquake.commonlib.logictree.GsimLogicTree` instance
    :param source_model_lt:
        a :class:`openquake.commonlib.logictree.SourceModelLogicTree` instance
    :param in_memory:
        if True, keep in memory the sources, else just collect the TRTs
    :returns:
        an iterator over :class:`openquake.commonlib.logictree.SourceModel`
        tuples
    """
    converter = sourceconverter.SourceConverter(
        oqparam.investigation_time,
        oqparam.rupture_mesh_spacing,
        oqparam.complex_fault_mesh_spacing,
        oqparam.width_of_mfd_bin,
        oqparam.area_source_discretization)
    psr = nrml.SourceModelParser(converter)

    # consider only the effective realizations
    smlt_dir = os.path.dirname(source_model_lt.filename)
    for sm in source_model_lt.gen_source_models(gsim_lt):
        src_groups = []
        for name in sm.names.split():
            fname = os.path.abspath(os.path.join(smlt_dir, name))
            if in_memory:
                apply_unc = source_model_lt.make_apply_uncertainties(sm.path)
                logging.info('Reading %s', fname)
                src_groups.extend(psr.parse_src_groups(fname, apply_unc))
            else:  # just collect the TRT models
                smodel = nrml.read(fname).sourceModel
                if smodel[0].tag.endswith('sourceGroup'):  # NRML 0.5 format
                    for sg_node in smodel:
                        sg = sourceconverter.SourceGroup(
                            sg_node['tectonicRegion'])
                        sg.sources = sg_node.nodes
                        src_groups.append(sg)
                else:  # NRML 0.4 format: smodel is a list of source nodes
                    src_groups.extend(
                        sourceconverter.SourceGroup.collect(smodel))
        num_sources = sum(len(sg.sources) for sg in src_groups)
        sm.src_groups = src_groups
        trts = [mod.trt for mod in src_groups]
        source_model_lt.tectonic_region_types.update(trts)
        logging.info(
            'Processed source model %d with %d potential gsim path(s) and %d '
            'sources', sm.ordinal + 1, sm.num_gsim_paths, num_sources)

        gsim_file = oqparam.inputs.get('gsim_logic_tree')
        if gsim_file:  # check TRTs
            for src_group in src_groups:
                if src_group.trt not in gsim_lt.values:
                    raise ValueError(
                        "Found in %r a tectonic region type %r inconsistent "
                        "with the ones in %r" % (sm, src_group.trt, gsim_file))
        yield sm

    # check investigation_time
    psr.check_nonparametric_sources(oqparam.investigation_time)

    # log if some source file is being used more than once
    dupl = 0
    for fname, hits in psr.fname_hits.items():
        if hits > 1:
            logging.info('%s has been considered %d times', fname, hits)
            if not psr.changed_sources:
                dupl += hits
    if dupl and not oqparam.optimize_same_id_sources:
        logging.warn('You are doing redundant calculations: please make sure '
                     'that different sources have different IDs and set '
                     'optimize_same_id_sources=true in your .ini file')
Example #42
0
 async def save(self):
     args = list(map(self.getValueOrDefault, self.__fields__))
     args.append(self.getValueOrDefault(self.__primary_key__))
     rows = await execute(self.__insert__, args)
     if rows != 1:
         logging.warn('failed to insert record: affected rows: %s' % rows)
Example #43
0
def corner(xs,
           bins=20,
           range=None,
           weights=None,
           color="k",
           smooth=None,
           smooth1d=None,
           labels=None,
           label_kwargs=None,
           show_titles=False,
           title_fmt=".2f",
           title_kwargs=None,
           truths=None,
           truth_color="#4682b4",
           scale_hist=False,
           quantiles=None,
           verbose=False,
           fig=None,
           max_n_ticks=5,
           top_ticks=False,
           use_math_text=False,
           hist_kwargs=None,
           **hist2d_kwargs):
    """
    Make a *sick* corner plot showing the projections of a data set in a
    multi-dimensional space. kwargs are passed to hist2d() or used for
    `matplotlib` styling.

    Parameters
    ----------
    xs : array_like (nsamples, ndim)
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in a simple histogram. For a 2-D array, the zeroth
        axis is the list of samples and the next axis are the dimensions of
        the space.

    bins : int or array_like (ndim,) (optional)
        The number of bins to use in histograms, either as a fixed value for
        all dimensions, or as a list of integers for each dimension.

    weights : array_like (nsamples,)
        The weight of each sample. If `None` (default), samples are given
        equal weight.

    color : str (optional)
        A ``matplotlib`` style color for all histograms.

    smooth, smooth1d : float (optional)
       The standard deviation for Gaussian kernel passed to
       `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
       respectively. If `None` (default), no smoothing is applied.

    labels : iterable (ndim,) (optional)
        A list of names for the dimensions. If ``xs`` is a
        ``pandas.DataFrame``, labels will default to column names.

    label_kwargs : dict (optional)
        Any extra keyword arguments to send to the `set_xlabel` and
        `set_ylabel` methods.

    show_titles : bool (optional)
        Displays a title above each 1-D histogram showing the 0.5 quantile
        with the upper and lower errors supplied by the quantiles argument.

    title_fmt : string (optional)
        The format string for the quantiles given in titles. If you explicitly
        set ``show_titles=True`` and ``title_fmt=None``, the labels will be
        shown as the titles. (default: ``.2f``)

    title_kwargs : dict (optional)
        Any extra keyword arguments to send to the `set_title` command.

    range : iterable (ndim,) (optional)
        A list where each element is either a length 2 tuple containing
        lower and upper bounds or a float in range (0., 1.)
        giving the fraction of samples to include in bounds, e.g.,
        [(0.,10.), (1.,5), 0.999, etc.].
        If a fraction, the bounds are chosen to be equal-tailed.

    truths : iterable (ndim,) (optional)
        A list of reference values to indicate on the plots.  Individual
        values can be omitted by using ``None``.

    truth_color : str (optional)
        A ``matplotlib`` style color for the ``truths`` markers.

    scale_hist : bool (optional)
        Should the 1-D histograms be scaled in such a way that the zero line
        is visible?

    quantiles : iterable (optional)
        A list of fractional quantiles to show on the 1-D histograms as
        vertical dashed lines.

    verbose : bool (optional)
        If true, print the values of the computed quantiles.

    plot_contours : bool (optional)
        Draw contours for dense regions of the plot.

    use_math_text : bool (optional)
        If true, then axis tick labels for very large or small exponents will
        be displayed as powers of 10 rather than using `e`.

    max_n_ticks : int (optional)
        Maximum number of ticks to try to use.

    top_ticks : bool (optional)
        If true, label the top ticks of each axis.

    fig : matplotlib.Figure (optional)
        Overplot onto the provided figure object.

    hist_kwargs : dict (optional)
        Any extra keyword arguments to send to the 1-D histogram plots.

    **hist2d_kwargs : (optional)
        Any remaining keyword arguments are sent to `corner.hist2d` to generate
        the 2-D histogram plots.
    """
    if quantiles is None:
        quantiles = []
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()

    # Try filling in labels from pandas.DataFrame columns.
    if labels is None:
        try:
            labels = xs.columns
        except AttributeError:
            pass

    # Deal with 1D sample lists.
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
                                       "dimensions than samples!"

    # Parse the weight array.
    if weights is not None:
        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("Weights must be 1-D")
        if xs.shape[1] != weights.shape[0]:
            raise ValueError("Lengths of weights must match number of samples")

    # Parse the parameter ranges.
    if range is None:
        if "extents" in hist2d_kwargs:
            logging.warn("Deprecated keyword argument 'extents'. "
                         "Use 'range' instead.")
            range = hist2d_kwargs.pop("extents")
        else:
            range = [[x.min(), x.max()] for x in xs]
            # Check for parameters that never change.
            m = np.array([e[0] == e[1] for e in range], dtype=bool)
            if np.any(m):
                raise ValueError(
                    ("It looks like the parameter(s) in "
                     "column(s) {0} have no dynamic range. "
                     "Please provide a `range` argument.").format(", ".join(
                         map("{0}".format,
                             np.arange(len(m))[m]))))

    else:
        # If any of the extents are percentiles, convert them to ranges.
        # Also make sure it's a normal list.
        range = list(range)
        for i, _ in enumerate(range):
            try:
                emin, emax = range[i]
            except TypeError:
                q = [0.5 - 0.5 * range[i], 0.5 + 0.5 * range[i]]
                range[i] = quantile(xs[i], q, weights=weights)

    if len(range) != xs.shape[0]:
        raise ValueError("Dimension mismatch between samples and range")

    # Parse the bin specifications.
    try:
        bins = [int(bins) for _ in range]
    except TypeError:
        if len(bins) != len(range):
            raise ValueError("Dimension mismatch between bins and range")

    # Some magic numbers for pretty axis layout.
    K = len(xs)
    factor = 2.0  # size of one side of one panel
    lbdim = 0.5 * factor  # size of left/bottom margin
    trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.05  # w/hspace size
    plotdim = factor * K + factor * (K - 1.) * whspace
    dim = lbdim + plotdim + trdim

    # Create a new figure if one wasn't provided.
    if fig is None:
        fig, axes = pl.subplots(K, K, figsize=(dim, dim))
    else:
        try:
            axes = np.array(fig.axes).reshape((K, K))
        except:
            raise ValueError("Provided figure has {0} axes, but data has "
                             "dimensions K={1}".format(len(fig.axes), K))

    # Format the figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb,
                        bottom=lb,
                        right=tr,
                        top=tr,
                        wspace=whspace,
                        hspace=whspace)

    # Set up the default histogram keywords.
    if hist_kwargs is None:
        hist_kwargs = dict()
    hist_kwargs["color"] = hist_kwargs.get("color", color)

    for i, x in enumerate(xs):
        # Deal with masked arrays.
        if hasattr(x, "compressed"):
            x = x.compressed()

        if np.shape(xs)[0] == 1:
            ax = axes
        else:
            ax = axes[i, i]

        # Plot the histograms. scipy is only needed when 1-D smoothing is requested.
        n, b = np.histogram(x, bins=bins[i], weights=weights, range=range[i])
        if smooth1d is not None:
            if gaussian_filter is None:
                raise ImportError("Please install scipy for smoothing")
            n = gaussian_filter(n, smooth1d)
        x0 = 0.5 * (b[:-1] + b[1:])
        y0 = n.copy()
        ax.plot(x0, y0, **hist_kwargs)

        if truths is not None and truths[i] is not None:
            ax.axvline(truths[i], color=truth_color, lw=2)

        # Plot quantiles if wanted.
        if len(quantiles) > 0:
            qvalues = quantile(x, quantiles, weights=weights)
            #            for q in qvalues:
            #                ax.axvline(q, ls="dashed", color=color)

            if verbose:
                print("Quantiles:")
                print([item for item in zip(quantiles, qvalues)])

        if show_titles:
            title = None
            if title_fmt is not None:
                # Compute the quantiles for the title. This might redo
                # unneeded computation but who cares.
                q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
                                            weights=weights)
                q_m, q_p = q_50 - q_16, q_84 - q_50

                #                # Format the quantile display.
                #                fmt = "{{0:{0}}}".format(title_fmt).format
                #                title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                #                title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))

                # format the quantile display
                max_dq = max(q_m, q_p)
                expo = lambda x: np.floor(np.log10(np.abs(x))).astype(int)
                q_50_str = str(round(q_50, -expo(max_dq)))
                q_m_str = str(round(q_m, -expo(q_m) + 1))
                q_p_str = str(round(q_p, -expo(q_p) + 1))
                title = r"$" + q_50_str + "_{-" + q_m_str + "}^{+" + q_p_str + "}$"

                #                # just to check the format
                #                print(str(q_50)+", "+str(q_50_str))
                #                print(str(q_m)+", "+str(q_m_str))
                #                print(str(q_p)+", "+str(q_p_str))
                #                print("\n")

                # Add in the column name if it's given.
                if labels is not None:
                    title = "{0} = {1}".format(labels[i], title)

            elif labels is not None:
                title = "{0}".format(labels[i])

            if title is not None:
                ax.set_title(title, **title_kwargs)

        # fill in the confidence levels
        q_2_5, q_16, q_84, q_97_5 = quantile(x, [0.025, 0.16, 0.84, 0.975],
                                             weights=weights)
        # find closest x bin to quantiles
        q_2_5 = x0[x0 >= q_2_5][0]
        q_16 = x0[x0 >= q_16][0]
        q_84 = x0[x0 <= q_84][-1]
        q_97_5 = x0[x0 <= q_97_5][-1]
        # fill below the 1d histogram
        ax.fill_between(x0,
                        y0,
                        color=color,
                        alpha=0.8,
                        where=(x0 >= q_16) * (x0 <= q_84))
        ax.fill_between(x0,
                        y0,
                        color=color,
                        alpha=0.4,
                        where=(x0 >= q_2_5) * (x0 <= q_16) + (x0 >= q_84) *
                        (x0 <= q_97_5))

        # Set up the axes.
        ax.set_xlim(range[i])
        if scale_hist:
            maxn = np.max(n)
            ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
        else:
            ax.set_ylim(0, 1.1 * np.max(n))
        ax.set_yticklabels([])
        ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))

        if i < K - 1:
            if top_ticks:
                ax.xaxis.set_ticks_position("top")
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                [l.set_size(12) for l in ax.get_xticklabels()]
            else:
                ax.set_xticklabels([])
        else:
            [l.set_rotation(45) for l in ax.get_xticklabels()]
            [l.set_size(12) for l in ax.get_xticklabels()]
            if labels is not None:
                ax.set_xlabel(labels[i], **label_kwargs)
                ax.xaxis.set_label_coords(0.5, -0.3)

            # use MathText for axes ticks
            ax.xaxis.set_major_formatter(
                ScalarFormatter(useMathText=use_math_text))

        for j, y in enumerate(xs):
            if np.shape(xs)[0] == 1:
                ax = axes
            else:
                ax = axes[i, j]
            if j > i:
                ax.set_frame_on(False)
                ax.set_xticks([])
                ax.set_yticks([])
                continue
            elif j == i:
                continue

            # Deal with masked arrays.
            if hasattr(y, "compressed"):
                y = y.compressed()

            # plot 2d histogram
            hist2d(y,
                   x,
                   ax=ax,
                   range=[range[j], range[i]],
                   weights=weights,
                   color=color,
                   smooth=smooth,
                   bins=[bins[j], bins[i]],
                   **hist2d_kwargs)

            if truths is not None:
                if truths[i] is not None and truths[j] is not None:
                    ax.plot(truths[j], truths[i], "s", color=truth_color)


#                if truths[j] is not None:
#                    ax.axvline(truths[j], color=truth_color, lw=2)
#                if truths[i] is not None:
#                    ax.axhline(truths[i], color=truth_color, lw=2)

            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))
            ax.yaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))

            if i < K - 1:
                ax.set_xticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                [l.set_size(12) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[j], **label_kwargs)
                    ax.xaxis.set_label_coords(0.5, -0.3)

                # use MathText for axes ticks
                ax.xaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

            if j > 0:
                ax.set_yticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_yticklabels()]
                [l.set_size(12) for l in ax.get_yticklabels()]
                if labels is not None:
                    ax.set_ylabel(labels[i], **label_kwargs)
                    ax.yaxis.set_label_coords(-0.3, 0.5)

                # use MathText for axes ticks
                ax.yaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

    return fig
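Below is a minimal usage sketch for the corner() function defined above, assuming it is in scope together with its module-level dependencies (numpy, matplotlib, quantile() and hist2d()); the sample data, labels, and output filename are made up for illustration.

import numpy as np

samples = np.random.randn(5000, 3)  # 5000 samples in 3 dimensions
fig = corner(samples,
             labels=["a", "b", "c"],
             quantiles=[0.16, 0.5, 0.84],
             show_titles=True)
fig.savefig("corner_example.png")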
Example #44
0
 def delayed_response():
     logging.warn("delayed response")
     self.finish()
Example #45
0
def copy_element(val, src_dt, tgt_dt, ctx):
    logging.debug("copy_element, val: " + str(val) + " val type: " + str(type(val)) + "src_dt: " + dump_dtype(src_dt) + " tgt_dt: " + dump_dtype(tgt_dt))
     
    fin = ctx["fin"]
    fout = ctx["fout"]
    out = None
    if len(src_dt) > 0:
        out_fields = []
        i = 0
        for name in src_dt.fields:
            field_src_dt = src_dt.fields[name][0]
            field_tgt_dt = tgt_dt.fields[name][0]
            field_val = val[i]
            i += 1
            out_field = copy_element(field_val, field_src_dt, field_tgt_dt, ctx)
            out_fields.append(out_field)
            out = tuple(out_fields)
    elif src_dt.metadata and 'ref' in src_dt.metadata:
        if not tgt_dt.metadata or 'ref' not in tgt_dt.metadata:
            raise TypeError("Expected tgt dtype to be ref, but got: {}".format(tgt_dt))
        ref = tgt_dt.metadata['ref']
        if is_reference(ref):
            # initialize out to null ref
            if is_h5py(ctx['fout']):
                out = h5py.Reference()  # null h5py ref
            else:
                out = '' # h5pyd refs are strings
             
            if ref:
                try:
                    fin_obj = fin[val]
                except AttributeError as ae:
                    msg = "Unable able to get obj for ref value: {}".format(ae)
                    logging.error(msg)
                    print(msg)
                    return None

                # TBD - for hsget, the name property is not getting set
                h5path = fin_obj.name
                if not h5path:
                    msg = "No path found for ref object"
                    logging.warn(msg)
                    if ctx["verbose"]:
                        print(msg)
                else:
                    fout_obj = fout[h5path]
                    if is_h5py(ctx['fout']):
                        out = fout_obj.ref
                    else:
                        out = str(fout_obj.ref) # convert to string for JSON serialization
            
            
        elif is_regionreference(ref):
            out = "tbd"
        else:
            raise TypeError("Unexpected ref type: {}".format(type(ref)))
    elif src_dt.metadata and 'vlen' in src_dt.metadata:
        logging.debug("copy_elment, got vlen element, dt: {}".format(src_dt.metadata["vlen"]))
        if not isinstance(val, np.ndarray):
            raise TypeError("Expecting ndarray or vlen element, but got: {}".format(type(val)))
        if not tgt_dt.metadata or 'vlen' not in tgt_dt.metadata:
            raise TypeError("Expected tgt dtype to be vlen, but got: {}".format(tgt_dt))
        src_vlen_dt = src_dt.metadata["vlen"]
        tgt_vlen_dt = tgt_dt.metadata["vlen"]
        if has_reference(src_vlen_dt):
            if len(val.shape) == 0:
                # scalar array
                e = val[()]
                v = copy_element(e, src_vlen_dt, tgt_vlen_dt, ctx)
                out = np.array(v, dtype=tgt_dt)
            else:
                out = np.zeros(val.shape, dtype=tgt_dt)
                for i in range(len(out)):
                    e = val[i]
                    out[i] = copy_element(e, src_vlen_dt, tgt_vlen_dt, ctx)
        else:
            # can just directly copy the array
            out = np.zeros(val.shape, dtype=tgt_dt)
            out[...] = val[...]
    else:
        out = val  # can just copy as is
    return out
Example #46
0
def hist2d(x,
           y,
           bins=20,
           range=None,
           weights=None,
           levels=None,
           smooth=None,
           ax=None,
           color=None,
           plot_datapoints=True,
           plot_density=True,
           plot_contours=True,
           no_fill_contours=False,
           fill_contours=False,
           contour_kwargs=None,
           contourf_kwargs=None,
           data_kwargs=None,
           **kwargs):
    """
    Plot a 2-D histogram of samples.

    Parameters
    ----------
    x, y : array_like (nsamples,)
       The samples.

    levels : array_like
        The contour levels to draw.

    ax : matplotlib.Axes (optional)
        An axes instance on which to add the 2-D histogram.

    plot_datapoints : bool (optional)
        Draw the individual data points.

    plot_density : bool (optional)
        Draw the density colormap.

    plot_contours : bool (optional)
        Draw the contours.

    no_fill_contours : bool (optional)
        Add no filling at all to the contours (unlike setting
        ``fill_contours=False``, which still adds a white fill at the densest
        points).

    fill_contours : bool (optional)
        Fill the contours.

    contour_kwargs : dict (optional)
        Any additional keyword arguments to pass to the `contour` method.

    contourf_kwargs : dict (optional)
        Any additional keyword arguments to pass to the `contourf` method.

    data_kwargs : dict (optional)
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.
    """
    if ax is None:
        ax = pl.gca()

    # Set the default range based on the data range if not provided.
    if range is None:
        if "extent" in kwargs:
            logging.warn("Deprecated keyword argument 'extent'. "
                         "Use 'range' instead.")
            range = kwargs["extent"]
        else:
            range = [[x.min(), x.max()], [y.min(), y.max()]]

    # Set up the default plotting arguments.
    if color is None:
        color = "k"

    # Choose the default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5)**2)

    # This is the color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    density_cmap = LinearSegmentedColormap.from_list("density_cmap",
                                                     [color, (1, 1, 1, 0)])

    # This color map is used to hide the points at the high density areas.
    white_cmap = LinearSegmentedColormap.from_list("white_cmap", [(1, 1, 1),
                                                                  (1, 1, 1)],
                                                   N=2)

    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    for i, l in enumerate(levels):
        contour_cmap[i][-1] *= float(i) / (len(levels) + 1)

    # We'll make the 2D histogram to directly estimate the density.
    try:
        # MANUWARNING: the 'range' argument is intentionally not passed here
        #        H, X, Y = np.histogram2d(x.flatten(), y.flatten(), bins=bins,
        #                                 range=range, weights=weights)
        H, X, Y = np.histogram2d(x.flatten(),
                                 y.flatten(),
                                 bins=bins,
                                 weights=weights)
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "have no dynamic range. You could try using the "
                         "'range' argument.")

    if smooth is not None:
        if gaussian_filter is None:
            raise ImportError("Please install scipy for smoothing")
        H = gaussian_filter(H, smooth)

    # Compute the density levels.
    Hflat = H.flatten()
    inds = np.argsort(Hflat)[::-1]
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    sm /= sm[-1]
    V = np.empty(len(levels))
    for i, v0 in enumerate(levels):
        try:
            V[i] = Hflat[sm <= v0][-1]
        except IndexError:
            V[i] = Hflat[0]
    V.sort()
    m = np.diff(V) == 0
    if np.any(m):
        logging.warning("Too few points to create valid contours")
    while np.any(m):
        V[np.where(m)[0][0]] *= 1.0 - 1e-4
        m = np.diff(V) == 0
    V.sort()

    # Compute the bin centers.
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])

    # Extend the array for the sake of the contours at the plot edges.
    H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
    H2[2:-2, 2:-2] = H
    H2[2:-2, 1] = H[:, 0]
    H2[2:-2, -2] = H[:, -1]
    H2[1, 2:-2] = H[0]
    H2[-2, 2:-2] = H[-1]
    H2[1, 1] = H[0, 0]
    H2[1, -2] = H[0, -1]
    H2[-2, 1] = H[-1, 0]
    H2[-2, -2] = H[-1, -1]
    X2 = np.concatenate([
        X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
        X1,
        X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
    ])
    Y2 = np.concatenate([
        Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
        Y1,
        Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
    ])

    if plot_datapoints:
        if data_kwargs is None:
            data_kwargs = dict()
        data_kwargs["color"] = data_kwargs.get("color", color)
        data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
        data_kwargs["mec"] = data_kwargs.get("mec", "none")
        data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
        ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)

    # Plot the base fill to hide the densest data points.
    if (plot_contours or plot_density) and not no_fill_contours:
        ax.contourf(X2,
                    Y2,
                    H2.T, [V.min(), H.max()],
                    cmap=white_cmap,
                    antialiased=False)

#    if plot_contours and fill_contours:
#        if contourf_kwargs is None:
#            contourf_kwargs = dict()
#        contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
#        contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
#                                                             False)
#        ax.contourf(X2, Y2, H2.T, np.concatenate([[0], V, [H.max()*(1+1e-4)]]),
#                    **contourf_kwargs)
#
#     # Plot the density map. This can't be plotted at the same time as the
#     # contour fills.
#     elif plot_density:
#        ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)

#     if plot_contours and fill_contours:
#        if contourf_kwargs is None:
#           contourf_kwargs = dict()
#        contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
#        contourf_kwargs["antialiased"] = contourf_kwargs.get("antialiased",
#                                                             False)
    ax.contourf(X2,
                Y2,
                H2.T, [V[-1], H.max() * (1 + 1e-4)],
                antialiased=False,
                colors=color,
                alpha=0.8)
    ax.contourf(X2,
                Y2,
                H2.T, [V[-2], V[-1]],
                antialiased=False,
                colors=color,
                alpha=0.4)

    # Plot the contour edge colors.
    if plot_contours:
        if contour_kwargs is None:
            contour_kwargs = dict()
        contour_kwargs["colors"] = contour_kwargs.get("colors", color)
        ax.contour(X2, Y2, H2.T, V, **contour_kwargs)

    ax.set_xlim(range[0])
    ax.set_ylim(range[1])
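Below is a minimal usage sketch for hist2d() as defined above, assuming it is in scope along with numpy and matplotlib; the synthetic samples and filename are made up for illustration, and smooth= is left unset so scipy is not required.

import numpy as np
import matplotlib.pyplot as pl

x, y = np.random.randn(2, 10000)  # synthetic samples
fig, ax = pl.subplots()
hist2d(x, y, ax=ax, bins=40)
fig.savefig("hist2d_example.png")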
Example #47
0
        os.system(cmd)


def start_cluster():
    logging.info("start cluster")
    servers = [('127.0.0.1', port) for port in PORTS]
    half = len(servers) // 2
    command.start_cluster_on_multi(servers[0:half])
    time.sleep(5)
    for i in range(half):
        command.replicate("127.0.0.1", PORTS[i], "127.0.0.1", PORTS[i + half])


if os.path.exists(cluster):
    start_servers()
    logging.warn("cluster dir has already exists")
    sys.exit(0)
else:
    start_servers()
    start_cluster()
logging.info("done")









    def handle(self, *args, **options):

        username = options['user']
        groupname = options['group']
        rules_status = options['status']
        rules_source = options['source']
        rules_category = options['category']
        folder_override = options['folder_as']

        try:
            user = User.objects.get(username=username)
        except ObjectDoesNotExist:
            user = None
            logging.error('[!] Specified user does not exist')

        try:
            group = Group.objects.get(name=groupname)
        except ObjectDoesNotExist:
            group = None
            logging.error('[!] Specified group does not exist')

        if user and group:
            for yara_master in options['rule_masters']:

                if os.path.isfile(yara_master):

                    # Change working directory to account for relative imports
                    os.chdir(os.path.dirname(os.path.abspath(yara_master)))

                    with open(yara_master, 'r') as file_object:
                        master_file_contents = file_object.read()

                    include_pattern = r'\ninclude \"([^\"]+)\"'
                    import_files = re.findall(include_pattern, master_file_contents)

                    for file_path in import_files:
                        with open(file_path, 'rb') as raw_content:
                            content_results = parse_rule_submission(raw_content)

                        # Inspect the results from content submission
                        parsed_rules = content_results['parsed_rules']
                        parsing_error = content_results['parser_error']

                        # Identify any parsing errors that occur
                        if parsing_error:
                            message = '[!] Parsing Error: {}'.format(parsing_error)
                            logging.error(message)
                        else:
                            # Save successfully parsed rules
                            if folder_override == 'source':
                                rules_source = str(Path(file_path).parent.name)
                                save_results = YaraRule.objects.process_parsed_rules(parsed_rules,
                                                                                     rules_source,
                                                                                     rules_category,
                                                                                     user, group,
                                                                                     status=rules_status,
                                                                                     force_source=True)
                            elif folder_override == 'category':
                                rules_category = str(Path(file_path).parent.name)
                                save_results = YaraRule.objects.process_parsed_rules(parsed_rules,
                                                                                     rules_source,
                                                                                     rules_category,
                                                                                     user, group,
                                                                                     status=rules_status,
                                                                                     force_category=True)
                            else:
                                save_results = YaraRule.objects.process_parsed_rules(parsed_rules,
                                                                                     rules_source,
                                                                                     rules_category,
                                                                                     user, group,
                                                                                     status=rules_status)

                            upload_count = save_results['rule_upload_count']
                            collision_count = save_results['rule_collision_count']
                            message = '[+] Successfully uploaded {} rules and prevented {} rule collisions from {}'.format(upload_count, collision_count, file_path)
                            logging.info(message)

                            for error in save_results['errors']:
                                logging.error(error)

                            for warning in save_results['warnings']:
                                logging.warn(warning)
Example #49
0
    def test_param(self):
        u'''Parameter tests'''
        logging.warn(u'Running parameter tests...')
        # Called via an HTTP request: every received parameter is a string
        response = self.fetch("/test_param/555/run/?a=1&b=2&c=3&b=4&c=3")
        result = response.body
        self.assertTrue(isinstance(result, basestring),
                        u'Wrong result type!')  # a web call returns a string result
        result = json.loads(result)
        self.assertTrue(isinstance(result, dict), u'Wrong result type!')
        self.assertEqual(result.get('num'), '555',
                         u'Wrong parameter value in result!')  # URL path parameter
        self.assertEqual(result.get('service'), 'run', u'Wrong parameter value in result!')
        self.assertEqual(result.get('a'), '1',
                         u'Wrong parameter value in result!')  # received parameters are all strings
        self.assertEqual(result.get('b'), ['2', '4'],
                         u'Wrong parameter value in result!')  # repeated parameter name
        self.assertEqual(result.get('c'), '3',
                         u'Wrong parameter value in result!')  # the duplicated 'c' parameter is filtered out
        self.assertTrue(result.get('self') != 'None',
                        u'The first parameter should have content!')  # the first parameter is the request handler

        # Called via an HTTP request with missing parameters
        response = self.fetch("/test_param/555/run/?a=1")
        result = response.body
        self.assertTrue(isinstance(result, basestring),
                        u'Wrong result type!')  # a web call returns a string result
        result = json.loads(result)
        self.assertTrue(isinstance(result, dict), u'Wrong result type!')
        self.assertEqual(result, {
            'result': 1,
            'reason': u'请求参数错误'
        }, u'Wrong parameter value in result!')  # returns a "bad request parameters" error

        # Called directly in-process: parameters arrive exactly as passed and the first parameter is empty
        result = param_fun(555, 'service', a=1, b=2, c=3)
        self.assertTrue(isinstance(result, dict),
                        u'Wrong result type!')  # a direct call returns the raw result
        self.assertEqual(result.get('num'), 555,
                         u'Wrong parameter value in result!')  # parameters keep their original values
        self.assertEqual(result.get('service'), 'service', u'Wrong parameter value in result!')
        self.assertEqual(result.get('a'), 1, u'Wrong parameter value in result!')
        self.assertEqual(result.get('b'), 2, u'Wrong parameter value in result!')
        self.assertEqual(result.get('c'), 3, u'Wrong parameter value in result!')
        self.assertEqual(result.get('self'), 'None', u'Wrong parameter value in result!')

        # Called directly in-process with missing parameters
        result = param_fun(555, 'service')
        self.assertTrue(isinstance(result, dict),
                        u'Wrong result type!')  # a direct call returns the raw result
        self.assertEqual(result, {
            'result': 1,
            'reason': u'请求参数错误'
        }, u'Wrong parameter value in result!')  # returns a "bad request parameters" error

        # Called via an HTTP request with illegal characters in a parameter: returns a parameter error
        response = self.fetch("/test_param/555/run/?a=1&c=3&b=4%27ss&c=3")
        result = response.body
        self.assertTrue(isinstance(result, basestring),
                        u'Wrong result type!')  # a web call returns a string result
        result = json.loads(result)
        self.assertTrue(isinstance(result, dict), u'Wrong result type!')
        self.assertEqual(result, {
            'result': 1,
            'reason': u'请求参数错误'
        }, u'Wrong parameter value in result!')  # returns a "bad request parameters" error

        # Called via an HTTP request with illegal characters in a parameter: returns a parameter error
        response = self.fetch("/test_param/555/run/?a=1&b=2&c=3&b=s'ss&c=3")
        result = response.body
        self.assertTrue(isinstance(result, basestring),
                        u'Wrong result type!')  # a web call returns a string result
        result = json.loads(result)
        self.assertTrue(isinstance(result, dict), u'Wrong result type!')
        self.assertEqual(result, {
            'result': 1,
            'reason': u'请求参数错误'
        }, u'Wrong parameter value in result!')  # returns a "bad request parameters" error
Example #50
0
#!/usr/bin/python3
#
# Example of using joblog.py.
#

import joblog
import logging
import random

logging.info("example in Python")

total = random.randint(1000000, 2000000)
for i in range(0, total):
    if ((i + 1) % 100000) == 0 or i + 1 == total:
        logging.info("%i/%i (%.1f%%) processed", i + 1, total, 100.0 * (i + 1) / total)

logging.warn("all done")

try:
    x = 5 / 0
except Exception:
    logging.exception("intentional exception")
Example #51
0
def upload_schedule():
    req = flask.request
    user = view_helpers.get_current_user()

    schedule_data = util.json_loads(req.form.get('schedule_data'))
    processed_items = schedule_data['processed_items']
    failed_items = schedule_data['failed_items']
    term_name = schedule_data['term_name']
    term_id = m.Term.id_from_name(term_name)

    # FIXME TODO(david): Save these in models and display on schedule
    #failed_items = schedule_data['failed_items']

    rmclogger.log_event(
        rmclogger.LOG_CATEGORY_API,
        rmclogger.LOG_EVENT_SCHEDULE,
        {
            'schedule_data': schedule_data,
            'term_id': term_id,
            'user_id': user.id,
        },
    )

    now = datetime.now()

    user.last_good_schedule_paste = req.form.get('schedule_text')
    user.last_good_schedule_paste_date = now
    user.save()

    # Remove existing schedule items for the user for the given term
    for usi in m.UserScheduleItem.objects(user_id=user.id, term_id=term_id):
        usi.delete()

    for item in processed_items:
        try:
            # Create this UserScheduleItem
            first_name, last_name = m.Professor.guess_names(item['prof_name'])
            prof_id = m.Professor.get_id_from_name(
                first_name=first_name,
                last_name=last_name,
            )
            if first_name and last_name:
                if not m.Professor.objects.with_id(prof_id):
                    m.Professor(
                        id=prof_id,
                        first_name=first_name,
                        last_name=last_name,
                    ).save()

            usi = m.UserScheduleItem(
                user_id=user.id,
                class_num=item['class_num'],
                building=item['building'],
                room=item.get('room'),
                section_type=item['section_type'].upper(),
                section_num=item['section_num'],
                start_date=datetime.utcfromtimestamp(item['start_date']),
                end_date=datetime.utcfromtimestamp(item['end_date']),
                course_id=item['course_id'],
                prof_id=prof_id,
                term_id=term_id,
            )
            try:
                usi.save()
            except me.NotUniqueError as ex:
                # Likely the case where the user pastes in two or more valid
                # schedules into the same input box
                logging.info(
                    'Duplicate error on UserScheduleItem .save(): %s' % (ex))

            # Add this item to the user's course history
            # FIXME(Sandy): See if we can get program_year_id from Quest
            # Or just increment their last one
            user.add_course(usi.course_id, usi.term_id)

        except KeyError:
            logging.error("Invalid item in uploaded schedule: %s" % (item))

    # Add courses that failed to fully parse, probably due to unavailable times
    for course_id in set(failed_items):
        fsi = m.FailedScheduleItem(
            user_id=user.id,
            course_id=course_id,
            parsed_date=now,
        )

        try:
            fsi.save()
        except me.NotUniqueError as ex:
            # This should never happen since we're iterating over a set
            logging.warn('WTF this should never happen.')
            logging.warn('Duplicate error FailedScheduleItem.save(): %s' % ex)

        user.add_course(course_id, term_id)

    user.schedules_imported += 1
    user.save()

    schedule_screenshot.update_screenshot_async(user)

    rmclogger.log_event(rmclogger.LOG_CATEGORY_SCHEDULE,
                        rmclogger.LOG_EVENT_UPLOAD, user.id)

    return ''
Example #52
0
    def remove(self):
        args = [self.getValue(self.__primary_key__)]
        rows = yield from execute(self.__delete__, args)

        if rows != 1:
            logging.warn('failed to delete record: affected rows: %s' % rows)
Example #53
0
import jinja2, logging, os
from google.appengine.api import app_identity

PROJECT_DIR = os.path.abspath(os.path.dirname(__file__))

env = jinja2.Environment(
    #extensions=,
    autoescape=True,
    loader=jinja2.FileSystemLoader(
               os.path.join(PROJECT_DIR, "templates")),
    cache_size=-1,
    auto_reload=False,
    trim_blocks=True)

try:
    env.globals['app_id'] = app_identity.get_application_id()
except Exception as e:
    logging.warn(e, exc_info=True)
    env.globals['app_id'] = None
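A minimal usage sketch for the environment configured above; the template name and variables are hypothetical and assume an index.html file exists under the templates directory.

template = env.get_template("index.html")
html = template.render(page_title="Hello", app_id=env.globals["app_id"])
logging.info("rendered %d bytes", len(html))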
Example #54
0
def model_run(queue, func):  # Takes two arguments: queue is the manager-managed queue, func is the thread task to submit (it is called with one argument)
    '''Main loop.'''

    avg_time = model_test(queue=queue, func=func)  # average time one thread takes
    logging.info(u'from function test get avg_time=%s' % avg_time)
    print u'from function test get avg_time=%s' % avg_time
    count = 0
    start_time = time.time()
    multiple = g_multiple

    while True:
        try:
            logging.info(u'waiting up to 100 seconds to get an element from the queue')
            # d=song_info_queue.get(timeout=100)
            d = queue.get(timeout=100)

            logging.info(u'got %s from queue' % d)

            # msi=Multi_Song_Info(song_ids=d['ids'],refer=d['refer'])
            # pool.submit(msi.insert_song)

            pool.submit(func, d)

            logging.info(u'%s started a new thread, song ids=%s, thread name: %s, pid: %s' % (sys._getframe().f_code.co_name, d['ids'], threading.current_thread().name, os.getpid()))
            count += 1

            if count >= max_pool * multiple:
                sleeptime = multiple * avg_time
                info = u'spawned %s threads, so sleeping %s seconds! current active thread count=%s' % (count, sleeptime, threading.active_count())
                print info
                logging.debug(info)
                time.sleep(sleeptime)
                count = 0

                end_time = time.time()
                if end_time - start_time >= 60:  # Check every 60 seconds to keep the database connection count roughly between 30 and 150, so the program runs stably

                    mysql = Cloud_Music_MySQL()
                    Threads_connected = mysql.show_Threads_connected()
                    Threads_connected = int(Threads_connected)
                    mysql.close_connect()

                    if Threads_connected <= 30:  # The program is under-utilized; raise multiple (or lower avg_time)
                        avg_time = model_test(queue=queue, func=func)
                        multiple = multiple + 1
                        info = u'Program too idle, current Threads_connected=%s, reset avg_time=%s, multiple=%s' % (Threads_connected, avg_time, multiple)
                        print info
                        logging.info(info)

                    elif Threads_connected >= 150:  # The program is overloaded; lower multiple (or raise avg_time)
                        avg_time = model_test(queue=queue, func=func)
                        multiple = max(multiple - 1, 2)  # multiple is never lower than 2
                        info = u'Program overloaded, current Threads_connected=%s, reset avg_time=%s, multiple=%s' % (Threads_connected, avg_time, multiple)
                        print info
                        logging.info(info)

                    else:
                        info = u'Program running well, current Threads_connected=%s, keeping avg_time=%s, multiple=%s' % (Threads_connected, avg_time, multiple)
                        print info
                        logging.info(info)

                    start_time = time.time()  # reset start_time

        except Exception, e:
            if str(e):
                e = str(e).decode('utf-8', 'ignore')
            else:  # the queue raised an exception whose str(e) is empty
                e = u'queue empty'

            logging.warn(u'function %s raised an error caused by %s, traceback info is: %s' % (sys._getframe().f_code.co_name, e, traceback.format_exc()))
            print u'error info is: %s' % e

            if 'many connections' in e:  # A JoinableQueue would be better; with the periodic check above, "too many connections" rarely occurs any more
                print u'too many connections right now, sleeping 3 seconds to let running connections close'
                # song_info_queue.put(d)
                queue.put(d)
                print u'caught "too many connections" error, so put d=%s back into the queue' % d
                logging.info(u'caught "too many connections" error, so put d=%s back into the queue' % d)

                # The exception came from the database operation and d is still available, so put it back on the queue; no JoinableQueue needed

                mysql = Cloud_Music_MySQL()
                Threads_connected = mysql.show_Threads_connected()

                while Threads_connected >= 100:
                    info = u'current Threads_connected is: %s, still too many, sleeping 3 seconds!' % Threads_connected
                    print info
                    logging.debug(info)
                    time.sleep(3)
                    Threads_connected = mysql.show_Threads_connected()
                mysql.close_connect()
                continue

            elif 'empty' in e:
                print u'queue is empty, breaking out of the loop!'
                print u'waiting 20 seconds to make sure running threads finish'
                time.sleep(20)
                break

            else:
                info = u'unexpected error, traceback info: %s' % (traceback.format_exc())
                print info
                logging.error(info)
                # song_info_queue.put(d)
                queue.put(d)
                print u'caught unexpected error, so put d=%s back into the queue' % d
                break
Example #55
0
 def handle_default(self, params):
     logging.warn("got %s" % (params, ))
Example #56
0
def crash():
    """For testing error logging"""
    logging.warn("Crashing because you want me to (hit /crash)")
    raise Exception("OH NOES we've crashed!!!!!!!!!! /crash was hit")
Example #57
0
def command_upload(args):
    "Implements the upload subcommand"
    d = utils.docker_client(args)
    image = get_container_image(args, d)

    oauth2_instance = oauth2.build_oauth2(args)
    auth = oauth2_instance.build_authorizer()
    # TODO: use transloadit's signatures for upload signing.
    # authorization = authorize_upload(args, auth)

    # Generate a random uuid for upload.
    upload_id = uuid.uuid4().hex
    transloadit_host = idle_transloadit_server(args)
    upload_url = 'https://%(host)s/assemblies/%(id)s' % {
        'host': transloadit_host,
        'id': upload_id,
    }
    if args.upload_to_requestbin is not None:
        upload_url = 'http://requestb.in/%s' % args.upload_to_requestbin

    if not args.quiet > 0:
        sys.stdout.write(
            'About to upload to server:\n\t%(transloadit_host)s\n'
            'with upload id:\n\t%(upload_id)s\nStatus API:\n'
            '\t%(upload_url)s\nUploading...' % {
                'transloadit_host': transloadit_host,
                'upload_id': upload_id,
                'upload_url': upload_url,
            })
        sys.stdout.flush()
    p = multiprocessing.Process(target=upload, args=(args, upload_url, image))
    p.daemon = True  # Auto-kill when the main process exits.
    p.start()
    time.sleep(20)  # Yield control to the child process to kick off upload.

    upload_information = None

    while p.is_alive():
        upload_information = poll_transloadit(args, upload_url)
        if upload_information is not None:
            logging.warn(
                'Upload information retrieved before upload completed??! %s',
                upload_information)
            break
        time.sleep(10)  # 10 seconds

    p.join(1)  # Join to clean up zombie.

    # TODO: make time waiting for transloadit to finish processing configurable
    for i in xrange(300):
        upload_information = poll_transloadit(args, upload_url)
        if upload_information is not None:
            break
        time.sleep(5)

    if upload_information is None:
        logging.error(
            'Upload did not complete within expected time limits. Upload '
            'URL: %s', upload_url)
        return 1
    # Register the grader with Coursera to initiate the image cleaning process
    logging.debug('Grader upload info is: %s', upload_information)

    # Rebuild an authorizer to ensure it's fresh and not expired
    auth = oauth2_instance.build_authorizer()
    grader_id = register_grader(auth,
                                args,
                                bucket=upload_information[0],
                                key=upload_information[1])

    return update_assignments(auth, grader_id, args)
Example #58
0
def parse_csv(infile, player_num):
    """Parse csv.

    fields:
    -------
    0 - division
    1 - opponent
    2 - date
    3 - youtube
    4 - yards
    5 - description
    6 - player number
    7 - Offense or Defense
    8 - start time hr:min:sec
    9 - duration
    10 - url link1
    11 - url link2
    """
    rv_list = []
    with open(infile, 'r') as fd:
        rd_list = csv.reader(fd, delimiter=',')
        base_len = 0
        for i,row in enumerate(rd_list):
            if i == 0:
                # header
                base_len = len(row)
                continue
            if len(row) != base_len:
                continue

            # valid row
            logging.debug('%d: %s', i, ','.join(row))

            if player_num != row[6]:
                continue
            
            # build description text
            txt = '#' + row[6]
            if row[7].lower().startswith('o'):
                txt += ':offense'
            elif row[7].lower().startswith('s'):
                txt += ':steams'
            else:
                txt += ':defense'

            if row[5] != '':
                txt += ":%s" % row[5]

            if row[8] == '' or row[2] == '':
                logging.warn('Skipping %s line ... date/time missing', row)
                continue

            time_idx = time.mktime(
                time.strptime("%s %s" % (csvdate2filedate(row[2]),
                row[8]), "%Y-%m-%d %H:%M:%S"))
            movie = find_file(row[0], row[1], row[2])
            logging.debug("time_idx:%d movie:%s", time_idx, movie)
            if movie is None:
                logging.warn('Unable to find movie for division:%s vs '
                    '%s on %s', row[0], row[1], row[2])
                break
            # movie, descrip, time, duration
            rv_list.append([time_idx, movie, txt, row[8], row[9]])

    return rv_list
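For reference, a hypothetical input row matching the field layout documented in parse_csv's docstring; all values are invented for illustration, and column 6 must equal the player_num argument for the row to be kept.

# division,opponent,date,youtube,yards,description,player number,O/D,start time,duration,link1,link2
# U12,Eagles,2017-10-01,abc123,35,long run,12,Offense,00:14:05,00:00:12,,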
Example #59
0
    def __init__(self, filename, file=None, path=None):
        self.materials = {}
        self.meshes = {}  # Name mapping
        self.mesh_list = []  # Also includes anonymous meshes

        if file is None:
            file = open(filename, 'r')

        if path is None:
            path = os.path.dirname(filename)
        self.path = path

        mesh = None
        group = None
        material = None

        vertices = [[0., 0., 0.]]
        normals = [[0., 0., 0.]]
        tex_coords = [[0., 0.]]

        for line in file:
            if line.startswith('#'):
                continue
            values = line.split()
            if not values:
                continue

            if values[0] == 'v':
                vertices.append(map(float, values[1:4]))
            elif values[0] == 'vn':
                normals.append(map(float, values[1:4]))
            elif values[0] == 'vt':
                tex_coords.append(map(float, values[1:3]))
            elif values[0] == 'mtllib':
                self.load_material_library(values[1])
            elif values[0] in ('usemtl', 'usemat'):
                material = self.materials.get(values[1], None)
                if material is None:
                    logging.warn('Unknown material: %s' % values[1])
                if mesh is not None:
                    group = MaterialGroup(material)
                    mesh.groups.append(group)
            elif values[0] == 'o':
                mesh = Mesh(values[1])
                self.meshes[mesh.name] = mesh
                self.mesh_list.append(mesh)
                group = None
            elif values[0] == 'f':
                if mesh is None:
                    mesh = Mesh('')
                    self.mesh_list.append(mesh)
                if material is None:
                    # FIXME
                    material = Material("<unknown>")
                if group is None:
                    group = MaterialGroup(material)
                    mesh.groups.append(group)

                # For fan triangulation, remember first and latest vertices
                n1 = None
                nlast = None
                t1 = None
                tlast = None
                v1 = None
                vlast = None
                #points = []
                for i, v in enumerate(values[1:]):
                    v_index, t_index, n_index = \
                        (map(int, [j or 0 for j in v.split('/')]) + [0, 0])[:3]
                    if v_index < 0:
                        v_index += len(vertices) - 1
                    if t_index < 0:
                        t_index += len(tex_coords) - 1
                    if n_index < 0:
                        n_index += len(normals) - 1
                    #vertex = tex_coords[t_index] + \
                    #         normals[n_index] + \
                    #         vertices[v_index]

                    group.normals += normals[n_index]
                    group.tex_coords += tex_coords[t_index]
                    group.vertices += vertices[v_index]

                    if i >= 3:
                        # Triangulate
                        group.normals += n1 + nlast
                        group.tex_coords += t1 + tlast
                        group.vertices += v1 + vlast

                    if i == 0:
                        n1 = normals[n_index]
                        t1 = tex_coords[t_index]
                        v1 = vertices[v_index]
                    nlast = normals[n_index]
                    tlast = tex_coords[t_index]
                    vlast = vertices[v_index]
Example #60
0
 def handle_unknown(self, params):
     logging.warn("Unknown message type %d: %s" %
                  (params['#msgid'], repr(params['#msg'])))