Example #1
File: eepy.py Project: yuka2py/eepy
    def block(blockname="content"):
        """ with 句で囲んだ範囲をブロックとして登録する。
        既に保存されたブロックがある時、ブロックの内容を保存されたブロックで置き換えます。
        保存されたブロックが無い時、ブロックの内容をそのまま出力し、ブロックを保存します。
        このヘルパは通常、extends と組み合わせて使用します。
        args:
            blockname: ブロックの名前
        """
        locals = buffer_frame_locals()
        locals.setdefault("__blocks", {})

        # If a block with this name is already saved, output the saved content and discard what is captured here
        if blockname in locals["__blocks"]:
            buffer, locals["__buffer"] = locals["__buffer"], list()
            yield
            locals["__buffer"] = buffer
            locals["__buffer"].append(locals["__blocks"][blockname])  # 仕様:利用後も削除せずに残しておく

        # If no block is saved yet, save the captured content as the block and output it as well
        else:
            buffer, locals["__buffer"] = locals["__buffer"], list()
            yield
            captured = u"".join(locals["__buffer"])
            locals["__buffer"] = buffer
            locals["__buffer"].append(captured)
            locals["__blocks"][blockname] = captured
Example #2
    def __init__(self, name, args=[], opts=[], **kwargs):
        """
        Base class for POV objects.

        @param name: POV object name
        @param args: compulsory pov args (comma separation appears optional)
        @param opts: eg. CSG items
        @param kwargs: key value pairs
        """
        #print "Item",name,args,opts,kwargs
        self.name = name

        args = [map_arg(a) for a in args]
        self.args = flatten(args)

        opts = [map_arg(o) for o in opts]
        self.opts = flatten(opts)

        self.kwargs = dict(kwargs)  # take a copy
        for key, val in self.kwargs.items():
            if isinstance(val, (tuple, list)):
                self.kwargs[key] = map_arg(val)
Example #3
    def jar(self):
        """Performs the 'jar' command."""
        class_name = getattr(self.flags, "class")
        if (class_name is None) and (len(self.args) > 0):
            class_name = self.pop_args_head()
        assert (class_name is not None), ("No class name specified with [--class=]<class>.")

        lib_jars = []
        if self.flags.jars is not None:
            lib_jars.extend(self.flags.jars)
        classpath = list(self.express.get_classpath(lib_jars=lib_jars))

        java_opts = []
        if self.flags.java_opts is not None:
            java_opts = [self.flags.java_opts]

        user_args = list(self.args)
        logging.info("Running java class %r with parameters: %r", class_name, user_args)

        cmd = [
            "java",
            # This property is only needed in kiji-schema v1.1 :
            "-Dorg.kiji.schema.impl.AvroCellEncoder.SCHEMA_VALIDATION=DISABLED",
        ] + java_opts + [
            "-classpath", ":".join(classpath),
            class_name,
        ] + user_args

        logging.debug("Running command:\n%s\n", " \\\n\t".join(map(repr, cmd)))
        return subprocess.call(cmd)
Example #4
    def _get_router_ids_for_agent(self, context, agent_db, router_ids):
        result_set = set(super(L3_DVRsch_db_mixin,
                            self)._get_router_ids_for_agent(
            context, agent_db, router_ids))
        router_ids = set(router_ids or [])
        if router_ids and result_set == router_ids:
            # no need for extra dvr checks if requested routers are
            # explicitly scheduled to the agent
            return list(result_set)

        # dvr routers are not explicitly scheduled to agents on hosts with
        # dvr serviceable ports, so need special handling
        if (self._get_agent_mode(agent_db) in
            [n_const.L3_AGENT_MODE_DVR,
             n_const.L3_AGENT_MODE_DVR_NO_EXTERNAL,
             n_const.L3_AGENT_MODE_DVR_SNAT]):
            if not router_ids:
                result_set |= set(self._get_dvr_router_ids_for_host(
                    context, agent_db['host']))
            else:
                for router_id in (router_ids - result_set):
                    subnet_ids = self.get_subnet_ids_on_router(
                        context, router_id)
                    if (subnet_ids and
                            self._check_dvr_serviceable_ports_on_host(
                                    context, agent_db['host'],
                                    list(subnet_ids))):
                        result_set.add(router_id)

        return list(result_set)
Example #5
 def test_combined_non_existing_field_desc(self):
     all_results = list(self.lib.items(u'id+'))
     q = u'foo- id+'
     results = list(self.lib.items(q))
     self.assertEqual(len(all_results), len(results))
     for r1, r2 in zip(all_results, results):
         self.assertEqual(r1.id, r2.id)
Example #6
    def test_invalid_final_lookup(self):
        qs = Book.objects.prefetch_related('authors__name')
        with self.assertRaises(ValueError) as cm:
            list(qs)

        self.assertTrue('prefetch_related' in str(cm.exception))
        self.assertTrue("name" in str(cm.exception))
Example #7
 def _distance_to_W(self, ids=None):
     allneighbors = {}
     weights = {}
     if ids:
         ids = np.array(ids)
     else:
         ids = np.arange(len(self._nmat))
     if self.binary:
         for i, neighbors in enumerate(self._nmat):
             ns = [ni for ni in neighbors if ni != i]
             neigh = list(ids[ns])
             if len(neigh) == 0:
                 allneighbors[ids[i]] = []
                 weights[ids[i]] = []
             else:
                 allneighbors[ids[i]] = neigh
                 weights[ids[i]] = [1] * len(ns)
     else:
         self.dmat = self.kd.sparse_distance_matrix(
             self.kd, max_distance=self.threshold)
         for i, neighbors in enumerate(self._nmat):
             ns = [ni for ni in neighbors if ni != i]
             neigh = list(ids[ns])
             if len(neigh) == 0:
                 allneighbors[ids[i]] = []
                 weights[ids[i]] = []
             else:
                 try:
                     allneighbors[ids[i]] = neigh
                     weights[ids[i]] = [self.dmat[(
                         i, j)] ** self.alpha for j in ns]
                 except ZeroDivisionError:
                      raise Exception("Cannot compute inverse distance for elements at same location (distance=0).")
     return allneighbors, weights
Example #8
def ip_addresses(conn, interface=None, include_loopback=False):
    """
    Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
    ignored, unless 'include_loopback=True' is indicated. If 'interface' is
    provided, then only IP addresses from that interface will be returned.

    Example output looks like::

        >>> ip_addresses(conn)
        >>> ['192.168.1.111', '10.0.1.12']

    """
    ret = set()
    ifaces = linux_interfaces(conn)
    if interface is None:
        target_ifaces = ifaces
    else:
        target_ifaces = dict((k, v) for k, v in ifaces.items()
                             if k == interface)
        if not target_ifaces:
            LOG.error('Interface {0} not found.'.format(interface))
    for ipv4_info in target_ifaces.values():
        for ipv4 in ipv4_info.get('inet', []):
            loopback = in_subnet('127.0.0.0/8', [ipv4.get('address')]) or ipv4.get('label') == 'lo'
            if not loopback or include_loopback:
                ret.add(ipv4['address'])
        for secondary in ipv4_info.get('secondary', []):
            addr = secondary.get('address')
            if addr and secondary.get('type') == 'inet':
                if include_loopback or not in_subnet('127.0.0.0/8', [addr]):
                    ret.add(addr)
    if ret:
        conn.logger.debug('IP addresses found: %s' % str(list(ret)))
    return sorted(list(ret))
Example #9
def show_level(level, path=[]):
    """ Displays a level via a print statement.

    Args:
        level: The level to be displayed.
        path: A continuous path to be displayed over the level, if provided.

    """
    xs, ys = zip(*(list(level['spaces'].keys()) + list(level['walls'])))
    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)

    path_cells = set(path)

    chars = []
    inverted_waypoints = {point: char for char, point in level['waypoints'].items()}

    for j in range(y_lo, y_hi + 1):
        for i in range(x_lo, x_hi + 1):

            cell = (i, j)
            if cell in path_cells:
                chars.append('*')
            elif cell in level['walls']:
                chars.append('X')
            elif cell in inverted_waypoints:
                chars.append(inverted_waypoints[cell])
            elif cell in level['spaces']:
                chars.append(str(int(level['spaces'][cell])))
            else:
                chars.append(' ')

        chars.append('\n')

    print(''.join(chars))
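A minimal hand-built level to exercise show_level; the structure is inferred from the keys the function reads ('spaces' maps cells to costs, 'walls' is a collection of cells, 'waypoints' maps display characters to cells):

level = {
    'spaces': {(1, 1): 1, (2, 1): 1, (1, 2): 1, (2, 2): 2},
    'walls': {(0, 0), (1, 0), (2, 0), (0, 1), (0, 2)},
    'waypoints': {'a': (1, 1), 'b': (2, 2)},
}
show_level(level, path=[(1, 1), (2, 1)])  # path cells print as '*', walls as 'X'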
Example #10
def dedup_value(body, ctype, action="dedup_value", prop=None):
    '''
    Service that accepts a JSON document and enriches the prop field of that document by:

    a) Removing duplicates
    '''

    try:
        data = json.loads(body)
    except ValueError:
        response.code = 500
        response.add_header('content-type', 'text/plain')
        return "Unable to parse body as JSON"

    if prop:
        for p in prop.split(","):
            if exists(data, p):
                v = getprop(data, p)
                if isinstance(v, list):
                    # Remove whitespace, periods, parens, brackets
                    clone = [re.sub(r"[ \.\(\)\[\]\{\}]", "", s).lower() for s in v]
                    # Keep the first occurrence of each distinct normalized value
                    index = list(set(clone.index(s) for s in set(clone)))
                    setprop(data, p, [v[i] for i in index])

    return json.dumps(data)
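The core of the dedup is a normalize-then-keep-first-occurrence trick, shown standalone below. Note the original leaves index unsorted (set order), so the surviving order is not guaranteed; sorting restores input order:

import re

v = ["New York.", "new york", "Boston", "boston"]
clone = [re.sub(r"[ \.\(\)\[\]\{\}]", "", s).lower() for s in v]
index = sorted(set(clone.index(s) for s in set(clone)))
print([v[i] for i in index])  # ['New York.', 'Boston']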
Example #11
def test_parameter_grid():
    """Test basic properties of ParameterGrid."""
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    assert_true(isinstance(grid1, Iterable))
    assert_true(isinstance(grid1, Sized))
    assert_equal(len(grid1), 3)

    params2 = {"foo": [4, 2],
               "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    assert_equal(len(grid2), 6)

    # loop to assert we can iterate over the grid multiple times
    for i in range(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert_equal(points,
                     set(("bar", x, "foo", y)
                         for x, y in product(params2["bar"], params2["foo"])))

    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert_equal(len(empty), 1)
    assert_equal(list(empty), [{}])

    has_empty = ParameterGrid([{'C': [1, 10]}, {}])
    assert_equal(len(has_empty), 3)
    assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}])
Example #12
def default(pixels):
    # Returns pixels as a list of [R, G, B, cluster assignment] lists,
    # with the cluster assignment defaulting to 'x'.
    return [list(x) + ['x'] for x in pixels]
Example #13
 def parse_parameter_list(self, node):
     parameters = []
     special = []
     argnames = list(node.argnames)
     if node.kwargs:
         special.append(make_parameter(argnames[-1], excess_keyword=1))
         argnames.pop()
     if node.varargs:
         special.append(make_parameter(argnames[-1],
                                       excess_positional=1))
         argnames.pop()
     defaults = list(node.defaults)
     defaults = [None] * (len(argnames) - len(defaults)) + defaults
     function_parameters = self.token_parser.function_parameters(
         node.lineno)
     #print >>sys.stderr, function_parameters
     for argname, default in zip(argnames, defaults):
         if type(argname) is tuple:
             parameter = pynodes.parameter_tuple()
             for tuplearg in argname:
                 parameter.append(make_parameter(tuplearg))
             argname = normalize_parameter_name(argname)
         else:
             parameter = make_parameter(argname)
         if default:
             n_default = pynodes.parameter_default()
             n_default.append(Text(function_parameters[argname]))
             parameter.append(n_default)
         parameters.append(parameter)
     if parameters or special:
         special.reverse()
         parameters.extend(special)
         parameter_list = pynodes.parameter_list()
         parameter_list.extend(parameters)
         self.function.append(parameter_list)
Example #14
def encode_morze(text):
    text_list = list()
    sygnal = list()
    sygnal_final = list()
    morse_code = {" ":" ","A":".-","B":"-...","C":"-.-.","D":"-..","E":".","F":"..-.","G":"--.","H":"....","I":"..","J":".---","K":"-.-","L":".-..","M":"--","N":"-.","O":"---","P":".--.","Q":"--.-","R":".-.","S":"...","T":"-","U":"..-","V":"...-","W":".--","X":"-..-","Y":"-.--","Z":"--.."}
    sygnal_diagram = {" ":"___", ".":"^", "-":"^^^", "*":"___"}
    initial_text_list = list(str(text).upper())
    for i in initial_text_list:
        if i in morse_code:
            text_list.append(morse_code[i])
            if i != ' ':
                text_list.append("*")  # letter separator
    a = list("".join(text_list))
    del a[-1]  # drop the trailing separator
    for i in range(0, len(a) - 1):
        if a[i] == ' ':
            del a[i - 1]  # drop the letter separator before a word gap
    for i in a:
        if i in sygnal_diagram:
            sygnal.append(sygnal_diagram[i])
    for i in range(0, len(sygnal) - 1):
        if sygnal[i + 1] != '___' and sygnal[i] != '___':
            sygnal_final.append(sygnal[i])
            sygnal_final.append('_')  # gap between symbols of one letter
        else:
            sygnal_final.append(sygnal[i])
    sygnal_final.append(sygnal[-1])
    return "".join(sygnal_final)
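Hand-traced usage: dots map to ^, dashes to ^^^, _ separates symbols within a letter, and ___ separates letters and words:

print(encode_morze("ET A"))  # ^___^^^___^_^^^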
Example #15
 def _get_field_values(self):
     values = {}
     vocabularies_resource = get_resource_service('vocabularies')
     values['anpa_category'] = vocabularies_resource.find_one(req=None, _id='categories')['items']
     req = ParsedRequest()
     req.where = json.dumps({'$or': [{"schema_field": "genre"}, {"_id": "genre"}]})
     genre = vocabularies_resource.get(req=req, lookup=None)
     if genre.count():
         values['genre'] = genre[0]['items']
     values['urgency'] = vocabularies_resource.find_one(req=None, _id='urgency')['items']
     values['priority'] = vocabularies_resource.find_one(req=None, _id='priority')['items']
     values['type'] = vocabularies_resource.find_one(req=None, _id='type')['items']
     subject = vocabularies_resource.find_one(req=None, schema_field='subject')
     if subject:
         values['subject'] = subject['items']
     else:
         values['subject'] = get_subjectcodeitems()
     values['desk'] = list(get_resource_service('desks').get(None, {}))
     values['stage'] = self._get_stage_field_values(values['desk'])
     values['sms'] = [{'qcode': 0, 'name': 'False'}, {'qcode': 1, 'name': 'True'}]
     values['embargo'] = [{'qcode': 0, 'name': 'False'}, {'qcode': 1, 'name': 'True'}]
     req = ParsedRequest()
     req.where = json.dumps({'$or': [{"schema_field": "place"}, {"_id": "place"}, {"_id": "locators"}]})
     place = vocabularies_resource.get(req=req, lookup=None)
     if place.count():
         values['place'] = place[0]['items']
     values['ingest_provider'] = list(get_resource_service('ingest_providers').get(None, {}))
     return values
Example #16
def combine(**kwargs):
  """Generate combinations based on its keyword arguments.

  Two sets of returned combinations can be concatenated using +.  Their product
  can be computed using `times()`.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]`
         or `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names.  Each key has one value - one of the
    corresponding keyword argument values.
  """
  if not kwargs:
    return [OrderedDict()]

  sort_by_key = lambda k: k[0][0]
  kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
  first = list(kwargs.items())[0]

  rest = dict(list(kwargs.items())[1:])
  rest_combined = combine(**rest)

  key = first[0]
  values = first[1]
  if not isinstance(values, list):
    values = [values]

  return [
      OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
      for v in values
      for combined in rest_combined
  ]
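Usage as described in the docstring (combine relies on OrderedDict from collections; a scalar is treated as the only possibility):

print(combine(a=[1, 2], b=3))
# [OrderedDict([('a', 1), ('b', 3)]), OrderedDict([('a', 2), ('b', 3)])]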
Example #17
def times(*combined):
  """Generate a product of N sets of combinations.

  times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])

  Args:
    *combined: N lists of dictionaries that specify combinations.

  Returns:
    a list of dictionaries for each combination.

  Raises:
    ValueError: if some of the inputs have overlapping keys.
  """
  assert combined

  if len(combined) == 1:
    return combined[0]

  first = combined[0]
  rest_combined = times(*combined[1:])

  combined_results = []
  for a in first:
    for b in rest_combined:
      if set(a.keys()).intersection(set(b.keys())):
        raise ValueError("Keys need to not overlap: {} vs {}".format(
            a.keys(), b.keys()))

      combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
  return combined_results
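The identity from the docstring, plus the overlapping-keys failure mode:

assert times(combine(a=[1, 2]), combine(b=[3, 4])) == combine(a=[1, 2], b=[3, 4])
times(combine(a=[1]), combine(a=[2]))  # raises ValueError: keys overlap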
Example #18
def save_level_costs(level, costs, filename='distance_map.csv'):
    """ Displays cell costs from an origin point over the given level.

    Args:
        level: The level to be displayed.
        costs: A dictionary containing a mapping of cells to costs from an origin point.
        filename: The name of the csv file to be created.

    """
    xs, ys = zip(*(list(level['spaces'].keys()) + list(level['walls'])))
    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)

    rows = []
    for j in range(y_lo, y_hi + 1):
        row = []

        for i in range(x_lo, x_hi + 1):
            cell = (i, j)
            if cell not in costs:
                row.append(inf)
            else:
                row.append(costs[cell])

        rows.append(row)

    assert '.csv' in filename, 'Error: filename does not contain file type.'
    with open(filename, 'w', newline='') as f:
        csv_writer = writer(f)
        for row in rows:
            csv_writer.writerow(row)

    print("Saved file:", filename)
Example #19
 def test_transport_adapter_ordering(self):
     s = requests.Session()
     order = ["https://", "http://"]
     assert order == list(s.adapters)
     s.mount("http://git", HTTPAdapter())
     s.mount("http://github", HTTPAdapter())
     s.mount("http://github.com", HTTPAdapter())
     s.mount("http://github.com/about/", HTTPAdapter())
     order = ["http://github.com/about/", "http://github.com", "http://github", "http://git", "https://", "http://"]
     assert order == list(s.adapters)
     s.mount("http://gittip", HTTPAdapter())
     s.mount("http://gittip.com", HTTPAdapter())
     s.mount("http://gittip.com/about/", HTTPAdapter())
     order = [
         "http://github.com/about/",
         "http://gittip.com/about/",
         "http://github.com",
         "http://gittip.com",
         "http://github",
         "http://gittip",
         "http://git",
         "https://",
         "http://",
     ]
     assert order == list(s.adapters)
     s2 = requests.Session()
     s2.adapters = {"http://": HTTPAdapter()}
     s2.mount("https://", HTTPAdapter())
     assert "http://" in s2.adapters
     assert "https://" in s2.adapters
Example #20
def sentence_similarity(idx, ob, mode):

    s_list = list()
    pbar = ProgressBar(widgets=['%s: image ' % mode, SimpleProgress()],
                       maxval=len(sentences)).start()

    for im_idx, sentence_group in enumerate(np.array(sentences)[idx, :]):

        pbar.update(im_idx + 1)
        for sent in sentence_group:

            words = analyze(sent)

            sim = list()
            for w in words:

                syn1 = wn.synsets(w)
                syn2 = wn.synsets(ob)

                if syn1 and syn2:
                    sim.append(max(s1.path_similarity(s2) for (s1, s2)
                                   in product(syn1, syn2)))
                else:
                    # ignore word if no synset combination was found on wordnet
                    sim.append(None)

            if max(sim):
                s_list.append(max(sim))
            else:
                # ignore sentence if no word was similar enough
                s_list.append(float('nan'))

    pbar.finish()
    return s_list
Example #21
 def get_analysis_analysisID_dataStage01ResequencingAnalysis(self, analysis_id_I):
     '''Query rows that are used from the analysis'''
     try:
         data = self.session.query(data_stage01_resequencing_analysis).filter(
                 data_stage01_resequencing_analysis.analysis_id.like(analysis_id_I),
                 data_stage01_resequencing_analysis.used_.is_(True)).all()
         analysis_id_O = []
         experiment_id_O = []
         lineage_name_O = []
         sample_name_O = []
         analysis_type_O = []
         analysis_O = {}
         if data:
             for d in data:
                 analysis_id_O.append(d.analysis_id)
                 experiment_id_O.append(d.experiment_id)
                 lineage_name_O.append(d.lineage_name)
                 sample_name_O.append(d.sample_name)
                 analysis_type_O.append(d.analysis_type)
             analysis_id_O = list(set(analysis_id_O))
             experiment_id_O = list(set(experiment_id_O))
             lineage_name_O = list(set(lineage_name_O))
             sample_name_O = list(set(sample_name_O))
             analysis_type_O = list(set(analysis_type_O))
             analysis_O = {
                     'analysis_id': analysis_id_O,
                     'experiment_id': experiment_id_O,
                     'lineage_name': lineage_name_O,
                     'sample_name': sample_name_O,
                     'analysis_type': analysis_type_O}
         return analysis_O
     except SQLAlchemyError as e:
         print(e)
Example #22
 def clean(self, value):
     cleaned_data = []
     errors = []
     if not any(value) and self.required:
         raise ValidationError(self.error_messages['required'])
     max_size = max(self.size, len(value))
     for index in range(max_size):
         item = value[index]
         try:
             cleaned_data.append(self.base_field.clean(item))
         except ValidationError as error:
             errors.append(prefix_validation_error(
                 error,
                 self.error_messages['item_invalid'],
                 code='item_invalid',
                 params={'nth': index},
             ))
             cleaned_data.append(None)
         else:
             errors.append(None)
     if self.remove_trailing_nulls:
         null_index = None
         for i, value in reversed(list(enumerate(cleaned_data))):
             if value in self.base_field.empty_values:
                 null_index = i
             else:
                 break
         if null_index:
             cleaned_data = cleaned_data[:null_index]
             errors = errors[:null_index]
     errors = list(filter(None, errors))
     if errors:
         raise ValidationError(list(chain.from_iterable(errors)))
     return cleaned_data
Example #23
def getRecommendedItems(prefs, itemMatch, user):
  userRatings=prefs[user]
  scores={}
  totalSim={}
  # Loop over items rated by this user
  for (item,rating) in list(userRatings.items()):

    # Loop over items similar to this one
    for (similarity,item2) in itemMatch[item]:

      # Ignore if this user has already rated this item
      if item2 in userRatings: continue
      # Weighted sum of rating times similarity
      scores.setdefault(item2,0)
      scores[item2]+=similarity*rating
      # Sum of all the similarities
      totalSim.setdefault(item2,0)
      totalSim[item2]+=similarity

  # Divide each total score by total weighting to get an average
  rankings=[(score/totalSim[item],item) for item,score in list(scores.items())]

  # Return the rankings from highest to lowest
  rankings.sort()
  rankings.reverse()
  return rankings
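A small worked example: itemMatch maps each item to (similarity, other_item) pairs, and an unrated item's score is the similarity-weighted average of the user's ratings (the data here is made up):

prefs = {'alice': {'film1': 4.0, 'film2': 1.0}}
itemMatch = {
    'film1': [(0.9, 'film3'), (0.2, 'film2')],
    'film2': [(0.2, 'film1'), (0.1, 'film3')],
}
print(getRecommendedItems(prefs, itemMatch, 'alice'))
# [(3.7, 'film3')]  i.e. (0.9*4.0 + 0.1*1.0) / (0.9 + 0.1)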
Example #24
def GetMapPickerItems(domain, root_path):
  """Fetches the list of maps to show in the map picker menu for a given domain.

  Args:
    domain: A string, the domain whose catalog to fetch.
    root_path: The relative path to the Crisis Map site root.

  Returns:
    A list of {'title': ..., 'url': ...} dictionaries describing menu items
    corresponding to the CatalogEntry entities for the specified domain.
  """
  map_picker_items = []

  # Add menu items for the CatalogEntry entities that are marked 'listed'.
  if domain:
    if domain == config.Get('primary_domain'):
      map_picker_items = [
          {'title': entry.title, 'url': root_path + '/' + entry.label}
          for entry in list(model.CatalogEntry.GetListed(domain))]
    else:
      map_picker_items = [
          {'title': entry.title,
           'url': root_path + '/%s/%s' % (entry.domain, entry.label)}
          for entry in list(model.CatalogEntry.GetListed(domain))]

  # Return all the menu items sorted by title.
  return sorted(map_picker_items, key=lambda m: m['title'])
Example #25
def test_GeneratePurePlugMask_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(deprecated='1.0.0', nohash=True, usedefault=True),
        inputImageModalities=dict(argstr='--inputImageModalities %s...'),
        numberOfSubSamples=dict(argstr='--numberOfSubSamples %s', sep=','),
        outputMaskFile=dict(argstr='--outputMaskFile %s', hash_files=False),
        terminal_output=dict(deprecated='1.0.0', nohash=True),
        threshold=dict(argstr='--threshold %f'),
    )
    inputs = GeneratePurePlugMask.input_spec()

    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
Example #26
def solve(par):
    C, combine, D, opposite, N, S = par
    comb = {}
    for c in combine:
        x = list(c)[:2]
        comb[tuple(x)] = c[2]
        x.reverse()
        comb[tuple(x)] = c[2]
    oppo = defaultdict(list)
    for o in opposite:
        oppo[o[0]].append(o[1])
        oppo[o[1]].append(o[0])
    
    result = []
    for s in list(S):
        if len(result) > 0 and (result[-1], s) in comb:
            c = result[-1]
            result.pop()
            result.append(comb[(c, s)])
            continue
        
        flag = True
        if s in oppo:
            for x in oppo[s]:
                if x in result:
                    result = []
                    flag = False
                    break
        if flag:
            result.append(s)
            
    return '[' + ', '.join(result) + ']'
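par packs (C, combine, D, opposite, N, S), which matches the Code Jam "Magicka" problem; solve itself assumes defaultdict from collections is imported. Traced by hand: Q and R combine into I, and Q opposes F, so invoking E, Q, R, F leaves:

print(solve((1, ['QRI'], 1, ['QF'], 4, 'EQRF')))  # [E, I, F]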
Example #27
def resetTicks(x, y=None):
    """Reset X (and Y) axis ticks using values in given *array*.  Ticks in the
    current figure should not be fractional values for this function to work as
    expected."""

    import matplotlib.pyplot as plt
    if x is not None:
        try:
            xticks = plt.xticks()[0]
            xlist = list(xticks.astype(int))
            if xlist[-1] > len(x):
                xlist.pop()
            if xlist:
                xlist = list(x[xlist])
                plt.xticks(xticks, xlist + [''] * (len(xticks) - len(xlist)))
        except:
            LOGGER.warning('xticks could not be reset.')
    if y is not None:
        try:
            yticks = plt.yticks()[0]
            ylist = list(yticks.astype(int))
            if ylist[-1] > len(y):
                ylist.pop()
            if ylist:
                ylist = list(y[ylist])
                plt.yticks(yticks, ylist + [''] * (len(yticks) - len(ylist)))
        except:
            LOGGER.warning('yticks could not be reset.')
Example #28
 def testResolveBindings(self):
   qry, unused_options, bindings = query.parse_gql(
     'SELECT * FROM Foo WHERE name = :1')
   bindings[1].value = 'joe'
   self.assertEqual(list(qry), [self.joe])
   bindings[1].value = 'jill'
   self.assertEqual(list(qry), [self.jill])
Example #29
def lstm(trainData, trainMark, testData, embedding_dim, embedding_matrix, maxlen, output_len):
    # Pad the data so that every sequence has the same length.
    # pad_sequences returns a numpy array, truncating long sequences and
    # zero-padding short ones; index 0 also maps to value 0 below, so this is safe.
    trainData = list(sequence.pad_sequences(trainData, maxlen=maxlen, dtype='float64'))
    testData = list(sequence.pad_sequences(testData, maxlen=maxlen, dtype='float64'))

    # Build the LSTM network. Sequential is a linear stack of layers: pass a
    # list of layers to the constructor, or add them one by one with .add().
    model = Sequential()
    # model.add(Dense(256, input_shape=(train_total_vova_len,)))  # fully connected input layer
    # Input layer: turns high-dimensional one-hot vectors into a low-dimensional
    # embedding. The first argument is the maximum input index + 1, the second
    # is the embedding dimension.
    model.add(Embedding(len(embedding_matrix), embedding_dim, weights=[embedding_matrix],
                        mask_zero=False, input_length=maxlen))
    # The LSTM layer, the core of the model; 256 matches the Embedding output dimension.
    model.add(LSTM(256))
    # Randomly drop connections on each parameter update, to limit overfitting.
    model.add(Dropout(0.5))
    # Fully connected output layer with output_len units.
    model.add(Dense(output_len))
    model.add(Activation('softmax'))  # softmax activation on the output
    # Compile the model with categorical cross-entropy (log loss) and the SGD optimizer.
    model.compile(loss='categorical_crossentropy', optimizer='sgd')

    # Train the model. Without the zero-padding above, the arrays would have
    # different lengths and fitting would fail.
    X = np.array(list(trainData))  # input data
    print("X:", X)
    Y = np.array(list(trainMark))  # labels
    print("Y:", Y)
    # batch_size: samples per gradient update; nb_epoch: number of passes over the data.
    model.fit(X, Y, batch_size=200, nb_epoch=10)

    # Predict on the test data.
    A = np.array(list(testData))  # input data
    print("A:", A)
    classes = model.predict(A)  # the predicted classes
    return classes
Example #30
 def check(self, fix=False, silent=False):
     """Checks a grid for errors, and optionally fixes them.  Errors checked for are:
     - blocks not connected to any other blocks
     - blocks with isolated rocktypes
     Returns True if no errors were found, and False otherwise.  If silent is True, there is no printout.
     Unconnected blocks are fixed by deleting them.  Isolated rocktype blocks are fixed by assigning them the
     most popular rocktype of their neighbours."""
     ok = True
     ub = self.unconnected_blocks
     if len(ub) > 0:
         ok = False
         if not silent:
             print "Unconnected blocks:", list(ub)
         if fix:
             for blk in ub:
                 self.delete_block(blk)
             if not silent:
                 print "Unconnected blocks fixed."
     ib = self.isolated_rocktype_blocks
     if len(ib) > 0:
         ok = False
         if not silent:
             print "Isolated rocktype blocks:", list(ib)
         if fix:
             for blk in ib:
                 nbr_rocktype = [self.block[nbr].rocktype.name for nbr in self.block[blk].neighbour_name]
                 pop_rocktype = max(set(nbr_rocktype), key=nbr_rocktype.count)
                 self.block[blk].rocktype = self.rocktype[pop_rocktype]
             if not silent:
                 print "Isolated rocktype blocks fixed."
     if ok and not silent:
         print "No problems found."
     return ok