Example 1
4
File: eepy.py Project: yuka2py/eepy
    def block(blockname="content"):
        """ with 句で囲んだ範囲をブロックとして登録する。
        既に保存されたブロックがある時、ブロックの内容を保存されたブロックで置き換えます。
        保存されたブロックが無い時、ブロックの内容をそのまま出力し、ブロックを保存します。
        このヘルパは通常、extends と組み合わせて使用します。
        args:
            blockname: ブロックの名前
        """
        locals = buffer_frame_locals()
        locals.setdefault("__blocks", {})

        # If this block name was already saved, output the saved content and discard what is captured here
        if blockname in locals["__blocks"]:
            buffer, locals["__buffer"] = locals["__buffer"], list()
            yield
            locals["__buffer"] = buffer
            locals["__buffer"].append(locals["__blocks"][blockname])  # 仕様:利用後も削除せずに残しておく

        # If no saved block exists, save the captured result as the block and output it as well
        else:
            buffer, locals["__buffer"] = locals["__buffer"], list()
            yield
            captured = u"".join(locals["__buffer"])
            locals["__buffer"] = buffer
            locals["__buffer"].append(captured)
            locals["__blocks"][blockname] = captured
Example 2
1
 def _distance_to_W(self, ids=None):
     allneighbors = {}
     weights = {}
     if ids:
         ids = np.array(ids)
     else:
         ids = np.arange(len(self._nmat))
     if self.binary:
         for i, neighbors in enumerate(self._nmat):
             ns = [ni for ni in neighbors if ni != i]
             neigh = list(ids[ns])
             if len(neigh) == 0:
                 allneighbors[ids[i]] = []
                 weights[ids[i]] = []
             else:
                 allneighbors[ids[i]] = neigh
                 weights[ids[i]] = [1] * len(ns)
     else:
         self.dmat = self.kd.sparse_distance_matrix(
             self.kd, max_distance=self.threshold)
         for i, neighbors in enumerate(self._nmat):
             ns = [ni for ni in neighbors if ni != i]
             neigh = list(ids[ns])
             if len(neigh) == 0:
                 allneighbors[ids[i]] = []
                 weights[ids[i]] = []
             else:
                 try:
                     allneighbors[ids[i]] = neigh
                     weights[ids[i]] = [self.dmat[(
                         i, j)] ** self.alpha for j in ns]
                  except ZeroDivisionError:
                      raise Exception(
                          "Cannot compute inverse distance for elements at same location (distance=0).")
     return allneighbors, weights
 def check(self, fix=False, silent=False):
     """Checks a grid for errors, and optionally fixes them.  Errors checked for are:
     - blocks not connected to any other blocks
     - blocks with isolated rocktypes
     Returns True if no errors were found, and False otherwise.  If silent is True, there is no printout.
     Unconnected blocks are fixed by deleting them.  Isolated rocktype blocks are fixed by assigning them the
     most popular rocktype of their neighbours."""
     ok = True
     ub = self.unconnected_blocks
     if len(ub) > 0:
         ok = False
         if not silent:
             print "Unconnected blocks:", list(ub)
         if fix:
             for blk in ub:
                 self.delete_block(blk)
             if not silent:
                 print "Unconnected blocks fixed."
     ib = self.isolated_rocktype_blocks
     if len(ib) > 0:
         ok = False
         if not silent:
             print "Isolated rocktype blocks:", list(ib)
         if fix:
             for blk in ib:
                 nbr_rocktype = [self.block[nbr].rocktype.name for nbr in self.block[blk].neighbour_name]
                 pop_rocktype = max(set(nbr_rocktype), key=nbr_rocktype.count)
                 self.block[blk].rocktype = self.rocktype[pop_rocktype]
             if not silent:
                 print "Isolated rocktype blocks fixed."
     if ok and not silent:
         print "No problems found."
     return ok
Example 4
0
def lstm(trainData, trainMark, testData, embedding_dim, embedding_matrix, maxlen, output_len):
    # Pad the data so every sequence has the same length
    trainData = list(sequence.pad_sequences(trainData, maxlen=maxlen,
                                            dtype='float64'))  # pad_sequences truncates longer sequences and zero-pads shorter ones to maxlen, returning a numpy array; index 0 maps to value 0, so zero-padding is safe here
    testData = list(sequence.pad_sequences(testData, maxlen=maxlen,
                                           dtype='float64'))  # same padding/truncation for the test data

    # Build the LSTM network model
    model = Sequential()  # a linear stack of layers; pass a list of layers to the constructor or add them one by one with .add()
    # model.add(Dense(256, input_shape=(train_total_vova_len,)))   # alternative: fully connected input layer
    model.add(Embedding(len(embedding_matrix), embedding_dim, weights=[embedding_matrix], mask_zero=False,
                        input_length=maxlen))  # input layer: maps one-hot indices to a dense embedding; first argument is the largest input index + 1, second is the embedding dimension
    # the LSTM layer, the core of the model
    model.add(LSTM(256))  # 256 hidden units; the input dimension is inferred from the Embedding output
    model.add(Dropout(0.5))  # randomly drop connections during parameter updates to reduce overfitting
    model.add(Dense(output_len))  # fully connected output layer; its input dimension is inferred from the LSTM layer
    model.add(Activation('softmax'))  # softmax activation on the output
    # Compile the model: categorical_crossentropy (log loss) with the SGD optimizer
    model.compile(loss='categorical_crossentropy', optimizer='sgd')

    # Run the model; the padding above is required, otherwise the arrays have different lengths and fitting fails
    X = np.array(list(trainData))  # input data
    print("X:", X)
    Y = np.array(list(trainMark))  # labels
    print("Y:", Y)
    # batch_size: number of samples per gradient-descent batch
    # nb_epoch: number of training epochs (the training data is iterated nb_epoch times)
    model.fit(X, Y, batch_size=200, nb_epoch=10)  # X and Y can be single numpy arrays or, for multiple inputs, lists of numpy arrays

    # Predict
    A = np.array(list(testData))  # input data
    print("A:", A)
    classes = model.predict(A)  # predicted values
    return classes
Example 5
0
    def __init__(self, name, args=[], opts=[], **kwargs):
        """
        Base class for POV objects.

        @param name: POV object name
        @param args: compulsory (comma separated?) pov args XX commas don't seem to matter?
        @param opts: eg. CSG items
        @param kwargs: key value pairs
        """
        #print "Item",name,args,opts,kwargs
        self.name = name

        args = list(args)
        for i in range(len(args)):
            args[i] = map_arg(args[i])
        self.args = flatten(args)

        opts = list(opts)
        for i in range(len(opts)):
            opts[i] = map_arg(opts[i])
        self.opts = flatten(opts)

        self.kwargs = dict(kwargs)  # take a copy
        for key, val in self.kwargs.items():
            if type(val) == tuple or type(val) == list:
                self.kwargs[key] = map_arg(val)
Example 6
0
def resetTicks(x, y=None):
    """Reset X (and Y) axis ticks using values in given *array*.  Ticks in the
    current figure should not be fractional values for this function to work as
    expected."""

    import matplotlib.pyplot as plt
    if x is not None:
        try:
            xticks = plt.xticks()[0]
            xlist = list(xticks.astype(int))
            if xlist[-1] > len(x):
                xlist.pop()
            if xlist:
                xlist = list(x[xlist])
                plt.xticks(xticks, xlist + [''] * (len(xticks) - len(xlist)))
        except:
            LOGGER.warning('xticks could not be reset.')
    if y is not None:
        try:
            yticks = plt.yticks()[0]
            ylist = list(yticks.astype(int))
            if ylist[-1] > len(y):
                ylist.pop()
            if ylist:
                ylist = list(y[ylist])
                plt.yticks(yticks, ylist + [''] * (len(yticks) - len(ylist)))
        except:
            LOGGER.warning('yticks could not be reset.')
Example 7
0
    def jar(self):
        """Performs the 'jar' command."""
        class_name = getattr(self.flags, "class")
        if (class_name is None) and (len(self.args) > 0):
            class_name = self.pop_args_head()
        assert (class_name is not None), ("No class name specified with [--class=]<class>.")

        lib_jars = []
        if self.flags.jars is not None:
            lib_jars.extend(self.flags.jars)
        classpath = list(self.express.get_classpath(lib_jars=lib_jars))

        java_opts = []
        if self.flags.java_opts is not None:
            java_opts = [self.flags.java_opts]

        user_args = list(self.args)
        logging.info("Running java class %r with parameters: %r", class_name, user_args)

        cmd = [
            "java",
            # This property is only needed in kiji-schema v1.1 :
            "-Dorg.kiji.schema.impl.AvroCellEncoder.SCHEMA_VALIDATION=DISABLED",
        ] + java_opts + [
            "-classpath", ":".join(classpath),
            class_name,
        ] + user_args

        logging.debug("Running command:\n%s\n", " \\\n\t".join(map(repr, cmd)))
        return subprocess.call(cmd)
def test_GeneratePurePlugMask_inputs():
    input_map = dict(
        args=dict(argstr='%s'),
        environ=dict(nohash=True, usedefault=True),
        ignore_exception=dict(deprecated='1.0.0', nohash=True, usedefault=True),
        inputImageModalities=dict(argstr='--inputImageModalities %s...'),
        numberOfSubSamples=dict(argstr='--numberOfSubSamples %s', sep=','),
        outputMaskFile=dict(argstr='--outputMaskFile %s', hash_files=False),
        terminal_output=dict(deprecated='1.0.0', nohash=True),
        threshold=dict(argstr='--threshold %f'),
    )
    inputs = GeneratePurePlugMask.input_spec()

    for key, metadata in list(input_map.items()):
        for metakey, value in list(metadata.items()):
            assert getattr(inputs.traits()[key], metakey) == value
Example 9
0
    def _get_router_ids_for_agent(self, context, agent_db, router_ids):
        result_set = set(super(L3_DVRsch_db_mixin,
                            self)._get_router_ids_for_agent(
            context, agent_db, router_ids))
        router_ids = set(router_ids or [])
        if router_ids and result_set == router_ids:
            # no need for extra dvr checks if requested routers are
            # explicitly scheduled to the agent
            return list(result_set)

        # dvr routers are not explicitly scheduled to agents on hosts with
        # dvr serviceable ports, so need special handling
        if (self._get_agent_mode(agent_db) in
            [n_const.L3_AGENT_MODE_DVR,
             n_const.L3_AGENT_MODE_DVR_NO_EXTERNAL,
             n_const.L3_AGENT_MODE_DVR_SNAT]):
            if not router_ids:
                result_set |= set(self._get_dvr_router_ids_for_host(
                    context, agent_db['host']))
            else:
                for router_id in (router_ids - result_set):
                    subnet_ids = self.get_subnet_ids_on_router(
                        context, router_id)
                    if (subnet_ids and
                            self._check_dvr_serviceable_ports_on_host(
                                    context, agent_db['host'],
                                    list(subnet_ids))):
                        result_set.add(router_id)

        return list(result_set)
 def _get_field_values(self):
     values = {}
     vocabularies_resource = get_resource_service('vocabularies')
     values['anpa_category'] = vocabularies_resource.find_one(req=None, _id='categories')['items']
     req = ParsedRequest()
     req.where = json.dumps({'$or': [{"schema_field": "genre"}, {"_id": "genre"}]})
     genre = vocabularies_resource.get(req=req, lookup=None)
     if genre.count():
         values['genre'] = genre[0]['items']
     values['urgency'] = vocabularies_resource.find_one(req=None, _id='urgency')['items']
     values['priority'] = vocabularies_resource.find_one(req=None, _id='priority')['items']
     values['type'] = vocabularies_resource.find_one(req=None, _id='type')['items']
     subject = vocabularies_resource.find_one(req=None, schema_field='subject')
     if subject:
         values['subject'] = subject['items']
     else:
         values['subject'] = get_subjectcodeitems()
     values['desk'] = list(get_resource_service('desks').get(None, {}))
     values['stage'] = self._get_stage_field_values(values['desk'])
     values['sms'] = [{'qcode': 0, 'name': 'False'}, {'qcode': 1, 'name': 'True'}]
     values['embargo'] = [{'qcode': 0, 'name': 'False'}, {'qcode': 1, 'name': 'True'}]
     req = ParsedRequest()
     req.where = json.dumps({'$or': [{"schema_field": "place"}, {"_id": "place"}, {"_id": "locators"}]})
     place = vocabularies_resource.get(req=req, lookup=None)
     if place.count():
         values['place'] = place[0]['items']
     values['ingest_provider'] = list(get_resource_service('ingest_providers').get(None, {}))
     return values
Example 11
0
 def test_combined_non_existing_field_desc(self):
     all_results = list(self.lib.items(u'id+'))
     q = u'foo- id+'
     results = list(self.lib.items(q))
     self.assertEqual(len(all_results), len(results))
     for r1, r2 in zip(all_results, results):
         self.assertEqual(r1.id, r2.id)
Example 12
0
 def get_analysis_analysisID_dataStage01ResequencingAnalysis(self,analysis_id_I):
     '''Query rows that are used from the analysis'''
     try:
         data = self.session.query(data_stage01_resequencing_analysis).filter(
                 data_stage01_resequencing_analysis.analysis_id.like(analysis_id_I),
                 data_stage01_resequencing_analysis.used_.is_(True)).all()
         analysis_id_O = []
         experiment_id_O = []
         lineage_name_O = []
         sample_name_O = []
         analysis_type_O = []
         analysis_O = {}
         if data:
             for d in data:
                 analysis_id_O.append(d.analysis_id)
                 experiment_id_O.append(d.experiment_id)
                 lineage_name_O.append(d.lineage_name)
                 sample_name_O.append(d.sample_name)
                 analysis_type_O.append(d.analysis_type)
             analysis_id_O = list(set(analysis_id_O))
             experiment_id_O = list(set(experiment_id_O))
             lineage_name_O = list(set(lineage_name_O))
             sample_name_O = list(set(sample_name_O))
             analysis_type_O = list(set(analysis_type_O))
             analysis_O = {
                     'analysis_id': analysis_id_O,
                     'experiment_id': experiment_id_O,
                     'lineage_name': lineage_name_O,
                     'sample_name': sample_name_O,
                     'analysis_type': analysis_type_O}

         return analysis_O
     except SQLAlchemyError as e:
         print(e)
Example 13
0
    def test_invalid_final_lookup(self):
        qs = Book.objects.prefetch_related('authors__name')
        with self.assertRaises(ValueError) as cm:
            list(qs)

        self.assertTrue('prefetch_related' in str(cm.exception))
        self.assertTrue("name" in str(cm.exception))
Example 14
0
 def test_transport_adapter_ordering(self):
     s = requests.Session()
     order = ["https://", "http://"]
     assert order == list(s.adapters)
     s.mount("http://git", HTTPAdapter())
     s.mount("http://github", HTTPAdapter())
     s.mount("http://github.com", HTTPAdapter())
     s.mount("http://github.com/about/", HTTPAdapter())
     order = ["http://github.com/about/", "http://github.com", "http://github", "http://git", "https://", "http://"]
     assert order == list(s.adapters)
     s.mount("http://gittip", HTTPAdapter())
     s.mount("http://gittip.com", HTTPAdapter())
     s.mount("http://gittip.com/about/", HTTPAdapter())
     order = [
         "http://github.com/about/",
         "http://gittip.com/about/",
         "http://github.com",
         "http://gittip.com",
         "http://github",
         "http://gittip",
         "http://git",
         "https://",
         "http://",
     ]
     assert order == list(s.adapters)
     s2 = requests.Session()
     s2.adapters = {"http://": HTTPAdapter()}
     s2.mount("https://", HTTPAdapter())
     assert "http://" in s2.adapters
     assert "https://" in s2.adapters
Example 15
0
 def testResolveBindings(self):
   qry, unused_options, bindings = query.parse_gql(
     'SELECT * FROM Foo WHERE name = :1')
   bindings[1].value = 'joe'
   self.assertEqual(list(qry), [self.joe])
   bindings[1].value = 'jill'
   self.assertEqual(list(qry), [self.jill])
Example 16
0
def times(*combined):
  """Generate a product of N sets of combinations.

  times(combine(a=[1,2]), combine(b=[3,4])) == combine(a=[1,2], b=[3,4])

  Args:
    *combined: N lists of dictionaries that specify combinations.

  Returns:
    a list of dictionaries for each combination.

  Raises:
    ValueError: if some of the inputs have overlapping keys.
  """
  assert combined

  if len(combined) == 1:
    return combined[0]

  first = combined[0]
  rest_combined = times(*combined[1:])

  combined_results = []
  for a in first:
    for b in rest_combined:
      if set(a.keys()).intersection(set(b.keys())):
        raise ValueError("Keys need to not overlap: {} vs {}".format(
            a.keys(), b.keys()))

      combined_results.append(OrderedDict(list(a.items()) + list(b.items())))
  return combined_results
Example 17
0
def combine(**kwargs):
  """Generate combinations based on its keyword arguments.

  Two sets of returned combinations can be concatenated using +.  Their product
  can be computed using `times()`.

  Args:
    **kwargs: keyword arguments of form `option=[possibilities, ...]`
         or `option=the_only_possibility`.

  Returns:
    a list of dictionaries for each combination. Keys in the dictionaries are
    the keyword argument names.  Each key has one value - one of the
    corresponding keyword argument values.
  """
  if not kwargs:
    return [OrderedDict()]

  sort_by_key = lambda k: k[0][0]
  kwargs = OrderedDict(sorted(kwargs.items(), key=sort_by_key))
  first = list(kwargs.items())[0]

  rest = dict(list(kwargs.items())[1:])
  rest_combined = combine(**rest)

  key = first[0]
  values = first[1]
  if not isinstance(values, list):
    values = [values]

  return [
      OrderedDict(sorted(list(combined.items()) + [(key, v)], key=sort_by_key))
      for v in values
      for combined in rest_combined
  ]
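For reference, a rough sketch of the expansion that combine() and times() perform, written with itertools.product; the exact ordering of the actual combine()/times() output may differ from this sketch.

from collections import OrderedDict
from itertools import product

options = OrderedDict([("a", [1, 2]), ("b", [3, 4])])
expanded = [OrderedDict(zip(options, values)) for values in product(*options.values())]
print(expanded)
# [OrderedDict([('a', 1), ('b', 3)]), OrderedDict([('a', 1), ('b', 4)]),
#  OrderedDict([('a', 2), ('b', 3)]), OrderedDict([('a', 2), ('b', 4)])]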
Example 18
0
def GetMapPickerItems(domain, root_path):
  """Fetches the list of maps to show in the map picker menu for a given domain.

  Args:
    domain: A string, the domain whose catalog to fetch.
    root_path: The relative path to the Crisis Map site root.

  Returns:
    A list of {'title': ..., 'url': ...} dictionaries describing menu items
    corresponding to the CatalogEntry entities for the specified domain.
  """
  map_picker_items = []

  # Add menu items for the CatalogEntry entities that are marked 'listed'.
  if domain:
    if domain == config.Get('primary_domain'):
      map_picker_items = [
          {'title': entry.title, 'url': root_path + '/' + entry.label}
          for entry in list(model.CatalogEntry.GetListed(domain))]
    else:
      map_picker_items = [
          {'title': entry.title,
           'url': root_path + '/%s/%s' % (entry.domain, entry.label)}
          for entry in list(model.CatalogEntry.GetListed(domain))]

  # Return all the menu items sorted by title.
  return sorted(map_picker_items, key=lambda m: m['title'])
Example 19
0
def solve(par):
    C, combine, D, opposite, N, S = par
    comb = {}
    for c in combine:
        x = list(c)[:2]
        comb[tuple(x)] = c[2]
        x.reverse()
        comb[tuple(x)] = c[2]
    oppo = defaultdict(list)
    for o in opposite:
        oppo[o[0]].append(o[1])
        oppo[o[1]].append(o[0])
    
    result = []
    for s in list(S):
        if len(result) > 0 and (result[-1], s) in comb:
            c = result[-1]
            result.pop()
            result.append(comb[(c, s)])
            continue
        
        flag = True
        if s in oppo:
            for x in oppo[s]:
                if x in result:
                    result = []
                    flag = False
                    break
        if flag:
            result.append(s)
            
    return '[' + ', '.join(result) + ']'
Example 20
0
 def clean(self, value):
     cleaned_data = []
     errors = []
     if not any(value) and self.required:
         raise ValidationError(self.error_messages['required'])
     max_size = max(self.size, len(value))
     for index in range(max_size):
         item = value[index]
         try:
             cleaned_data.append(self.base_field.clean(item))
         except ValidationError as error:
             errors.append(prefix_validation_error(
                 error,
                 self.error_messages['item_invalid'],
                 code='item_invalid',
                 params={'nth': index},
             ))
             cleaned_data.append(None)
         else:
             errors.append(None)
     if self.remove_trailing_nulls:
         null_index = None
         for i, value in reversed(list(enumerate(cleaned_data))):
             if value in self.base_field.empty_values:
                 null_index = i
             else:
                 break
         if null_index is not None:
             cleaned_data = cleaned_data[:null_index]
             errors = errors[:null_index]
     errors = list(filter(None, errors))
     if errors:
         raise ValidationError(list(chain.from_iterable(errors)))
     return cleaned_data
def getRecommendedItems(prefs,itemMatch,user):
  userRatings=prefs[user]
  scores={}
  totalSim={}
  # Loop over items rated by this user
  for (item,rating) in list(userRatings.items()):

    # Loop over items similar to this one
    for (similarity,item2) in itemMatch[item]:

      # Ignore if this user has already rated this item
      if item2 in userRatings: continue
      # Weighted sum of rating times similarity
      scores.setdefault(item2,0)
      scores[item2]+=similarity*rating
      # Sum of all the similarities
      totalSim.setdefault(item2,0)
      totalSim[item2]+=similarity

  # Divide each total score by total weighting to get an average
  rankings=[(score/totalSim[item],item) for item,score in list(scores.items())]

  # Return the rankings from highest to lowest
  rankings.sort()
  rankings.reverse()
  return rankings
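A small usage sketch, assuming getRecommendedItems() above is in scope; the prefs and itemMatch data are made up purely to illustrate the expected shapes.

prefs = {'alice': {'itemA': 4.0, 'itemB': 3.0}}
itemMatch = {
    'itemA': [(0.9, 'itemC'), (0.4, 'itemB')],
    'itemB': [(0.8, 'itemC'), (0.4, 'itemA')],
}
print(getRecommendedItems(prefs, itemMatch, 'alice'))
# -> [(3.52..., 'itemC')]   i.e. (4*0.9 + 3*0.8) / (0.9 + 0.8)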
Example 22
0
def save_level_costs(level, costs, filename='distance_map.csv'):
    """ Displays cell costs from an origin point over the given level.

    Args:
        level: The level to be displayed.
        costs: A dictionary containing a mapping of cells to costs from an origin point.
        filename: The name of the csv file to be created.

    """
    xs, ys = zip(*(list(level['spaces'].keys()) + list(level['walls'])))
    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)

    rows = []
    for j in range(y_lo, y_hi + 1):
        row = []

        for i in range(x_lo, x_hi + 1):
            cell = (i, j)
            if cell not in costs:
                row.append(inf)
            else:
                row.append(costs[cell])

        rows.append(row)

    assert '.csv' in filename, 'Error: filename does not contain file type.'
    with open(filename, 'w', newline='') as f:
        csv_writer = writer(f)
        for row in rows:
            csv_writer.writerow(row)
            
    
    print("Saved file:", filename)
Example 23
0
def dedup_value(body, ctype, action="dedup_value", prop=None):
    '''
    Service that accepts a JSON document and enriches the prop field of that document by:

    a) Removing duplicates
    '''

    if prop:
        try:
            data = json.loads(body)
        except:
            response.code = 500
            response.add_header('content-type', 'text/plain')
            return "Unable to parse body as JSON"

    try:
        data = json.loads(body)
    except:
        response.code = 500
        response.add_header('content-type', 'text/plain')
        return "Unable to parse body as JSON"

    for p in prop.split(","):
        if exists(data, p):
            v = getprop(data, p)
            if isinstance(v, list):
                # Remove whitespace, periods, parens, brackets
                clone = [re.sub("[ \.\(\)\[\]\{\}]", "", s).lower() for s in v]
                # Get index of unique values
                index = list(set([clone.index(s) for s in list(set(clone))]))
            
                setprop(data, p, [v[i] for i in index])

    return json.dumps(data)
def default (pixels):
    
    # Returns pixels list of tuples (R,G,B,Cluster Assignment('x' by default))
  
    pixels = [list(x)+ list(('x',)) for x in pixels]
    
    return pixels
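A quick usage sketch, assuming default() above is in scope; the pixel tuples are made up.

pixels = [(255, 0, 0), (0, 255, 0)]
print(default(pixels))   # -> [[255, 0, 0, 'x'], [0, 255, 0, 'x']]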
def test_parameter_grid():
    """Test basic properties of ParameterGrid."""
    params1 = {"foo": [1, 2, 3]}
    grid1 = ParameterGrid(params1)
    assert_true(isinstance(grid1, Iterable))
    assert_true(isinstance(grid1, Sized))
    assert_equal(len(grid1), 3)

    params2 = {"foo": [4, 2],
               "bar": ["ham", "spam", "eggs"]}
    grid2 = ParameterGrid(params2)
    assert_equal(len(grid2), 6)

    # loop to assert we can iterate over the grid multiple times
    for i in xrange(2):
        # tuple + chain transforms {"a": 1, "b": 2} to ("a", 1, "b", 2)
        points = set(tuple(chain(*(sorted(p.items())))) for p in grid2)
        assert_equal(points,
                     set(("bar", x, "foo", y)
                         for x, y in product(params2["bar"], params2["foo"])))

    # Special case: empty grid (useful to get default estimator settings)
    empty = ParameterGrid({})
    assert_equal(len(empty), 1)
    assert_equal(list(empty), [{}])

    has_empty = ParameterGrid([{'C': [1, 10]}, {}])
    assert_equal(len(has_empty), 3)
    assert_equal(list(has_empty), [{'C': 1}, {'C': 10}, {}])
Example 26
0
def encode_morze(text):
	text_list = list()
	sygnal = list()
	sygnal_final = list()
	morse_code = {" ":" ","A":".-","B":"-...","C":"-.-.","D":"-..","E":".","F":"..-.","G":"--.","H":"....","I":"..","J":".---","K":"-.-","L":".-..","M":"--","N":"-.","O":"---","P":".--.","Q":"--.-","R":".-.","S":"...","T":"-","U":"..-","V":"...-","W":".--","X":"-..-","Y":"-.--","Z":"--.."}
	sygnal_diagram = {" ":"___", ".":"^", "-":"^^^", "*":"___"}
	initial_text_list = list((str(text)).upper())
	for i in initial_text_list:
		if i in morse_code:
			text_list.insert((len(text_list)+1),morse_code[i])
			if i != ' ':
				text_list.insert((len(text_list)+2),"*")
	a = "".join(text_list)
	a = list(a)
	del a[len(a)-1]
	for i in range(0,len(a)-1):
		if a[i] == ' ':
			del a[i-1]
	for i in a:
		if i in sygnal_diagram:
			sygnal.insert((len(sygnal)+1),sygnal_diagram[i])
	for i in range(0,len(sygnal)-1):
		if sygnal[i+1] != '___' and sygnal[i] != '___':
			sygnal_final.insert((len(sygnal_final)+1),sygnal[i])
			sygnal_final.insert((len(sygnal_final)+1),'_')
		elif sygnal[i+1] == '___' or sygnal[i] == '___':
			sygnal_final.insert((len(sygnal_final)+1),sygnal[i])
		elif i == len(sygnal[i])-1:
			sygnal_final.insert((len(sygnal_final)+1),sygnal[i])
	sygnal_final.insert((len(sygnal_final)+1),sygnal[len(sygnal)-1])
	sygnal_final = "".join(sygnal_final)
	return sygnal_final
 def parse_parameter_list(self, node):
     parameters = []
     special = []
     argnames = list(node.argnames)
     if node.kwargs:
         special.append(make_parameter(argnames[-1], excess_keyword=1))
         argnames.pop()
     if node.varargs:
         special.append(make_parameter(argnames[-1],
                                       excess_positional=1))
         argnames.pop()
     defaults = list(node.defaults)
     defaults = [None] * (len(argnames) - len(defaults)) + defaults
     function_parameters = self.token_parser.function_parameters(
         node.lineno)
     #print >>sys.stderr, function_parameters
     for argname, default in zip(argnames, defaults):
         if type(argname) is tuple:
             parameter = pynodes.parameter_tuple()
             for tuplearg in argname:
                 parameter.append(make_parameter(tuplearg))
             argname = normalize_parameter_name(argname)
         else:
             parameter = make_parameter(argname)
         if default:
             n_default = pynodes.parameter_default()
             n_default.append(Text(function_parameters[argname]))
             parameter.append(n_default)
         parameters.append(parameter)
     if parameters or special:
         special.reverse()
         parameters.extend(special)
         parameter_list = pynodes.parameter_list()
         parameter_list.extend(parameters)
         self.function.append(parameter_list)
Example 28
0
def show_level(level, path=[]):
    """ Displays a level via a print statement.

    Args:
        level: The level to be displayed.
        path: A continuous path to be displayed over the level, if provided.

    """
    xs, ys = zip(*(list(level['spaces'].keys()) + list(level['walls'])))
    x_lo, x_hi = min(xs), max(xs)
    y_lo, y_hi = min(ys), max(ys)

    path_cells = set(path)

    chars = []
    inverted_waypoints = {point: char for char, point in level['waypoints'].items()}

    for j in range(y_lo, y_hi + 1):
        for i in range(x_lo, x_hi + 1):

            cell = (i, j)
            if cell in path_cells:
                chars.append('*')
            elif cell in level['walls']:
                chars.append('X')
            elif cell in inverted_waypoints:
                chars.append(inverted_waypoints[cell])
            elif cell in level['spaces']:
                chars.append(str(int(level['spaces'][cell])))
            else:
                chars.append(' ')

        chars.append('\n')

    print(''.join(chars))
def sentence_similarity(idx, ob, mode):

    s_list = list()
    pbar = ProgressBar(widgets=['%s: image ' % mode, SimpleProgress()],
                       maxval=len(sentences)).start()

    for im_idx, sentence_group in enumerate(np.array(sentences)[idx, :]):

        pbar.update(im_idx + 1)
        for sent in sentence_group:

            words = analyze(sent)

            sim = list()
            for w in words:

                syn1 = wn.synsets(w)
                syn2 = wn.synsets(ob)

                if syn1 and syn2:
                    sim.append(max(s1.path_similarity(s2) for (s1, s2)
                                   in product(syn1, syn2)))
                else:
                    # ignore word if no synset combination was found on wordnet
                    sim.append(None)

            if max(sim):
                s_list.append(max(sim))
            else:
                # ignore sentence if no word was similar enough
                s_list.append(float('nan'))

    pbar.finish()
    return s_list
Example 30
0
def ip_addresses(conn, interface=None, include_loopback=False):
    """
    Returns a list of IPv4 addresses assigned to the host. 127.0.0.1 is
    ignored, unless 'include_loopback=True' is indicated. If 'interface' is
    provided, then only IP addresses from that interface will be returned.

    Example output looks like::

        >>> ip_addresses(conn)
        >>> ['192.168.1.111', '10.0.1.12']

    """
    ret = set()
    ifaces = linux_interfaces(conn)
    if interface is None:
        target_ifaces = ifaces
    else:
        target_ifaces = dict((k, v) for k, v in ifaces.items()
                             if k == interface)
        if not target_ifaces:
            LOG.error('Interface {0} not found.'.format(interface))
    for ipv4_info in target_ifaces.values():
        for ipv4 in ipv4_info.get('inet', []):
            loopback = in_subnet('127.0.0.0/8', [ipv4.get('address')]) or ipv4.get('label') == 'lo'
            if not loopback or include_loopback:
                ret.add(ipv4['address'])
        for secondary in ipv4_info.get('secondary', []):
            addr = secondary.get('address')
            if addr and secondary.get('type') == 'inet':
                if include_loopback or (not include_loopback and not in_subnet('127.0.0.0/8', [addr])):
                    ret.add(addr)
    if ret:
        conn.logger.debug('IP addresses found: %s' % str(list(ret)))
    return sorted(list(ret))
 def create_int_feature(values):
     f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))
     return f
from sys import stdin,stdout
oddEven = [0,1,1,0,1,0,0,1]
def countSetBits(n):
      
      if n>=0 and n <= 7:
            return oddEven[n]
      else:
            if n%2:
                  return 1 - countSetBits(n//2)
            else:
                  return countSetBits(n//2)
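A quick sanity check of the parity recursion, assuming countSetBits() and oddEven above are in scope: countSetBits(n) returns 1 when n has an odd number of set bits, else 0.

assert [countSetBits(n) for n in range(8)] == [0, 1, 1, 0, 1, 0, 0, 1]
assert countSetBits(13) == 1   # 0b1101 has three set bits (odd)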
for t in range(int(stdin.readline())):
      countEven = 0
      countOdd = 0
      N,Q = map(int,stdin.readline().split())
      A = list(map(int,stdin.readline().split()))
      for a in A:
            if countSetBits(a):
                  countOdd += 1 
            else:
                  countEven += 1 
      for q in range(Q):
            P = int(stdin.readline())
            countP = countSetBits(P)
            if countP :
                  print(countOdd,countEven)
            else:
                  print(countEven,countOdd)
            

      
Example 33
0
def nonuniform_random_number_generation(values, probabilities):
    probs = list(itertools.accumulate(probabilities))
    i = bisect.bisect(probs, random.random())
    return values[i]
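The snippet above omits its imports; here is a self-contained sketch with the function restated and a rough frequency check (the values and probabilities are made up).

import bisect
import itertools
import random

def nonuniform_random_number_generation(values, probabilities):
    probs = list(itertools.accumulate(probabilities))
    i = bisect.bisect(probs, random.random())
    return values[i]

samples = [nonuniform_random_number_generation([0, 1, 2], [0.2, 0.5, 0.3])
           for _ in range(10000)]
print({v: samples.count(v) / len(samples) for v in (0, 1, 2)})  # roughly 0.2 / 0.5 / 0.3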
Example 34
0
def get_file(file_dir):
    '''
    Args:
        file_dir: file directory
    Returns:
        images: image directories, list, string
        labels: label, list, int
    '''

    images = []
    temp = []
    for root, sub_folders, files in os.walk(file_dir):
        #image directories
        for name in files:
            images.append(os.path.join(root, name))
        #get 10 sub-folder names
        for name in sub_folders:
            temp.append(os.path.join(root, name))

    #assign 19 labels (0-18) based on the folder names
    labels = []
    for one_folder in temp:
        n_img = len(os.listdir(one_folder))
        letter = one_folder.split('/')[-1]

        if letter == 'agapostemonvirescens':
            labels = np.append(labels, n_img * [0])
        elif letter == 'augochlorapura':
            labels = np.append(labels, n_img * [1])
        elif letter == 'augochlorellastriata':
            labels = np.append(labels, n_img * [2])
        elif letter == 'bombusimpatiens':
            labels = np.append(labels, n_img * [3])
        elif letter == 'ceratinacalcarata':
            labels = np.append(labels, n_img * [4])
        elif letter == 'ceratinadupla':
            labels = np.append(labels, n_img * [5])
        elif letter == 'ceratinametallica':
            labels = np.append(labels, n_img * [6])
        elif letter == 'dialictusbruneri':
            labels = np.append(labels, n_img * [7])
        elif letter == 'dialictusillinoensis':
            labels = np.append(labels, n_img * [8])
        elif letter == 'dialictusimitatus':
            labels = np.append(labels, n_img * [9])
        elif letter == 'dialictusrohweri':
            labels = np.append(labels, n_img * [10])
        elif letter == 'halictusconfusus':
            labels = np.append(labels, n_img * [11])
        elif letter == 'halictusligatus':
            labels = np.append(labels, n_img * [12])
        elif letter == 'osmiaatriventis':
            labels = np.append(labels, n_img * [13])
        elif letter == 'osmiabucephala':
            labels = np.append(labels, n_img * [14])
        elif letter == 'osmiacornifrons':
            labels = np.append(labels, n_img * [15])
        elif letter == 'osmiageorgica':
            labels = np.append(labels, n_img * [16])
        elif letter == 'osmialignaria':
            labels = np.append(labels, n_img * [17])
        elif letter == 'osmiapumila':
            labels = np.append(labels, n_img * [18])
    #shuffle
    #print(temp)
    #temp = temp.transpose()
    #np.random.shuffle(temp)
    #print(temp)

    image_list = list(images)
    labels = list(labels)

    return image_list, labels
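The long elif chain above can be collapsed into a lookup table. A sketch, not the original code: SPECIES_LABELS and labels_for_folder are hypothetical names, with the species-to-label mapping taken from the branches above.

import os

SPECIES_LABELS = {
    'agapostemonvirescens': 0, 'augochlorapura': 1, 'augochlorellastriata': 2,
    'bombusimpatiens': 3, 'ceratinacalcarata': 4, 'ceratinadupla': 5,
    'ceratinametallica': 6, 'dialictusbruneri': 7, 'dialictusillinoensis': 8,
    'dialictusimitatus': 9, 'dialictusrohweri': 10, 'halictusconfusus': 11,
    'halictusligatus': 12, 'osmiaatriventis': 13, 'osmiabucephala': 14,
    'osmiacornifrons': 15, 'osmiageorgica': 16, 'osmialignaria': 17,
    'osmiapumila': 18,
}

def labels_for_folder(one_folder):
    """Return one integer label per image in the folder, via the lookup table."""
    n_img = len(os.listdir(one_folder))
    letter = one_folder.split('/')[-1]
    return [SPECIES_LABELS[letter]] * n_img if letter in SPECIES_LABELS else []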
Example 35
0
File: sol1.py Project: Huijiny/TIL
import sys
sys.stdin = open('sample_input.txt')

T = int(input())
# K = maximum number of stops the bus can travel on one charge
# end = terminal stop N; the bus runs from stop 0 to stop N
# M = total number of stops that have a charger installed


for tc in range(1, T+1):
    K, end, M = list(map(int, input().split()))
    charge_place = list(map(int, input().split()))
    charge_count = 0
    current = 0

    # iterate from stop 0 to the terminal

    result = 0
    while current <= end:
        if current+K >= end: # reached the terminal
            break
        else: # not at the terminal yet
            charged_idx = [] # holds the reachable charging stops
            for next in range(current+1, current+K+1):
                if next in charge_place:
                    charged_idx.append(next)
                if len(charged_idx) > 1:
                    charged_idx.pop(0)
            if len(charged_idx) == 0:
                charge_count = 0
                break
Example 36
0
        return self.x, " ", self.y, " ", self.facing



if __name__ == "__main__":
    map_size = input().split()
    width = int(map_size[0])
    height = int(map_size[1])
    map_size = (width, height)

    x_pos, y_pos, facing = input().split()
    instructions = input()

    my_robot = Robot(int(x_pos), int(y_pos), facing, map_size)

    my_list = list(instructions)
    # my_list[:0] = instructions
    print(my_list)

    for elem in my_list:
        if elem == 'M':
            my_robot.move()
        elif elem == 'L':
            my_robot.rotate_left()
        elif elem == 'R':
            my_robot.rotate_right()
        print(my_robot.show_current_location())

    print(my_robot.show_current_location())

Example 37
0
def bio_tags_to_spans(tag_sequence: List[str],
                      classes_to_ignore: List[str] = None) -> List[TypedStringSpan]:
    """
    Given a sequence corresponding to BIO tags, extracts spans.
    Spans are inclusive and can be of zero length, representing a single word span.
    Ill-formed spans are also included (i.e those which do not start with a "B-LABEL"),
    as otherwise it is possible to get a perfect precision score whilst still predicting
    ill-formed spans in addition to the correct spans. This function works properly when
    the spans are unlabeled (i.e., your labels are simply "B", "I", and "O").

    Parameters
    ----------
    tag_sequence : List[str], required.
        The integer class labels for a sequence.
    classes_to_ignore : List[str], optional (default = None).
        A list of string class labels `excluding` the bio tag
        which should be ignored when extracting spans.

    Returns
    -------
    spans : List[TypedStringSpan]
        The typed, extracted spans from the sequence, in the format (label, (span_start, span_end)).
        Note that the label `does not` contain any BIO tag prefixes.
    """
    classes_to_ignore = classes_to_ignore or []
    spans: Set[Tuple[str, Tuple[int, int]]] = set()
    span_start = 0
    span_end = 0
    active_conll_tag = None
    for index, string_tag in enumerate(tag_sequence):
        # Actual BIO tag.
        bio_tag = string_tag[0]
        if bio_tag not in ["B", "I", "O"]:
            raise InvalidTagSequence(tag_sequence)
        conll_tag = string_tag[2:]
        if bio_tag == "O" or conll_tag in classes_to_ignore:
            # The span has ended.
            if active_conll_tag is not None:
                spans.add((active_conll_tag, (span_start, span_end)))
            active_conll_tag = None
            # We don't care about tags we are
            # told to ignore, so we do nothing.
            continue
        elif bio_tag == "B":
            # We are entering a new span; reset indices
            # and active tag to new span.
            if active_conll_tag is not None:
                spans.add((active_conll_tag, (span_start, span_end)))
            active_conll_tag = conll_tag
            span_start = index
            span_end = index
        elif bio_tag == "I" and conll_tag == active_conll_tag:
            # We're inside a span.
            span_end += 1
        else:
            # This is the case the bio label is an "I", but either:
            # 1) the span hasn't started - i.e. an ill formed span.
            # 2) The span is an I tag for a different conll annotation.
            # We'll process the previous span if it exists, but also
            # include this span. This is important, because otherwise,
            # a model may get a perfect F1 score whilst still including
            # false positive ill-formed spans.
            if active_conll_tag is not None:
                spans.add((active_conll_tag, (span_start, span_end)))
            active_conll_tag = conll_tag
            span_start = index
            span_end = index
    # Last token might have been a part of a valid span.
    if active_conll_tag is not None:
        spans.add((active_conll_tag, (span_start, span_end)))
    return list(spans)
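A usage sketch, assuming bio_tags_to_spans() above is in scope; the tag sequence is made up.

tags = ["O", "B-PER", "I-PER", "O", "B-LOC", "I-ORG"]
spans = sorted(bio_tags_to_spans(tags))
print(spans)
# [('LOC', (4, 4)), ('ORG', (5, 5)), ('PER', (1, 2))]
# the 'I-ORG' without a preceding 'B-ORG' is kept as an ill-formed single-token span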
Example 38
0
 def replace_label(full_label, new_label):
     # example: full_label = 'I-PER', new_label = 'U', returns 'U-PER'
     parts = list(full_label.partition('-'))
     parts[0] = new_label
     return ''.join(parts)
    data_usda = pd.read_csv(f"05_full_params_run_results\\{sp}_usda.csv",
                            index_col=0)
    data_bio = pd.read_csv(f"05_full_params_run_results\\{sp}_sbcm_bio.csv",
                           index_col=0)
    data_soil = pd.read_csv(f"05_full_params_run_results\\{sp}_sbcm_soil.csv",
                            index_col=0)
    output = pd.DataFrame(index=np.arange(-1, 10001, 1))
    #    output_bio = pd.DataFrame(index=np.arange(0,501,1))

    interim_soil_upper = pd.DataFrame(index=np.arange(-1, 10001, 1))
    interim_soil_lower = pd.DataFrame(index=np.arange(-1, 10001, 1))
    interim_bio_upper = pd.DataFrame(index=np.arange(-1, 10001, 1))
    interim_bio_lower = pd.DataFrame(index=np.arange(-1, 10001, 1))

    rmse_list = []
    rmse_values = list(data_rmse[sp])
    for i in rmse_values:
        if not np.isnan(i):
            rmse_list.append(i)
    rmse_values = rmse_list
    print(rmse_values)
    bio_column_names = list(data_bio.columns.values)
    soil_column_names = list(data_soil.columns.values)

    for value in rmse_values:
        if not np.isnan(value):
            i = rmse_values.index(value)
            soil_col = soil_column_names[i]
            bio_col = bio_column_names[i]
            interim_soil_upper[f"{soil_col}"] = data_soil.iloc[:, i] + value
            interim_soil_lower[f"{soil_col}"] = data_soil.iloc[:, i] - value
Example 40
0
    def sankey_preprocessing(self, input_file, user_apparition_threshold=0,
                             user_nunique_periods_threshold=3, keep_all_groups_in_periods=[]):

        le = LabelEncoder()
        demographics = extract_demographics(input_file)
        df = read_lcm_output(input_file).sort_values("period").reset_index(drop=True)

        file = f'{LINKS_FOLDER}/{input_file}'
        mlb = MultiLabelBinarizer(sparse_output=True)
        df_users_apparition = mlb.fit_transform(df.user_ids.tolist()).astype(bool)
        df_users_apparition = pd.DataFrame(df_users_apparition.toarray(), columns=mlb.classes_)

        df_stats = df_users_apparition.sum()
        df_users_apparition = df_users_apparition[df_stats[df_stats > user_apparition_threshold].index]
        df_users_apparition = df_users_apparition.T.apply(lambda x: np.where(x)[0], axis=1)
        df_stats = df_users_apparition.to_frame()[0].apply(
            lambda x: list(list(z) for idx, z in groupby(x, lambda y: df.iloc[y].period))
        )
        df_stats = df_stats[df_stats.apply(lambda x: len(x)) > user_nunique_periods_threshold]

        res = []
        df_stats.to_frame().reset_index().apply(lambda x: [res.append(i) for i in self.make_links(x[0], x["index"])],
                                                axis=1)
        links = pd.DataFrame(res)

        links.columns = ["source", "target", "user_id"]
        links = links.groupby(["source", "target"])["user_id"].apply(
            lambda x: ','.join(str(i) for i in x)).reset_index()
        links.to_csv(file)

        # Keep groups appearing in at least one week
        file = f'{GROUPS_FOLDER}/{input_file}'
        groups_to_keep = np.unique(np.union1d(links.source.unique(), links.target.unique()))

        groups_to_keep = np.union1d(groups_to_keep, df[df.period.isin(keep_all_groups_in_periods)].index)
        df_groups = df.loc[groups_to_keep].dropna()
        df_groups['depth'] = le.fit_transform(df_groups.period) / df_groups.period.nunique()
        df_groups['size'] = df_groups.user_ids.apply(lambda x: len(x))
        if len(demographics) == 1:
            df_groups[demographics[0]] = df_groups.property_values
        else:
            df_groups[demographics] = df_groups.property_values.str.split("_", expand=True)

        # Encoding items to their initial ID + adding names
        # TODO remove dependency to DatasetHandler
        self.dh = DatasetHandler()
        items = self.dh.get_items()

        df_groups["itemset_name"] = df_groups["itemsets"].astype(str).apply(
            lambda x: json.dumps(get_items_descriptions(x, items)))
        df_groups.to_csv(file)

        # Groups demographics stats
        file = f'{STATS_FOLDER}/{input_file}'
        stats = {}
        for i in np.intersect1d(GROUPS_DEMOGRAPHICS, demographics):
            b = df_groups.groupby(i).apply(lambda x: {"name": x[i].unique()[0], "value": x.index.shape[0],
                                                      "groups": ",".join(str(i) for i in x.index)}).values
            stats[i] = str(b.tolist())
        with open(file, 'w') as outfile:
            json.dump(stats, outfile)

        self.make_labeled_links(input_file, links, df_groups)
        print("Done", input_file)
Example 41
0
def norm(x, ord=None, axis=None, keepdims=False):
    """Returns one of matrix norms specified by ``ord`` parameter.

    Complex valued matrices and vectors are not supported.
    See numpy.linalg.norm for more detail.

    Args:
        x (cupy.ndarray): Array to take norm. If ``axis`` is None,
            ``x`` must be 1-D or 2-D.
        ord (non-zero int, inf, -inf, 'fro'): Norm type.
        axis (int, 2-tuple of ints, None): 1-D or 2-D norm is computed over
            ``axis``.
        keepdims (bool): If this is set ``True``, the axes which are normed
            over are left.

    Returns:
        cupy.ndarray

    """
    if not issubclass(x.dtype.type, (numpy.inexact, numpy.object_)):
        x = x.astype(float)

    # Immediately handle some default, simple, fast, and common cases.
    if axis is None:
        ndim = x.ndim
        if ((ord is None) or (ord in ('f', 'fro') and ndim == 2)
                or (ord == 2 and ndim == 1)):

            x = x.ravel()
            sqnorm = cupy.sum(x**2)
            ret = cupy.sqrt(sqnorm)
            if keepdims:
                ret = ret.reshape(ndim * [1])
            return ret

    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except Exception:
            raise TypeError(
                "'axis' must be None, an integer or a tuple of integers")
        axis = (axis, )

    if len(axis) == 1:
        if ord == numpy.Inf:
            return abs(x).max(axis=axis, keepdims=keepdims)
        elif ord == -numpy.Inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            # Zero norm
            return (x != 0).sum(axis=axis, keepdims=keepdims, dtype=x.dtype)
        elif ord == 1:
            # special case for speedup
            return abs(x).sum(axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            # special case for speedup
            s = x**2
            return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
        else:
            try:
                float(ord)
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            absx = abs(x)
            absx **= ord
            return absx.sum(axis=axis, keepdims=keepdims)**(1.0 / ord)
    elif len(axis) == 2:
        row_axis, col_axis = axis
        if row_axis < 0:
            row_axis += nd
        if col_axis < 0:
            col_axis += nd
        if not (0 <= row_axis < nd and 0 <= col_axis < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
        elif ord == numpy.Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = abs(x).sum(axis=col_axis).max(axis=row_axis)
        elif ord == -1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = abs(x).sum(axis=row_axis).min(axis=col_axis)
        elif ord == -numpy.Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            ret = cupy.sqrt((x**2).sum(axis=axis))
        else:
            raise ValueError("Invalid norm order for matrices.")
        if keepdims:
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError("Improper number of dimensions to norm.")
Example 42
0
    freq = '20b'
    universe = Universe('custom', ['zz800'])
    data_source = 'postgres+psycopg2://postgres:[email protected]/alpha'
    engine = SqlEngine(data_source)
    horizon = map_freq(freq)

    """
    Factor Model
    """
    factor_name = 'SIZE'

    """
    Constraints
    """

    risk_names = list(set(risk_styles).difference({factor_name}))
    industry_names = list(set(industry_styles).difference({factor_name}))
    constraint_risk = risk_names + industry_names + macro_styles

    b_type = []
    l_val = []
    u_val = []

    for name in constraint_risk:
        if name in set(risk_styles):
            b_type.append(BoundaryType.ABSOLUTE)
            l_val.append(0.0)
            u_val.append(0.0)
        else:
            b_type.append(BoundaryType.RELATIVE)
            l_val.append(1.0)
Example 43
0
def keypoint_check(arg_list):
    if arg_list == '[]':
        return []
    else:
        return list(map(int, arg_list[1:-1].split(",")))
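A usage sketch, assuming keypoint_check() above is in scope; the input strings are made up.

print(keypoint_check('[]'))         # -> []
print(keypoint_check('[3,7,12]'))   # -> [3, 7, 12]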
Example 44
0
def test_find_contiguous_1_jolt_diffs(jolts, diffs):
    assert list(day10.find_contiguous_1_jolt_diffs(sorted(jolts))) == diffs
Example 45
0
 def reset(self):
     self.is_reset = True
     action = np.random.randint(0,action_count)
     return mel_component_matrix[action], list(factors.keys())[action]
Example 46
0
def clean_data(indir=None, outdir=None):
    '''
    Takes the loaded data, converts pitch into C, and
    chunks the clips into subclips
    Outputs a pkl file with dataframe of cleaned and chunked data 
    '''

    if outdir and not os.path.exists(outdir):
        os.makedirs(outdir)
    df = pd.read_pickle(indir + '/loaded_data.pkl')

    dfdict = {'Name': [], 'y': [], 'sr': []}
    fmin = librosa.midi_to_hz(36)
    hop_length = 512

    audio = list(df['Name'])
    ys = np.asarray(list(df['y']))
    srs = list(df['sr'])

    pitchdict = {
        'Asavari Natabhairavi 1': 'C#',
        'Asavari Natabhairavi 2': 'D',
        'Asavari Natabhairavi 3': 'D',
        'Bhairav Mayamalavagowlai 1': 'C#',
        'Bhairav Mayamalavagowlai 2': 'C',
        'Bhairav Mayamalavagowlai 3': 'C#',
        'Bhairavi Hanumatodi 1': 'D',
        'Bhairavi Hanumatodi 2': 'C',
        'Bhairavi Hanumatodi 3': 'D#',
        'Bilawal Dheerashankarabharanam 1': 'D',
        'Bilawal Dheerashankarabharanam 2': 'D#',
        'Bilawal Dheerashankarabharanam 3': 'E',
        'Kafi Karaharapriya 1': 'D',
        'Kafi Karaharapriya 2': 'D',
        'Kafi Karaharapriya 3': 'D#',
        "Kalyan Kalyani 1": 'E',
        "Kalyan Kalyani 2": 'D#',
        'Kalyan Kalyani 3': 'D#',
        'Khamaj Harikambhoji 1': 'D',
        'Khamaj Harikambhoji 2': 'E',
        'Khamaj Harikambhoji 3': 'E',
        'Marva Gamanasharma 1': 'C',
        'Marva Gamanasharma 2': 'E',
        'Marva Gamanasharma 3': 'D',
        'Poorvi Kamavardhani 1': 'C#',
        'Poorvi Kamavardhani 2': 'C#',
        'Poorvi Kamavardhani 3': 'C#',
        'Todi Subhapantuvarali 1': 'C',
        'Todi Subhapantuvarali 2': 'C#',
        'Todi Subhapantuvarali 3': 'D'
    }

    #iterate through input dict
    for i in range(len(df)):
        y = ys[i]
        sr = srs[i]

        #check pitch of each input, convert it to C if it's not
        pitch = pitchdict[audio[i][:-4]]

        if pitch == 'C':
            newy = y
        elif pitch == 'C#':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=-1)
        elif pitch == 'D':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=-2)
        elif pitch == 'D#':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=-3)
        elif pitch == 'E':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=-4)
        elif pitch == 'F':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=-5)
        elif pitch == 'F#':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=-6)
        elif pitch == 'G':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=-7)
        elif pitch == 'G#':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=+4)
        elif pitch == 'A':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=+3)
        elif pitch == 'A#':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=+2)
        elif pitch == 'B':
            newy = librosa.effects.pitch_shift(y, sr, n_steps=+1)

        y = newy
        ylist = []

        #chunk the data to create smaller subclips
        first = 0
        second = 100000
        while second < len(y):
            ylist.append(y[first:second])
            first = second
            second = second + 100000

        cliptitles = []

        for j in range(len(ylist)):
            string = audio[i][:-6]  #clean string to show raga only
            cliptitles.append(string)

        for k in range(len(cliptitles)):
            dfdict['Name'].append(cliptitles[k])
            dfdict['y'].append(ylist[k])

            dfdict['sr'].append(sr)

    cleaned = pd.DataFrame(dfdict)

    #create output directory
    if outdir and not os.path.exists(outdir):
        os.makedirs(outdir)

    #output cleaned file
    cleaned.to_pickle(os.path.join(outdir, 'cleaned_data.pkl'))

    return cleaned
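The pitch-normalisation elif chain in clean_data() can also be written as a semitone-offset table. A sketch under the same librosa API used above; SEMITONE_SHIFT_TO_C and shift_to_c are hypothetical names, with the offsets mirroring the branches in clean_data().

import librosa

SEMITONE_SHIFT_TO_C = {
    'C': 0, 'C#': -1, 'D': -2, 'D#': -3, 'E': -4, 'F': -5, 'F#': -6,
    'G': -7, 'G#': +4, 'A': +3, 'A#': +2, 'B': +1,
}

def shift_to_c(y, sr, pitch):
    """Pitch-shift a clip so its tonic becomes C."""
    n_steps = SEMITONE_SHIFT_TO_C[pitch]
    if n_steps == 0:
        return y
    return librosa.effects.pitch_shift(y, sr=sr, n_steps=n_steps)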
hf_tags = [
            r'\MA_POL_CA01T', r'\MA_POL_CA02T', r'\MA_POL_CA03T', r'\MA_POL_CA05T', r'\MA_POL_CA06T',
            r'\MA_POL_CA07T',r'\MA_POL_CA19T', r'\MA_POL_CA20T', r'\MA_POL_CA21T', r'\MA_POL_CA22T',
            r'\MA_POL_CA23T', r'\MA_POL_CA24T'
        ]
all_tags =  lf_tags + hf_tags
print(len(all_tags))

for i in range(len(all_tags)):
    all_tags[i] = all_tags[i][1:len(all_tags[i])]
print(all_tags)


TrainNumber = np.load(r"C:\Users\艾鑫坤\Desktop\破裂炮数据\新一轮\ValidNumber.npy")
TrainNumber = list(TrainNumber)

print(len(TrainNumber))
print(TrainNumber)

n = 1
IncompleteNumber = []
for i in TrainNumber:
    print("第{}个".format(n))
    n += 1
    g = h5py.File(r"I:\predict data\{}.hdf5".format(i),'r')
    ShotTag = list(g.keys())
    for j in all_tags:
        if j in ShotTag:
            continue
        else:
Example 48
0
def sort(values):
	for i in range(len(values)):		
		for j in range(i, len(values)):			
			if (values[i] > values[j]):
				temp = values[i]
				values[i] = values[j]
				values[j] = temp
x = input().rstrip()
xList = list(x)
sort(xList)
print("".join(xList))
Example 49
0
class Voronoi(draw.Voronoi, GLPrimitive):
    __doc__ = draw.Voronoi.__doc__

    shaders = {}

    shaders['vertex'] = """
       uniform mat4 camera;
       uniform vec4 rotation;
       uniform vec3 translation;
       uniform float radius;

       attribute vec4 color;
       attribute vec2 position;
       attribute vec3 image;

       varying vec4 v_color;
       varying vec2 v_position;

       vec2 rotate(vec2 point, vec4 quat)
       {
           vec3 point3d = vec3(point.xy, 0.0);
           vec3 result = (quat.x*quat.x - dot(quat.yzw, quat.yzw))*point3d;
           result += 2.0*quat.x*cross(quat.yzw, point3d);
           result += 2.0*dot(quat.yzw, point3d)*quat.yzw;
           return result.xy;
       }

       void main()
       {
           vec2 vertexPos = position + image.xy*radius;
           vertexPos = rotate(vertexPos, rotation) + translation.xy;
           vec4 screenPosition = camera * vec4(vertexPos, 0, 1.0);
           screenPosition.z = image.z;

           // transform to screen coordinates
           gl_Position = screenPosition;
           v_position = position + image.xy*radius;
           v_color = color;
       }
       """

    shaders['fragment'] = """
       uniform mat2 clip_extent;

       varying vec4 v_color;
       varying vec2 v_position;

       void main()
       {
           vec2 boundaries = clip_extent*v_position;

           if(boundaries.x > 1.0 || boundaries.y > 1.0 ||
              boundaries.x < -1.0 || boundaries.y < -1.0)
               discard;

           gl_FragColor = v_color;
       }
       """

    _vertex_attribute_names = ['position', 'color', 'image']

    _GL_UNIFORMS = list(itertools.starmap(ShapeAttribute, [
        ('camera', np.float32, np.eye(4), 2, False,
         'Internal: 4x4 Camera matrix for world projection'),
        ('rotation', np.float32, (1, 0, 0, 0), 1, False,
         'Internal: Rotation to be applied to each scene as a quaternion'),
        ('translation', np.float32, (0, 0, 0), 1, False,
         'Internal: Translation to be applied to the scene'),
        ('radius', np.float32, 64, 0, False,
         'Maximum distance between displayed points'),
        ('clip_extent', np.float32, np.zeros((2, 2)), 2, False,
         'Matrix specifying areas to not display when dot(clip_extent, position) is outside [-1, 1]')
        ]))

    def __init__(self, *args, **kwargs):
        self.num_vertices = 32
        GLPrimitive.__init__(self)
        draw.Voronoi.__init__(self, *args, **kwargs)

    def update_arrays(self):
        try:
            for name in self._dirty_attributes:
                self._gl_vertex_arrays[name][:] = self._attributes[name]
                self._dirty_vertex_attribs.add(name)
        except (ValueError, KeyError):
            # wrap around to beginning of circle, but also have vertex 0
            # be (0, 0), so we still end up with num_vertices vertices in
            # total
            thetas = np.linspace(0, 2*np.pi, self.num_vertices, endpoint=True)
            vertices = np.array([np.cos(thetas), np.sin(thetas), np.ones_like(thetas)], dtype=np.float32).T
            vertices[0] = (0, 0, 0)

            triangleIndices = np.zeros((self.num_vertices - 1, 3), dtype=np.uint32)
            triangleIndices[:, 1] = np.arange(self.num_vertices - 1) + 1
            triangleIndices[:, 2] = np.arange(self.num_vertices - 1) + 2
            triangleIndices[-1, 2] = 1

            vertex_arrays = mesh.unfoldProperties(
                [self.positions, self.colors],
                [vertices])

            unfolded_shape = vertex_arrays[0].shape[:-1]
            indices = (np.arange(unfolded_shape[0])[:, np.newaxis, np.newaxis]*unfolded_shape[1] +
                       triangleIndices)
            indices = indices.reshape((-1, 3))

            self._finalize_array_updates(indices, vertex_arrays)

        self._dirty_attributes.clear()
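
The GLSL rotate() in the vertex shader above applies a unit quaternion (w, x, y, z) to a 2D point via the expanded sandwich product v' = (w^2 - u.u) v + 2 w (u x v) + 2 (u.v) u. As a hedged cross-check, not part of the original class, the same formula can be verified in a few lines of NumPy:

import numpy as np

def rotate(point, quat):
    # Mirrors the shader's rotate(): embed the 2D point in the z = 0 plane,
    # apply the expanded quaternion rotation, and return the xy components.
    w, u = quat[0], np.asarray(quat[1:], dtype=float)
    p = np.array([point[0], point[1], 0.0])
    out = (w * w - u.dot(u)) * p + 2.0 * w * np.cross(u, p) + 2.0 * u.dot(p) * u
    return out[:2]

# A 90-degree rotation about z should map (1, 0) to (0, 1).
q = np.array([np.cos(np.pi / 4), 0.0, 0.0, np.sin(np.pi / 4)])
print(np.round(rotate((1.0, 0.0), q), 6))  # [0. 1.]
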
Esempio n. 50
0
import sys
input = sys.stdin.readline
n, m, b = map(int, input().split())
graph = []
dy = [0, -1, 0, 1]
dx = [1, 0, -1, 0]
answer = int(1e10)
ans_height = 0
for i in range(n):
    graph.append(list(map(int, input().split())))

for height in range(257):
    over = 0
    leak = 0
    for i in range(n):
        for j in range(m):
            if graph[i][j] > height:
                over += graph[i][j] - height
            else:
                leak += height - graph[i][j]
    if leak > b + over:
        continue
    if answer >= over * 2 + leak:
        answer = min(answer, over * 2 + leak)
        ans_height = height
print(answer, ans_height)

Esempio n. 51
0
#
# Author: Sergey Kopeliovich ([email protected])
# Date: 2013.11.15
#
# Comment: O(1), try only +-8, no overflow
#

import sys

NAME = "sweets"

inf = open(NAME + ".in", "r")
out = open(NAME + ".out", "w")

n, a, b, c = list(map(int, inf.readline().split()))

x0 = n // (3 * a)
y0 = n // (3 * b)
z0 = n // (3 * c)
best = x0 * y0 * z0
rx, ry, rz = x0, y0, z0
M = 8
r = range(-M, M + 1)
for dx in r:
    for dy in r:
        for dz in r:
            x, y, z = x0 + dx, y0 + dy, z0 + dz
            if ((1 <= x) and (1 <= y) and (1 <= z)
                    and (a * x + b * y + c * z <= n)):
                tmp = x * y * z
                if (tmp > best):
                    # Assumed completion (the original snippet is truncated here):
                    # keep the best product and the counts that achieved it.
                    best = tmp
                    rx, ry, rz = x, y, z

# Assumed output format: write the chosen counts of each sweet type and close the files.
out.write("{} {} {}\n".format(rx, ry, rz))
inf.close()
out.close()

import itertools
n = int(input())
ls = list(map(str , input().split(' ')))
count = ls.count('a')
k = int(input())
a = len(list(itertools.combinations(ls,k)))
for _ in range(count): 
	ls.remove('a')
b = len(list(itertools.combinations(ls,k)))
#print(a,b)
ans = (a-b)/a
print(ans)
Esempio n. 53
0
def execute(MAX_CLUSTERS=20, NOISE_PARAM=4.30, num_epochs=1, max_number_of_episodes=500, reward_sum=0):

    state_space, non_deterministic_hierarchical_clustering, least_noise_model, marginal_model = \
    state_space_model(factors, noises, mel_component_matrix, MAX_CLUSTERS)

    class PGCREnv(BanditEnv):
        def __init__(self, num_actions = 10, 
        observation_space = None, distribution = "factor_model", evaluation_seed=387):
            super(BanditEnv, self).__init__()
            
            self.action_space = ActionSpace(range(num_actions))
            self.distribution = distribution
            
            self.observation_space = observation_space
            
            np.random.seed(evaluation_seed)
            
            self.reward_parameters = None
            if distribution == "bernoulli":
                self.reward_parameters = np.random.rand(num_actions)
            elif distribution == "normal":
                self.reward_parameters = (np.random.randn(num_actions), np.random.rand(num_actions))
            elif distribution == "heavy-tail":
                self.reward_parameters = np.random.rand(num_actions)
            elif distribution == "factor_model":
                self.reward_parameters = (np.array(list(factors.values())).sum(axis=2), 
                            np.array(list(noises.values())))
            else:
                print("Please use a supported reward distribution", flush = True)
                sys.exit(0)
            
            if distribution != "normal":
                self.optimal_arm = np.argmax(self.reward_parameters)
            else:
                self.optimal_arm = np.argmax(self.reward_parameters[0])
        
        def reset(self):
            self.is_reset = True
            action = np.random.randint(0,action_count)
            return mel_component_matrix[action], list(factors.keys())[action]
        
        def compute_gap(self, action):
            if self.distribution == "factor_model":
                gap = np.absolute(self.reward_parameters[0][self.optimal_arm] - self.reward_parameters[0][action])
            elif self.distribution != "normal":
                gap = np.absolute(self.reward_parameters[self.optimal_arm] - self.reward_parameters[action])
            else:
                gap = np.absolute(self.reward_parameters[0][self.optimal_arm] - self.reward_parameters[0][action])
            return gap
        
        def step(self, action):
            self.is_reset = False
            
            valid_action = True
            if (action is None or action < 0 or action >= self.action_space.n):
                print("Algorithm chose an invalid action; reset reward to -inf", flush = True)
                reward = float("-inf")
                gap = float("inf")
                valid_action = False
            
            if self.distribution == "bernoulli":
                if valid_action:
                    reward = np.random.binomial(1, self.reward_parameters[action])
                    gap = self.reward_parameters[self.optimal_arm] - self.reward_parameters[action]
            elif self.distribution == "normal":
                if valid_action:
                    reward = self.reward_parameters[0][action] + self.reward_parameters[1][action] * np.random.randn()
                    gap = self.reward_parameters[0][self.optimal_arm] - self.reward_parameters[0][action]
            elif self.distribution == "heavy-tail":
                if valid_action:
                    reward = self.reward_parameters[action] + np.random.standard_cauchy()
                    gap = self.reward_parameters[self.optimal_arm] - self.reward_parameters[action]        #HACK to compute expected gap
            elif self.distribution == "factor_model":
                if valid_action:
                    reward = np.linalg.norm(
                        self.reward_parameters[0][action],1
                    ) + \
                    np.linalg.norm(
                        self.reward_parameters[1][action],1
                    ) * np.random.randn()
            else:
                print("Please use a supported reward distribution", flush = True)
                sys.exit(0)
                
            observation = marginal_model(action)
            
            return(observation, list(factors.keys())[action], reward, self.is_reset, '')

    running_variance = RunningVariance()

    epoch_stats = []
    net_actions = []
    net_rewards = []
    net_scores = []

    for epoch in tqdm(range(num_epochs)):
        stats = plotting.EpisodeStats(
            episode_lengths=np.zeros(max_number_of_episodes),
            episode_rewards=np.zeros(max_number_of_episodes),
            episode_running_variance=np.zeros(max_number_of_episodes),
            episode_scores=np.zeros(max_number_of_episodes),
            losses=np.zeros(max_number_of_episodes))

        env = PGCREnv(num_actions = action_count, observation_space = np.zeros((state_dim,1)))
        
        for episode_number in tqdm(range(max_number_of_episodes)):
            states, rewards, labels, scores = [],[],[],[]
            done = False

            observation, model = env.reset()
            factor_sequence = []
            t = 1
            for state_dim_i in range(state_dim):
                done = False
                while not done:
                    
                    state = np.ascontiguousarray(np.reshape(observation[:,state_dim_i], [1,120]).astype(np.float32))
                    states.append(state)

                    is_reset = False
                    score = 0.0
                    action, score = least_noise_model(
                        model, non_deterministic_hierarchical_clustering(model), 
                        scaler, MAX_CLUSTERS, NOISE_PARAM, noise_mean, noise_std, 
                        env=env
                    )
                    is_reset = env.is_reset
                    
                    net_actions.append(action)

                    z = np.ones((1,state_dim)).astype(np.float32) * 1.25/120
                    z[:,state_dim_i] = 0.75
                    labels.append(z)
                    
                    # step the environment and get new measurements
                    observation, model, reward, done, _ = env.step(action)
                    
                    done = is_reset if is_reset is True else False

                    observation = np.ascontiguousarray(observation)

                    net_rewards.append(reward)
                    net_scores.append(score)

                    reward_sum += float(reward)

                    # Record reward (has to be done after we call step() to get reward for previous action)
                    rewards.append(float(reward))

                    factor_sequence.append(list(factors.values())[action])

                    stats.episode_rewards[episode_number] += reward
                    stats.episode_lengths[episode_number] = t
                    stats.episode_scores[episode_number] += score

                    t += 1

            # Stack together all inputs, hidden states, action gradients, and rewards for this episode
            epr = np.vstack(rewards).astype(np.float32)

            # Compute the discounted reward backwards through time.
            discounted_epr = discount_rewards(epr, factor_sequence)
            
            for discounted_reward in discounted_epr:
                # Keep a running estimate of the variance of the discounted rewards
                running_variance.add(discounted_reward.sum())

            stats.episode_running_variance[episode_number] = running_variance.get_variance()
        
        plotting.plot_pgresults(stats)
        epoch_stats.append(stats)

    uniq_actions = np.unique(net_actions)
    np.save(dirname + "/tmp_models/net_actions.npy", uniq_actions)

    print("MCTS Coverage: ", len(uniq_actions) / state_dim)

    return stats
Esempio n. 54
0
    def __init__(
        self,
        env,
        q_func,
        optimizer_spec,
        session,
        exploration=LinearSchedule(1000000, 0.1),
        stopping_criterion=None,
        replay_buffer_size=1000000,
        batch_size=32,
        gamma=0.99,
        learning_starts=50000,
        learning_freq=4,
        frame_history_len=4,
        target_update_freq=10000,
        grad_norm_clipping=10,
        rew_file=None,
        double_q=True,
        lander=False):
        """Run Deep Q-learning algorithm.

        You can specify your own convnet using q_func.

        All schedules are w.r.t. total number of steps taken in the environment.

        Parameters
        ----------
        env: gym.Env
            gym environment to train on.
        q_func: function
            Model to use for computing the q function. It should accept the
            following named arguments:
                img_in: tf.Tensor
                    tensorflow tensor representing the input image
                num_actions: int
                    number of actions
                scope: str
                    scope in which all the model related variables
                    should be created
                reuse: bool
                    whether previously created variables should be reused.
        optimizer_spec: OptimizerSpec
            Specifying the constructor and kwargs, as well as learning rate schedule
            for the optimizer
        session: tf.Session
            tensorflow session to use.
        exploration: rl_algs.deepq.utils.schedules.Schedule
            schedule for probability of choosing random action.
        stopping_criterion: (env, t) -> bool
            should return true when it's ok for the RL algorithm to stop.
            takes in env and the number of steps executed so far.
        replay_buffer_size: int
            How many memories to store in the replay buffer.
        batch_size: int
            How many transitions to sample each time experience is replayed.
        gamma: float
            Discount Factor
        learning_starts: int
            After how many environment steps to start replaying experiences
        learning_freq: int
            How many steps of environment to take between every experience replay
        frame_history_len: int
            How many past frames to include as input to the model.
        target_update_freq: int
            How many experience replay rounds (not steps!) to perform between
            each update to the target Q network
        grad_norm_clipping: float or None
            If not None gradients' norms are clipped to this value.
        double_q: bool
            If True, then use double Q-learning to compute target values. Otherwise, use vanilla DQN.
            https://papers.nips.cc/paper/3964-double-q-learning.pdf
        """
        assert type(env.observation_space) == gym.spaces.Box
        assert type(env.action_space)      == gym.spaces.Discrete

        self.target_update_freq = target_update_freq
        self.optimizer_spec = optimizer_spec
        self.batch_size = batch_size
        self.learning_freq = learning_freq
        self.learning_starts = learning_starts
        self.stopping_criterion = stopping_criterion
        self.env = env
        self.session = session
        self.exploration = exploration
        self.rew_file = str(uuid.uuid4()) + '.pkl' if rew_file is None else rew_file

        ###############
        # BUILD MODEL #
        ###############

        if len(self.env.observation_space.shape) == 1:
            # This means we are running on low-dimensional observations (e.g. RAM)
            input_shape = self.env.observation_space.shape
        else:
            img_h, img_w, img_c = self.env.observation_space.shape
            input_shape = (img_h, img_w, frame_history_len * img_c)
        self.num_actions = self.env.action_space.n

        # set up placeholders
        # placeholder for current observation (or state)
        self.obs_t_ph              = tf.placeholder(
            tf.float32 if lander else tf.uint8, [None] + list(input_shape))
        # placeholder for current action
        self.act_t_ph              = tf.placeholder(tf.int32,   [None])
        # placeholder for current reward
        self.rew_t_ph              = tf.placeholder(tf.float32, [None])
        # placeholder for next observation (or state)
        self.obs_tp1_ph            = tf.placeholder(
            tf.float32 if lander else tf.uint8, [None] + list(input_shape))
        # placeholder for end of episode mask
        # this value is 1 if the next state corresponds to the end of an episode,
        # in which case there is no Q-value at the next state; at the end of an
        # episode, only the current state reward contributes to the target, not the
        # next state Q-value (i.e. target is just rew_t_ph, not rew_t_ph + gamma * q_tp1)
        self.done_mask_ph          = tf.placeholder(tf.float32, [None])

        # casting to float on GPU ensures lower data transfer times.
        if lander:
            obs_t_float = self.obs_t_ph
            obs_tp1_float = self.obs_tp1_ph
        else:
            obs_t_float   = tf.cast(self.obs_t_ph,   tf.float32) / 255.0
            obs_tp1_float = tf.cast(self.obs_tp1_ph, tf.float32) / 255.0

        # Here, you should fill in your own code to compute the Bellman error. This requires
        # evaluating the current and next Q-values and constructing the corresponding error.
        # TensorFlow will differentiate this error for you, you just need to pass it to the
        # optimizer. See assignment text for details.
        # Your code should produce one scalar-valued tensor: total_error
        # This will be passed to the optimizer in the provided code below.
        # Your code should also produce two collections of variables:
        # q_func_vars
        # target_q_func_vars
        # These should hold all of the variables of the Q-function network and target network,
        # respectively. A convenient way to get these is to make use of TF's "scope" feature.
        # For example, you can create your Q-function network with the scope "q_func" like this:
        # <something> = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)
        # And then you can obtain the variables like this:
        # q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')
        # Older versions of TensorFlow may require using "VARIABLES" instead of "GLOBAL_VARIABLES"
        # Tip: use huber_loss (from dqn_utils) instead of squared error when defining self.total_error
        ######

        # YOUR CODE HERE
        # NOTE: Originally target_q took obs_t_float as its first argument, which
        # caused the algorithm not to converge. Just a bug, nothing deep; there is
        # no reason to evaluate the target network on the same observations as the
        # online network.
        num_actions = env.action_space.n    
        target_q = q_func(obs_tp1_float, num_actions, scope="target_q_func", reuse=False)
        q = q_func(obs_t_float, num_actions, scope="q_func", reuse=False)

        target_q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='target_q_func')
        q_func_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='q_func')

        target_val = self.rew_t_ph + (1 - self.done_mask_ph) * gamma * tf.reduce_max(target_q, axis=1)
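        # Added sketch (an assumption, not part of the original solution): the
        # double_q constructor flag is otherwise unused above. Double Q-learning
        # selects the greedy next action with the online network and evaluates it
        # with the target network, reducing the overestimation bias of the plain max.
        if double_q:
            online_q_tp1 = q_func(obs_tp1_float, num_actions, scope="q_func", reuse=True)
            best_act = tf.argmax(online_q_tp1, axis=1)
            target_val = self.rew_t_ph + (1 - self.done_mask_ph) * gamma * \
                tf.reduce_sum(tf.one_hot(best_act, num_actions) * target_q, axis=1)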
        q_val = tf.reduce_sum(tf.one_hot(self.act_t_ph, num_actions) * q, axis=1)
        self.total_error = tf.reduce_sum(tf.squared_difference(q_val, target_val))

        ######

        # construct optimization op (with gradient clipping)
        self.learning_rate = tf.placeholder(tf.float32, (), name="learning_rate")
        optimizer = self.optimizer_spec.constructor(learning_rate=self.learning_rate, **self.optimizer_spec.kwargs)
        self.train_fn = minimize_and_clip(optimizer, self.total_error,
                     var_list=q_func_vars, clip_val=grad_norm_clipping)

        # update_target_fn will be called periodically to copy Q network to target Q network
        update_target_fn = []
        for var, var_target in zip(sorted(q_func_vars,        key=lambda v: v.name),
                                   sorted(target_q_func_vars, key=lambda v: v.name)):
            update_target_fn.append(var_target.assign(var))
        self.update_target_fn = tf.group(*update_target_fn)

        # construct the replay buffer
        self.replay_buffer = ReplayBuffer(replay_buffer_size, frame_history_len, lander=lander)
        self.replay_buffer_idx = None

        ###############
        # RUN ENV     #
        ###############
        self.model_initialized = False
        self.num_param_updates = 0
        self.mean_episode_reward      = -float('nan')
        self.best_mean_episode_reward = -float('inf')
        self.last_obs = self.env.reset()
        self.log_every_n_steps = 10000

        self.start_time = None
        self.t = 0
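
The docstring above pins down the q_func interface (img_in, num_actions, scope, reuse). As a hedged illustration only, not the convnet the assignment expects, a minimal network satisfying that interface might look like the following, assuming the TF1-style tf.layers API used elsewhere in this snippet:

import tensorflow as tf

def toy_q_func(img_in, num_actions, scope, reuse=False):
    # Hypothetical example network; the real assignment would use a convnet here.
    with tf.variable_scope(scope, reuse=reuse):
        out = tf.layers.flatten(img_in)
        out = tf.layers.dense(out, 64, activation=tf.nn.relu)
        return tf.layers.dense(out, num_actions, activation=None)
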
Esempio n. 55
0
def importPersonaCharacteristicsWorkbook(wbFile,session_id):
  eds = []
  drs = []
  pcs = []

  wb = load_workbook(filename = wbFile,data_only=True)
  edSheet = wb.worksheets[0]
  edNames = set([])
  for row in edSheet.iter_rows(min_row=2):
    edName = row[0].value
    edAuth = row[1].value
    edVer = row[2].value
    edPD = str(row[3].value)
    edDesc = row[4].value
    if (edName != '' and edName != None):
      edNames.add(edName)
      if (edAuth == '' or edAuth == None):
        edAuth = 'Unknown'
      if (edVer == '' or edVer == None):
        edVer = '1'
      if (edPD == '' or edPD == None):
        edPD = 'Unknown'
      if (edDesc == '' or edDesc == None):
        edDesc = 'Unknown'
      eds.append(ExternalDocumentParameters(edName,edVer,edPD,edAuth,edDesc))

  b = Borg()
  db_proxy = b.get_dbproxy(session_id)

  drNames = set([])
  drSheet = wb.worksheets[1]
  for row in drSheet.iter_rows(min_row=2):
    drName = row[0].value
    drDoc = row[1].value
    drCont = row[2].value
    drExc = row[3].value

    if (drName != '' and drName != None):
      drNames.add(drName)
      if (drDoc not in edNames):
        if (db_proxy.existingObject(drDoc,'external_document') == -1):
          raise ARMException("Cannot import document reference " + drName + ". " + drDoc + " not an external document.")
      if (drCont == '' or drCont == None):
        drCont = 'Unknown'
      if (drExc == '' or drExc == None):
        drExc = 'Unknown'
      drs.append(DocumentReferenceParameters(drName,drDoc,drCont,drExc))

  pcSheet = wb.worksheets[2]
  for row in pcSheet.iter_rows(min_row=2):
    pcName = row[0].value
    pName = row[1].value.strip()
    bvName = row[2].value.strip()
    modQual = row[3].value.strip()
    grounds  = row[4].value
    if (grounds == None):
      grounds = []
    else:
      grounds = list(map(lambda x: (x.strip(),'','document'),grounds.strip().split(',')))
      if (len(grounds) == 1 and grounds[0][0] == ''):
        grounds = []

    warrant  = row[5].value
    if (warrant == None):
      warrant = []
    else:
      warrant = list(map(lambda x: (x.strip(),'','document'),warrant.strip().split(',')))
      if (len(warrant) == 1 and warrant[0][0] == ''):
        warrant = []

    rebuttal = row[6].value
    if (rebuttal == None):
      rebuttal = []
    else:
      rebuttal = list(map(lambda x: (x.strip(),'','document'),rebuttal.strip().split(',')))
      if (len(rebuttal) == 1 and rebuttal[0][0] == ''):
        rebuttal = []

    if (pcName != ''):
      if (db_proxy.existingObject(pName,'persona') == -1):
        raise ARMException("Cannot import persona characteristic " + pcName + ". Persona " + pName + " does not exist.")
      if bvName not in ['Activities','Attitudes','Aptitudes','Motivations','Skills','Environment Narrative','Intrinsic','Contextual']:
        raise ARMException("Cannot import persona characteristic " + pcName + ". " + bvName + " is an invalid behavioural variable name.")

      if (modQual == '' or modQual == None):
        modQual = 'Possibly'

      for g in grounds:
        if ((g[0] not in drNames) and (db_proxy.existingObject(g[0],'document_reference') == -1)):
          raise ARMException("Cannot import persona characteristic " + pcName + ". Document reference corresponding with grounds " + g[0] + " does not exist.")
      for w in warrant:
        if ((w[0] not in drNames) and (db_proxy.existingObject(w[0],'document_reference') == -1)):
          raise ARMException("Cannot import persona characteristic " + pcName + ". Document reference corresponding with warrant " + w[0] + " does not exist.")
      for r in rebuttal:
        if ((r[0] not in drNames) and (db_proxy.existingObject(r[0],'document_reference') == -1)):
          raise ARMException("Cannot import persona characteristic " + pcName + ". Document reference corresponding with rebuttal " + r[0] + " does not exist.")

      pcs.append(PersonaCharacteristicParameters(pName,modQual,bvName,pcName,grounds,warrant,[],rebuttal))
      
  for edParameters in eds:
    objtId = db_proxy.existingObject(edParameters.name(),'external_document')
    if objtId == -1:
      db_proxy.addExternalDocument(edParameters)
    else:
      edParameters.setId(objtId)
      db_proxy.updateExternalDocument(edParameters)

  for drParameters in drs:
    objtId = db_proxy.existingObject(drParameters.name(),'document_reference')
    if objtId == -1:
      db_proxy.addDocumentReference(drParameters)
    else:
      drParameters.setId(objtId)
      db_proxy.updateDocumentReference(drParameters)

  for pcParameters in pcs:
    objtId = db_proxy.existingObject(pcParameters.characteristic(),'persona_characteristic')
    if objtId == -1:
      db_proxy.addPersonaCharacteristic(pcParameters)
    else:
      pcParameters.setId(objtId)
      db_proxy.updatePersonaCharacteristic(pcParameters)

  msgStr = 'Imported ' + str(len(eds)) + ' external documents, ' + str(len(drs)) + ' document references, and ' + str(len(pcs)) + ' persona characteristics.'
  return msgStr
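
From the row indices read above, the expected workbook layout can be reconstructed; the call below is a hedged usage sketch in which the file name and session_id are hypothetical placeholders.

# Sheet 1 (external documents):      name | authors | version | publication date | description
# Sheet 2 (document references):     name | document | contents | excerpt
# Sheet 3 (persona characteristics): characteristic | persona | behavioural variable |
#                                    modal qualifier | grounds | warrant | rebuttal
msg = importPersonaCharacteristicsWorkbook('persona_characteristics.xlsx', session_id)
print(msg)
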
Esempio n. 56
0
def main(clf, stats_odds1):
    stats_odds1.drop(columns=[
        'Thr_x', 'Opp. Starter (GmeSc)_x', ' Year_x_x', 'Gtm_y_x', 'Date_y_x',
        'Unnamed: 2_y_x', 'Opp_y_x', 'Rslt_y_x', 'IP_x', 'H_y_x', 'R_y_x',
        'ER_x', 'UER_x', 'BB_y_x', 'SO_y_x', 'HR_y_x', 'HBP_y_x', 'ERA_x',
        'BF_x', 'Pit_x', 'Str_x', 'IR_x', 'IS_x', 'SB_y_x', 'CS_y_x', 'AB_y_x',
        '2B_y_x', '3B_y_x', 'IBB_y_x', 'SH_y_x', 'SF_y_x', 'ROE_y_x',
        'GDP_y_x', '#_y_x', 'Umpire_x', 'Pitchers Used (Rest-GameScore-Dec)_x',
        ' Year_y_x', 'date_month_x', 'date_day_x', 'full_date_x', 'game_id_x',
        'Gtm_x_y', 'Date_x_y', 'Unnamed: 2_x_y', 'Opp_x_y', 'Rslt_x_y',
        'Thr_y', 'Opp. Starter (GmeSc)_y', ' Year_x_y', 'Gtm_y_y', 'Date_y_y',
        'Unnamed: 2_y_y', 'Opp_y_y', 'Rslt_y_y', 'IP_y', 'H_y_y', 'R_y_y',
        'ER_y', 'UER_y', 'BB_y_y', 'SO_y_y', 'HR_y_y', 'HBP_y_y', 'ERA_y',
        'BF_y', 'Pit_y', 'Str_y', 'IR_y', 'IS_y', 'SB_y_y', 'CS_y_y', 'AB_y_y',
        '2B_y_y', '3B_y_y', 'IBB_y_y', 'SH_y_y', 'SF_y_y', 'ROE_y_y',
        'GDP_y_y', '#_y_y', 'Umpire_y', 'Pitchers Used (Rest-GameScore-Dec)_y',
        ' Year_y_y', 'date_month_y', 'date_day_y', 'full_date_y', 'team_1',
        'team_2', 'stats_join', 'Home Team_x', 'Away Team', 'Home Code_x',
        'Away Code', 'Unnamed: 0', 'Unnamed: 0.1', 'game_id_y', 'date',
        'Visitor Team', 'Visitor Pitcher', 'Home Team_y', 'Home Pitcher',
        'Home Code_y', 'Vis Code', 'Final_x', 'Final_y', 'Gtm_x_x', 'Date_x_x',
        'Unnamed: 2_x_x', 'Opp_x_x', 'Rslt_x_x'
    ],
                     inplace=True)

    stats_odds1_16 = stats_odds1.iloc[:7419]

    stats_odds1_17 = stats_odds1.iloc[7419:]

    stats_odds1_16 = stats_odds1_16.dropna()

    stats_odds1_17 = stats_odds1_17.dropna()
    # return stats_odds1_16, stats_odds1_17

    hopen_ml_16 = stats_odds1_16.pop('Home Open ML')
    vopen_ml_16 = stats_odds1_16.pop('Open Visitor ML')
    stats_odds1_16.drop(columns=['Close Visitor ML', 'Home Close ML'],
                        inplace=True)

    hopen_ml_17 = stats_odds1_17.pop('Home Open ML')
    vopen_ml_17 = stats_odds1_17.pop('Open Visitor ML')
    stats_odds1_17.drop(columns=['Close Visitor ML', 'Home Close ML'],
                        inplace=True)

    y = stats_odds1_16.pop('result')

    X = stats_odds1_16

    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.25,
                                                        random_state=42)

    # clf = RandomForestClassifier(n_estimators=100, max_features=10, max_depth=10, n_jobs=-2)

    # clf.fit(X_train, y_train)

    y_preds = clf.predict(X_test)

    clf.predict_proba(X_test)

    train_accuracy = np.mean(
        cross_val_score(clf, X_train, y_train, cv=5, scoring='accuracy'))
    recall = np.mean(
        cross_val_score(clf, X_train, y_train, cv=5, scoring='recall'))
    precision = np.mean(
        cross_val_score(clf, X_train, y_train, cv=5, scoring='precision'))
    print('Random Forest Train: ')
    print('Train Accuracy: ', train_accuracy)
    print('Recall: ', recall)
    print('Precision: ', precision)

    feature_import = list(zip(X, clf.feature_importances_))
    feature_import.sort(key=lambda x: x[1], reverse=True)
    feature_import = feature_import[:10]
    plt.bar(*zip(*feature_import))
    plt.xticks(rotation='vertical')

    ####Logistic Regression

    clf1 = LogisticRegression(solver='lbfgs', max_iter=2000)
    clf1.fit(X_train, y_train)
    y_preds1 = clf1.predict(X_test)
    train_accuracy1 = np.mean(
        cross_val_score(clf1, X_train, y_train, cv=5, scoring='accuracy'))
    recall1 = np.mean(
        cross_val_score(clf1, X_train, y_train, cv=5, scoring='recall'))
    precision1 = np.mean(
        cross_val_score(clf1, X_train, y_train, cv=5, scoring='precision'))
    print("Logistic Regression Train:")
    print("Train Accuracy: ", train_accuracy1)
    print("Recall: ", recall1)
    print("Precision: ", precision1)

    #### test results on holdout
    y = stats_odds1_17.pop('result')

    X = stats_odds1_17

    y_preds = clf.predict(X)
    probs = clf.predict_proba(X)
    y_pred1 = clf1.predict(X)

    print("Random Forest Holdout:")
    print("Holdout Accuracy:", metrics.accuracy_score(y, y_preds))
    print("Holdout Recall:", metrics.recall_score(y, y_preds))
    print("Holdout Precision:", metrics.precision_score(y, y_preds))

    print("Logistic Regression Holdout:")
    print("Train Accuracy: ", metrics.accuracy_score(y, y_pred1))
    print("Recall: ", metrics.recall_score(y, y_pred1))
    print("Precision: ", metrics.precision_score(y, y_pred1))

    X['prob_0'] = probs[:, 0]
    X['prob_1'] = probs[:, 1]

    X['y_preds'] = y_preds

    X['result'] = y

    X['Open Visitor ML'] = vopen_ml_17

    X['Home Open ML'] = hopen_ml_17

    X['Visitors Odds Prob'] = X['Open Visitor ML'].apply(
        lambda x: abs(x) / (abs(x) + 100) if x < 100 else 100 / (x + 100))
    X['Home Odds Prob'] = X['Home Open ML'].apply(
        lambda x: abs(x) / (abs(x) + 100) if x < 100 else 100 / (x + 100))
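    # Worked example of the implied-probability conversion above (added comment):
    # a -150 favourite -> 150 / (150 + 100) = 0.600
    # a +120 underdog  -> 100 / (120 + 100) ~= 0.455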

    X['max_model_prob'] = X[["prob_0", "prob_1"]].max(axis=1) * 100

    X['max_odds_prob'] = X[['Visitors Odds Prob', 'Home Odds Prob'
                            ]].max(axis=1) * 100

    X['potential edge'] = X['max_model_prob'] - X['max_odds_prob']

    X['wager'] = X['potential edge'].apply(lambda x: 10 if x > 0 else 0)

    # Convert the opening moneylines to decimal odds and scale by the $10 wager,
    # giving the total returned (stake included) on a winning bet.
    X['Home Payout'] = X['Home Open ML'].apply(
        lambda x: (100 / abs(x) + 1) * 10 if x < 100 else (x / 100 + 1) * 10)
    X['Visitor Payout'] = X['Open Visitor ML'].apply(
        lambda x: (100 / abs(x) + 1) * 10 if x < 100 else (x / 100 + 1) * 10)
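    # Worked example (added): a -150 line gives decimal odds 100/150 + 1 = 1.667,
    # i.e. $16.67 returned on a winning $10 wager; a +120 line gives 2.2, i.e. $22.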

    X['incorrect'] = np.where(X['result'] == X['y_preds'], 0, 1)

    conditions = [
        (X['incorrect'] == 0) & (X['y_preds'] == 0) & (X['wager'] == 10),
        (X['incorrect'] == 0) & (X['y_preds'] == 1) & (X['wager'] == 10),
        (X['incorrect'] == 1) & (X['wager'] == 10),
        (X['incorrect'] == 1) & (X['wager'] == 0)
    ]
    choices = [X['Visitor Payout'], X['Home Payout'], 0, 0]
    X['payout'] = np.select(conditions, choices)

    X['payout'].sum()

    X['wager'].sum()

    roi = (X['payout'].sum() / X['wager'].sum() - 1) * 100

    print('ROI = {}'.format(roi))

# Reconstructed opening (assumption; the original snippet is truncated here):
# classic two-pass "trapping rain water": first record, for each index, the
# maximum height strictly to its right, then sweep left to right accumulating
# the water bounded by the running left maximum and maxright[i].
def solver(arr, n):
    water = 0
    maxright = [0] * n
    temp = arr[n-1]
    for i in range(n-2, -1, -1):
        maxright[i] = temp
        temp = max(temp, arr[i])

    temp = arr[0]
    for i in range(1, n-1):
        water += max(0, min(temp, maxright[i]) - arr[i])
        temp = max(temp, arr[i])

    return water
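
# Worked example (added): solver([3, 0, 0, 2, 0, 4], 6) == 10, since three units
# of water collect over index 1, three over index 2, one over index 3, and three
# over index 4.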


T = int(input())

for _ in range(T):
    n = int(input())
    arr = list(map(int, input().split()))
    print(solver(arr, n))


'''
4
6
3 0 0 2 0 4
4
7 4 0 9
3
6 9 9
7
8 8 2 4 5 5 1
'''
Esempio n. 58
0
 def all(self, filename=None):
     if filename:
         return [error
                 for error in self
                 if error.filename == filename]
     return list(self._items.values())
Esempio n. 59
0
latent = np.load(dirname + '/../dataset/latent.npy')
mel_calibration_matrix = np.load(dirname + '/../dataset/mel_calibration.npy')
mel_component_matrix = sess.run(None, {'latent': latent})
mel_component_matrix = mel_component_matrix[0]
mel_component_matrix = np.matmul(mel_component_matrix, mel_calibration_matrix)
mel_component_matrix = mel_component_matrix.transpose(0,2,1)
factors = pickle.load(open(dirname + "/../dataset/factors.pkl", "rb"))
noises = pickle.load(open(dirname + "/../dataset/noises.pkl", "rb"))

state_dim = 24 # Dimension of state space
action_count = 60 # Number of actions
hidden_size = 256 # Number of hidden units
update_frequency = 20

noise_values = np.array(list(noises.values()))
noise_mean = np.mean(noise_values)
noise_std = np.std(noise_values)

scaler = MinMaxScaler(feature_range=(0,6)).fit((noise_values - noise_mean)/noise_std)

def discount_rewards(r, factor_model, gamma=0.999):
    """Take 1D float array of rewards and compute discounted reward """
    discounted_r = np.zeros_like(r,dtype=np.float32)
    running_add = 0
    f_ = 0
    f = np.linalg.norm(factor_model[0],1)
    running_add = f
    for t in reversed(range(0, r.size)):
        if (t < (r.size - 1) and r.size >= 2):
            f_ = np.linalg.norm(factor_model[t+1],1)
        # Assumed completion (the original snippet is truncated here): accumulate a
        # plain discounted return; how the factor norms f and f_ were meant to
        # weight it in the original is unknown, so they are left unused.
        running_add = running_add * gamma + r[t]
        discounted_r[t] = running_add
    return discounted_r

import sys

#
# Complete the breakingRecords function below.
#
def breakingRecords(scores):
    #
    # Write your code here.
    #
    minScore, maxScore = 0, 0
    bh, bl = 0, 0
    index = 0
    for score in scores:
        if(index==0):
            minScore = score
            maxScore = score
        else:
            if (score<minScore):
                bl+=1
                minScore = score
            else:
                if (score > maxScore):
                    bh+=1
                    maxScore = score
        index+=1
    return [bh, bl]

if __name__ == '__main__':
    n = int(input())
    score = list(map(int, input().split()))
    result = print(*breakingRecords(score))
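
As a quick check of the record-counting logic above (an added example, not part of the original submission):

print(*breakingRecords([10, 5, 20, 20, 4, 5, 2, 25, 1]))  # prints: 2 4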