Example #1
def evaluate_transfer(whichdecoder, data_source, epoch):
    # Turn on evaluation mode which disables dropout.
    autoencoder.eval()
    ntokens = len(corpus.dictionary.word2idx)

    original = []
    transferred = []
    for i, batch in enumerate(data_source):
        source, target, lengths = batch
        target = target.view(source.size(0), -1)
        source = to_gpu(args.cuda, Variable(source, volatile=True))

        mask = target.gt(0)
        hidden = autoencoder(0, source, lengths, noise=False, encode_only=True)

        # output: batch x seq_len x ntokens
        if whichdecoder == 1:
            max_indices = autoencoder.generate(2, hidden, maxlen=args.maxlen)
        else:
            max_indices = autoencoder.generate(1, hidden, maxlen=args.maxlen)

        for t, idx in zip(target, max_indices):
            t = t.numpy()
            idx = idx.data.cpu().numpy()

            words = [corpus.dictionary.idx2word[x] for x in t]
            original.append(truncate(words))

            words = [corpus.dictionary.idx2word[x] for x in idx]
            transferred.append(truncate(words))

    return original, transferred
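In this evaluation loop, truncate is applied to decoded word lists, which in code like this usually means cutting the sequence at the first end-of-sequence token. A minimal sketch under that assumption (the stop-token name is a guess, not taken from the source):

def truncate(words, stop='<eos>'):
    # Cut a decoded word list at the first end-of-sequence marker;
    # '<eos>' is an assumed marker name.
    if stop in words:
        return words[:words.index(stop)]
    return words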
Example #2
def test_truncate():
    s = '12345'
    assert s == truncate(s, 10)
    s = '1234567890'
    assert s == truncate(s, 10)
    s = '12345678901234567890'
    assert '1234567...' == truncate(s, 10)
    assert '1234561234' == truncate(s, 10, '1234')
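The assertions above pin down the contract: strings that fit are returned unchanged, longer ones are cut so that string plus suffix is exactly the limit, and the suffix defaults to '...'. A minimal sketch that passes this test (the project's real truncate may differ in details):

def truncate(s, max_len, suffix='...'):
    # Strings that already fit are returned unchanged.
    if len(s) <= max_len:
        return s
    # Otherwise cut enough characters to make room for the suffix.
    return s[:max_len - len(suffix)] + suffix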
Example #3
 def list_tasks(self, tasks):
   descs = [utils.truncate(task['description'], 40) for task in tasks]
   desc_length = max(len(word) for word in descs)
   format_str = '{:25s} {:16s} {:%ds} {:10s} {:s}' % (desc_length + 1)
   for task in tasks:
     truncated_desc = utils.truncate(task['description'], 40)
      print(format_str.format(
          task['id'], task['task_type'], truncated_desc,
          task['state'], task.get('error_message', '---')))
Example #4
 def list_tasks(self, tasks):
     descs = [utils.truncate(task['description'], 40) for task in tasks]
     desc_length = max(len(word) for word in descs)
     format_str = '{:25s} {:16s} {:%ds} {:10s} {:s}' % (desc_length + 1)
     for task in tasks:
         truncated_desc = utils.truncate(task['description'], 40)
          print(format_str.format(task['id'], task['task_type'],
                                   truncated_desc, task['state'],
                                   task.get('error_message', '---')))
Example #5
 def run(self, unused_args, config):
   config.ee_init()
   tasks = ee.data.getTaskList()
   descs = [utils.truncate(task.get('description', ''), 40) for task in tasks]
   desc_length = max(len(word) for word in descs)
   format_str = '{:25s} {:13s} {:%ds} {:10s} {:s}' % (desc_length + 1)
   for task in tasks:
     truncated_desc = utils.truncate(task.get('description', ''), 40)
     task_type = TASK_TYPES.get(task['task_type'], 'Unknown')
      print(format_str.format(
          task['id'], task_type, truncated_desc,
          task['state'], task.get('error_message', '---')))
Example #6
def compareProbabilities(year):
    ## Expectation of a normal random variable suggests 50/50 over/under probabilities ##
    teamWins = expectedWins(year)
    teamOdds = oddsScraper()
    vegasImpliedProbs = vegasProbabilites(teamOdds)
    vegasValgoProbabilities = {}
    for team in teamWins:
        vegasExpectation = vegasImpliedProbs[team][0]
        vegasDistribution = stats.norm(vegasExpectation, 2.5)
        againstVegasUnderProbability = float(truncate(vegasDistribution.cdf(teamWins[team]), 2))
        againstVegasOverProbability = float(truncate(1 - againstVegasUnderProbability, 2))
        vegasValgoProbabilities[team] = [vegasImpliedProbs[team][0], againstVegasOverProbability, againstVegasUnderProbability]
    return vegasValgoProbabilities, teamWins
Example #7
    def to_jstree_dict(self, prefixes, index=None):
        tmp_prefixes = self._get_prefixes(prefixes, index)
        data = self._tagname
        value = getattr(self, '_value', None)
        if value:
            data += u' <span class="_tree_text">(%s)</span>' % (
                utils.truncate(value))

        css_class = 'tree_' + ':'.join(prefixes or [])
        if index is not None:
            css_class += ' tree_' + ':'.join((prefixes + [str(index)]))
        else:
            css_class += ':' + self._tagname

        dic = {
            'data': data,
            'attr': {
                'id': 'tree_' + ':'.join(tmp_prefixes),
                'class': css_class,
            },
        }
        children = []
        for elt in self._sub_elements:
            v = elt._to_jstree_dict(self, tmp_prefixes)
            if v:
                children += [v]
        dic['children'] = children
        return dic
Example #8
    def update_winnings(self, slider=None):
        bounty = self._tournament['round']['bounty']
        winners = []
        self._winners = []

        total = 0
        for item in self._submissions_list.items:
            slider = item.find_node('Slider')
            item.find_node('Winnings').get_content().text_value = ''

            if slider.enabled:
                amount = slider.get_content().current_value
                total += amount
                winners.append((amount, item))

        for amount, item in winners:
            mtx = utils.truncate(bounty * amount / total)

            winnings = item.find_node('Winnings').get_content()
            winnings.text_value = mtx + ' MTX'

            self._winners.append((int(1e6 * amount), item.submission['hash']))

        self._button_continue.unusable = total == 0

        self._plugin.refresh_menu()
Example #9
 def convertToFunkLoad(self, request):
     """return a funkload python instruction."""
     text = []
     text.append("        # " + request.file_path)
     if request.host != self.server_url:
         text.append('self.%s("%s"' % (request.method.lower(), request.url))
     else:
         text.append('self.%s(server_url + "%s"' % (request.method.lower(), request.rurl.strip()))
     description = "%s %s" % (request.method.capitalize(), request.path | truncate(42))
     if request.body:
         params = request.extractParam()
         if isinstance(params, Data):
             params = "Data('%s', '''%s''')" % (params.content_type, params.data)
         else:
             myfaces_form = None
             if self.MYFACES_STATE not in [key for key, value in params]:
                 params = "params=%s" % params
             else:
                  # apache myfaces state adds a wrapper
                 self.use_myfaces = True
                 new_params = []
                 for key, value in params:
                     if key == self.MYFACES_STATE:
                         continue
                     if key == self.MYFACES_FORM:
                         myfaces_form = value
                         continue
                     new_params.append([key, value])
                 params = "    self.myfacesParams(%s, form='%s')" % (new_params, myfaces_form)
              params = re.sub(r"'Upload\(([^\)]*)\)'", r"Upload(\1)", params)
         text.append(", " + params)
     text.append(', description="%s")' % description)
     return "".join(text)
Example #10
File: al.py Project: tgage/asm3
def fixed_chars(s, chars=10):
    # Forces a string to be chars in length by padding or truncating
    if len(s) > chars:
        return utils.truncate(s, chars)
    if len(s) < chars:
        return utils.spaceright(s, chars)
    return s
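fixed_chars leans on a utils.spaceright counterpart that is not shown; from the context it presumably right-pads with spaces up to the requested width, i.e. str.ljust. A one-line sketch of that assumption:

def spaceright(s, chars):
    # Assumed behavior: pad s on the right with spaces to `chars` characters.
    return s.ljust(chars)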
Example #11
    def setup_withdraw_button(self, commit):
        # get balance on commit
        user = self._plugin._account.address
        mtx = self._plugin._web3._commit.getAvailableRewardForUser(
            commit['hash'], user)

        self._icon_withdraw.add_new_image(
            os.path.join(os.path.dirname(__file__), '..', 'images',
                         'withdrawMTX.png'))
        self._button_withdraw.enabled = mtx > 0
        self._button_withdraw.get_content().set_all_text(
            '\n\n\n\nReward Available \n %s MTX' % utils.truncate(mtx))

        def cb(b):
            self._button_withdraw.enabled = False
            self._plugin._modal.show_message('Withdrawing your MTX...')

            tx_hash = self._plugin._web3._commit.withdrawAvailableReward(
                commit['hash'])
            self._plugin._web3.wait_for_tx(tx_hash)

            self._plugin.update_account()
            self._plugin._modal.show_message('MTX successfully withdrawn')

        self._button_withdraw.get_content().register_pressed_callback(cb)
Example #12
def fixed_chars(s, chars=10):
    # Forces a string to be chars in length by padding or truncating
    if len(s) > chars:
        return utils.truncate(s, chars)
    if len(s) < chars:
        return utils.spaceright(s, chars)
    return s
Example #13
 def __init__(self, location_id, date_location, inventaire_id, nom_materiel,
              nom_client, prenom_client):
     JsonSerialize.__init__(self)
     self.IdLocation = location_id
     # convert datetime object to string
     self.DateLocation = str(date_location)
     self.InventaireId = inventaire_id
     self.NomMateriel = truncate(nom_materiel, 25)
     self.NomClient = nom_client[0].upper() + '.' + prenom_client
Example #14
def generalRecommendations(year):
    algoProbabilites, algoWins = compareProbabilities(year)
    recommendations = {}
    for team in algoProbabilites:
        if abs(algoProbabilites[team][0] - algoWins[team]) < 1.5:
            recommendations[team] = {"Algo Expected Wins": algoWins[team],
                                     "Vegas Wins": algoProbabilites[team][0],
                                     "Recommendation": "Avoid", 
                                     "Edge": "Minimal/None"}
        elif algoProbabilites[team][1] < 0.50:
            recommendations[team] = {"Algo Expected Wins": algoWins[team],
                                     "Vegas Wins": algoProbabilites[team][0],
                                     "Recommendation": "Over", 
                                     "Edge": truncate((algoProbabilites[team][2] - 0.50) * 100, 2) + "%"}
        elif algoProbabilites[team][1] > 0.50:
            recommendations[team] = {"Algo Expected Wins": algoWins[team],
                                     "Vegas Wins": algoProbabilites[team][0],
                                     "Recommendation": "Under", 
                                     "Edge": truncate((algoProbabilites[team][1] - 0.50) * 100, 2) + "%"}
    return recommendations
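compareProbabilities wraps truncate in float() and generalRecommendations concatenates it with '%', so this truncate evidently chops a float to a fixed number of decimals and returns the result as a string. A hedged sketch of such a helper (the actual util may format differently):

def truncate(number, digits):
    # Drop (do not round) everything past `digits` decimal places
    # and return the result as a string.
    factor = 10 ** digits
    return str(int(number * factor) / factor)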
Example #15
File: git.py Project: cholin/gix
 def _convert_commit(self, c):
     return {
       'sha' : c.hex,
       'subject' : utils.truncate(utils.commit_subject(c.message), 60),
       'message' : c.message,
       'commit_time' : utils.prettydate(utils.to_date(c.commit_time)),
       'author' : {
           'name' : c.author.name, 'email' : c.author.email
       },
       'committer' : {
           'name' : c.committer.name, 'email' : c.committer.email
       }
     }
Example #16
	def run(self):
		self.announce()
		while self.target_tracker.choose_first_target() == None:
			if self.count == Me.WANDER_LOOP_ON_ITER:
				trans = self.random_translation()
				self.count = 0
			self.count += 1

			# Seek the random translation and send off
			# print "Seeking trans", trans
			vel = pure_seek.seek(trans, Me.MAX_LIN, Me.MAX_ANG)
			vel.angular.z = utils.truncate(vel.angular.z, vel.linear.x/2) #cap rotation speed
			MotionValidator.validate_and_publish(vel)

		return TaskStatus.SUCCESS
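The "#cap rotation speed" comment suggests that utils.truncate here clamps a number to a symmetric limit rather than shortening a string. A sketch under that reading:

def truncate(value, limit):
    # Clamp `value` into the symmetric range [-limit, limit].
    limit = abs(limit)
    return max(-limit, min(limit, value))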
Example #17
 def forward(self, y, large_z, context):  # train time
     y, lengths = append(truncate(y, 'eos'), 'sos')
     if self.word_drop > 0.:
         y = word_drop(y, self.word_drop)
     embedded = self.embedding(y)  # (B, l, 300)
     embedded = torch.cat(
         [embedded, context.repeat(1, embedded.size(1), 1)], dim=-1)
     packed = pack_padded_sequence(embedded, lengths, batch_first=True)
     init_hidden = self._transform_hidden(large_z)
     packed_output, _ = self.lstm(packed, init_hidden)
     total_length = embedded.size(1)
     output, _ = pad_packed_sequence(packed_output,
                                     batch_first=True,
                                     total_length=total_length)
     recon_logits = self.out(output)
     return recon_logits  # (B, L, vocab_size)
Example #18
 def forward(self, orig, para, z):  # train time
     orig, orig_lengths = orig  # (B, l), (B,)
     orig = self.embedding(orig)  # (B, l, 300)
     orig_packed = pack_padded_sequence(orig,
                                        orig_lengths,
                                        batch_first=True)
     _, orig_hidden = self.lstm_orig(orig_packed)
     para, _ = append(truncate(para, 'eos'), 'sos')
     if self.word_drop > 0.:
         para = word_drop(para, self.word_drop)  # from Bowman's paper
     para = self.embedding(para)
     L = para.size(1)
     para_z = torch.cat([para, z.repeat(1, L, 1)],
                        dim=-1)  # (B, L, 1100+300)
     para_output, _ = self.lstm_para(para_z, orig_hidden)  # no packing
     logits = self.linear(para_output)
     return logits  # (B, L, vocab_size)
Example #19
 def get_bill(self, year, month, last_day_in_month):
     if len(str(month)) < 2:
         month = f'0{month}'
     response = self.aws_client(
         resource=False, region_name='us-east-1',
         aws_service='ce').get_cost_and_usage(TimePeriod={
             'Start':
             f'{year}-{month}-01',
             'End':
             f'{year}-{month}-{last_day_in_month}'
         },
                                              Granularity='MONTHLY',
                                              Metrics=['AmortizedCost'])
     cost = response['ResultsByTime'][0]['Total']['AmortizedCost']['Amount']
     cost = utils.truncate(float(cost), 2)
     date = f'{month}/{year}'
     bill = f'{cost} $'
     return date, bill
Example #20
 def convertToFunkLoad(self, request):
     """return a funkload python instruction."""
     text = []
     text.append('        # ' + request.file_path)
     specialHeaders = self.listSpecialHeaders(request)
     if len(specialHeaders) > 0:
         text.append('self.clearHeaders()')
         for h in specialHeaders:
           text.append('self.setHeader(\'' + h + '\', \'' + request.headers[h] + '\')')
     if request.host != self.server_url:
         text.append('self.%s("%s"' % (request.method.lower(),
                                       request.url))
     else:
         text.append('self.%s(server_url + "%s"' % (
             request.method.lower(),  request.rurl.strip()))
     description = "%s %s" % (request.method.capitalize(),
                              request.path | truncate(42))
     if request.body:
         params = request.extractParam()
         if isinstance(params, Data):
             params = "Data('%s', '''%s''')" % (params.content_type,
                                                    params.data)
         else:
             myfaces_form = None
             if self.MYFACES_STATE not in [key for key, value in params]:
                 params = 'params=%s' % params
             else:
                  # apache myfaces state adds a wrapper
                 self.use_myfaces = True
                 new_params = []
                 for key, value in params:
                     if key == self.MYFACES_STATE:
                         continue
                     if key == self.MYFACES_FORM:
                         myfaces_form = value
                         continue
                     new_params.append([key, value])
                 params = "    self.myfacesParams(%s, form='%s')" % (
                     new_params, myfaces_form)
              params = re.sub(r"'Upload\(([^\)]*)\)'", r"Upload(\1)", params)
         text.append(', ' + params)
     text.append(', description="%s")' % description)
     return ''.join(text)
Example #21
    def forward(self, z, l, x=None):
        """
        z: (B, 500)
        l: (B,)
        x: tuple of (B, L+1), (B,)
        """
        B = l.size(0)
        l_embed = self.attr_emb(l) # (B, 200)
        hidden = torch.cat([z, l_embed], dim=-1).unsqueeze(0) # (1, B, 700)

        if x is not None: # loss computation with teacher forcing
            x, lengths = append(truncate(x, 'eos'), 'sos')
            x_embed = self.emb(x) # (B, L+1, 300)
            packed_in = pack_padded_sequence(x_embed, lengths, batch_first=True)
            packed_out, _ = self.gru(packed_in, hidden)
            total_length = x.size(1)
            # (B, L, 700)
            hx, lengths = pad_packed_sequence(packed_out, batch_first=True,
                                                  total_length=total_length)
            output = self.out(hx)
            return (hx, lengths), (output, lengths) # (B, L+1, 700), (B,)
                                                    # (B, L+1, vocab), (B,)
        else: # sample y
            y = []
            hy = []
            input_ = l.new_full((B, 1), SOS_IDX)
            for t in range(MAXLEN):
                input_ = self.emb(input_) # (B, 1, 300)
                # output (B, 1, 700), hidden (1, B, 700)
                output, hidden = self.gru(input_, hidden)
                input_ = self._hard_sampling(self.out(output))
                hy.append(output)
                y.append(input_)
            input_ = l.new_full((B,1), EOS_IDX) # feed <eos> as last input,
            output, _ = self.gru(self.emb(input_), hidden)
            hy.append(output)
            y.append(input_) # append <eos> as last token

            hy = torch.cat(hy, dim=1)
            y = torch.cat(y, dim=1)
            hy, y, lengths = self._tighten(hy, y)
            #lengths = y.new_full((B,), MAXLEN+1)
            return (hy, lengths), (y, lengths) # (B, MAXLEN+1, 700), (B, MAXLEN+1), (B, )
Example #22
 def _compute_loss(self, batch):
     if self.backward:
         logprobs, attn_weights = self.model(truncate(batch.resp, 'sos'),
                                             truncate(batch.hist2, 'eos'))
         target, _ = truncate(batch.hist2, 'sos')
     else:
         logprobs, attn_weights = self.model(
             truncate(batch.merged_hist, 'sos'),
             truncate(batch.resp, 'eos'))
         target, _ = truncate(batch.resp, 'sos')
     B, L, _ = logprobs.size()
     loss = self.criterion(logprobs.contiguous().view(B * L, -1),
                           target.contiguous().view(-1))
     return loss
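Examples 17, 18, 21 and 22 all call truncate with a token name and pass or unpack (tensor, lengths) pairs, which points at batch helpers that strip a leading <sos> or trailing <eos> column and adjust the lengths. A rough sketch under those assumptions (token ids are hypothetical, lengths is assumed to be a LongTensor, and padding subtleties are ignored):

import torch

SOS_IDX, EOS_IDX = 2, 3  # assumed special-token ids

def truncate(batch, token):
    x, lengths = batch  # (B, L) LongTensor and per-example lengths
    if token == 'sos':
        x = x[:, 1:]    # drop the leading <sos> column
    else:
        x = x[:, :-1]   # drop the trailing column holding <eos>/padding
    return x, lengths - 1

def append(batch, token):
    x, lengths = batch
    idx = SOS_IDX if token == 'sos' else EOS_IDX
    col = x.new_full((x.size(0), 1), idx)
    # Prepend <sos> or append <eos> as a new column.
    x = torch.cat([col, x] if token == 'sos' else [x, col], dim=1)
    return x, lengths + 1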
Example #23
    def to_jstree_dict(self, prefixes, index=None):
        tmp_prefixes = self._get_prefixes(prefixes, index)
        data = self._tagname
        value = getattr(self, "_value", None)
        if value:
            data += u' <span class="_tree_text">(%s)</span>' % (utils.truncate(value))

        css_class = "tree_" + ":".join(prefixes or [])
        if index is not None:
            css_class += " tree_" + ":".join((prefixes + [str(index)]))
        else:
            css_class += ":" + self._tagname

        dic = {"data": data, "attr": {"id": "tree_" + ":".join(tmp_prefixes), "class": css_class}}
        children = []
        for elt in self._sub_elements:
            v = elt._to_jstree_dict(self, tmp_prefixes)
            if v:
                children += [v]
        dic["children"] = children
        return dic
Example #24
def load_zotero_atom(uri):
    tree = ElementTree()
    library = []
    try:
        tree.parse(urlopen(uri))
        for entry in tree.findall("{http://www.w3.org/2005/Atom}entry"):
            item_type = entry.find("{http://zotero.org/ns/api}itemType").text
            if item_type == "attachment":
                continue
            key = entry.find("{http://zotero.org/ns/api}key").text
            content = entry.find("{http://www.w3.org/2005/Atom}content").text
            try:
                library.append((key, truncate(zotero_item_to_text(json.loads(content)))))
            except KeyError as e:
                # logger.warning(e)
                continue
        for link in tree.findall("{http://www.w3.org/2005/Atom}link"):
            if link.attrib.get("rel", None) == "next":
                library.extend(load_zotero_atom(link.attrib["href"]))
                break
    except ExpatError as e:
        # logger.error(e)
        pass
    return library
Example #25
def load_zotero_atom(uri):
    tree = ElementTree()
    library = []
    try:
        tree.parse(urlopen(uri))
        for entry in tree.findall('{http://www.w3.org/2005/Atom}entry'):
            item_type = entry.find('{http://zotero.org/ns/api}itemType').text
            if item_type == 'attachment':
                continue
            key = entry.find('{http://zotero.org/ns/api}key').text
            content = entry.find('{http://www.w3.org/2005/Atom}content').text
            try:
                library.append(
                    (key, truncate(zotero_item_to_text(json.loads(content)))))
            except KeyError as e:
                logger.warning(e)
                continue
        for link in tree.findall('{http://www.w3.org/2005/Atom}link'):
            if link.attrib.get('rel', None) == 'next':
                library.extend(load_zotero_atom(link.attrib['href']))
                break
    except ExpatError as e:
        logger.error(e)
    return library
Example #26
 def __unicode__(self):
     return utils.truncate(self.as_text())
Example #27
 def test_truncate(self):
     self.assertEquals(utils.truncate(u'xxxxxxxxxx', 4), u'xx... xx')
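This assertion implies a middle-ellipsis variant: keep the head and tail of the string and replace the removed middle with '... '. A sketch that satisfies the test ('xx' + '... ' + 'xx'); the default limit is a guess:

def truncate(s, limit=30):
    if len(s) <= limit:
        return s
    head = s[:limit // 2]
    tail = s[-(limit - limit // 2):]
    # Mark the removed middle with '... '.
    return head + '... ' + tail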
Example #28
 def _get_jstree_data(self):
     data = self.tagname
     if self.text:
         data += u' <span class="_tree_text">(%s)</span>' % (utils.truncate(
             self.text))
     return data
Example #29
    fp = "./img/Circuit.tif"
    if not os.path.exists(fp):
        raise Exception('[Error] image file does not exist')

    img = cv2.cvtColor(cv2.imread(fp, cv2.IMREAD_COLOR), cv2.COLOR_BGR2GRAY)
    w, h = img.shape
    opt = parse_args()
    opt.width = w
    opt.height = h

    opt.a = 0
    opt.b = 20
    opt.mode = "gaussian"
    noise_gaussian = get_noise(param=opt)
    img_noise_gaussian = truncate(img + noise_gaussian,
                                  dst="./img/gaussian_noise.tif")
    img_gaussian_arifi = arithmetic_mean_filter(img_noise_gaussian,
                                                size=(3, 3))
    cv2.imwrite("./img/gaussian_arithmetic.tif", img_gaussian_arifi)
    img_gaussian_geofi = geometric_mean_filter(img_noise_gaussian, size=(3, 3))
    cv2.imwrite("./img/gaussian_geometric.tif", img_gaussian_geofi)

    opt.a = 0
    opt.b = 800**0.5
    opt.mode = "uniform"
    noise_uniform = get_noise(param=opt)
    img_noise_uniform = truncate(img + noise_uniform,
                                 dst="./img/uniform_noise.tif")

    opt.a = 0.1
    opt.b = 0.1
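In this snippet truncate receives an image plus additive noise and a dst path, which suggests clipping pixel values back into the displayable 8-bit range and saving a copy. A sketch under that assumption:

import numpy as np
import cv2

def truncate(img, dst=None):
    # Clip pixel values into [0, 255] and cast back to uint8.
    out = np.clip(img, 0, 255).astype(np.uint8)
    if dst is not None:
        cv2.imwrite(dst, out)  # optionally persist the result
    return out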
Example #30
def influxQuery5m(client,
                  max_points,
                  min_points,
                  measurements,
                  interfaces,
                  start_time="now()"):

    for measure in measurements:

        if (measurements[measure]["name"] == "arp_packets"
                or measurements[measure]["name"] == "ping_packets"
                or measurements[measure]["name"] == "dns_packets"
                or measurements[measure]["name"] == "dns_errors"):
            doSum = True
        else:
            doSum = False

        num_den = measure.split()
        numerator = num_den[0]
        denumerator2 = None

        if (len(num_den) >= 2):
            denumerator = num_den[1]
            if len(num_den) == 3:
                denumerator2 = num_den[2]
        else:
            denumerator = numerator

        key = numerator.split(':')[0]
        hosts = client.query(
            'SHOW TAG VALUES ON "ntopng" FROM "autogen"."%s" WITH KEY = "%s" LIMIT %s'
            % (numerator, key, str(cf.limitRSI)))

        m_numerator = []
        m_denumerator = []
        m_denumerator2 = []
        s_clause = ""

        for metric in measurements[measure]["metrics"][0]:
            m_numerator.append(metric)
            s_clause += "NON_NEGATIVE_DERIVATIVE(LAST(" + metric + "), 1s) AS " + metric + ", "

        if (m_numerator[0].find("flows") == -1):
            for metric in measurements[measure]["metrics"][1]:
                m_denumerator.append(metric)
                s_clause += "NON_NEGATIVE_DERIVATIVE(LAST(" + metric + "), 1s) AS " + metric + ", "
            if (denumerator2 != None):
                for metric in measurements[measure]["metrics"][2]:
                    m_denumerator2.append(metric)
                    s_clause += "NON_NEGATIVE_DERIVATIVE(LAST(" + metric + "), 1s) AS " + metric + ", "
        else:
            m_denumerator = m_numerator

        s_clause = s_clause[slice(len(s_clause) - 2)]

        f_clause = """"ntopng"."autogen".""" + '"' + numerator + '"'
        if (denumerator != numerator):
            f_clause += """, "ntopng"."autogen".""" + '"' + denumerator + '"'
            if (denumerator2 != None):
                f_clause += """, "ntopng"."autogen".""" + '"' + denumerator2 + '"'

        p2p_metric = ''
        if (numerator.find("unreachable") != -1):
            if (m_numerator[0].find("server") != -1):
                p2p_metric = "bytes_sent"
            else:
                p2p_metric = "bytes_rcvd"

        ips = hosts.get_points()
        for ip in ips:

            if isLocalHost(ip['value']):
                continue

            for ifid in interfaces:

                #key
                host_interface_measure = ip[
                    'value'] + "|" + ifid + "|" + measurements[measure]["name"]

                if (start_time == "now()"
                        and host_interface_measure in hostRSI):
                    w_clause = start_time + " - 5m"
                    min_points = 1
                else:
                    if (start_time == "now()"):
                        max_points = 2 * min_points
                    w_clause = start_time + " - " + str(
                        (max_points - 1) * 5) + "m AND time < " + start_time

                results = client.query(
                    """  SELECT %s 
                                            FROM %s 
                                            WHERE time >= %s AND "%s"='%s' AND "ifid"='%s' 
                                            GROUP BY time(5m) fill(none) """ %
                    (s_clause, f_clause, w_clause, key, ip['value'], ifid))

                n_points = list(results.get_points(measurement=numerator))

                if (numerator == denumerator):
                    d_points = n_points
                else:
                    d_points = list(
                        results.get_points(measurement=denumerator))
                    if denumerator2 != None:
                        d2_points = list(
                            results.get_points(measurement=denumerator2))
                        if len(d_points) == 0:
                            d_points = d2_points
                            m_denumerator = []
                        elif len(d2_points) == 0:
                            d2_points = d_points
                            m_denumerator2 = []

                dim_numerator = len(n_points)
                dim_denumerator = len(d_points)
                dim_denumerator2 = len(
                    d2_points) if denumerator2 != None else dim_numerator

                if (min(dim_numerator, dim_denumerator, dim_denumerator2) <
                        min_points):
                    continue

                if (dim_numerator != dim_denumerator):
                    if dim_denumerator > dim_numerator:
                        d_points = d_points[-dim_numerator:]
                    else:
                        n_points = n_points[-dim_denumerator:]

                #by reference changes
                dim_denumerator2 = len(
                    d2_points) if denumerator2 != None else dim_denumerator

                if (dim_denumerator2 != dim_denumerator
                        and denumerator2 != None):
                    if dim_denumerator2 > dim_denumerator:
                        d2_points = d2_points[-dim_denumerator:]
                    else:
                        n_points = n_points[-dim_denumerator2:]
                        d_points = d_points[-dim_denumerator2:]

                if p2p_metric:

                    results = client.query(
                        """ SELECT NON_NEGATIVE_DIFFERENCE(SUM("%s")) AS "%s"
                                                FROM "ntopng"."autogen"."host:p2p" 
                                                WHERE time >= %s AND "%s"='%s' AND "ifid"='%s' 
                                                GROUP BY time(5m) fill(0) """ %
                        (p2p_metric, p2p_metric, w_clause, key, ip['value'],
                         ifid))

                    p2p_points = list(
                        results.get_points(measurement="host:p2p"))

                    p2p_non_zero_points = []
                    for i in range(len(p2p_points)):
                        if (p2p_points[i][p2p_metric] != 0):
                            p2p_non_zero_points.append(p2p_points[i]['time'])

                series = []
                seriesDate = []

                for i in range(len(n_points)):
                    sum_numerator = 0
                    sum_denumerator = 0

                    if (n_points[i]['time'] != d_points[i]['time']):
                        break

                    if (denumerator2 != None):
                        if d_points[i]['time'] != d2_points[i]['time']:
                            break

                    if (p2p_metric != ''):
                        if n_points[i]['time'] in p2p_non_zero_points:
                            break

                    try:
                        for x in m_numerator:
                            sum_numerator += n_points[i][x]
                        for x in m_denumerator:
                            sum_denumerator += d_points[i][x]
                        for x in m_denumerator2:
                            sum_denumerator += d2_points[i][x]
                    except TypeError:  #portability
                        continue

                    if (sum_denumerator >= measurements[measure]["minValue"][1]
                            or sum_numerator >=
                            measurements[measure]["minValue"][0]):

                        if doSum:
                            sum_denumerator += sum_numerator

                        if sum_denumerator != 0:
                            ratioValue = truncate(sum_numerator /
                                                  sum_denumerator)
                        else:
                            continue

                        if measurements[measure]["name"].find("size") != -1:
                            thVal = cf.packet_size_threshold
                        else:
                            thVal = cf.ratio_threshold

                        if (not checkThreshold(
                                ratioValue, measurements[measure]["name"],
                                ip['value'], ifid, statsThreshold,
                                n_points[i]['time'], "ip", thVal) or
                            (doSum and
                             measurements[measure]["name"] != "dns_errors")):
                            continue

                        series.append(ratioValue)
                        if (len(series) >= min_points
                                or w_clause == "now() - 5m"):
                            seriesDate.append(n_points[i]['time'])

                if (w_clause != "now() - 5m"):
                    if (len(series) >= min_points):
                        if (start_time == "now()"):
                            rsi = RSI(series[-min_points:], min_points - 1,
                                      ip['value'],
                                      measurements[measure]["name"], ifid,
                                      seriesDate[0] + "|wu", statsRSI)
                            hostRSI.update({host_interface_measure: rsi})
                        else:
                            rsi = RSI(series[:min_points], min_points - 1,
                                      ip['value'],
                                      measurements[measure]["name"], ifid,
                                      seriesDate[0] + "|wu", statsRSI)
                            test_series = series[min_points:]
                            for i in range(len(test_series)):
                                rsi.next(test_series[i], seriesDate[i + 1],
                                         statsRSI)
                else:
                    if (len(series) >= 1):
                        hostRSI[host_interface_measure].next(
                            series[-1], seriesDate[-1], statsRSI)
Example #31
 def belongs_to_curve(self, x, y):
     """ does point belong to this curve """
     return utils.truncate(y**2) == utils.truncate(x**3 + self.a*x + self.b)
Example #32
 def __str__(self):
     return '@{0:06d} ({1})'.format(self.pk, truncate(self.description))
Example #33
        # This step uses a 5-layer "priority network" to quickly scan
        # the image for areas of the pool table where something is probably
        # present, which are then fed to the much deeper classifier network.
        heatmap = np.zeros((67, 33, 3))
        count = 0
        start = time.time()
        for (x, y, window) in utils.sliding_window(new_image,
                                                   stepSize=smallwindow_step,
                                                   windowSize=(winW, winH)):
            if window.shape[0] != winH or window.shape[1] != winW:
                continue
            window = utils.skimage.transform.resize(window, (24, 24))
            predictions = priority.isball(window)
            heatmap[int(x / smallwindow_step),
                    int(y / smallwindow_step),
                    0] = utils.truncate(predictions[0], 3)
            heatmap[int(x / smallwindow_step),
                    int(y / smallwindow_step),
                    1] = utils.truncate(predictions[1], 3)
            count += 1

        # DEBUG: Print runtime for priority network to slide over image
        end = time.time()
        print("RUNTIME", end - start)
        heatmap = heatmap.transpose(1, 0, 2)
        heatmap = heatmap[:, :, 1]

        # Get precise ball positions from local max finder
        balls = utils.personalspace(heatmap, 0.49)

        # DEBUG: Print results
Example #34
def build_context_menu_entries(num_comments, commentsUrl, subreddit, domain, link_url, post_id, post_title, posted_by, onClick_action, thumbnail):
    s=truncate(subreddit,15)     #crop long subreddit names in context menu
    colored_subreddit_short=colored_subreddit( s )
    colored_domain_full=colored_subreddit( domain, 'tan',False )
    post_title_short=truncate(post_title,15)
    post_author=truncate(posted_by,15)

    label_view_comments=translation(32504)+' ({0})'.format(num_comments)
    label_more_by_author=translation(32506).format(author=post_author)
    label_goto_subreddit=translation(32508).format(subreddit=subreddit)
    label_goto_domain=translation(32510).format(domain=domain)
    label_autoplay_after=translation(32513)+' '+colored_subreddit( post_title_short, 'gray',False )

    label_add_to_shortcuts=translation(32516).format(subreddit=subreddit)
    cxm_list=[]

    cxm_list.extend( build_link_in_browser_context_menu_entries(link_url) )
    cxm_list.extend( build_open_browser_to_pair_context_menu_entries(link_url) )

    if cxm_show_comments:
        cxm_list.append((label_view_comments , build_script('listLinksInComment', commentsUrl )  ))

    #more by author
    if GCXM_hasmultipleauthor and cxm_show_by_author:
        cxm_list.append( (label_more_by_author, build_script("listSubReddit", assemble_reddit_filter_string("","/user/"+posted_by+'/submitted'), posted_by)  ) )

    #more from r/subreddit
    if GCXM_hasmultiplesubreddit and cxm_show_by_subreddit:
        cxm_list.append( (label_goto_subreddit, build_script("listSubReddit", assemble_reddit_filter_string("",subreddit), subreddit)  ) )

    #more from domain
    if GCXM_hasmultipledomain and cxm_show_by_domain:
        cxm_list.append( (   label_goto_domain, build_script("listSubReddit", assemble_reddit_filter_string("",'','',domain), domain)  ) )

    #more random (no setting to disable this)
    if any(x in GCXM_actual_url_used_to_generate_these_posts.lower() for x in ['/random','/randnsfw']): #if '/rand' in GCXM_actual_url_used_to_generate_these_posts:
        cxm_list.append( (translation(32511) +' random', build_script('listSubReddit', GCXM_actual_url_used_to_generate_these_posts)) , )  #Reload

    #Autoplay all
    #Autoplay after post_title
    #slideshow
    if cxm_show_autoplay:
        cxm_list.extend( [
                        (translation(32512)    , build_script('autoPlay', GCXM_reddit_query_of_this_gui)),
                        (label_autoplay_after  , build_script('autoPlay', GCXM_reddit_query_of_this_gui.split('&after=')[0]+'&after='+post_id)),
                        ])

    if cxm_show_slideshow:
        cxm_list.append( (translation(32514)    , build_script('autoSlideshow', GCXM_reddit_query_of_this_gui)) )

    #Add %s to shortcuts
    if not subreddit_in_favorites(subreddit) and cxm_show_add_shortcuts:
        cxm_list.append( (label_add_to_shortcuts, build_script("addSubreddit", subreddit)  ) )

    #Add to subreddit/domain filter
    if cxm_show_filter:
        cxm_list.append( (translation(32519).format(colored_subreddit_short), build_script("addtoFilter", subreddit,'','subreddit')  ) )
        cxm_list.append( (translation(32519).format(colored_domain_full)    , build_script("addtoFilter", domain,'','domain')  ) )

    #Search / Other posts with this link
    cxm_list.extend( build_reddit_search_context_menu_entries(GCXM_hasmultiplesubreddit,subreddit,link_url) )

    if cxm_show_youtube_items:
        cxm_list.extend( build_youtube_context_menu_entries('', link_url, video_id=None, title=post_title ))

    cxm_list.extend( build_add_to_favourites_context_menu_entry(title=post_title, onClick_action=onClick_action,thumbnail=thumbnail) )

    return cxm_list
Example #35
def truncatable_prime(prime):
    return all(is_prime(num) for num in truncate(prime))
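Given the name truncatable_prime (the Project Euler 37 notion), truncate here presumably yields every left- and right-truncation of the number, e.g. 3797 -> 797, 97, 7 and 379, 37, 3. A sketch consistent with that reading:

def truncate(n):
    digits = str(n)
    # Left truncations (includes n itself): 3797, 797, 97, 7
    for i in range(len(digits)):
        yield int(digits[i:])
    # Right truncations: 379, 37, 3
    for i in range(len(digits) - 1, 0, -1):
        yield int(digits[:i])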
Example #36
 def body_display(self, obj):
     return truncate(strip_tags(obj.body), 90)
Example #37
 def get_description(self, obj):
     return truncate(strip_tags(obj.description), 180)
Example #38
def store(sensor, measures, ifnotexists=False):
    # if an exception occurred, skip this sensor
    if measures is None: return
    # for each returned measure
    for measure in measures:
        # set the timestamp to now if not already set
        if "timestamp" not in measure: measure["timestamp"] = utils.now()
        # define the key to store the value
        key = sensor["db_group"] + ":" + measure["key"]
        # if ifnotexists is set, check if the key exists
        if ifnotexists and db.exists(key):
            log.debug("[" + sensor["module_id"] + "][" + sensor["group_id"] +
                      "][" + sensor["sensor_id"] +
                      "] key already exists, ignoring new value")
            return
        # delete previous values if needed
        realtime_count = conf["sensors"]["retention"]["realtime_count"]
        if "retention" in sensor and "realtime_count" in sensor["retention"]:
            realtime_count = sensor["retention"]["realtime_count"]
        if realtime_count > 0:
            db.deletebyrank(key, 0, -realtime_count)
        # if only measures with a newer timestamp than the latest can be added, apply the policy
        realtime_new_only = conf["sensors"]["retention"]["realtime_new_only"]
        if "retention" in sensor and "realtime_new_only" in sensor["retention"]:
            realtime_new_only = sensor["retention"]["realtime_new_only"]
        if realtime_new_only:
            # retrieve the latest measure's timestamp
            last = db.range(key, -1, -1)
            if len(last) > 0:
                last_timestamp = last[0][0]
                # if the measure's timestamp is older or the same, skip it
                if measure["timestamp"] <= last_timestamp:
                    log.debug("[" + sensor["module_id"] + "][" +
                              sensor["group_id"] + "][" + sensor["sensor_id"] +
                              "] (" +
                              utils.timestamp2date(measure["timestamp"]) +
                              ") old event, ignoring " + measure["key"] +
                              ": " + str(measure["value"]))
                    continue
        # check if there is already something stored with the same timestamp
        old = db.rangebyscore(key, measure["timestamp"], measure["timestamp"])
        if len(old) > 0:
            if old[0][1] == measure["value"]:
                # if the value is also the same, skip it
                log.debug("[" + sensor["module_id"] + "][" +
                          sensor["group_id"] + "][" + sensor["sensor_id"] +
                          "] (" + utils.timestamp2date(measure["timestamp"]) +
                          ") already in the database, ignoring " +
                          measure["key"] + ": " + str(measure["value"]))
                continue
            else:
                # same timestamp but different value, remove the old value so to store the new one
                db.deletebyscore(key, measure["timestamp"],
                                 measure["timestamp"])
        # store the value into the database
        log.info("[" + sensor["module_id"] + "][" + sensor["group_id"] + "][" +
                 sensor["sensor_id"] + "] (" +
                 utils.timestamp2date(measure["timestamp"]) + ") saving " +
                 measure["key"] + ": " +
                 utils.truncate(str(measure["value"])) +
                 conf["constants"]["formats"][sensor["format"]]["suffix"])
        db.set(key, measure["value"], measure["timestamp"])
        # re-calculate the derived measures for the hour/day
        if "summarize" in sensor:
            summarize(sensor, 'hour', utils.hour_start(measure["timestamp"]),
                      utils.hour_end(measure["timestamp"]))
            summarize(sensor, 'day', utils.day_start(measure["timestamp"]),
                      utils.day_end(measure["timestamp"]))
Example #39
 def _get_jstree_data(self):
     data = self.tagname
     if self.text:
         data += u' <span class="_tree_text">(%s)</span>' % (
             utils.truncate(self.text))
     return data
Example #40
 def __unicode__(self):
     return u'@{0:06d} ({1})'.format(self.pk, truncate(self.description))
Example #41
 def setUp(self):
     truncate()
Example #42
 def update_account(self):
     account = self._account
     self._account_eth.text_value = utils.truncate(
         self._web3.get_eth(account.address)) + ' ETH'
     self._account_mtx.text_value = utils.truncate(
         self._web3.get_mtx(account.address)) + ' MTX'
Example #43
 def __unicode__(self):
     if self.category:
         return '%s: %s' % (self.category.name, truncate(strip_tags(self.body), 38))
     else:
         return truncate(strip_tags(self.body), 71)
Example #44
def truncatable_prime(prime):
    return all(is_prime(num) for num in truncate(prime))
Example #45
def build_context_menu_entries(num_comments, commentsUrl, subreddit, domain,
                               link_url, post_id, post_title, posted_by):
    from reddit import assemble_reddit_filter_string, subreddit_in_favorites  #, this_is_a_user_saved_list
    from utils import colored_subreddit, build_script, truncate

    s = truncate(subreddit, 15)  #crop long subreddit names in context menu
    colored_subreddit_short = colored_subreddit(s)
    colored_subreddit_full = colored_subreddit(subreddit)
    colored_domain_full = colored_subreddit(domain, 'tan', False)
    post_title_short = truncate(post_title, 15)
    post_author = truncate(posted_by, 15)

    label_view_comments = translation(32050) + ' ({})'.format(num_comments)
    label_goto_subreddit = translation(32051) + ' {}'.format(
        colored_subreddit_full)
    label_goto_domain = translation(32053) + ' {}'.format(colored_domain_full)
    label_search = translation(32052)
    label_autoplay_after = translation(32055) + ' ' + colored_subreddit(
        post_title_short, 'gray', False)
    label_more_by_author = translation(32049) + ' ' + colored_subreddit(
        post_author, 'gray', False)

    cxm_list = [
        ('html to text', build_script('readHTML', link_url)),
        (label_view_comments, build_script('listLinksInComment', commentsUrl)),
    ]

    #more by author
    if GCXM_hasmultipleauthor:
        cxm_list.append((label_more_by_author,
                         build_script(
                             "listSubReddit",
                             assemble_reddit_filter_string(
                                 "", "/user/" + posted_by + '/submitted'),
                             posted_by)))

    #more from r/subreddit
    if GCXM_hasmultiplesubreddit:
        cxm_list.append(
            (label_goto_subreddit,
             build_script("listSubReddit",
                          assemble_reddit_filter_string("", subreddit),
                          subreddit)))

    #more from domain
    if GCXM_hasmultipledomain:
        cxm_list.append(
            (label_goto_domain,
             build_script("listSubReddit",
                          assemble_reddit_filter_string("", '', '', domain),
                          domain)))

    #more random
    if any(x in GCXM_actual_url_used_to_generate_these_posts.lower()
           for x in ['/random', '/randnsfw']
           ):  #if '/rand' in GCXM_actual_url_used_to_generate_these_posts:
        cxm_list.append(
            (translation(32053) + ' random',
             build_script('listSubReddit',
                          GCXM_actual_url_used_to_generate_these_posts)),
        )  #Reload

    #Autoplay all
    #Autoplay after post_title
    #slideshow
    cxm_list.extend([
        (translation(32054),
         build_script('autoPlay', GCXM_reddit_query_of_this_gui)),
        (label_autoplay_after,
         build_script(
             'autoPlay',
             GCXM_reddit_query_of_this_gui.split('&after=')[0] + '&after=' +
             post_id)),
        (translation(32048),
         build_script('autoSlideshow', GCXM_reddit_query_of_this_gui)),
    ])

    #Add %s to shortcuts
    if not subreddit_in_favorites(subreddit):
        cxm_list.append((translation(32056) % colored_subreddit_short,
                         build_script("addSubreddit", subreddit)))

    #Add to subreddit/domain filter
    cxm_list.append((translation(32057) % colored_subreddit_short,
                     build_script("addtoFilter", subreddit, '', 'subreddit')))
    cxm_list.append((translation(32057) % colored_domain_full,
                     build_script("addtoFilter", domain, '', 'domain')))

    #Search
    if GCXM_hasmultiplesubreddit:
        cxm_list.append((label_search, build_script("search", '', '')))
    else:
        label_search += ' {}'.format(colored_subreddit_full)
        cxm_list.append((label_search, build_script("search", '', subreddit)))

    return cxm_list
Example #46
if __name__ == '__main__':

    fn = "./img/book_cover.jpg"
    if not os.path.exists(fn):
        raise Exception('[Error] image file does not exist')

    img = cv2.cvtColor(cv2.imread(fn, cv2.IMREAD_COLOR), cv2.COLOR_BGR2GRAY)
    img_blurred, hm = blurring_filter(img,
                                      a=0.1,
                                      b=0.1,
                                      t=1,
                                      dst="./img/blurred.tif")

    noise_gaussian = get_gaussian_noise(img_blurred, a=0, b=650**0.5)
    img_noise_gauss = truncate(img_blurred + noise_gaussian,
                               dst="./img/blurred_noise_g.tif")

    img_inverse_blurred = inverse_filter(img_blurred,
                                         hm,
                                         dst="./img/inverse_blurred.tif")
    img_inverse_blurred_g = inverse_filter(img_noise_gauss,
                                           hm,
                                           dst="./img/inverse_noise_g.tif")
    img_weiner_blurred = wiener_deconv_filter(img_blurred,
                                              img,
                                              hm,
                                              noise_gaussian,
                                              dst="./img/wiener_blurred.tif")
    img_weiner_blurred_g = wiener_deconv_filter(img_noise_gauss,
                                                img,
                                                hm,
Example #47
 def setUp(self):
     truncate()
Example #48
 def setUp(self) -> None:
     utils.truncate(initial_number=AUTO_INCREMENT)
Example #49
 def test_truncate(self):
     self.assertEquals(utils.truncate(u'xxxxxxxxxx', 4), u'xx... xx')