def checkHTTPS(self, userData):
        util.debug('[SC] checking HTTPS settings with WS [%s] [%s]' %
                   (getSetting('ws_usessl'),
                    userData.find('wants_https_download').text))
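        # flip the server-side flag whenever it disagrees with the local ws_usessl setting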
        toggle = False
        if getSettingAsBool('ws_usessl') is not True and userData.find(
                'wants_https_download').text == '1':
            toggle = True
        elif getSettingAsBool('ws_usessl') is True and userData.find(
                'wants_https_download').text == '0':
            toggle = True

        if toggle:
            headers, req = self._create_request('/', {'wst': self.token})
            try:
                util.info('[SC] userData: changing http(s) setting')
                data = post(
                    self._url('api/toggle_https_download/'),
                    req,
                    headers=headers,
                    output="content")
                util.debug('[SC] change: %s' % str(data))
            except:
                self.clearToken()
                return False
Example #2
def get_info(url):
	cached = get_cached_info(url)
	if cached:
		return cached
	else:
		info = _empty_info()
		util.info('Not in cache : '+url)
		try:
			page = util.request(url,headers={'Referer':BASE_URL,'User-Agent':util.UA})
		except:
			util.error('Unable to read page '+url)
			traceback.print_exc()
			info['url'] = url
			return info
		info['title'] = _get_title(page)
		info['search-title'] = _search_title(page)
		info['url'] = url
		info['trailers_url'] = url.rstrip('/')+'/videa/'
		info['cast'] = _cast(page)
		info['genre'] = _genre(page)
		info['img'] = _get_img(page)
		info['plot'] = _plot(page)
		country,info['year'] = _origin(page)
		info['percent'],info['rating'] = _rating(page)
		info['director'] = _director(page)
		info['votes'] = _votes(page)
		_validate(info)
		set_info(info)
		return info
Example #3
    def run(self, circuit):
        """Given a logisim.Circuit object, set its input pins, evaluate
        the circuit, and determine if its output matches the expected
        output.
        """
        from logisim.errors import NoInputsError

        util.info("running eval test on '{}'".format(circuit.name))

        try:
            output_vals = circuit.eval(self.input)
        except NoInputsError as e:
            desc = "a component is missing an input value"

            return {'deduction': self.deduction,
                    'description': desc,
                    'notes': [str(e)]}

        failed = False
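        # compare each labeled output pin against its expected value; if all
        # match, run() falls through and returns None, i.e. the test passed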
        for label, value in self.output.items():
            pin = _get_pin_with_label(output_vals.keys(), label)

            if output_vals[pin] != value:
                failed = True
                break

        if failed:
            desc = _build_description(self.input, self.output).split('\n')
            desc += ["your circuit produced:"]
            desc += _build_description({}, output_vals).split('\n')

            return {'deduction': self.deduction,
                    'description': "did not produce the correct output",
                    'notes': desc}
Example #4
 def do_POST(self):
     """Handle POST requests to the API endpoint"""
     global endpoint
     parsed_path = urlparse(self.path)
     if "/api/v1/post_parameters" in parsed_path:
         self.send_response(200)
         # TODO: Security?
         self.send_header("Access-Control-Allow-Origin", "*")
         self.end_headers()
         length = int(self.headers["Content-Length"])
         post_data = self.rfile.read(length).decode("utf-8")
         #post_data = self.rfile.read(length)
         # Parse data from POST
         print('Got a post ')
         print(type(post_data))
         print(post_data)
         new_data = parse_qs(post_data)
         print(type(new_data))
         for x in new_data:
             print(x)
             print(new_data[x])
         params.new_num_active_workers = int(new_data['num_workers'][0])
         #change_num_active_workers()
     else:
         util.info("POST sent to " + str(parsed_path[2]))
Example #5
def run_hooks_for(trigger):
    from os.path import sep
    from subprocess import call
    global _triggers

    if trigger not in _triggers:
        raise ValueError("unknown trigger: '" + str(trigger) + "'")

    hooks = list(set(_hooks[trigger]) - set(_hooks_done[trigger]))
    num_done = 0

    if len(hooks) > 0:
        util.info("running hooks for trigger '" + str(trigger) + "'")

        for fname in hooks:
            rv = call(config.hooks_dir + sep + fname, env=_create_env())
            _hooks_done[trigger].append(fname)
            num_done += 1

            if rv != 0:
                util.error("hook '" + str(fname) + "' exited abnormally")
                util.exit(util.ERR_ABNORMAL_HOOK_EXIT)

        util.info("successfully ran " + str(num_done) + " " + \
                  util.plural('hook', num_done))
Example #6
    def policy(self, state, legal_moves):
        """The policy of picking an action based on their weights."""
        if not legal_moves:
            return None

        if not self.minimax_enabled:
            # don't use minimax if we're in learning mode
            best_move, _ = best_move_val(
                self.model.predict(numpify(state)),
                legal_moves
            )
            return best_move
        else:
            next_states = {self.reversi.next_state(
                state, move): move for move in legal_moves}
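            # score each successor state with minimax, then pick the move
            # whose successor scored best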
            move_scores = []
            for s in next_states.keys():
                score = self.minimax(s)
                move_scores.append((score, s))
                info('{}: {}'.format(next_states[s], score))

            best_val = -float('inf')
            best_move = None
            for each in move_scores:
                if each[0] > best_val:
                    best_val = each[0]
                    best_move = next_states[each[1]]

            assert best_move is not None
            return best_move
Example #7
    def _get_file_url_anonymous(self,page,post_url,headers,captcha_cb):

        capdata = json.loads(util.request(self._url('reloadXapca.php')))
        captcha = capdata['image']
        if not captcha.startswith('http'):
            captcha = 'http:' + captcha
        sound = capdata['sound']
        if not sound.startswith('http'):
            sound = 'http:' + sound
        # ask callback to provide captcha code
        self.info('Asking for captcha img %s' % captcha)
        code = captcha_cb({'id':captcha,'img': captcha,'snd':sound})
        if not code:
            self.info('Captcha not provided, done')
            return

        ts = re.search('<input type=\"hidden\" name=\"ts\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        cid = re.search('<input type=\"hidden\" name=\"cid\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        sign = re.search('<input type=\"hidden\" name=\"sign\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        has = capdata['hash']
        salt = capdata['salt']
        timestamp = capdata['timestamp']
        token = re.search('<input type=\"hidden\" name=\"_token_\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        if not (sign and ts and cid and has and token):
            util.error('[uloz.to] - unable to parse required params from page, plugin needs fix')
            return
        request = {'captcha_type': 'xapca', 'hash': has, 'salt': salt, 'timestamp': timestamp,
                   'ts': ts.group(1), 'cid': cid.group(1), 'sign': sign.group(1),
                   'captcha_value': code, 'do': 'downloadDialog-freeDownloadForm-submit',
                   '_token_': token.group(1)}
        req = urllib2.Request(post_url,urllib.urlencode(request))
        req.add_header('User-Agent',util.UA)
        req.add_header('Referer',post_url)
        req.add_header('Accept','application/json')
        req.add_header('X-Requested-With','XMLHttpRequest')
        sessid=[]
        for cookie in re.finditer('(ULOSESSID=[^\;]+)',headers.get('Set-Cookie'),re.IGNORECASE | re.DOTALL):
            sessid.append(cookie.group(1))
        req.add_header('Cookie','nomobile=1; uloztoid='+cid.group(1)+'; uloztoid2='+cid.group(1)+'; '+sessid[-1])
        util.info(request)
        try:
            resp = urllib2.urlopen(req)
            page = resp.read()
            headers = resp.headers
        except urllib2.HTTPError:
            # this is not OK, something went wrong
            traceback.print_exc()
            util.error('[uloz.to] cannot resolve stream url, server did not redirect us')
            util.info('[uloz.to] POST url:'+post_url)
            return
        try:
            result = json.loads(page)
        except:
            raise ResolveException('Unexpected error, addon needs fix')
        if 'status' not in result:
            raise ResolveException('Unexpected error, addon needs fix')
        if result['status'] == 'ok':
            return self._fix_stream_url(result['url'])
        elif result['status'] == 'error':
            # the only known state is wrong captcha for now
            util.error('Captcha validation failed, please try playing/downloading again')
            util.error(result)
            raise ResolveException('Captcha failed, try again')
Example #8
def resolve(url):
    """
        Resolve the given url by asking all resolvers.

        Returns None if no resolver claims to be able to resolve this url,
        False if a resolver matched but did not return any value (thus failed),
        and a list of resolved stream objects on success.
    """
    url = util.decode_html(url)
    util.info('Resolving ' + url)
    resolver = _get_resolver(url)
    value = None
    if resolver is None:
        return None
    util.info('Using resolver \'%s\'' % str(resolver.__name__))
    try:
        value = resolver.resolve(url)
    except:
        traceback.print_exc()
    if value is None:
        return False
    default = item()

    def fix_stream(i, url, resolver, default):
        """ fix  missing but required values """
        if 'name' not in i.keys():
            i['name'] = resolver.__name__
        if 'surl' not in i.keys():
            i['surl'] = url
        for key in default.keys():
            if key not in i.keys():
                i[key] = default[key]

    for i in value:
        fix_stream(i, url, resolver, default)
    return sorted(value, key=lambda i: i['quality'])
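# A minimal caller sketch for the contract above; play_first is hypothetical,
# resolve and util are as used in this example.
def play_first(url):
    streams = resolve(url)
    if streams is None:
        util.info('no resolver known for ' + url)
    elif streams is False:
        util.error('resolver matched but failed for ' + url)
    else:
        best = streams[-1]  # sorted by quality ascending; take the best
        util.info('playing %s (%s)' % (best['url'], best['quality']))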
Example #9
 def download(self, urls=None):
     total_downloaded = 0
     if urls is None:
         connections = [self.connect(self.host) for i in range(self.runs)]
     else:
         connections = [self.connect(h['host']) for h in urls]
     total_start_time = time()
     for current_file in self.DOWNLOAD_FILES:
         threads = []
         for run in range(self.runs):
             thread = Thread(
                 target=self.downloadthread,
                 args=(connections[run],
                       '%s?x=%d' % (current_file, int(time() * 1000))
                       if urls is None else urls[run]['url']))
             thread.run_number = run + 1
             thread.start()
             threads.append(thread)
         for thread in threads:
             try:
                 thread.join()
                 total_downloaded += thread.downloaded
                 util.debug('[SC] Run %d for %s finished' %
                            (thread.run_number, current_file))
             except:
                 pass
     total_ms = (time() - total_start_time) * 1000
     for connection in connections:
         connection.close()
     util.info('[SC] Took %d ms to download %d bytes' % (total_ms,
                                                         total_downloaded))
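     # bytes * 8000 / ms == bits per second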
     return total_downloaded * 8000 / total_ms
Example #10
  def validate_model(best):
    if gen_n_text_samples:
      print '\nGenerating %d text samples...' % gen_n_text_samples
      n_seed = 100
      start = max(0, np.random.randint(0, training_dataset.n_words - n_seed))
      seed = training_dataset.get_words()[start: start + n_seed]
      gen_text(seed=seed, how_many=gen_n_text_samples)

    print '\nValidating model...'
    validation_t.start()
    v_model.set_weights(t_model.get_weights())
    v_model.reset_states()
    n_v_samples, gen_v = validation_data[0]()
    loss, _ = v_model.evaluate_generator(gen_v, n_v_samples)
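    # perplexity is exp of the mean cross-entropy loss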
    pp = np.exp(loss)
    val_elapsed, val_tot = validation_t.lap()
    validation_info = '''Validation result:
  - Model loss:        %f
  - Perplexity:        %f %s
  - OOV rate:          %f
  - Validation took:   %s
  - Total validation:  %s
    ''' % (loss, pp, delta_str(pp, best), validation_data[1], val_elapsed, val_tot)
    info(validation_info)
    return pp
Example #11
 def _add_source(self, filename, sourcename, transform=None, only_a_few_lines=True):
     """
     Update the features dict with data from the file named filename.
     Use the same name for the type of data source.
     """
     self._progress("Adding source = %s" % sourcename)
     infile = open(filename, "r")
     line_no = 0
     for line in infile:
         line_no += 1
         if line_no % 500 == 0:
             self._progress("cur line = %d" % line_no)
         if only_a_few_lines and not self.production_run and line_no > 200:
             util.info("\tWARNING: stopping at line 200 of input file. Turn off for production runs.")
             break
         cur_tag = self._read_tag(line)
         if cur_tag in self.only_these_tags:
             cur_dict = self._line_to_dict(line.rstrip().split("\t"), transform=transform)
             if cur_dict: # that is, if cur_dict is not empty
                 try:
                     source_dict = self.features[cur_tag]
                 except KeyError:
                     source_dict = dict()
                 try:
                     old_dict = source_dict[sourcename]
                     # If we get here, we need to merge the new
                     # cur_dict with the old one.
                     source_dict[sourcename] = self._merge_song_dicts(old_dict, cur_dict)
                 except KeyError: # We're adding a new source.
                     source_dict[sourcename] = cur_dict
                 self.features[cur_tag] = source_dict
     infile.close()
Example #12
def test(cmd):
    global expected
    global got
    global count

    util.info("")
    util.info("- Starting " + cmd)
    util.info("")
    util.run(cmd)
    
    start = time.time()

    clientlist = []
    expected = []
    for i in range(1, NUM_CLIENT):
        expected.append(commands.getoutput("cat ./testdata/file%s.txt" % str(i)))

    commands.getoutput("rm -rf %s" % tmpfile)

    for i in range(0, NUM_CLIENT):
        client = testit("Client-" + str(i), i)
        clientlist.append(client)
        client.start()
        time.sleep(0.3)
    
    for client in clientlist:
        client.join()

    end = time.time()
    util.info("Elapsed time (in seconds): " + str(end-start))

    time.sleep(CGI_SPIN_TIME + 2)
    res = commands.getoutput("cat %s" % tmpfile)

    if util.is_server_alive(cmd) == -1:
        util.error("Ouch! Server is dead!"
                   " Your bounded buffered may not be well protected");

    pos0 = res.find(expected[0])
    pos1 = res.find(expected[1])
    pos2 = res.find(expected[2])
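    # pass iff every expected body was found and they appear in order (file1, file2, file3)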
    passed = pos0 > 0 and pos1 > 0 and pos2 > 0 and pos0 < pos1 and pos1 < pos2
    
    util.info(res)

    if passed:
        print ""
        print "#####################################"
        print "GOOD! you implement SFF correctly"
        print "#####################################"
        print ""
        count = count + 1
    else:
        print ""
        print "#####################################"
        print "Oh oh! ERROR ERROR!"
        print "SFF is not implemented correctly"
        print "#####################################"
        print ""
        sys.exit(-1)
Example #13
 def _performance_stats(self, remove_tags_dont_work=True):
     self._progress("Computing performance stats.")
     for (tag, true_y) in self.y.items():
         if tag not in self.stats:
             pdb.set_trace()
         yhat = self.yhat[tag].transpose() # make n_songs x 1
         self.stats[tag]["Num Songs"] = len(yhat)
         # SSE
         self.stats[tag][r'$\text{SSE} / n$'] = numpy.sum(numpy.power((true_y-yhat),2)) / len(true_y)
         # precision, recall, etc.
         sorted_yhat = sorted([(yhat[i,0], i) for i in range(len(yhat))], reverse=True)
         graded = [self._in_ground_truth(true_y[i,0]) for (yhat_val, i) in sorted_yhat]
         try:
             self.stats[tag]["Baseline"] = self._random_precision(graded)
             self.stats[tag]["AUC"] = self._areaUnderCurve(graded)
             self.stats[tag]["MAP"] = self._avgPrecision(graded)
             self.stats[tag]["R-Prec"] = self._rPrecision(graded)
             self.stats[tag]["10-Prec"] = self._tenPrecision(graded)
             baseline = self.stats[tag]["Baseline"]
             if baseline > 0:
                 self.stats[tag]["MAP/Baseline"] = self.stats[tag]["MAP"] / baseline
                 self.stats[tag]["R-Prec/Baseline"] = self.stats[tag]["R-Prec"] / baseline
                 self.stats[tag]["10-Prec/Baseline"] = self.stats[tag]["10-Prec"] / baseline
         except ValueError:
             util.info("WARNING: TP==0 or FP==0 for tag = %s." % tag)
             if remove_tags_dont_work:
                 self._remove_tag(tag)
                 continue
         # Record best and worst songs.
         song_list = list(self.song_lists[tag])
         self.best_worst_songs[tag] = dict()
         index_best_song = sorted_yhat[0][1]
         self.best_worst_songs[tag]["Best Song"] = (self.songid_to_song[song_list[index_best_song]], 1 if true_y[index_best_song,0] else 0)
         index_worst_song = sorted_yhat[-1][1]
         self.best_worst_songs[tag]["Worst Song"] = (self.songid_to_song[song_list[index_worst_song]], 1 if true_y[index_worst_song,0] else 0)
Example #14
    def __init__(self, provider, settings, addon):
        '''
        XBMContentProvider constructor
        Args:
            provider: content provider instance
            settings (dict): provider settings
            addon: running addon instance
        '''
        self.provider = provider
        # inject current user language
        try:  # not fully supported on Frodo
            provider.lang = xbmc.getLanguage(xbmc.ISO_639_1)
        except:
            provider.lang = None
            pass
        self.settings = settings
        # lang setting is optional for plugins
        if 'lang' not in self.settings:
            self.settings['lang'] = '0'

        util.info('Initializing provider %s with settings %s' % (provider.name, settings))
        self.addon = addon
        self.addon_id = addon.getAddonInfo('id')
        if '!download' not in self.provider.capabilities():
            self.check_setting_keys(['downloads'])
        self.cache = provider.cache
        provider.on_init()
Example #15
    def service(self):
        util.info("SOSAC Service Started")
        try:
            sleep_time = int(self.getSetting("start_sleep_time")) * 1000 * 60
        except:
            sleep_time = self.sleep_time
            pass

        self.sleep(sleep_time)

        try:
            self.last_run = float(self.cache.get("subscription.last_run"))
        except:
            self.last_run = time.time()
            self.cache.set("subscription.last_run", str(self.last_run))
            pass

        if not xbmc.abortRequested and time.time() > self.last_run:
            self.evalSchedules()

        while not xbmc.abortRequested:
            # evaluate subscriptions every 10 minutes
            if time.time() > self.last_run + 600:
                self.evalSchedules()
                self.last_run = time.time()
                self.cache.set("subscription.last_run", str(self.last_run))
            self.sleep(self.sleep_time)
        util.info("SOSAC Shutdown")
Example #16
def findstreams(data,regexes):
    resolvables = {}
    resolved = []
    # collect found urls as dict keys to avoid duplicates
    error = False
    for regex in regexes:
        for match in re.finditer(regex,data,re.IGNORECASE | re.DOTALL):
            url = filter_resolvable(match.group('url'))
            if url:
                util.info('Found resolvable %s ' % url)
                resolvables[url] = None
    for rurl in resolvables:
        streams = resolve(rurl)
        if streams == []:
            util.debug('There was an error resolving ' + rurl)
            error = True
        if streams is not None and len(streams) > 0:
            for stream in streams:
                resolved.append(stream)
    if error and len(resolved) == 0:
        return None
    if len(resolved) == 0:
        return {}
    resolved = sorted(resolved,key=lambda i:i['quality'])
    resolved = sorted(resolved,key=lambda i:len(i['quality']))
    resolved.reverse()
    return resolved
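# findstreams only ever reads match.group('url'), so every regex passed in must
# define a named group called url; a hedged illustration (the pattern and
# page_html below are assumptions, not from the source):
EMBED_RE = r'<iframe[^>]+src=[\'"](?P<url>[^\'"]+)[\'"]'
streams = findstreams(page_html, [EMBED_RE])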
Example #17
    def resolve(self, ident, download_type=None):
        params = {'ident': ident, 'wst': self.token}

        if download_type is not None:
            params.update({
                'download_type': download_type,
                'device_uuid': getSetting('uid'),
                'device_res_x': infoLabel('System.ScreenWidth'),
                'device_res_y': infoLabel('System.ScreenHeight'),
            })

        headers, req = self._create_request('/', params)
        util.info(headers)
        util.info(req)
        try:
            data = post(
                self._url('api/file_link/'),
                req,
                headers=headers,
                output="content")
            xml = ET.fromstring(data)
            if xml.find('status').text != 'OK':
                self.clearToken()
                util.error(
                    '[SC] Server returned error status, response: %s' % data)
                raise ResolveException(xml.find('message').text)
            return xml.find('link').text
        except Exception as e:
            self.clearToken()
            raise ResolveException(e)
Example #18
def resolve(url):
    url = util.decode_html(url)
    util.info('Resolving '+url)
    resolver = _get_resolver(url)
    value = None
    if resolver is None:
        return None
    util.debug('Using resolver ' + str(resolver.__name__))
    try:
        value = resolver.resolve(url)
    except:
        traceback.print_exc()
    if value is None:
        return []
    default = item()
    # fix missing but required values
    def fix_stream(i, url, resolver, default):
        if 'name' not in i.keys():
            i['name'] = resolver.__name__
        if 'surl' not in i.keys():
            i['surl'] = url
        for key in default.keys():
            if key not in i.keys():
                i[key] = default[key]
    for i in value:
        fix_stream(i, url, resolver, default)
    return sorted(value,key=lambda i:i['quality'])
Example #19
def test(cmd):
    util.info("")
    util.info("- Starting " + cmd)
    util.info("")
    util.run(cmd)
    
    commands.getoutput("rm -rf " + file1)
    util.info("")
    util.info("- Sending ./testclient localhost 2010 /testdata/file1.txt")

    res = commands.getoutput("./testclient localhost 2010 /testdata/file1.txt")
    arrival = util.get_stat2(res, "Stat-req-arrival")
    dispatch = util.get_stat2(res, "Stat-req-dispatch")
    read = util.get_stat2(res, "Stat-req-read")
    complete = util.get_stat2(res, "Stat-req-complete")
    print ""
    print "dispatch = " + str(dispatch)
    print "read = " + str(read)
    print "complete = " + str(complete)


    if dispatch >= 0 and read >= 0 and complete >= 0 and dispatch + read <= complete:
        util.good("You passed this test")
    else:
        util.error("Expected dispatch >= 0 and read >=0 and complete >= 0 and" 
                   " dispatch + read <= complete:")
Example #20
    def service(self):
        util.info("Start")
        try:
            sleep_time = int(self.getSetting("start_sleep_time")) * 1000 * 60
        except:
            sleep_time = self.sleep_time
            pass

        self.sleep(sleep_time)

        try:
            self.last_run = float(self.cache.get("subscription.last_run"))  # time.time()
        except:
            self.last_run = time.time()
            self.cache.set("subscription.last_run", str(self.last_run))
            pass

        if time.time() > self.last_run + 24 * 3600:
            self.evalSchedules()

        while not xbmc.abortRequested:
            if time.time() > self.last_run + 24 * 3600:
                self.evalSchedules()
                self.last_run = time.time()
                self.cache.set("subscription.last_run", str(self.last_run))
            self.sleep(self.sleep_time)
        util.info("Koncim")
Example #21
def download(addon, filename, url, local, notifyFinishDialog=True, headers={}):
    try:
        util.info('Downloading %s to %s' % (url, local))
    except:
        util.info('Downloading ' + url)
    local = xbmc.makeLegalFilename(local)
    try:
        filename = util.replace_diacritic(util.decode_html(filename))
    except:
        filename = 'Video file'
    icon = os.path.join(addon.getAddonInfo('path'), 'icon.png')
    notifyEnabled = addon.getSetting('download-notify') == 'true'
    notifyEvery = addon.getSetting('download-notify-every')
    notifyPercent = 1
    if int(notifyEvery) == 0:
        notifyPercent = 10
    if int(notifyEvery) == 1:
        notifyPercent = 5

    def encode(string):
        # build a utf-8 byte string without space-joining individual characters
        return (u'%s' % string).encode('utf-8')

    def notify(title, message, time=3000):
        try:
            xbmcgui.Dialog().notification(encode(title), encode(message), time=time, icon=icon,
                                          sound=False)
        except:
            traceback.print_exc()
            error('unable to show notification')

    def callback(percent, speed, est, filename):
        if percent == 0 and speed == 0:
            notify(xbmc.getLocalizedString(13413), filename)
            return
        if notifyEnabled:
            if percent > 0 and percent % notifyPercent == 0:
                esTime = '%ss' % est
                if est > 60:
                    esTime = '%sm' % int(est / 60)
                message = xbmc.getLocalizedString(
                    24042) % percent + ' - %s KB/s %s' % (speed, esTime)
                notify(message, filename)

    downloader = Downloader(callback)
    result = downloader.download(url, local, filename, headers)
    try:
        if result is True:
            if xbmc.Player().isPlaying():
                notify(xbmc.getLocalizedString(20177), filename)
            else:
                if notifyFinishDialog:
                    xbmcgui.Dialog().ok(xbmc.getLocalizedString(20177), filename)
                else:
                    notify(xbmc.getLocalizedString(20177), filename)
        else:
            notify(xbmc.getLocalizedString(257), filename)
            xbmcgui.Dialog().ok(filename, xbmc.getLocalizedString(257) + ' : ' + result)
    except:
        traceback.print_exc()
Example #22
 def run(self):
     clientcmd = "./testclient localhost 2010 /testdata/testfile.txt"
     util.info(self.clientname + ": " + clientcmd)
     response = commands.getoutput(clientcmd)
     if response.find("hey this is a test file") == -1:
         util.error(self.clientname +
                    ": ouch! client did not get the right file")
     print self.clientname + ":Client got expected response"
Example #23
 def pack(self):
     archive = tarfile.open(self.dest, 'w:bz2')
     archive.add(self.builddir,
         arcname='/',
         recursive=True
     )
     archive.close()
     util.info('%s is ready.' % self.dest)
Example #24
 def _info_about_r_error(self, tag_list):
     """
     Print info when rhier... functions raise a sorting-related error.
     Delete all tags we're working with.
     """
     util.info("\tERROR: Problem with R's sorting thingymajig. %s" % str(sys.exc_info()))
     for tag in tag_list:
         self._remove_tag(tag)
Example #25
def get_or_make_group(conn, name):
    groups = conn.get_all_security_groups()
    group = [g for g in groups if g.name == name]
    if len(group) > 0:
        return group[0]
    else:
        info("Creating security group {name} in {region}".format(name=name, region=conn.region))
        return conn.create_security_group(name, "Auto created by Tachyon deploy")
Example #26
 def _remove_uncommon_tags(self):
     feature_tags = set(self.features.keys())
     ground_truth_tags = set(self.ground_truth.keys())
     remove_these = feature_tags.symmetric_difference(ground_truth_tags)
     for tag in remove_these:
         self._remove_tag(tag, verbosity=0)
     if self.verbosity > 1:
         util.info("\tWARNING: Removing these tags not in intersection of features and ground truth: %s." % str(remove_these))
Example #27
 def ask_for_account_type(self):
     if len(self.provider.username) == 0:
         util.info('Username is not set, NOT using VIP account')
         return False
     if self.settings['vip'] == '0':
         util.info('Asking user whether to use VIP account')
         ret = xbmcgui.Dialog().yesno(self.provider.name, xbmcutil.__lang__(30010))
         return ret == 1
     return self.settings['vip'] == '1'
Example #28
def _validate(info):
	dummy = _empty_info()
	for key in info.keys():
		if type(info[key]) != type(dummy[key]):
			util.info('key ' + key + ' of type ' + str(type(info[key])) + ' does not match required type ' + str(type(dummy[key])))
			info[key] = dummy[key]
	for key in dummy.keys():
		if key not in info:
			info[key] = dummy[key]
Example #29
def set_security_group(conn, name):
    info("Setting up security group {} in {}".format(name, conn.region))
    sg = get_or_make_group(conn, name)
    if sg.rules != []:
        warn('security group {} in {} already has rules, no modification will happen then'.format(name, conn.region))
        return
    proto = ['tcp', 'udp']
    authorized_ip = '0.0.0.0/0' # all IP
    for p in proto:
        sg.authorize(p, 0, 65535, authorized_ip)
Example #30
 def to_downloads(self,url):
     if not self.login():
         util.error('[hellspy] login failed, unable to add to downloads')
         return
     util.info('adding to downloads')
     try:
         util.request(self._url(url+'&do=downloadControl-favourite'))
     except urllib2.HTTPError:
         traceback.print_exc()
         util.error('[hellspy] failed to add to downloads')
         return
     util.info('added, DONE')
Example #31
import datetime
import os

import util
import guillotine
import datafiles
import config
from data_sources import file_source

IDLE_TIME = datetime.timedelta(seconds=1)
MAX_NOFILL_TIME = datetime.timedelta(minutes=50)
WARN_NOFILL_SYMS = 10
NUM_WAIT_TIMES_IN_IDLE = 3  # number of times we should wait in the idle loop for guillotine
# to be initialized (get symbols etc). The time for which we wait
# is this number multiplied by the 'timeout' (specified below)
MKT_CLOSE_TIME = util.exchangeOpenClose()[1]
util.check_include()

cfg_file = os.environ['CONFIG_DIR'] + '/exec.conf'
util.info("loading config file: %s" % cfg_file)
trade_cfg = config.load_trade_config(cfg_file)

tic2sec, sec2tic = datafiles.load_tickers(os.environ['RUN_DIR'] +
                                          "/tickers.txt")
util.set_log_file()


class trade_listener(object):
    def __init__(self, gtc):
        self.gtc = gtc
        #        self.last_submitted = None
        self.last_submitted_time = 0
        self.fs = file_source.FileSource(os.environ['RUN_DIR'] + '/orders/')
        self.last_idle_time = datetime.datetime.utcnow()
        self.last_trade_time = datetime.datetime.utcnow()
Example #32
    parser.add_argument(
        '--do_continue',
        action='store_true',
        help='Dictates whether to load optim dict, scheduler dict, epoch_i')
    args = parser.parse_args()
    assert args.save_path is not None

    # Make dirs
    if args.load_path is not None:
        os.makedirs(os.path.dirname(args.load_path), exist_ok=True)
    os.makedirs(os.path.dirname(args.save_path), exist_ok=True)
    if args.log_path is not None:
        os.makedirs(os.path.dirname(args.log_path), exist_ok=True)
    DO_LOAD = args.load_path is not None and os.path.exists(args.load_path)

    # Start logging
    util.init_logger(args.log_path)
    info(args)

    # Make dataloaders
    trainset, testset = get_datasets(args.train, args.test)
    trainloader = DataLoader(trainset, batch_size=args.train_bs, shuffle=True)
    testloader = DataLoader(testset, batch_size=args.test_bs)

    # Build model
    master_net = network.Net(arch=[int(x) for x in args.arch.split()])
    if DO_LOAD:
        dump = torch.load(args.load_path)
        epoch_i = dump['epoch_i'] if args.do_continue else 0
        master_net.load_state_dict(dump['state_dict'])
        info('Loaded from %s' % args.load_path)
    else:
        epoch_i = 0
Example #33
        self.fps_counter = Label(master=self.root, bg='#000000', fg='#FFFFFF')
        self.fps_counter.place(anchor=SE, x=1280, y=720)

        self.time_ = time.time()
        self.frames = 0

    def update(self):
        self.root.update()
        self.root.update_idletasks()

        self.window_manager.update()

        self.frames += 1
        time_b = time.time()
        diff = time_b - self.time_
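        # refresh the fps label roughly once per second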
        if diff > 1:
            self.fps_counter.config(text=str(self.frames) + " fps")
            self.frames = 0
            self.time_ = time.time()

    def show(self, content):
        self.window_manager.show(content)


if __name__ == '__main__':
    info("Starting Application.")
    window = Application()
    while True:
        window.update()
Example #34
 def get_data_cached(self, url, post=False):
     try:
         url.index('/json/')
         self._oldapi()
     except Exception:
         pass
     headers = {
         'X-UID': self.uid,
         'X-LANG': self.tr['language'],
         'X-VER': sctop.API_VERSION,
         'Accept': 'application/vnd.bbaron.kodi-plugin-v%s+json' %
         (sctop.API_VERSION),
     }
     url = self._url(url)
     code = None
     try:
         if post is not False:
             util.debug("POST URL: %s %s" % (url, str(post)))
             (ret, code) = sctop.post(url, post, headers, "extend")
             self.handleHttpError(code)
             return ret
         util.info("GET x URL: %s" % url)
         ret = False
         if sctop.getSettingAsBool('usecache') is not False:
             util.debug("[SC] skusam cache")
             ret = self.cache.get(str(url))
         if not ret:
             util.debug("[SC] url BEZ cache %s" % str(url))
             (ret, code, info) = sctop.request(url, headers, "info")
             util.debug("[SC] code: %s %s" % (str(code), str(info)))
             self.handleHttpError(code, data=ret, i=info)
             if code == 200:
                 ttl = datetime.timedelta(hours=2)
                 try:
                     util.debug("[SC] info: %s " % str(info))
                     if 'x-ttl' in info:
                         ttl = datetime.timedelta(
                             seconds=int(info.get('x-ttl')))
                         util.debug("[SC] mame TTL: %s" % str(ttl))
                 except:
                     pass
                 try:
                     self.cache.cache.set(str(url), ret, expiration=ttl)
                 except:
                     self.cache.set(str(url), ret)
         else:
             util.debug("[SC] url z cache %s" % str(url))
         util.debug("[SC] return data")
         return ret
     except Exception as e:
         inet = sctop.getCondVisibility('System.InternetState')
         util.debug("[SC] inet scinema status: %s | %s" %
                    (str(inet), str(e)))
         if inet is False or inet == 0:
             HANDLE = int(sys.argv[1])
             xbmcplugin.endOfDirectory(HANDLE, succeeded=False)
             sctop.dialog.ok("internet",
                             'Skontrolujte pripojenie na internet')
             return False
         util.error('[SC] ERROR URL: --------- %s --------' %
                    str(traceback.format_exc()))
         if code is None:
             sctop.dialog.ok("error", url)
         return False
Example #35
def deploy_monitoring_landscape(
    cfg_set: ConfigurationSet,
    cfg_factory: ConfigFactory,
):
    kubernetes_cfg = cfg_set.kubernetes()
    concourse_cfg = cfg_set.concourse()

    # Set the global context to the cluster specified in KubernetesConfig
    kube_ctx.set_kubecfg(kubernetes_cfg.kubeconfig())
    ensure_cluster_version(kubernetes_cfg)

    monitoring_config_name = concourse_cfg.monitoring_config()
    monitoring_cfg = cfg_factory.monitoring(monitoring_config_name)
    monitoring_namespace = monitoring_cfg.namespace()

    tls_config_name = concourse_cfg.tls_config()
    tls_config = cfg_factory.tls_config(tls_config_name)

    # deploy kube-state-metrics
    kube_state_metrics_helm_values = create_kube_state_metrics_helm_values(
        monitoring_cfg=monitoring_cfg)
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/kube-state-metrics',
        'kube-state-metrics',
        kube_state_metrics_helm_values,
    )

    # deploy postgresql exporter
    postgresql_helm_values = create_postgresql_helm_values(
        concourse_cfg=concourse_cfg,
        cfg_factory=cfg_factory,
    )
    execute_helm_deployment(
        kubernetes_cfg,
        monitoring_namespace,
        'stable/prometheus-postgres-exporter',
        'prometheus-postgres-exporter',
        postgresql_helm_values,
    )

    # deploy ingresses for kube-state-metrics, postgresql exporter
    monitoring_tls_secret_name = monitoring_cfg.tls_secret_name()

    info(
        'Creating tls-secret in monitoring namespace for kube-state-metrics and postgresql...'
    )
    create_tls_secret(tls_config=tls_config,
                      tls_secret_name=monitoring_tls_secret_name,
                      namespace=monitoring_namespace,
                      basic_auth_cred=BasicAuthCred(
                          user=monitoring_cfg.basic_auth_user(),
                          password=monitoring_cfg.basic_auth_pwd()))

    ingress_helper = kube_ctx.ingress_helper()
    info('Create ingress for kube-state-metrics')
    ingress = generate_monitoring_ingress_object(
        secret_name=monitoring_tls_secret_name,
        namespace=monitoring_namespace,
        hosts=[monitoring_cfg.ingress_host(),
               monitoring_cfg.external_url()],
        service_name=monitoring_cfg.kube_state_metrics().service_name(),
        service_port=monitoring_cfg.kube_state_metrics().service_port(),
    )
    ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)

    info('Create ingress for postgres-exporter')
    ingress = generate_monitoring_ingress_object(
        secret_name=monitoring_tls_secret_name,
        namespace=monitoring_namespace,
        hosts=[monitoring_cfg.ingress_host(),
               monitoring_cfg.external_url()],
        service_name=monitoring_cfg.postgresql_exporter().service_name(),
        service_port=monitoring_cfg.postgresql_exporter().service_port(),
    )
    ingress_helper.replace_or_create_ingress(monitoring_namespace, ingress)
Example #36
            inpath="/".join((os.environ["RUN_DIR"],"calcres"))
            outpath="/".join((os.environ["ROOT_DIR"],"reports",args.strat,"dailylive",args.date))
        elif args.mode == "old":
            inpath="/".join((os.environ["RUN_DIR"],"oldsystem","calcres"))
            outpath="/".join((os.environ["ROOT_DIR"],"reports",args.strat,"dailylive",args.date))
        else:
            print "Unknown mode!"
            exit(1)
        
        #create the outpath if it doesn't exist
        try:
            os.makedirs(outpath)
        except:
            pass
        
        util.info("Looking for calcres files in {}".format(inpath))
        infiles = os.popen("ls -rt {}/calcres.*.txt.gz | tail -1".format(inpath)).readlines()
        for infile in [x.strip() for x in infiles]:
            outfile = infile[infile.rindex("/"):].split(".")
            if args.mode != "old": outfile.insert(1,"stats")
            else: outfile.insert(1,"old.stats")
            outfile=".".join(outfile[:-1])
            outfile = outpath+"/"+outfile
            calcresStats(infile, outfile,True if args.mode=="old" else False)
            util.cat(outfile)
    
    if args.forecastStats:
        if args.infile is not None and args.outfile is not None:
            forecastStats(args.infile,args.outfile,args.configfile)
            exit(0)
Example #37
    def read_data(self):
        """Provides images and camera intrinsics."""
        with tf.name_scope('data_loading'):
            with tf.name_scope('enqueue_paths'):
                seed = random.randint(0, 2**31 - 1)
                self.file_lists = self.compile_file_list(
                    self.data_dir, self.input_file)
                image_paths_queue = tf.train.string_input_producer(
                    self.file_lists['image_file_list'],
                    seed=seed,
                    shuffle=self.shuffle,
                    num_epochs=(1 if not self.shuffle else None))
                seg_paths_queue = tf.train.string_input_producer(
                    self.file_lists['segment_file_list'],
                    seed=seed,
                    shuffle=self.shuffle,
                    num_epochs=(1 if not self.shuffle else None))
                cam_paths_queue = tf.train.string_input_producer(
                    self.file_lists['cam_file_list'],
                    seed=seed,
                    shuffle=self.shuffle,
                    num_epochs=(1 if not self.shuffle else None))
                img_reader = tf.WholeFileReader()
                _, image_contents = img_reader.read(image_paths_queue)
                seg_reader = tf.WholeFileReader()
                _, seg_contents = seg_reader.read(seg_paths_queue)
                if self.file_extension == 'jpg':
                    image_seq = tf.image.decode_jpeg(image_contents)
                    seg_seq = tf.image.decode_jpeg(seg_contents, channels=3)
                elif self.file_extension == 'png':
                    image_seq = tf.image.decode_png(image_contents, channels=3)
                    seg_seq = tf.image.decode_png(seg_contents, channels=3)

            with tf.name_scope('load_intrinsics'):
                cam_reader = tf.TextLineReader()
                _, raw_cam_contents = cam_reader.read(cam_paths_queue)
                rec_def = []
                for _ in range(9):
                    rec_def.append([1.0])
                raw_cam_vec = tf.decode_csv(raw_cam_contents,
                                            record_defaults=rec_def)
                raw_cam_vec = tf.stack(raw_cam_vec)
                intrinsics = tf.reshape(raw_cam_vec, [3, 3])

            with tf.name_scope('convert_image'):
                image_seq = self.preprocess_image(
                    image_seq)  # Converts to float32.

            if self.random_color:
                with tf.name_scope('image_augmentation'):
                    image_seq = self.augment_image_colorspace(image_seq)

            image_stack = self.unpack_images(image_seq)
            seg_stack = self.unpack_images(seg_seq)

            if self.flipping_mode != FLIP_NONE:
                random_flipping = (self.flipping_mode == FLIP_RANDOM)
                with tf.name_scope('image_augmentation_flip'):
                    image_stack, seg_stack, intrinsics = self.augment_images_flip(
                        image_stack,
                        seg_stack,
                        intrinsics,
                        randomized=random_flipping)

            if self.random_scale_crop:
                with tf.name_scope('image_augmentation_scale_crop'):
                    image_stack, seg_stack, intrinsics = self.augment_images_scale_crop(
                        image_stack, seg_stack, intrinsics, self.img_height,
                        self.img_width)

            with tf.name_scope('multi_scale_intrinsics'):
                intrinsic_mat = self.get_multi_scale_intrinsics(
                    intrinsics, self.num_scales)
                intrinsic_mat.set_shape([self.num_scales, 3, 3])
                intrinsic_mat_inv = tf.matrix_inverse(intrinsic_mat)
                intrinsic_mat_inv.set_shape([self.num_scales, 3, 3])

            if self.imagenet_norm:
                im_mean = tf.tile(tf.constant(IMAGENET_MEAN),
                                  multiples=[self.seq_length])
                im_sd = tf.tile(tf.constant(IMAGENET_SD),
                                multiples=[self.seq_length])
                image_stack_norm = (image_stack - im_mean) / im_sd
            else:
                image_stack_norm = image_stack

            with tf.name_scope('batching'):
                if self.shuffle:
                    (image_stack, image_stack_norm, seg_stack, intrinsic_mat,
                     intrinsic_mat_inv) = tf.train.shuffle_batch(
                         [
                             image_stack, image_stack_norm, seg_stack,
                             intrinsic_mat, intrinsic_mat_inv
                         ],
                         batch_size=self.batch_size,
                         capacity=QUEUE_SIZE + QUEUE_BUFFER * self.batch_size,
                         min_after_dequeue=QUEUE_SIZE)
                else:
                    (image_stack, image_stack_norm, seg_stack, intrinsic_mat,
                     intrinsic_mat_inv) = tf.train.batch(
                         [
                             image_stack, image_stack_norm, seg_stack,
                             intrinsic_mat, intrinsic_mat_inv
                         ],
                         batch_size=self.batch_size,
                         num_threads=1,
                         capacity=QUEUE_SIZE + QUEUE_BUFFER * self.batch_size)
                logging.info('image_stack: %s', util.info(image_stack))
        return (image_stack, image_stack_norm, seg_stack, intrinsic_mat,
                intrinsic_mat_inv)
Example #38
    def upload_image(
        self,
        container_image: ContainerImage,
        component: Component,
    ) -> UploadResult:
        metadata = self._metadata(
            container_image=container_image,
            component=component,
            omit_version=False,
        )

        upload_result = partial(UploadResult,
                                container_image=container_image,
                                component=component)

        # check if the image has already been uploaded for this component
        scan_result = self.retrieve_scan_result(
            container_image=container_image,
            component=component,
        )
        reference_results = [
            self.retrieve_scan_result(
                container_image=container_image,
                component=component,
                group_id=group_id,
            ) for group_id in self._reference_group_ids
        ]
        reference_results = [r for r in reference_results
                             if r]  # remove None entries
        if scan_result:
            reference_results.insert(0, scan_result)

        # collect old triages in order to "transport" them after new upload (may be None)
        triages = self._existing_triages(analysis_results=reference_results)

        upload_action = self._determine_upload_action(
            container_image=container_image, scan_result=scan_result)

        if not upload_action.upload and not upload_action.rescan:
            # early exit (nothing to do)
            return upload_result(
                status=UploadStatus.SKIPPED,
                result=scan_result,
            )

        if upload_action.upload:
            info(
                f'uploading to protecode: {container_image.image_reference()}')
            image_data_fh = retrieve_container_image(
                container_image.image_reference(),
                outfileobj=tempfile.NamedTemporaryFile(),
            )
            if self._upload_registry_prefix:
                self.upload_image_to_container_registry(
                    container_image, image_data_fh)
            # keep old product_id (in order to delete after update)
            if scan_result:
                product_id = scan_result.product_id()
            else:
                product_id = None

            try:
                # Upload image and update outdated analysis result with the one triggered
                # by the upload.
                scan_result = self._api.upload(
                    application_name=self._upload_name(
                        container_image=container_image,
                        component=component).replace('/', '_'),
                    group_id=self._group_id,
                    data=image_data_fh,
                    custom_attribs=metadata,
                )
            finally:
                image_data_fh.close()

            for triage in triages:
                if triage.scope() is TriageScope.GROUP:
                    self._api.add_triage(
                        triage=triage,
                        scope=TriageScope.GROUP,
                        group_id=self._group_id,
                    )
                else:
                    # hard-code scope for now
                    self._api.add_triage(
                        triage=triage,
                        scope=TriageScope.RESULT,
                        product_id=scan_result.product_id(),
                    )

            # rm (now outdated) scan result
            if product_id:
                self._api.delete_product(product_id=product_id)

        if upload_action.rescan:
            self._api.rescan(scan_result.product_id())

        result = self._api.wait_for_scan_result(
            product_id=scan_result.product_id())

        if result.status() == ProcessingStatus.BUSY:
            # Should not happen since we waited until the scan result is ready.
            raise RuntimeError(
                'Analysis of container-image {c} was reported as completed, '
                'but is still pending'.format(c=container_image.name(), ))
        else:
            upload_status = UploadStatus.DONE

        return upload_result(status=upload_status, result=result)
Example #39
def main(args):
    ec2_conf = get_ec2_conf()
    conn = get_conn()
    if args.submit:
        info(
            'waiting for spot instance requests to be fulfilled, you can cancel by ctrl+c ...'
        )
        try:
            requests = submit_request(conn, ec2_conf)
        except (KeyboardInterrupt, RequestFailedError) as e:
            error(e)
            exit(1)
        info('spot instance requests fulfilled')
        instance_id_to_tag_ip = {}
        rid_tag = request_id_to_tag(requests)
        info('getting instance IPs...')
        for r in requests:
            instance_id = r.instance_id
            info('waiting for ip to be allocated to the machine')
            ip = conn.get_only_instances([instance_id])[0].ip_address
            while ip is None:
                time.sleep(1)
                ip = conn.get_only_instances([instance_id])[0].ip_address
            instance_id_to_tag_ip[instance_id] = (rid_tag[r.id], ip)
        info('mocking vagrant info under .vagrant...')
        mock_vagrant_info(instance_id_to_tag_ip)
        info('creation of spot instances done')
        info('waiting for ssh to be available...')
        wait_for_ssh([ip for tag, ip in instance_id_to_tag_ip.values()])
        info('ssh for all instances are ready')
    elif args.cancel:
        cancel_request(conn)
Example #40
    def _get_file_url_anonymous(self,page,post_url,headers,captcha_cb):

        data = util.request(self._url('reloadXapca.php'))
        capdata = json.loads(data)
        captcha = capdata['image']
        if not captcha.startswith('http'):
            captcha = 'http:' + captcha
        sound = capdata['sound']
        if not sound.startswith('http'):
            sound = 'http:' + sound
        # ask callback to provide captcha code
        self.info('Asking for captcha img %s' % captcha)
        code = captcha_cb({'id':captcha,'img': captcha,'snd':sound})
        if not code:
            self.info('Captcha not provided, done')
            return

        ts = re.search('<input type=\"hidden\" name=\"ts\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        cid = re.search('<input type=\"hidden\" name=\"cid\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        sign = re.search('<input type=\"hidden\" name=\"sign\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        sign_a = re.search('<input type=\"hidden\" name=\"sign_a\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        has = capdata['hash']
        salt = capdata['salt']
        timestamp = capdata['timestamp']
        token = re.search('<input type=\"hidden\" name=\"_token_\".+?value=\"([^\"]+)"',page,re.IGNORECASE | re.DOTALL)
        if not (sign and sign_a and ts and cid and has and token):
            util.error('[uloz.to] - unable to parse required params from page, plugin needs fix')
            return
        request = {
            'captcha_type':'xapca',
            'hash':has,
            'salt':salt,
            'timestamp':timestamp,
            'ts':ts.group(1),
            'cid':'',
            'sign':sign.group(1),
            'sign_a':sign_a.group(1),
            'captcha_value':code,
            '_do':'download-freeDownloadTab-freeDownloadForm-submit',
            '_token_':token.group(1),
            'adi':'f'
        }
        req = urllib2.Request(post_url,urllib.urlencode(request))
        req.add_header('User-Agent',util.UA)
        req.add_header('Referer',post_url)
        req.add_header('Accept','application/json')
        req.add_header('X-Requested-With','XMLHttpRequest')
        sessid=[]
        for cookie in re.finditer('(ULOSESSID=[^\;]+)',headers.get('Set-Cookie'),re.IGNORECASE | re.DOTALL):
            sessid.append(cookie.group(1))
        req.add_header('Cookie','nomobile=1; uloztoid='+cid.group(1)+'; uloztoid2='+cid.group(1)+'; '+sessid[-1])
        util.info(req.headers)
        util.info(request)
        try:
            resp = urllib2.urlopen(req)
            page = resp.read()
            headers = resp.headers
        except urllib2.HTTPError:
            # this is not OK, something went wrong
            traceback.print_exc()
            util.error('[uloz.to] cannot resolve stream url, server did not redirect us')
            util.info('[uloz.to] POST url:'+post_url)
            return
        try:
            result = json.loads(page)
        except:
            raise ResolveException('Unexpected error, addon needs fix')
        if 'status' not in result:
            raise ResolveException('Unexpected error, addon needs fix')
        if result['status'] == 'ok':
            return self._fix_stream_url(result['url'])
        elif result['status'] == 'error':
            # the only known state is wrong captcha for now
            util.error('Captcha validation failed, please try playing/downloading again')
            util.error(result)
            raise ResolveException('Captcha failed, try again')
Example #41
def cleanUp():
    info("Cleaning...")
    if os.path.isdir('./tmp'):
        shutil.rmtree('./tmp')
Example #42
 def validate(self):
     for step in self._steps:
         info(f"Validating step '{step.name()}'")
         step.validate()
Example #43
def filter_and_display_upload_results(
    upload_results: typing.Sequence[UploadResult],
    cve_threshold=7,
    ignore_if_triaged=True,
) -> typing.Sequence[typing.Tuple[AnalysisResult, int]]:
    # we only require the analysis_results for now
    results = [r.result for r in upload_results]

    results_without_components = []
    results_below_cve_thresh = []
    results_above_cve_thresh = []

    for result in results:
        components = result.components()
        if not components:
            results_without_components.append(result)
            continue

        greatest_cve = -1

        for component in components:
            vulnerabilities = filter(lambda v: not v.historical(),
                                     component.vulnerabilities())
            if ignore_if_triaged:
                vulnerabilities = filter(lambda v: not v.has_triage(),
                                         vulnerabilities)
            greatest_cve_candidate = highest_major_cve_severity(
                vulnerabilities)
            if greatest_cve_candidate > greatest_cve:
                greatest_cve = greatest_cve_candidate

        if greatest_cve >= cve_threshold:
            results_above_cve_thresh.append((result, greatest_cve))
            continue
        else:
            results_below_cve_thresh.append((result, greatest_cve))
            continue

    if results_without_components:
        warning(
            f'Protecode did not identify components for '
            f'{len(results_without_components)} result(s):\n'
        )
        for result in results_without_components:
            print(result.display_name())
        print('')

    def render_results_table(
            results: typing.Sequence[typing.Tuple[AnalysisResult, int]]):
        header = ('Component Name', 'Greatest CVE')
        results = sorted(results, key=lambda e: e[1])

        result = tabulate.tabulate(
            map(lambda r: (r[0].display_name(), r[1]), results),
            headers=header,
            tablefmt='fancy_grid',
        )
        print(result)

    if results_below_cve_thresh:
        info(
            f'The following components were below the configured CVE threshold {cve_threshold}:'
        )
        render_results_table(results=results_below_cve_thresh)
        print('')

    if results_above_cve_thresh:
        warning('The following components have critical vulnerabilities:')
        render_results_table(results=results_above_cve_thresh)

    return results_above_cve_thresh
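# Hypothetical driver for the helper above -- the client object and its
# upload call are stand-ins for illustration, not a real Protecode API:
upload_results = protecode_client.upload_images(image_references)  # assumed
flagged = filter_and_display_upload_results(
    upload_results,
    cve_threshold=7,         # flag anything with greatest CVE severity >= 7
    ignore_if_triaged=True,  # skip vulnerabilities that were already triaged
)
for analysis_result, greatest_cve in flagged:
    print(analysis_result.display_name(), greatest_cve)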
Example #44
def info(self, msg):
    util.info('[%s] %s' % (self.name, msg))
Example #45
def estimate_radial_distortion(obj_points, img_points, K, extrinsics):
    util.info("Estimating radial distortion by alternation...")
    M = len(img_points)
    N = obj_points[0].shape[0]

    model = util.to_homogeneous_3d_multiple_points(obj_points[0])

    u_c, v_c = K[0, 2], K[1, 2]

    # Form radius vector
    r = np.zeros(2 * M * N)
    for e, E in enumerate(extrinsics):
        normalized_projection = np.dot(model, E.T)
        normalized_projection = util.to_inhomogeneous_multiple_points(
            normalized_projection)

        x_normalized_proj = normalized_projection[:, 0]
        y_normalized_proj = normalized_projection[:, 1]
        r_i = np.sqrt(x_normalized_proj**2 + y_normalized_proj**2)
        r[e * N:(e + 1) * N] = r_i
    r[M * N:] = r[:M * N]

    # Form observation vector
    obs = np.zeros(2 * M * N)
    u_data, v_data = np.zeros(M * N), np.zeros(M * N)
    for d, data in enumerate(img_points):
        u_i, v_i = data[:, 0][:, 0], data[:, 0][:, 1]
        u_data[d * N:(d + 1) * N] = u_i
        v_data[d * N:(d + 1) * N] = v_i
    obs[:M * N] = u_data
    obs[M * N:] = v_data

    # Form prediction vector
    pred = np.zeros(2 * M * N)
    pred_centered = np.zeros(2 * M * N)
    u_pred, v_pred = np.zeros(M * N), np.zeros(M * N)
    for e, E in enumerate(extrinsics):
        P = np.dot(K, E)
        projection = np.dot(model, P.T)
        projection = util.to_inhomogeneous_multiple_points(projection)
        u_pred_i = projection[:, 0]
        v_pred_i = projection[:, 1]

        u_pred[e * N:(e + 1) * N] = u_pred_i
        v_pred[e * N:(e + 1) * N] = v_pred_i
    pred[:M * N] = u_pred
    pred[M * N:] = v_pred
    pred_centered[:M * N] = u_pred - u_c
    pred_centered[M * N:] = v_pred - v_c

    # Form distortion coefficient constraint matrix
    D = np.zeros((2 * M * N, 2))
    D[:, 0] = pred_centered * r**2
    D[:, 1] = pred_centered * r**4

    # Form values (difference between sensor observations and predictions)
    b = obs - pred

    # Use pseudoinverse technique to compute least squares solution for distortion coefficients
    D_inv = np.linalg.pinv(D)
    k = np.dot(D_inv, b)
    util.info("DONE.")

    return k
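# The closing pseudoinverse step is ordinary linear least squares: stacking
# the centered predictions against r**2 and r**4 yields an overdetermined
# system D k = b, solved by k = pinv(D) b. A self-contained sketch with
# made-up numbers (D and b below are hypothetical, not real calibration data):
import numpy as np

D = np.array([[0.10, 0.01],
              [0.20, 0.04],
              [0.40, 0.16],
              [0.80, 0.64]])                 # columns ~ the r**2 and r**4 terms
b = np.array([0.011, 0.024, 0.058, 0.170])  # observation - prediction residuals

k = np.dot(np.linalg.pinv(D), b)  # least-squares estimate of (k1, k2)
print(k)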
Example #46
def main():

    n_epoch = params.n_epoch
    save_weight_filename = params.save_weight_file
    do_validation_only = params.test_only
    gen_n_text_samples = params.gen_n_samples
    learning_rate = params.learning_rate

    training_t = Timer()
    validation_t = Timer()

    best_pp = None
    prev_loss = None
    prev_acc = None
    patience = MAX_PATIENCE

    if params.mode == 'C2W2C':

        def c2w2c_weighted_objective(fn):
            def weighted(y_true, y_pred, weights, mask=None):
                assert mask is None
                assert weights is not None
                score_array = fn(y_true, y_pred)

                # reduce score_array to same ndim as weight array
                ndim = K.ndim(score_array)
                weight_ndim = K.ndim(weights)
                score_array = K.mean(score_array,
                                     axis=list(range(weight_ndim, ndim)))

                # apply sample weighting
                score_array *= weights
                word_scores = K.sum(score_array, axis=-1)
                return K.mean(word_scores)

            return weighted

        # by default Keras calculates only the mean, which is not correct here
        # because word loss = sum(char losses), thus we need to monkey-patch
        # the weighted_objective function to return the correct loss for the
        # C2W2C model
        # ATTENTION: this might not work in later Keras versions, only tested with 1.0.5
        ket.weighted_objective = c2w2c_weighted_objective

    # ======== PREPARE MODELS AND DATA  ========

    t_model, v_model, training_data, validation_data, gen_text = prepare_env(
        params)

    def validate_model(best):
        if gen_n_text_samples:
            print '\nGenerating %d text samples...' % gen_n_text_samples
            n_seed = 100
            start = max(
                0, np.random.randint(0, training_dataset.n_words - n_seed))
            seed = training_dataset.get_words()[start:start + n_seed]
            gen_text(seed=seed, how_many=gen_n_text_samples)

        print '\nValidating model...'
        validation_t.start()
        v_model.set_weights(t_model.get_weights())
        v_model.reset_states()
        n_v_samples, gen_v = validation_data[0]()
        loss, _ = v_model.evaluate_generator(gen_v, n_v_samples)
        pp = np.exp(loss)
        val_elapsed, val_tot = validation_t.lap()
        validation_info = '''Validation result:
  - Model loss:        %f
  - Perplexity:        %f %s
  - OOV rate:          %f
  - Validation took:   %s
  - Total validation:  %s
    ''' % (loss, pp, delta_str(pp,
                               best), validation_data[1], val_elapsed, val_tot)
        info(validation_info)
        return pp

    if do_validation_only:
        validate_model(None)
        sys.exit(0)

    print '\nTraining model...'
    for epoch in range(1, n_epoch + 1):
        print '=== Epoch %d ===' % epoch
        training_t.start()

        n_t_samples, gen_t = training_data[0]()

        t_model.reset_states()

        callbacks = []
        if save_weight_filename:
            callbacks += [
                ModelCheckpoint(save_weight_filename,
                                monitor='loss',
                                mode='min',
                                save_best_only=True)
            ]

        h = t_model.fit_generator(generator=gen_t,
                                  samples_per_epoch=n_t_samples,
                                  callbacks=callbacks,
                                  nb_epoch=1,
                                  verbose=1)
        fit_elapsed, fit_tot = training_t.lap()

        loss = h.history['loss'][0]
        acc = h.history['acc'][0]
        epoch_info = '''Epoch %d summary at %s:
  - Model loss:         %f %s
  - Model accuracy:     %f %s
  - Perplexity:         %f
  - Training took:      %s
  - Total training:     %s''' % (
            epoch, strftime("%Y-%m-%d %H:%M:%S", localtime()), loss,
            delta_str(loss, prev_loss), acc, delta_str(
                acc, prev_acc), np.exp(loss), fit_elapsed, fit_tot)
        print ''
        info(epoch_info)

        pp = validate_model(best_pp)

        if best_pp is not None and pp > best_pp:
            if patience <= 0 and learning_rate > MIN_LR:
                learning_rate /= 2.
                learning_rate = max(learning_rate, MIN_LR)
                info(
                    'Validation perplexity increased. Halving learning rate to %f...\n'
                    % learning_rate)
                K.set_value(t_model.optimizer.lr, learning_rate)
                patience = 1
            else:
                patience -= 1
        else:
            best_pp = pp
            patience = MAX_PATIENCE

        prev_acc = acc
        prev_loss = loss

    print 'Training complete'
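# The monkey-patched objective above computes each word's loss as the sum of
# its per-character losses before averaging over words. A framework-free
# numpy sketch of that reduction (shapes and numbers invented for
# illustration):
import numpy as np

char_losses = np.array([[0.5, 0.2, 0.1, 0.0],   # word 1: 3 chars, padded to 4
                        [0.3, 0.3, 0.0, 0.0]])  # word 2: 2 chars, padded to 4
weights = np.array([[1, 1, 1, 0],               # sample weights mask padding
                    [1, 1, 0, 0]])

word_scores = (char_losses * weights).sum(axis=-1)  # sum char losses per word
loss = word_scores.mean()                           # then mean over words
print(loss)  # (0.8 + 0.6) / 2 = 0.7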
Example #47
def deploy_and_run_smoketest_pipeline(
    config_dir: str,
    config_name: str,
    concourse_team_name: str,
    cc_pipelines_repo_dir: str,
    cc_utils_repo_dir: str,
    wait_for_job_execution: bool=False,
):
    config_factory = ConfigFactory.from_cfg_dir(cfg_dir=config_dir)
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()

    # as this is an integration test, hard-code assumptions about the layout of
    # our pipelines repository
    template_path = os.path.join(cc_utils_repo_dir, 'concourse', 'templates')
    template_include_dir = os.path.join(cc_utils_repo_dir, 'concourse')
    pipeline_name = 'cc-smoketest'

    # retrieve pipeline-definition from github at hardcoded location
    github_cfg = config_set.github()

    githubrepobranch = GitHubRepoBranch(
        github_config=github_cfg,
        repo_owner='kubernetes',
        repo_name='cc-smoketest',
        branch='master',
    )

    helper = GitHubRepositoryHelper.from_githubrepobranch(
      githubrepobranch=githubrepobranch,
    )
    pipeline_definition = yaml.load(
        helper.retrieve_text_file_contents(
            file_path='.ci/smoketest-pipeline.yaml',
        ),
        Loader=yaml.SafeLoader,
    )

    definition_descriptor = DefinitionDescriptor(
        pipeline_name=pipeline_name,
        pipeline_definition=pipeline_definition[pipeline_name],
        main_repo={'path': 'kubernetes/cc-smoketest', 'branch': 'master'},
        concourse_target_cfg=concourse_cfg,
        concourse_target_team=concourse_team_name,
    )

    preprocessor = DefinitionDescriptorPreprocessor()
    template_retriever = TemplateRetriever(template_path=template_path)
    renderer = Renderer(
        template_retriever=template_retriever,
        template_include_dir=template_include_dir,
        cfg_set=config_set,
    )
    deployer = ConcourseDeployer(
        unpause_pipelines=True,
        expose_pipelines=True
    )

    definition_descriptor = preprocessor.process_definition_descriptor(definition_descriptor)
    rendering_result = renderer.render(definition_descriptor)

    info('deploying pipeline')
    deployment_result = deployer.deploy(rendering_result.definition_descriptor)

    if not deployment_result.deploy_status & DeployStatus.SUCCEEDED:
        fail('deployment failed')
Example #48
    def process_results(self, results):
        # collect pipelines by concourse target (concourse_cfg, team_name) as key
        concourse_target_results = {}
        for result in results:
            definition_descriptor = result.definition_descriptor
            concourse_target_key = definition_descriptor.concourse_target_key()
            if concourse_target_key not in concourse_target_results:
                concourse_target_results[concourse_target_key] = set()
            concourse_target_results[concourse_target_key].add(result)

        for concourse_target_key, concourse_results in concourse_target_results.items():
            # TODO: implement eq for concourse_cfg
            concourse_cfg, concourse_team = next(iter(
                concourse_results)).definition_descriptor.concourse_target()
            concourse_results = concourse_target_results[concourse_target_key]
            concourse_api = client.from_cfg(
                concourse_cfg=concourse_cfg,
                team_name=concourse_team,
            )
            # find pipelines to remove
            deployed_pipeline_names = set(
                map(lambda r: r.definition_descriptor.pipeline_name,
                    concourse_results))

            pipelines_to_remove = set(
                concourse_api.pipelines()) - deployed_pipeline_names

            for pipeline_name in pipelines_to_remove:
                info('removing pipeline: {p}'.format(p=pipeline_name))
                concourse_api.delete_pipeline(pipeline_name)

            # trigger resource checks in new pipelines
            self._initialise_new_pipeline_resources(concourse_api,
                                                    concourse_results)

            # order pipelines alphabetically
            pipeline_names = list(concourse_api.pipelines())
            pipeline_names.sort()
            concourse_api.order_pipelines(pipeline_names)

        # evaluate results
        failed_descriptors = [
            d for d in results if not d.deploy_status & DeployStatus.SUCCEEDED
        ]

        failed_count = len(failed_descriptors)

        info('Successfully replicated {d} pipeline(s)'.format(d=len(results) -
                                                              failed_count))

        if failed_count == 0:
            return True

        warning('Errors occurred whilst replicating {d} pipeline(s):'.format(
            d=failed_count, ))

        all_notifications_succeeded = True
        for failed_descriptor in failed_descriptors:
            warning(failed_descriptor.definition_descriptor.pipeline_name)
            try:
                self._notify_broken_definition_owners(failed_descriptor)
            except Exception:
                warning(
                    'an error occurred whilst trying to send error notifications'
                )
                traceback.print_exc()
                all_notifications_succeeded = False

        # signal an error only if the error notifications failed
        return all_notifications_succeeded
Example #49
def findstreams(data, regexes=None):
    """
    Finds streams in given data. Respects caller add-on settings about
    quality and asks user if necessary.

    :param data: A string (piece of text / HTML code), an array of URLs or an
                 array of dictionaries, where 'url' key stores actual URL and
                 all other keys not present in item() are being copied to the
                 resolved stream dictionary
    :param regexes: An array of strings - regular expressions, each MUST define
                    named group called 'url', which retrieves resolvable URL
                    (that one is passed to resolve operation); only used
                    with 'data' of type 'string'
    :returns: An array of resolved objects; None if at least 1 resolver failed
              to resolve and nothing else was found; an empty array if no
              resolver for the URLs has been found; or False if none of the
              regexes found anything
    """
    def get_url(obj):
        return obj['url'] if isinstance(obj, dict) else obj

    urls = []
    resolvables = []
    resolved = []
    not_found = False
    if isinstance(data, str) and regexes:
        for regex in regexes:
            for match in re.finditer(regex, data, re.IGNORECASE | re.DOTALL):
                urls.append(match.group('url'))
    elif isinstance(data, list):
        urls = data
    else:
        raise TypeError
    for url in urls:
        if isinstance(url, dict):
            url['url'] = filter_resolvable(url['url'])
        else:
            url = filter_resolvable(url)
        if url and url not in resolvables:
            util.info('Found resolvable ' + get_url(url))
            resolvables.append(url)
    if len(resolvables) == 0:
        util.info('No resolvables found!')
        return False
    for url in resolvables:
        streams = resolve(get_url(url))
        if streams is None:
            util.info('No resolver found for ' + get_url(url))
            not_found = True
        elif not streams:
            util.info('There was an error resolving ' + get_url(url))
        elif len(streams) > 0:
            for stream in streams:
                if isinstance(url, dict):
                    for key in list(url.keys()):
                        if key not in stream:
                            stream[key] = url[key]
                        elif key not in item():
                            if isinstance(stream[key], str) and \
                                    isinstance(url[key], str):
                                stream[key] = url[key] + ' +' + stream[key]
                            elif isinstance(stream[key], list) and \
                                    isinstance(url[key], list):
                                stream[key] = url[key] + stream[key]
                            elif isinstance(stream[key], dict) and \
                                    isinstance(url[key], dict):
                                stream[key].update(url[key])
                resolved.append(stream)
    if len(resolved) == 0:
        if not_found:
            return []
        return None
    resolved = sorted(resolved, key=lambda i: i['quality'])
    resolved = sorted(resolved, key=lambda i: len(i['quality']))
    resolved.reverse()
    return resolved
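# A hedged usage sketch -- the page URL and the href regex below are invented
# for illustration; only the return-value handling follows the contract in
# the docstring above:
page = util.request('http://example.com/movie-page')  # hypothetical page
streams = findstreams(
    page,
    regexes=[r'<a[^>]+href="(?P<url>https?://(?:www\.)?uloz\.to/[^"]+)"'],
)
if streams:
    best = streams[0]  # results are sorted best quality first
    util.info('playing %s (%s)' % (best['url'], best['quality']))
elif streams == []:
    util.info('no resolver available for the URLs found')
elif streams is None:
    util.info('all matching resolvers failed')
else:  # False
    util.info('no resolvable links matched')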
Example #50
def deploy_concourse_landscape(config_name: str,
                               deployment_name: str = 'concourse',
                               timeout_seconds: int = 180):
    ensure_not_empty(config_name)
    ensure_helm_setup()

    # Fetch all the necessary config
    config_factory = global_ctx().cfg_factory()
    config_set = config_factory.cfg_set(cfg_name=config_name)
    concourse_cfg = config_set.concourse()

    # Set the global context to the cluster specified by the given config
    kubernetes_config = config_set.kubernetes()
    kubeutil.ctx.set_kubecfg(kubernetes_config.kubeconfig())

    ensure_cluster_version(kubernetes_config)

    # Container-registry config
    image_pull_secret_name = concourse_cfg.image_pull_secret()
    container_registry = config_factory._cfg_element(
        cfg_type_name='container_registry',
        cfg_name=image_pull_secret_name,
    )
    cr_credentials = container_registry.credentials()

    # TLS config
    tls_config_name = concourse_cfg.tls_config()
    tls_config = config_factory._cfg_element(cfg_type_name='tls_config',
                                             cfg_name=tls_config_name)
    tls_secret_name = concourse_cfg.tls_secret_name()

    # Secrets server
    secrets_server_config = config_set.secrets_server()

    # Helm config
    helmchart_cfg_type = 'concourse_helmchart'
    default_helm_values = config_factory._cfg_element(
        cfg_type_name=helmchart_cfg_type,
        cfg_name=concourse_cfg.helm_chart_default_values_config()).raw
    custom_helm_values = config_factory._cfg_element(
        cfg_type_name=helmchart_cfg_type,
        cfg_name=concourse_cfg.helm_chart_values()).raw

    info('Creating default image-pull-secret ...')
    create_image_pull_secret(
        credentials=cr_credentials,
        image_pull_secret_name=image_pull_secret_name,
        namespace=deployment_name,
    )

    info('Creating tls-secret ...')
    create_tls_secret(
        tls_config=tls_config,
        tls_secret_name=tls_secret_name,
        namespace=deployment_name,
    )

    info('Deploying secrets-server ...')
    deploy_secrets_server(secrets_server_config=secrets_server_config, )

    info('Deploying delaying proxy ...')
    deploy_delaying_proxy(
        concourse_cfg=concourse_cfg,
        deployment_name=deployment_name,
    )
    info('Deploying Concourse ...')
    # Concourse is deployed last since Helm will lose connection if deployment takes more than ~60 seconds.
    # Helm will still continue deploying server-side, but the client will report an error.
    deploy_or_upgrade_concourse(
        default_helm_values=default_helm_values,
        custom_helm_values=custom_helm_values,
        concourse_cfg=concourse_cfg,
        kubernetes_config=kubernetes_config,
        deployment_name=deployment_name,
    )

    info('Waiting until the webserver can be reached ...')
    deployment_helper = kubeutil.ctx.deployment_helper()
    is_web_deployment_available = deployment_helper.wait_until_deployment_available(
        namespace=deployment_name,
        name='concourse-web',
        timeout_seconds=timeout_seconds,
    )
    if not is_web_deployment_available:
        fail(
            dedent("""No Concourse webserver reachable after {t} second(s).
                Check status of Pods created by "concourse-web"-deployment in namespace {ns}
                """).format(
                t=timeout_seconds,
                ns=deployment_name,
            ))
    info('Webserver became accessible.')

    # Even though the deployment is available, the ingress might need a few seconds to update.
    time.sleep(3)

    info('Setting teams on Concourse ...')
    set_teams(config=concourse_cfg)
Example #51
    def __init__(self):
        newdb.init_db()
        database = newdb.get_db()

        rows = database.execute(
            "SELECT value FROM {} WHERE xref_type=%(type)s AND source=%(source)s AND born<=%(now)s AND (died>%(now)s OR died is NULL)"
            .format(database.XREF_TABLE), {
                "type": database.getXrefType("TIC"),
                "now": util.now(),
                "source": database.getSourceType("compustat_idhist")
            }).fetchall()
        tickers = [
            row['value'] for row in rows
            if re.match("[0-9].+", row["value"]) is None
        ]
        util.info("Retrieving info on %d tickers" % len(tickers))
        database.close()

        fields = [
            "symbol",
            "name",
            "exchange",
            "error_flag",
            "market_cap",
            "avg_daily_volume",
            "ex_dividend_date",
            "dividend_pay_date",
            "dividend_share_ratio",
            "dividend_yield",

            #"ebitda",
            "earnings_share_ratio",
            "eps_est_cur_year",
            "eps_est_next_qtr",
            "eps_est_next_year",
            "pe_ratio",
            "peg_ratio",
            "price_book_ratio",
            "price_eps_est_cur_year_ratio",
            "price_eps_est_next_year_ratio",
            "price_sales_ratio",
            "short_ratio",
        ]
        # Grab data
        data = ystockquote.get_symbols(tickers, fields)
        # Save data to temp dir
        tempdir = tempfile.mkdtemp(dir=os.environ['TMP_DIR'])
        f = open("%s/yahoo.csv" % tempdir, "w")
        writer = csv.DictWriter(f, fields)
        rows = [dict(zip(fields, fields))]
        rows.extend(data.values())
        writer.writerows(rows)
        f.close()
        # Zip file
        result = os.system(
            "zip -j %s/yahoo-%s.csv.zip %s/yahoo.csv 1>/dev/null" %
            (tempdir, datetime.datetime.now().strftime("%Y%m%d%H%M"), tempdir))
        if (result != 0):
            shutil.rmtree(tempdir)
            raise DataSourceError("Could not zip file")
        os.remove("%s/yahoo.csv" % tempdir)
        self._remote_dir = tempdir
Example #52


#-------------------------------------------------------------------------------
hardCodedProfiles = cppHelper.HardCodedProfiles(profilePath)
# Supported profiles are at least composed of hard coded profiles
supportedProfiles = copy.deepcopy(hardCodedProfiles.getProfilesHardCoded())


#-------------------------------------------------------------------------------
xmlRootNode = xml.etree.ElementTree.parse(xmlInputFilePath).getroot()
if xmlRootNode.tag != "eep":
   raise Exception("getAllNodes : Invalid root \"" + xmlRootNode.tag + "\", \"eep\" expected")
xmlProfileNode = xmlRootNode.find("profile")

util.info("Hard-coded profiles are : " + str(hardCodedProfiles.getProfilesHardCoded()))



# CRorgs : Main Rorgs class, listing Rorg messages
rorgsClass = cppClass.CppClass("CRorgs")
rorgsItems = xmlHelper.getEnumValues(inNode=xmlProfileNode, foreachSubNode="rorg", enumValueNameTag="title", enumValueTag="number")
# Add 0xD4 (Universal Teach-in message) to RORG list as it doesn't appear in eepXX.xml file
rorgsItems.append(['UTE Telegram ', '0xD4'])
rorgsClass.addSubType(cppClass.CppEnumType("ERorgIds", rorgsItems, cppClass.PUBLIC))
rorgsClass.addMember(cppClass.CppMember("RorgMap", "std::map<unsigned int, std::string>", cppClass.PRIVATE, cppClass.STATIC | cppClass.CONST, \
   cppHelper.getMapInitCode(rorgsItems)))
rorgsClass.addMethod(cppClass.CppMethod("toRorgId", "CRorgs::ERorgIds", "unsigned int id", cppClass.PUBLIC, cppClass.STATIC, \
   "   if (RorgMap.find(id) == RorgMap.end())\n" \
   "      throw std::out_of_range(\"Unknown rorg \" + CProfileHelper::byteToHexString(id));\n" \
   "   return static_cast<ERorgIds>(id);\n"))
Example #53
    def system(self, data, cl=False):
        util.debug("[SC] SYSYEM CL: %s" % str(cl))
        if cl is False and "setContent" in data:
            xbmcplugin.setContent(int(sys.argv[1]), data["setContent"])
            '''
            view_mode=data["setContent"].lower()
            skin_name=xbmc.getSkinDir() # read the skin name
            util.debug("[SC] skin_name='"+skin_name+"'")
            try:
                util.debug("[SC] view mode is "+view_mode)
                view_codes=sctop.ALL_VIEW_CODES.get(view_mode)
                view_code=view_codes.get(skin_name)
                util.debug("[SC] view code for "+view_mode+" in "+skin_name+" is "+str(view_code))
                xbmc.executebuiltin("Container.SetViewMode("+str(view_code)+")")
                #xbmc.executebuiltin("Container.Refresh")
            except:
                util.debug("[SC] Unable to find view code for view mode "+str(view_mode)+" and skin "+skin_name)
            '''

        if cl is False and "setPluginCategory" in data:
            xbmcplugin.setPluginCategory(int(sys.argv[1]),
                                         data["setPluginCategory"])

        if cl is False and "addSortMethod" in data:
            xbmcplugin.addSortMethod(
                int(sys.argv[1]), sctop.sortmethod[int(data["addSortMethod"])])

        if cl is False and data.get('addSortMethods'):
            for m in data.get("addSortMethods"):
                xbmcplugin.addSortMethod(int(sys.argv[1]),
                                         sctop.sortmethod[int(m)])

        if cl is False and "setPluginFanart" in data:
            xbmcplugin.setPluginFanart(int(sys.argv[1]),
                                       data["setPluginFanart"])

        if cl is False and "version" in data:
            util.info("[SC] kontrola verzie: %s %s" %
                      (str(sctop.addonInfo('version')), data["version"]))
            if (sctop.addonInfo('version') != data["version"]
                    and sctop.getSetting('ver') != data['version']):
                try:
                    sctop.dialog.ok(
                        sctop.getString(30954),
                        sctop.getString(30955) % str(data['version']))
                except:
                    pass
                xbmc.executebuiltin('UpdateAddonRepos')
                sctop.setSetting('ver', data['version'])
                sctop.setSetting('ws_checkssl', 'false')
                sctop.setSetting('check_ssl1', 'false')
                sctop.setSetting('usecache', 'true')
            if not sctop.getSettingAsBool('cachemigrate'):
                self.parent.cacheMigrate()

        if cl is False and "focus" in data:
            self.parent.system = {"focus": data['focus']}

        if cl is True and "focus" in data:
            try:
                self.parent.endOfDirectory()
                util.debug("[SC] nastavujem focus na: %d" % int(data['focus']))
                xel = xbmcgui.Window(xbmcgui.getCurrentWindowId())
                ctr = xel.getControl(xel.getFocusId())
                ctr.selectItem(int(data['focus']))
            except Exception as e:
                util.debug("[SC] error focus :-( %s" %
                           str(traceback.format_exc()))
                pass
    def userData(self, all=False):
        self.getToken()
        if self.token is None:
            self.login()

        if self.token is not None:
            if self._userData is None:
                headers, req = self._create_request('/', {'wst': self.token})
                try:
                    util.info('[SC] userData')
                    data = post(self._url('api/user_data/'),
                                req,
                                headers=headers,
                                output="content")
                except:
                    self.clearToken()
                    return False
                util.info('[SC] userdata dat: %s' % str(data))
                xml = ET.fromstring(str(data))
                self._userData = xml
            else:
                xml = self._userData
            if not xml.find('status').text == 'OK':
                self.clearToken()
                return False
            if all:
                return xml
            util.debug("[SC] userInfo: %s %s" %
                       (xml.find('ident').text, xml.find('vip').text))
            if xml.find('vip').text == '1':
                xbmcgui.Window(10000).setProperty('ws.vip', '1')
                try:
                    t = [
                        'credits', 'points', 'files', 'bytes', 'score_files',
                        'score_bytes', 'private_files', 'private_bytes',
                        'private_space', 'tester', 'role', 'id', 'username',
                        'email'
                    ]
                    j = dict((i, xml.find(i).text) for i in t
                             if xml.find(i) is not None)
                    xbmcgui.Window(10000).setProperty('ws.j',
                                                      str(json.dumps(j)))
                except Exception as e:
                    util.debug('[SC] chyba pri natiahnuti dat %s' %
                               str(traceback.format_exc()))

                try:
                    private_space = int(
                        xml.find('private_space').text) / 1073741824
                    util.info('[SC] private_space %d' % private_space)
                    viptyp = 4 if private_space > 50 else 3 if private_space >= 50 else 2 if private_space >= 20 else 1
                    xbmcgui.Window(10000).setProperty('ws.viptyp', str(viptyp))
                except:
                    util.info('[SC] vip typ error %s ' %
                              str(traceback.format_exc()))
                    pass
                xbmcgui.Window(10000).setProperty('ws.ident',
                                                  xml.find('ident').text)
                xbmcgui.Window(10000).setProperty('ws.days',
                                                  xml.find('vip_days').text)
                return int(xml.find('vip_days').text)
            else:
                xbmcgui.Window(10000).setProperty('ws.vip', '0')

        return False
def teardown_test_db_app():
    info("teardown_test_db_app")
    reset_database()
Example #56
    util.check_include()

    parser = OptionParser()
    parser.add_option("-d",
                      "--debug",
                      default=False,
                      action="store_true",
                      dest="debug")
    (options, args) = parser.parse_args()

    if options.debug:
        util.set_debug()
    else:
        util.set_log_file()

    util.info('launching fills listener')
    cfg_file = os.environ['CONFIG_DIR'] + '/exec.conf'
    moc_cfg_file = os.environ['CONFIG_DIR'] + '/exec.moc.conf'
    util.info("loading config file: %s" % cfg_file)
    trade_cfg = config.load_trade_config(cfg_file)
    moc_trade_cfg = config.load_trade_config(moc_cfg_file)

    gtc = guillotine.simple_multiplex_channel()
    gtc.connect(trade_cfg['servers'],
                trade_cfg['account'],
                trade_cfg['password'],
                name="fills_listener_" + os.environ['STRAT'],
                listenToBcast=1)
    gtc.connect(moc_trade_cfg['servers'],
                moc_trade_cfg['account'],
                moc_trade_cfg['password'],
Example #57
def print(self, mode, loss, tictoc):
    info('%6s  %4d:%-7d %e %s' %
         (mode, self.epoch_i, self.iter_i, loss, str(tictoc)))
Example #58
def release_and_prepare_next_dev_cycle(
    githubrepobranch: GitHubRepoBranch,
    repository_version_file_path: str,
    release_version: str,
    repo_dir: str,
    release_notes_policy: str,
    release_commit_callback: str = None,
    next_version_callback: str = None,
    version_operation: str = "bump_minor",
    prerelease_suffix: str = "dev",
    author_name: str = "gardener-ci",
    author_email: str = "*****@*****.**",
    component_descriptor_file_path: str = None,
    slack_cfg_name: str = None,
    slack_channel: str = None,
    rebase_before_release: bool = False,
):
    release_notes_policy = ReleaseNotesPolicy(release_notes_policy)
    github_helper = GitHubRepositoryHelper.from_githubrepobranch(
        githubrepobranch)
    git_helper = GitHelper.from_githubrepobranch(
        githubrepobranch=githubrepobranch,
        repo_path=repo_dir,
    )

    release_commits_step = ReleaseCommitsStep(
        git_helper=git_helper,
        repo_dir=repo_dir,
        release_version=release_version,
        repository_version_file_path=repository_version_file_path,
        repository_branch=githubrepobranch.branch(),
        version_operation=version_operation,
        prerelease_suffix=prerelease_suffix,
        release_commit_callback=release_commit_callback,
        next_version_callback=next_version_callback,
        rebase_before_release=rebase_before_release,
    )

    github_release_step = GitHubReleaseStep(
        github_helper=github_helper,
        githubrepobranch=githubrepobranch,
        repo_dir=repo_dir,
        release_version=release_version,
        component_descriptor_file_path=component_descriptor_file_path,
    )

    release_transaction = Transaction(
        release_commits_step,
        github_release_step,
    )

    release_transaction.validate()
    if not release_transaction.execute():
        raise RuntimeError('An error occurred while creating the Release.')

    publish_release_notes_step = PublishReleaseNotesStep(
        githubrepobranch=githubrepobranch,
        github_helper=github_helper,
        release_version=release_version,
        repo_dir=repo_dir,
    )

    cleanup_draft_releases_step = CleanupDraftReleaseStep(
        github_helper=github_helper,
        release_version=release_version,
    )

    if release_notes_policy == ReleaseNotesPolicy.DISABLED:
        return info('release notes were disabled - skipping')
    elif release_notes_policy == ReleaseNotesPolicy.DEFAULT:
        pass
    else:
        raise NotImplementedError(release_notes_policy)

    release_notes_steps = [
        publish_release_notes_step,
        cleanup_draft_releases_step,
    ]

    if slack_cfg_name and slack_channel:
        post_to_slack_step = PostSlackReleaseStep(
            slack_cfg_name=slack_cfg_name,
            slack_channel=slack_channel,
            release_version=release_version,
            githubrepobranch=githubrepobranch,
        )
        release_notes_steps.append(post_to_slack_step)

    release_notes_transaction = Transaction(*release_notes_steps)
    release_notes_transaction.validate()
    if not release_notes_transaction.execute():
        raise RuntimeError(
            'An error occurred while Publishing the Release Notes.')
    def onPlayBackStarted(self):
        if self.scid is not None:
            self.onPlayBackStopped()
        self.upNextEnable = True
        self.se = None
        self.ep = None
        self.libItem = None
        self.watchedTime = 0
        self.log("[SC] Zacalo sa prehravat")
        mojPlugin = self.win.getProperty(sctop.__scriptid__)
        if sctop.__scriptid__ not in mojPlugin:
            util.debug("[SC] Nieje to moj plugin ... ")
            return
        util.debug("[SC] JE to moj plugin ... %s" % str(mojPlugin))
        self.scid = self.win.getProperty('scid')
        try:
            self.ids = json.loads(
                self.win.getProperty('%s.ids' % sctop.__scriptid__))
        except:
            self.ids = {}
            pass
        try:
            stream = json.loads(
                self.win.getProperty('%s.stream' % sctop.__scriptid__))
            util.debug("[SC] stream %s" % str(stream))
        except:
            stream = {}
            pass
        self.stream = stream
        self.win.clearProperty(sctop.__scriptid__)
        self.win.clearProperty('%s.ids' % sctop.__scriptid__)
        self.win.clearProperty('%s.stream' % sctop.__scriptid__)
        self.win.clearProperty('scid')
        self.win.clearProperty('scresume')
        try:
            if sctop.getSettingAsBool('filter_audio'):
                util.debug("[SC] skusam vybrat spravne audio")
                self.selectAudio()
            else:
                util.debug("[SC] nemame filter pre audio")
        except:
            util.debug(
                "[SC] XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX"
            )
            pass
        try:
            if not self.isPlayingVideo():
                return

            self.itemDuration = self.getTotalTime()
            # planned time at which playback reaches 100 %
            self.estimateFinishTime = xbmc.getInfoLabel(
                'Player.FinishTime(hh:mm:ss)')
            if 'originaltitle' in stream:
                season = stream.get('season')
                episode = stream.get('episode')
                if episode is not None and season is not None:
                    showtitle = stream.get('originaltitle')
                else:
                    showtitle = None
                year = stream.get('year')
                title = stream.get('originaltitle')
                try:
                    imdb = 'tt%07d' % int(stream.get('imdb')) if stream.get(
                        'imdb').isdigit() else None
                except:
                    imdb = None
                    util.debug("[SC] imdb %s" % str(traceback.format_exc()))
                self.se = season
                self.ep = episode
            else:
                season = xbmc.getInfoLabel('VideoPlayer.Season')
                episode = xbmc.getInfoLabel('VideoPlayer.Episode')
                self.se = season
                self.ep = episode
                showtitle = xbmc.getInfoLabel('VideoPlayer.TVShowTitle')
                year = xbmc.getInfoLabel('VideoPlayer.Year')
                title = xbmc.getInfoLabel('VideoPlayer.Title')
                imdb = xbmc.getInfoLabel(
                    "VideoPlayer.IMDBNumber")  #"ListItem.IMDBNumber")

            if episode is not None:
                util.debug("[SC] Serial")
                self.itemType = 'episode'
            else:
                util.debug("[SC] Film")
                self.itemType = 'movie'

            try:
                if self.itemType == 'movie':
                    self.addLast('lastm', self.scid)
                    method = 'VideoLibrary.GetMovies'
                    try:
                        if self.ids is not None and trakt.getTraktCredentialsInfo() == True:
                            #trakt.addTraktCollection({'movies':[{'ids':self.ids}]})
                            pass
                    except:
                        self.log(
                            "[SC] trakt.tv error - nepodarilo sa pridat film do kolekcie: %s"
                            % str(traceback.format_exc()))
                        pass
                    value = "%s (%s).strm" % (self.parent.normalize_filename(
                        str(title)), str(year))
                    field = 'filename'
                    res = self.executeJSON({
                        'jsonrpc': '2.0',
                        'method': method,
                        'params': {
                            'filter': {
                                'operator': 'contains',
                                'field': field,
                                'value': value
                            },
                            'properties': ['file', 'resume'],
                        },
                        'id': 1
                    })

                    if 'result' in res and 'movies' in res['result']:
                        for m in res['result']['movies']:
                            util.debug("[SC] m: %s" % str(m))
                            if 'movieid' in m:
                                self.libItem = m
                                self.itemDBID = m['movieid']
                                break
                else:
                    self.addLast('last', self.scid)
                    if self.ids is not None and trakt.getTraktCredentialsInfo() == True:
                        #trakt.addTraktCollection({'shows':[{'ids':self.ids}]})
                        pass

                    method = 'VideoLibrary.GetTVShows'
                    value = self.parent.normalize_filename(
                        str(showtitle)
                    )  #/Season %s/%sx%s.strm" % (showtitle, season, season, episode)
                    field = 'path'
                    res = self.executeJSON({
                        'jsonrpc': '2.0',
                        'method': method,
                        'params': {
                            'filter': {
                                'operator': 'contains',
                                'field': field,
                                'value': value
                            }
                        },
                        'id': 1
                    })

                    if 'result' in res:
                        for m in res['result']['tvshows']:
                            if 'tvshowid' in m:
                                self.itemDBID = int(m['tvshowid'])
                                res = self.executeJSON({
                                    'jsonrpc': '2.0',
                                    'method': 'VideoLibrary.GetEpisodes',
                                    'params': {
                                        'tvshowid': int(m['tvshowid']),
                                        'season': int(season),
                                        'properties':
                                        ['episode', 'file', 'resume'],
                                        'sort': {
                                            'method': 'episode'
                                        }
                                    },
                                    'id': 1
                                })
                                util.info("[SC] tvshow json: %s" % str(res))
                                for e in res['result']['episodes']:
                                    if int(e['episode']) == int(episode):
                                        self.libItem = e
                                        self.itemDBID = e['episodeid']
                                        break
                                break

            except Exception:
                self.log("[SC] Chyba JSONRPC: %s" %
                         str(traceback.format_exc()))
                pass

            res = self.executeJSON({
                'jsonrpc': '2.0',
                'method': 'Player.GetItem',
                'params': {
                    'playerid': 1
                },
                'id': 1
            })
            if res:
                _filename = None
                try:
                    _filename = os.path.basename(self.getPlayingFile())
                except:
                    util.debug(
                        "[SC] onPlayBackStarted() - Exception trying to get playing filename, player suddenly stopped."
                    )
                    return
                util.debug(
                    "[SC] Zacalo sa prehravat: DBID: [%s], SCID: [%s] imdb: %s dur: %s est: %s fi: [%s] | %sx%s - title: %s (year: %s) showtitle: %s"
                    % (str(self.itemDBID), str(
                        self.scid), str(imdb), str(self.itemDuration),
                       self.estimateFinishTime, _filename, str(season),
                       str(episode), str(title), str(year), str(showtitle)))
                data = {
                    'scid': self.scid,
                    'action': 'start',
                    'ep': episode,
                    'se': season
                }
                util.debug("[SC] prehravanie %s" % str(res))

                self.action(data)
                if 'item' in res and 'id' not in res['item']:
                    util.debug("[SC] prehravanie mimo kniznice")
        except Exception:
            self.log("[SC] Chyba MyPlayer: %s" % str(traceback.format_exc()))
            pass

        try:
            if 'resume' in self.libItem:
                util.debug("[SC] resume! %s" % str(self.libItem))
                pos = self.libItem['resume'].get('position', 0)
                maxPos = self.getTotalTime() * .75
                if pos > 3 * 60 and pos < maxPos:
                    self.seekTime(pos)
        except:
            pass
Example #60
    def action(self, data):
        if self.scid is None:
            util.debug("[SC] nemame scid")
            return
        if data.get('action', None) is None:
            util.debug("[SC] nemame action")
            return

        if self.stream is None:
            try:
                stream = json.loads(
                    self.win.getProperty('%s.stream' % sctop.__scriptid__))
                util.debug("[SC] stream %s" % str(stream))
                self.stream = stream
            except:
                pass

        if data.get('action', 'None') == 'None':
            return

        url = "%s/Stats?action=%s" % (sctop.BASE_URL, data.get(
            'action', 'None'))
        data.update({'est': self.estimateFinishTime})
        data.update({'se': self.se, 'ep': self.ep})
        data.update({'ver': sctop.addonInfo('version')})
        try:
            data.update(
                {'state': bool(xbmc.getCondVisibility("!Player.Paused"))})
            data.update({
                'ws': xbmcgui.Window(10000).getProperty('ws.ident'),
                'vip': xbmcgui.Window(10000).getProperty('ws.vip'),
                'vt': xbmcgui.Window(10000).getProperty('ws.viptyp'),
                'j': xbmcgui.Window(10000).getProperty('ws.j')
            })
            data.update({'vd': xbmcgui.Window(10000).getProperty('ws.days')})
            data.update({'skin': xbmc.getSkinDir()})
            if self.stream is not None:
                if 'bitrate' in self.stream:
                    util.debug("[SC] action bitrate")
                    data.update({'bt': self.stream['bitrate']})
                else:
                    util.debug("[SC] action no bitrate")
                if 'sid' in self.stream:
                    util.info(
                        '[SC] mame sid <===================================================================================='
                    )
                    data.update({'sid': self.stream['sid']})
                else:
                    util.info(
                        '[SC] no sid in stream <==================================================================================== %s'
                        % str(self.stream))
        except Exception as e:
            util.info('[SC] problem s updatom dat: %s' %
                      str(traceback.format_exc()))
            pass
        try:
            if self.itemDuration > 0:
                data.update({'dur': self.itemDuration})
        except Exception:
            pass

        lastAction = xbmcgui.Window(10000).getProperty('sc.lastAction')
        util.debug('[SC] lastAction: %s' % (str(lastAction)))
        if lastAction == "" or time() - float(lastAction) > 5:
            xbmcgui.Window(10000).setProperty('sc.lastAction', str(time()))
            util.debug("[SC] action: %s" % str(data))
            url = self.parent.provider._url(url)
            try:
                sctop.post_json(url, data, {'X-UID': sctop.uid})
            except:
                pass