Example #1
    def init_crawl(self):
        # Fetch the sitemap. We ignore robots.txt in this case, and
        # assume it's always under /sitemap.xml
        r = requests.get("https://%s/sitemap.xml" % self.hostname)
        if r.status_code != 200:
            raise Exception("Could not load sitemap: %s" % r.status_code)

        p = SitemapParser()
        p.parse(r.text)

        # Attempt to fetch a sitemap_internal.xml. This is used to index
        # pages on our internal search engine that we don't want on
        # Google. They should also be excluded from default search
        # results (unless searching with a specific suburl)
        r = requests.get("https://%s/sitemap_internal.xml" % self.hostname)
        if r.status_code == 200:
            p.parse(r.text, True)

        for url, prio, lastmod, internal in p.urls:
            # Advance 8 characters - length of https://.
            url = url[len(self.hostname) + 8:]
            if lastmod:
                if url in self.scantimes:
                    if lastmod < self.scantimes[url]:
                        # Not modified since last scan, so don't reload
                        # Stick it in the list of pages we've scanned though,
                        # to make sure we don't remove it...
                        self.pages_crawled[url] = 1
                        continue
            self.queue.put((url, prio, internal))

        log("About to crawl %s pages from sitemap" % self.queue.qsize())
Example #2
def update_movie_set(percent, movieset, art_type, filename):
    log("Selected %s: %s" % (art_type, filename), xbmc.LOGDEBUG)
    dialog_msg("update", percent=percent, line1=__language__(32006),
               line2=" %s %s" % (__language__(32007), movieset["label"]),
               line3=filename)
    return DB.updateDatabase(movieset["setid"], filename, art_type,
                             enable_force_update)
Example #3
def doit(opt):
    cp = ConfigParser()
    cp.read("search.ini")
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    conn = psycopg2.connect(cp.get("search", "db"))

    curs = conn.cursor()

    if opt.list:
        # Multiple lists can be specified with a comma separator (no spaces)
        curs.execute("SELECT id,name FROM lists WHERE name=ANY(%(names)s)", {
            'names': opt.list.split(','),
        })
    else:
        curs.execute("SELECT id,name FROM lists WHERE active ORDER BY id")

    listinfo = [(id, name) for id, name in curs.fetchall()]
    c = MultiListCrawler(listinfo, conn, opt.status_interval,
                         opt.commit_interval)
    n = c.crawl(opt.full, opt.month)

    # Update total counts
    curs.execute(
        "WITH t AS (SELECT list,count(*) AS c FROM messages GROUP BY list) UPDATE lists SET pagecount=t.c FROM t WHERE id=t.list"
    )
    # Indicate when we crawled
    curs.execute("UPDATE lastcrawl SET lastcrawl=CURRENT_TIMESTAMP")
    conn.commit()

    log("Indexed %s messages" % n)
    time.sleep(1)
Example #4
    def get_artist_image_filepath(self, artist):
        # If the file already exists, return filepath
        artist_underscore = artist.replace(" ", "_")
        artist_plus = artist.replace(" ", "+")
        if os.path.isfile("./data/images/artists/%s.jpg" % artist_underscore):
            return "%s/data/images/artists/%s.jpg" % (
                os.getcwd(), artist_underscore)

        # Download JSON to get image URLs
        data = self._get_json(
            "https://api.spotify.com/v1/search?type=artist&q=%s" % (
                artist_plus))
        try:
            images = data["artists"]["items"][0]["images"]
        except IndexError:
            log("Could not find artist: %s" % artist_underscore)
            return "%s/data/images/no-artist.jpg" % (os.getcwd())

        # Get image with biggest width
        best_width = 0
        best_url = ""
        for image in images:
            if image["width"] > best_width:
                best_width = image["width"]
                best_url = image["url"]

        # Now we have the best url, download it to a file and return filepath
        self._download_image(best_url, "./data/images/artists/%s.jpg" % (
            artist_underscore))
        log("Artist image downloaded: " + artist)
        return "%s/data/images/artists/%s.jpg" % (
            os.getcwd(), artist_underscore)
Example #5
def _create_intermediate_ca_signing_config(payload: dict) -> dict:
    # create the base signing config
    signing_config: dict = {
        'signing': {
            'profiles': {
                'ca': {
                    'expiry': INTERMEDIATE_CA_DEFAULT_EXPIRY,
                    'usages': [
                        'cert sign',
                        'crl sign'
                    ],
                    'ca_constraint': {
                        'is_ca': True,
                        'max_path_len': 0,
                        'max_path_len_zero': True
                    }
                }
            }
        }
    }
    # set ca properties from payload, if present
    if 'ca' in payload['params']:
        # expiry
        signing_config['signing']['profiles']['ca']['expiry'] = \
            payload['params']['ca'].get(
                'expiry', INTERMEDIATE_CA_DEFAULT_EXPIRY)
    # log config for debugging
    log("intermediate ca signing config:")
    log(json.dumps(signing_config, indent=4))
    return signing_config
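As a usage illustration, here is a hypothetical payload (only the keys the function actually reads are assumed) and the resulting override of the default expiry:

# Hypothetical payload shape, inferred from the lookups above
payload = {'params': {'ca': {'expiry': '43800h'}}}
config = _create_intermediate_ca_signing_config(payload)
assert config['signing']['profiles']['ca']['expiry'] == '43800h'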
Example #6
	def crawl(self):
		self.init_crawl()

		# Fire off worker threads
		for x in range(5):
			t = threading.Thread(name="Indexer %s" % x,
					   target = lambda: self.crawl_from_queue())
			t.daemon = True
			t.start()

		t = threading.Thread(name="statusthread", target = lambda: self.status_thread())
		t.daemon = True
		t.start()

		# XXX: need to find a way to deal with all threads crashed and
		# not done here yet!
		self.queue.join()
		self.stopevent.set()

		# Remove all pages that we didn't crawl
		curs = self.dbconn.cursor()
		curs.execute("DELETE FROM webpages WHERE site=%(site)s AND NOT suburl=ANY(%(urls)s)", {
				'site': self.siteid,
				'urls': list(self.pages_crawled.keys()),
				})
		if curs.rowcount:
			log("Deleted %s pages no longer accessible" % curs.rowcount)
		self.pages_deleted += curs.rowcount

		self.dbconn.commit()
		log("Considered %s pages, wrote %s updated and %s new, deleted %s." % (len(self.pages_crawled), self.pages_updated, self.pages_new, self.pages_deleted))
Example #7
        def __init__(self):
            self.banner = self._read_data(
                os.path.join(const.DATA_LOCATION, "banner.txt"), False)

            log("Reading zone information")
            self._read_zones()
            log(f"Loaded {len(self.rooms)} rooms")
Example #8
def push():
    config = {
        'GCM_API_KEY': variables.get("gcm_api_key", "")
    }
    app.config.update(config)

    client = FlaskGCM()
    client.init_app(app)

    with app.app_context():
        tokens = variables.get("gcm_tokens", [])
        if not tokens:
            log("No devices registered")
            return
        playing = variables.get("playing", [])
        alert = {"artist": playing[0],
                 "album": playing[1],
                 "song": playing[2],
                 "duration": variables.get("song_duration", 0)}

        # Send to single device.
        # NOTE: Keyword arguments are optional.
        res = client.send(tokens,
                          alert,
                          collapse_key='collapse_key',
                          delay_while_idle=False,
                          time_to_live=600)
Example #9
	def parse_date(self, d):
		# For some reason, we have dates that look like this:
		# http://archives.postgresql.org/pgsql-bugs/1999-05/msg00018.php
		# Looks like an mhonarc bug, but let's just remove that trailing
		# stuff here to be sure...
		if self._date_trailing_envelope.search(d):
			d = self._date_trailing_envelope.sub('', d)

		# We have a number of dates in the format
		# "<full datespace> +0200 (MET DST)"
		# or similar. The problem coming from the space within the
		# parenthesis, or if the contents of the parenthesis is
		# completely empty
		if self._date_multi_re.search(d):
			d = self._date_multi_re.sub('', d)
		# Isn't it wonderful with a string with a trailing quote but no
		# leading quote? MUA's are weird...
		if d.endswith('"') and not d.startswith('"'):
			d = d[:-1]

		# We also have "known incorrect timezone specs".
		if d.endswith('MST7MDT'):
			d = d[:-4]
		elif d.endswith('METDST'):
			d = d[:-3]
		elif d.endswith('"MET'):
			d = d[:-4] + "MET"

		try:
			self.date = dateutil.parser.parse(d)
		except ValueError as e:
			log("Failed to parse date '%s'" % d)
			return False
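A worked illustration of the "known incorrect timezone specs" branch above: 'MST7MDT' is seven characters, so slicing off the last four leaves the plain 'MST' zone that the parser can cope with.

# Illustration only; the other fix-ups above work the same way
d = "Fri, 7 May 1999 09:18:40 MST7MDT"
if d.endswith('MST7MDT'):
    d = d[:-4]
print(d)  # Fri, 7 May 1999 09:18:40 MST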
Example #10
def logStatus(text, status, overWrite=False):
    statusText = [
        f"{Fore.RED}✗ ERRR", f"{Fore.YELLOW}● WAIT", f"{Fore.GREEN}✓ OKAY"
    ]
    log("INFO",
        "{:66}{}{}".format(text, statusText[status + 1], Fore.RESET),
        resetCursor=(not overWrite))
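A usage sketch (assuming colorama's Fore is in scope and log() accepts these arguments, as the definition above implies): status -1 indexes the red ERRR entry, 0 the yellow WAIT entry, and 1 the green OKAY entry.

logStatus("Connecting to database", 0)   # yellow "● WAIT"
logStatus("Connecting to database", 1)   # green  "✓ OKAY"
logStatus("Loading configuration", -1)   # red    "✗ ERRR"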
Example #11
def doit():
    psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
    conn = psycopg2.connect(cp.get("search", "db"))

    curs = conn.cursor()

    # Start by indexing the main website
    log("Starting indexing of main website")
    SitemapSiteCrawler("www.postgresql.org", conn, 1,
                       cp.get("search", "frontendip"), True).crawl()
    conn.commit()

    # Skip id=1, which is the main site..
    curs.execute("SELECT id, hostname, https FROM sites WHERE id>1")
    for siteid, hostname, https in curs.fetchall():
        log("Starting indexing of %s" % hostname)
        GenericSiteCrawler(hostname, conn, siteid, https).crawl()
        conn.commit()

    curs.execute(
        "WITH t AS (SELECT site,count(*) AS c FROM webpages GROUP BY site) UPDATE sites SET pagecount=t.c FROM t WHERE id=t.site"
    )
    conn.commit()

    time.sleep(1)
Example #12
    def send_all(self, message):
        # Iterate over a copy: removing from the list we are looping over
        # would otherwise skip the client after the removed one.
        for c in list(self.client_pool):
            try:
                c.send(message)
            except ConnectionAbortedError:
                self.client_pool.remove(c)
                log(f"Attempted to send_all to an expired client {c}")
Example #13
    def run(self):
        """
        Main loop for the thread, handles messaging to/from the client
        """
        # Log in stuff
        self.send(self.server.world.banner)
        self.send(
            "{yellow}connect <username> <password>{normal} for an existing hero"
        )
        self.send(
            "{yellow}create <username> <password>{normal} for a new hero")

        # Client game loop
        while True:
            # Socket mode is set to non-blocking here, so we have to catch timeout errors and handle them if/when they occur
            try:
                data = self.receive()
            except socket.timeout:
                self.send(
                    "You seem distant, is there another woman? WHO IS THE BITCH?? (disconnected due to timeout)"
                )
                self.exit()
                return

            if not data:
                continue

            log("Server - Received '{0}'".format(data))
            if data == "exit":
                self.exit()
                return

            self.run_command(data)
Example #14
def _create_leaf_signing_request(payload: dict) -> dict:
    # create the base request
    signing_request: dict = {
        'key': {
            'algo': LEAF_DEFAULT_KEY_ALGORITHM,
            'size': LEAF_DEFAULT_KEY_SIZE
        },
        'names': []
    }
    # set key properties from payload, if present
    if 'key' in payload['params']:
        # key algorithm
        signing_request['key']['algo'] = \
            payload['params']['key'].get(
                'algo', LEAF_DEFAULT_KEY_ALGORITHM)
        # key size
        signing_request['key']['size'] = \
            payload['params']['key'].get(
                'size', LEAF_DEFAULT_KEY_SIZE)
    # set names from payload, if present
    if 'names' in payload['params']:
        signing_request['names'] = payload['params']['names']
    # log request for debugging
    log("leaf signing request:")
    log(json.dumps(signing_request, indent=4))
    # return signing request
    return signing_request
Example #16
def _create_leaf_signing_config(payload: dict) -> dict:
    # create the base signing config
    signing_config: dict = {
        'signing': {
            'profiles': {
                'leaf': {
                    'expiry': LEAF_DEFAULT_EXPIRY,
                    'usages': LEAF_DEFAULT_USAGES
                }
            }
        }
    }
    # set leaf properties from payload, if present
    if 'leaf' in payload['params']:
        # expiry
        signing_config['signing']['profiles']['leaf']['expiry'] = \
            payload['params']['leaf'].get(
                'expiry', LEAF_DEFAULT_EXPIRY)
        # usages
        signing_config['signing']['profiles']['leaf']['usages'] = \
            payload['params']['leaf'].get(
                'usages', LEAF_DEFAULT_USAGES)
    # log config for debugging
    log("leaf signing config:")
    log(json.dumps(signing_config, indent=4))
    return signing_config
Example #17
    def init_crawl(self):
        # Fetch the sitemap. We ignore robots.txt in this case, and
        # assume it's always under /sitemap.xml
        u = urllib.urlopen("https://%s/sitemap.xml" % self.hostname)
        p = SitemapParser()
        p.parse(u)
        u.close()

        # Attempt to fetch a sitemap_internal.xml. This is used to index
        # pages on our internal search engine that we don't want on
        # Google. They should also be excluded from default search
        # results (unless searching with a specific suburl)
        u = urllib.urlopen("https://%s/sitemap_internal.xml" % self.hostname)
        if u.getcode() == 200:
            p.parse(u, True)
        u.close()

        for url, prio, lastmod, internal in p.urls:
            # Advance 8 characters - length of https://.
            url = url[len(self.hostname) + 8:]
            if lastmod:
                if url in self.scantimes:
                    if lastmod < self.scantimes[url]:
                        # Not modified since last scan, so don't reload
                        # Stick it in the list of pages we've scanned though,
                        # to make sure we don't remove it...
                        self.pages_crawled[url] = 1
                        continue
            self.queue.put((url, prio, internal))

        log("About to crawl %s pages from sitemap" % self.queue.qsize())
Example #18
    def crawl_from_queue(self):
        while not self.stopevent.is_set():
            (url, relprio, internal) = self.queue.get()
            try:
                self.crawl_page(url, relprio, internal)
            except Exception as e:
                log("Exception crawling '%s': %s" % (url, e))
            self.queue.task_done()
Example #19
def removeClient(connected, msg):
	log(f"remove client {clientAddress}: {msg}", "info")
	try:
		clientSockets.remove(connected)
	except KeyError:
		log(f"[WARN] client {clientAddress} already removed", "warning")

	return False
Example #20
    def run_command(self, command: str):
        """
        Forces a client to run a command, useful for nesting commands
        """
        try:
            handle_command(command, self, self.server)
        except Exception as e:
            log(f"SOME BAD SHIT WENT DOWN IN COMMAND LAND\n{e}")
Example #21
def _run_command(command, debug=os.getenv('DEBUG', "False").lower() == "true"):
    """ Run command and return its output. """
    log("Executing command \"%s\"" % command, debug=True)
    process = os.popen(command)
    output = process.read()
    process.close()
    log("Command result: %s" % output, debug=True)
    return output
Example #22
	def crawl_from_queue(self):
		while not self.stopevent.is_set():
			(url, relprio) = self.queue.get()
			try:
				self.crawl_page(url, relprio)
			except Exception as e:
				log("Exception crawling '%s': %s" % (url, e))
			self.queue.task_done()
Example #23
    def get_album(self, artist_name, album_title):
        # Get the albumObject given the album title and artist name
        for artist in self.artists:
            if artist.get_name() == artist_name:
                for album in artist.get_albums():
                    if album.get_title() == album_title:
                        return album
        log("Album not found: %s - %s" % (artist_name, album_title))
Example #24
def main(base_url, config):

    while True:
        try:
            duration = get_duration(base_url, config)
            persist(time(), duration, OUT_FILE)
        except Exception as e:
            log(e)
        sleep(INTERVAL_SECONDS)
Example #25
def error403(code):
    log.log("Tried to access: {0}".format(request.path), "ERROR 403")
    if "username" in session:
        username = session['username']
        flash("You do not have sufficient rights to access this page.")
        return (render_template("error/403.html"), 403)
    else:
        flash("You do not have sufficient rights to access this page. Please log in.")
        return (applications.usermanager.login(), 403)
Example #26
	def crawl_month(self, listid, listname, year, month, maxmsg):
		currentmsg = maxmsg
		while True:
			currentmsg += 1
			try:
				if not self.crawl_single_message(listid, listname, year, month, currentmsg):
					break
			except Exception as e:
				log("Exception when crawling %s/%s/%s/%s - %s" % (
					listname, year, month, currentmsg, e))
Example #27
def mark_as_unread(ids, host=HOST, username=USERNAME, password=PASSWORD):
    debug("Marking {} emails as unread.".format(len(ids)))
    try:
        with IMAP4_SSL(host) as email:
            email.login(username, password)
            email.select()
            for e_id in ids:
                email.store(e_id, '-FLAGS', '\\Seen')
    except OSError:
        log("Failed to mark {} e-mails as unread".format(len(ids)))
Example #28
def register_token():
    if not valid_ip():
        block_user()
    data = request.data
    log("Token: " + str(data))
    tokens = variables.get("gcm_tokens", [])
    if data not in tokens:
        tokens.append(data)
        variables.put("gcm_tokens", tokens)
    return "Successfully registered token: %s" % data
Example #29
    def get_song(self, artist_name, album_title, song_name):
        # Get the songObject given the artist, album and title of the song
        for artist in self.artists:
            if artist.get_name() == artist_name:
                for album in artist.get_albums():
                    if album.get_title() == album_title:
                        for song in album.get_songs():
                            if song.get_title() == song_name:
                                return song
        log("Song not found: %s, %s, %s" % (artist_name, album_title, song_name))
Example #30
def collect(request):
    if request.method == 'POST':
        # Parse the body only for POST requests; a GET carries no JSON payload
        asset_info = json.loads(request.body)
        vendor = asset_info['vendor']
        # group = asset_info['group']
        disk = asset_info['disk']
        cpu_model = asset_info['cpu_model']
        cpu_num = asset_info['cpu_num']
        memory = asset_info['memory']
        sn = asset_info['sn']
        osver = asset_info['osver']
        hostname = asset_info['hostname']
        ip = asset_info['ip']
        if not ip or not hostname:
            return HttpResponse(
                "Error: your agent IP or hostname is empty! Please resolve your hostname."
            )
        # asset_type = ""
        # status = ""
        try:
            host = Host.objects.get(hostname=hostname)
        except Exception as msg:
            print(msg)
            host = Host()
            level = get_dir("log_level")
            ssh_pwd = get_dir("ssh_pwd")
            log_path = get_dir("log_path")
            log("cmdb.log", level, log_path)
            logging.info("==========sshkey deploy start==========")
            data = deploy_key(ip, ssh_pwd)
            logging.info(data)
            logging.info("==========sshkey deploy end==========")

        # if req.POST.get('identity'):
        #     identity = req.POST.get('identity')
        #     try:
        #         host = Host.objects.get(identity=identity)
        #     except:
        #         host = Host()
        host.hostname = hostname
        # host.group = group
        host.cpu_num = int(cpu_num)
        host.cpu_model = cpu_model
        host.memory = int(memory)
        host.sn = sn
        host.disk = disk
        host.os = osver
        host.vendor = vendor
        host.ip = ip
        # host.asset_type = asset_type
        # host.status = status
        host.save()
        return HttpResponse("Post asset data to server successfully!")
    else:
        return HttpResponse("No POST data!")
Example #31
    def put(self, key, value, writeToFile=True):
        # Check if the key and value are of the correct types
        if type(key) != str:
            log("First argument should be string not of type: " + str(type(key)))
            return
        if type(value) not in [int, float, str, list, dict, tuple, bool, type(None)]:
            log("Value type not supported: " + str(type(value)))
            return

        # Save variable in global
        key = key.strip()
        self.glob[key] = value

        if not writeToFile:
            # Don't write the key and value to file
            return

        # Write key to file
        self.lock.acquire()  # Acquire lock
        try:
            # Check if key already exists
            f = open(self.filename, "r")
            for i, line in enumerate(f):
                linesplit = line.split("=")
                if linesplit[0].strip() == key:
                    # Key already exists
                    f.close()
                    line_no = i
                    break
            else:
                # Key does not exist
                f.close()
                # Append to file
                f = open(self.filename, "a")
                if type(value) == str:
                    f.write(key + " = '" + str(value) + "'\n")
                else:
                    f.write(key + " = " + str(value) + "\n")
                f.close()
                return  # Finally will be called where the lock will be released
            # Key already exists
            f = open(self.filename, "r")
            lines = f.readlines()
            if type(value) == str:
                lines[line_no] = key + " = '" + str(value) + "'\n"
            else:
                lines[line_no] = key + " = " + str(value) + "\n"
            f.close()
            # Overwrite new lines to file
            f = open(self.filename, "w")
            f.writelines(lines)
            f.close()
        finally:
            # Release lock
            self.lock.release()
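For clarity, the line format written in both branches above, pulled out as a standalone sketch: string values are single-quoted, everything else goes through str().

def format_line(key, value):
    # Mirrors the write logic in put(); illustration only
    if type(value) == str:
        return key + " = '" + str(value) + "'\n"
    return key + " = " + str(value) + "\n"

print(format_line("volume", 75), end="")         # volume = 75
print(format_line("greeting", "hello"), end="")  # greeting = 'hello'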
Example #32
def fade_in(s):
    # Fade in in s seconds
    if s < 0.5:
        log("Time to fade in may not be smaller than 0.5 seconds.")
        s = 0.5
    volume = variables.get("volume", 75)
    s -= 0.5  # Compensate for computing time
    for i in range(20):
        vol = volume * (i / 20.)
        set_volume(int(vol), True)
        time.sleep(s / 20.)
Example #34
def colorize(message: str):
    """
    Returns the correctly colorized message based on the known color codes
    """
    if not message.endswith("{normal}"):
        message = message + "{normal}"
    try:
        return message.format(**COLOR_MAP)
    except Exception as e:
        log(e)
        return message
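colorize() relies on a module-level COLOR_MAP of placeholder names to ANSI escape codes. A minimal sketch (the real map almost certainly has more entries than these two assumed ones):

COLOR_MAP = {
    "yellow": "\033[33m",
    "normal": "\033[0m",
}
# e.g. the login banner from the run() example above:
print(colorize("{yellow}connect <username> <password>{normal} for an existing hero"))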
Example #35
def _start_container(name):
    pid = ipc.get_pid(name)
    if not pid:
        log('failed to start container')
        raise Exception('failed to start container')
    cfg = os.path.join(PATH_LXC_LIB, name, 'config')
    cmd = ['lxc-start', '-F', '-P', PATH_LXC_LIB, '-p', _get_path_run(name), '-n', name, '-f', cfg, '--share-ipc', str(pid), '-o', _get_log_path(name), '-c', '/dev/null']
    if SHOW_CMD:
        _log('%s' % ' '.join(cmd))
    popen(cmd)
    os.system('lxc-wait -n %s -s "RUNNING"' % name)
    os.system('lxc-device add -n %s /dev/ckpt' % name)
Example #36
def work():
    global wait_for

    try:
        ids, messages = fetch_new_emails()
    except OSError:
        log("Failed to connect with e-mail server to parse messages.")
        return
    except NoMessages:
        debug("No e-mails to parse.")
        return

    data = parse_messages(messages)

    request_data = to_json({"token": AUTH_TOKEN, "data": data})
    debug("JSON: {}".format(request_data))

    try:
        debug("Connecting to Server to register parsed events.")
        response = post_request(API_ENDPOINT,
                                headers={'Content-Type': 'application/json'},
                                data=request_data)
        debug("Events registered.")

        # Server returns wait_for until next run (in minutes)
        received_wait_for = int(response.text)

        if received_wait_for == -1:
            log("Invalid token.")
            if not DEBUG:
                mark_as_unread(ids)
            return
        elif received_wait_for == -2:
            log("Database error.")
            if not DEBUG:
                mark_as_unread(ids)
            return

        debug("Received {} (minutes) from the server, "
              "to wait until next execution.".format(received_wait_for))

        if 0 < received_wait_for <= MAX_WAITING_PERIOD:
            wait_for = received_wait_for
        else:
            debug("Ignoring {} as it's not between 1 and {}".format(
                received_wait_for, MAX_WAITING_PERIOD))
    except RequestException:
        log("Failed to connect to Server")
        if not DEBUG:
            mark_as_unread(ids)
    except ValueError:
        log("Received {} from the Server, failed to convert to int "
            "to wait for (in minutes)".format(response.text))
Example #37
def one_term(args, term):
    pretty_term = f'{str(term)[:4]}:{str(term)[4]}'

    log(pretty_term, 'Loading courses')
    courses = list(load_some_courses(term))

    if args.legacy:
        [regress_course(c) for c in courses]

    log(pretty_term, 'Saving term')
    for f in args.format:
        save_term(term, courses, kind=f, root_path=args.out_dir)
Example #40
def start(name):
    path = get_path(name)
    _log('start, mnt=%s' % path)
    mkdirs(path)
    umount(path)
    ftp.mount(path, name2addr(name), FS_PORT)
    path = _get_bin(name)
    if not os.path.exists(path):
        log('failed to start (name=%s)' % name)
        raise Exception('Error: failed to start (name=%s)' % name)
    if LBFS:
        _start_lbfs(name)
Example #41
    def fetch_page(self, url):
        try:
            headers = {
                'User-agent': 'pgsearch/0.2',
            }
            if url in self.scantimes:
                headers["If-Modified-Since"] = formatdate(
                    time.mktime(self.scantimes[url].timetuple()))

            if self.serverip and False:
                connectto = self.serverip
                headers['Host'] = self.hostname
            else:
                connectto = self.hostname

            resp = requests.get(
                '{}://{}{}'.format(self.https and 'https' or 'http', connectto,
                                   url),
                headers=headers,
                timeout=10,
            )

            if resp.status_code == 200:
                if not self.accept_contenttype(resp.headers["content-type"]):
                    # Content-type we're not interested in
                    return (2, None, None)
                return (0, resp.text,
                        self.get_date(resp.headers.get("last-modified", None)))
            elif resp.status_code == 304:
                # Not modified, so no need to reprocess, but also don't
                # give an error message for it...
                return (0, None, None)
            elif resp.status_code == 301:
                # A redirect... So try again with the redirected-to URL
                # We send this through our link resolver to deal with both
                # absolute and relative URLs
                if resp.headers.get('location', '') == '':
                    log("Url %s returned empty redirect" % url)
                    return (2, None, None)

                for tgt in self.resolve_links([
                        resp.headers['location'],
                ], url):
                    return (1, tgt, None)
                # No redirect at all found, because it was invalid?
                return (2, None, None)
            else:
                # print "Url %s returned status %s" % (url, resp.status)
                pass
        except Exception as e:
            log("Exception when loading url %s: %s" % (url, e))
        return (2, None, None)
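fetch_page() returns a (status, payload, lastmod) tuple: 0 means fetched (payload holds the page text, or None for a 304), 1 means a redirect (payload holds the resolved target URL), and 2 means skip (error, unwanted content type, or bad redirect). A hedged sketch of how a caller such as crawl_page (not shown here) can interpret it:

def handle_fetch(crawler, url):
    # Illustration of the status protocol; the real crawl_page logic is not shown
    status, payload, lastmod = crawler.fetch_page(url)
    if status == 1:
        # Redirect: payload is the new URL, so follow it once
        status, payload, lastmod = crawler.fetch_page(payload)
    if status == 0 and payload is not None:
        return payload, lastmod  # page text to index, plus its last-modified date
    return None, None            # not modified, not interesting, or failed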
Example #42
def _get_receive_file_error(path):
    file_sniffs = _get_sniffs(path, "summary")
    if not file_sniffs:
        return None

    try:
        file_error = file_sniffs[-2].split(" ")
        error_count = int(file_error[3])
        warning_count = int(file_error[6])
    except ValueError as e:
        log('_get_receive_file_error error:' + e.message, path, file_error,
            file_sniffs)
        raise e
Example #43
def waitForClient(socket):
	try:
		msg = socket.send("?".encode())
	except Exception as e:
		# client no longer connected
		# remove it from the set
		log(f"[ERR] {clientAddress} : {e}", "error")
		try:
			clientSockets.remove(socket)
		except Exception as e:
			log(f"[ERR] {clientAddress} : {e}", "error")
	else:
		parseCommand(socket)
Example #44
def requestModel(connected):
	log(f"[INFO] Waiting for model from {clientAddress}", "info")

	model = connected.recv(4).decode()

	log("[INFO] Got model: " + model.lower(), "info")
	model = model.lower()
	if model == "ince":
		model = "Inception"
	elif model == "dens":
		model = "densenet"
	elif model == "mycn":
		model = "myCNN"
	elif model == "vgg!":
		model = "VGG"
	elif model == "xcep":
		model = "Xception"
	elif model == "resn":
		model = "Resnet"
	else:
		model = "fail"

	if model == "fail":
		log(f"[EE] Replying to {clientAddress}: 3", "warning")
		connected.sendall("3".encode())
	else:
		log(f"[INFO] Replying to {clientAddress}: Success", "info")
		connected.sendall("0".encode())
		return model
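A note on the lookup above: the if/elif chain maps a fixed four-byte code to a model name and could equally be expressed as a dict lookup, sketched here as an equivalent alternative:

# Equivalent sketch of the chain above
MODEL_CODES = {
    "ince": "Inception",
    "dens": "densenet",
    "mycn": "myCNN",
    "vgg!": "VGG",
    "xcep": "Xception",
    "resn": "Resnet",
}
model = MODEL_CODES.get(model.lower(), "fail")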
Example #45
def one_term(term, workers, **kwargs):
    str_term = str(term)
    pretty_term = str_term[0:4] + ':' + str_term[4]

    log(pretty_term, 'Loading term')
    raw_term_data = load_term(term,
                              force_download=kwargs['force_download_terms'])

    if not raw_term_data:
        return []

    log(pretty_term, 'Extracting courses')
    courses = raw_term_data['searchresults']['course']

    log(pretty_term, 'Loading details')
    details = fetch_course_details([c['clbid'] for c in courses],
                                   dry_run=kwargs['dry_run'],
                                   force_download=kwargs['force_download_details'])

    log(pretty_term, 'Processing courses')
    final_courses = process_courses(courses, details,
                                    dry_run=kwargs['dry_run'],
                                    find_revisions=kwargs['find_revisions'],
                                    ignore_revisions=kwargs['ignore_revisions'])

    return final_courses
Example #46
def makePrediction(connected, imageFile, model):
	trainedModel = tf.keras.models.load_model(modelFolder + model)

	processedImage = prepareImage(imageFile)

	pred = np.argmax(trainedModel.predict(processedImage), axis=-1)

	if pred[0] == 0:
		results = "N"
	else:
		results = "P"

	log(f"[INFO] Sending Prediction to {clientAddress}: {results}", "info")
	connected.sendall(results.encode())
Example #47
    def serve(self):
        worker_thread = Thread(target=self.tick)
        worker_thread.daemon = True
        worker_thread.start()

        while True:
            connection, address = self.socket_server.accept()
            connection.settimeout(const.SERVER_CONNECTION_TIMEOUT)
            log(f"Server - {address} connected")

            thread = ClientThread(connection, address, self)
            thread.daemon = True
            self.client_pool.append(thread)
            thread.start()
Example #48
    def contains_in_file(self, key):
        ### Should not be used any more! ###
        # Return True/False if file contains key
        if type(key) != str:
            log("Argument should be string not of type: " + str(type(key)))
            return
        key = key.strip()
        with open(self.filename, "r") as f:
            for line in f:
                if line.split("=")[0].strip() == key:
                    return True
        return False
Example #49
def error(code):
    log.log("An error occurred on \"{0}\" reason: {1}".format(request.path, repr(code)), "ERROR 500")
    try:
        username = session['username']
    except KeyError:
        username = "******"
    try:
        ip = flask.request.environ["REMOTE_ADDR"]
    except KeyError:
        ip = "UNKNOWN IP"

    text = mail.error_adminmail.format(username=username, time=now(), ip=ip, url = request.path, code=repr(code))
    mail.admin("ERROR", text, type="html", mail_admins=True)
    return (render_template("error/500.html"), 500)
Example #50
def fade_out(s):
    # Music paused afterwards and original volume restored.
    if s < 0.5:
        log("Time to fade out may not be smaller than 0.5 seconds.")
        s = 0.5
    volume = variables.get("volume", 75)
    s -= 0.5  # Compensate for computing time
    for i in range(20):
        vol = volume * (1 - (i / 20.))
        set_volume(int(vol), True)
        time.sleep(s / 20.)
    pause()  # Pause the music
    # Wait, otherwise you hear the music resume for a fraction of a second
    time.sleep(0.5)
    set_volume(volume, True)  # Set the volume back
Example #51
def play(song):
    log("Playing song: %s by %s" % (song.get_title(), song.get_artist().get_name()))
    variables.put("status", variables.PLAYING)
    variables.put("playing",
                  [song.get_artist().get_name(), song.get_album().get_title(),
                   song.get_title()])
    variables.put("song_duration", song.get_duration())
    variables.put("song_start", time.time(), False)
    os.system("pkill mpg123")  # Kill everything that might be playing
    os.system('mpg123 -q "%s" &' % song.get_path())  # Play the song
    push()  # Notify the users

    # Start counting in new thread
    thread = Thread(target=start_timer)
    thread.start()
Example #52
def find_images_lenient(file_map, base, recurse):
    log("Looking for artwork in %s" % base)
    dirs, files = vfs_listdir(base)
    dirs = [f.decode('utf-8') for f in dirs]
    files = [f.decode('utf-8') for f in files]
    for filename in files:
        full_path = join_path(base, filename)
        if filename.lower().endswith(('.jpg', '.jpeg', '.png')):
            lenient = get_lenient_name(filename)
            if lenient not in file_map:
                file_map[lenient] = full_path
    if recurse:
        for dirname in dirs:
            full_path = join_path(base, dirname)
            find_images_lenient(file_map, full_path, recurse)
Example #53
	def status_thread(self):
		starttime = time.time()
		while not self.stopevent.is_set():
			self.stopevent.wait(self.status_interval)
			nowtime = time.time()
			with self.counterlock:
				log("Considered %s pages, wrote %s upd, %s new, %s del (%s threads, %s in queue, %.1f pages/sec)" % (
					len(self.pages_crawled),
					self.pages_updated,
					self.pages_new,
					self.pages_deleted,
					threading.active_count() - 2,
					self.queue.qsize(),
					len(self.pages_crawled) / (nowtime - starttime),
					))
Example #55
	def status_thread(self):
		lastcommit = 0
		starttime = time.time()
		while not self.stopevent.is_set():
			self.stopevent.wait(self.status_interval)
			nowtime = time.time()
			with self.counterlock:
				log("Indexed %s messages so far (%s active threads, %s months still queued, %.1f msg/sec)" % (
					self.counter,
					threading.active_count() - 2 , # main thread + status thread
					self.queue.qsize(),
					self.counter / (nowtime - starttime),
					))
				# Commit every commit_interval messages
				if self.counter - lastcommit > self.commit_interval:
					lastcommit = self.counter
					self.conn.commit()
Example #56
def previous_song():
    queue = variables.get("queue", None)
    playing = variables.get("playing", None)
    if queue is None or playing is None:
        return "Something went wrong"
    queue_nr = queue.index(playing)
    if variables.get("elapsed", 0) > 4:
        # Replay the song
        song = playing
        log("Replaying song from beginning:" + str(song))
    else:
        # Play the previous song
        song = queue[queue_nr - 1]
    song_obj = library.get_song(song[0], song[1], song[2])
    server.audio.play(song_obj)

    return song[0] + ";" + song[1] + ";" + song[2]
Example #57
def start(name, key):
    bridge = _get_bridge(key)
    path = _get_path_run(name)
    kill_by_file(EDGE_NAME, path)
    cmd = [EDGE_NAME, '-r', '-d', name, '-a', '0.0.0.0', '-s', NETMASK, '-c', name, '-k', key, '-l', bridge]
    pid = popen(cmd)
    if not chkpid(pid):
        log('failed to start edge node')
        raise Exception('failed to start edge node')
    save_pid(path, pid)
    cmd = ['dhclient', '-q', name]
    popen(cmd)
    ret = chkaddr(name)
    if not ret:
        log('failed to start edge node, invalid address')
        raise Exception('failed to start edge node, invalid address')
    return ret
Example #58
	def crawl_single_message(self, listid, listname, year, month, msgnum):
		curs = self.conn.cursor()
		h = httplib.HTTPConnection(host="archives.postgresql.org",
								   port=80,
								   strict=True,
								   timeout=10)
		url = "/%s/%04d-%02d/msg%05d.php" % (
			listname,
			year,
			month,
			msgnum)
		h.putrequest("GET", url)
		h.putheader("User-agent", "pgsearch/0.2")
		h.putheader("Connection", "close")
		h.endheaders()
		resp = h.getresponse()
		txt = resp.read()
		h.close()

		if resp.status == 404:
			# Past the end of the month
			return False
		elif resp.status != 200:
			raise Exception("%s/%s/%s/%s returned status %s" % (listname, year, month, msgnum, resp.status))

		# Else we have the message!
		p = ArchivesParser()
		if not p.parse(txt):
			log("Failed to parse %s/%s/%s/%s" % (listname, year, month, msgnum))
			# We return true to move on to the next message anyway!
			return True
		curs.execute("INSERT INTO messages (list, year, month, msgnum, date, subject, author, txt, fti) VALUES (%(listid)s, %(year)s, %(month)s, %(msgnum)s, %(date)s, %(subject)s, %(author)s, %(txt)s, setweight(to_tsvector('pg', %(subject)s), 'A') || to_tsvector('pg', %(txt)s))", {
				'listid': listid,
				'year': year,
				'month': month,
				'msgnum': msgnum,
				'date': p.date,
				'subject': p.subject[:127],
				'author': p.author[:127],
				'txt': p.body,
				})
		with self.counterlock:
			self.counter += 1

		return True
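The INSERT above implies roughly the following messages table. This is a hedged reconstruction, not the original schema: the column types are assumptions, the varchar widths are inferred from the [:127] slices, and fti is the full-text search vector built by the setweight/to_tsvector expression.

CREATE_MESSAGES_SQL = """
CREATE TABLE messages (
    list    integer NOT NULL,
    year    integer NOT NULL,
    month   integer NOT NULL,
    msgnum  integer NOT NULL,
    date    timestamptz,
    subject varchar(128),
    author  varchar(128),
    txt     text,
    fti     tsvector
)
"""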