Code example #1
	def __train_epoch(self, epoch):
		# Keep track of the batch number.
		batch = 0

		# Training loss is the cumulative loss across an epoch.
		training_loss = 0

		# Process all training data.
		for images, labels in self.train_data:
			# Increment the batch number.
			batch += 1

			if batch % self.validate_every == 0:
				Logger.log(f"Epoch: {epoch}. Batch: {batch}. Training...", color='m')

			# Train on the batch.
			loss = self.__train_batch(images, labels)
			training_loss += loss

			if batch % self.validate_every == 0:
				# Average the training loss over the batches processed so far.
				avg_loss = training_loss / batch
				Logger.log(f"Training loss: {avg_loss:.3f}. Validating...", indent=1)

				self.__validate()

		# Save a checkpoint.
		self.__create_checkpoint(epoch)
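The `__train_batch` helper called above is not shown on this page. A minimal PyTorch-style sketch, assuming the trainer also holds `self.model`, `self.criterion`, and `self.optimizer` (assumed attribute names), might look like:

	def __train_batch(self, images, labels):
		# Forward pass: compute predictions and the batch loss.
		output = self.model(images)
		loss = self.criterion(output, labels)

		# Backward pass: reset gradients, backpropagate, and update weights.
		self.optimizer.zero_grad()
		loss.backward()
		self.optimizer.step()

		# Return a plain float so the caller can accumulate it.
		return loss.item()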
Code example #2
	def __validate(self):
		# Put the model into evaluation mode; this disables dropout.
		self.model.eval()

		# Keep track of the batch number.
		batch = 0

		# Keep track of the validation loss so we can average over the number of batches and compare
		# to our training loss.
		validation_loss = 0

		# Track how many we got right.
		num_correct = 0

		for images, labels in self.valid_data:
			# Increment the batch number.
			batch += 1

			# Validate against the batch.
			loss, correct = self.__validate_batch(images, labels)
			validation_loss += loss
			num_correct += correct

		# Average the validation loss over the number of batches.
		avg_loss = validation_loss / batch
		Logger.log(f"Validation loss: {avg_loss:.3f}.", indent=1)

		# Calculate the validation accuracy.
		accuracy = 100 * num_correct / self.valid_data.num_samples()
		Logger.log(f"Validation accuracy: {accuracy:.3f}%", indent=1)

		# Put model back into training mode.
		self.model.train()
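`__validate_batch` is likewise not shown; under the same PyTorch assumptions, a sketch that returns the batch loss and the number of correct predictions:

	def __validate_batch(self, images, labels):
		# Gradients are not needed for validation.
		with torch.no_grad():
			output = self.model(images)
			loss = self.criterion(output, labels)

			# Count predictions that match the labels.
			correct = (output.argmax(dim=1) == labels).sum().item()

		return loss.item(), correct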
Code example #3
def main(args):
    logger = Logger(args.output_dir)
    args.logger = logger
    trainer = Trainer(args)
    evaluator = Evaluator(trainer)
    for i_epoch in range(0, args.epoch + 1):

        # train
        log_dict = {
            'i_epoch': i_epoch,
            'train_losses': [],  # one entry per batch
            'test_bleus': []     # one entry per sample
        }
        trainer.train_one_epoch(log_dict)

        # evaluation and logging
        logger.log('epoch %d' % i_epoch)
        evaluator.bleu(log_dict)
        evaluator.sample_translation()
        log_dict_mean = {
            'i_epoch': log_dict['i_epoch'],
            'train_loss': np.mean(log_dict['train_losses']),
            'test_bleu': np.mean(log_dict['test_bleus'])
        }
        logger.dump(log_dict_mean)
        trainer.save_best(log_dict_mean)
        logger.log('-' * 10)
Code example #4
File: bc_ws.py Project: choldrim/bearychat_irc
    def handle_channel_msg(self, data):

        # Normal messages used to lack a subtype, but they now include one,
        # so we can no longer filter on it:
        #if data.get("subtype"):
        #    return

        # filter mismatch channel
        c_id = data.get("vchannel_id")
        if c_id != self.bc_default_channel:
            return

        sender_id = ""
        name = ""

        # get sender
        if data.get("subtype") == "robot":
            sender_id = data.get("robot_id")
            name = Cache.get_robot_true_name(sender_id)
        else:
            sender_id = data.get("uid")
            name = Cache.get_user_en_name(sender_id)

        # filter sender
        if sender_id in self.id_filter:
            Logger.log("sender %s (%s) in the filter list, abort msg" % (name, sender_id))
            return

        msg = data.get("text")

        # Forward only messages carrying the enable prefix.
        if msg.startswith(self.msg_enable_pre):
            msg = msg.split(self.msg_enable_pre, 1)[1]
            self.send_irc_msg(name, msg)
        else:
            Logger.log("bc msg (%s) is not in the expected format, abort forwarding" % msg)
Code example #5
File: bc_ws.py Project: choldrim/bearychat_irc
 def send_ping(self, ws):
     msg_id = 0
     while not self.exit_all:
         try:
             msg = '{"type":"ping","call_id":%d}' % msg_id
             Logger.log_bc_ws(">>> %s" % msg)
             ws.send(msg)
             msg_id += 1
             time.sleep(5)
         except Exception as exc:
             Logger.log("[send_ping]catch exception: %s" % str(exc))
             break
Code example #6
File: bc_ws.py Project: choldrim/bearychat_irc
    def start_server(self):
        api = BC_API()
        api.login()
        ws_url = api.get_ws_url()

        ws = create_connection(ws_url)
        Logger.log("connected to bc server")

        # send ping thread
        threading.Thread(target=self.send_ping, args=(ws, )).start()

        # loop worker
        self.server_loop(ws)
Code example #7
File: bc_ws.py Project: choldrim/bearychat_irc
 def server_loop(self, ws):
     while not self.exit_all:
         result = ws.recv()
         if len(result):
             self.connect_live = True
             data = json.loads(result)
             Logger.log_bc_ws("<<< %s" % result)
             self.handle_msg(data)
         else:
             Logger.log("recv empty msg, connected: ", ws.connected)
             if not ws.connected:
                 Logger.log("**=** recv empty msg, ws conn may be kicked by bc server")
                 break
Code example #8
    def exe(command):
        if not len(command):
            return False

        try:
            # shell=True lets the caller pass a full command line as one string.
            process = subprocess.Popen(command,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       shell=True)
            (out, error) = process.communicate()
            out = out.decode("utf-8")
            if len(out):
                Logger.log(out)
            # Without stderr=subprocess.PIPE this branch could never fire.
            if error:
                Logger.error(error.decode("utf-8"))
        except Exception as error:
            Logger.error(error)
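Because the command runs with `shell=True`, callers can pass a complete command line, including pipes, as a single string. A hypothetical call:

exe("du -sh /var/log | sort -h")  # stdout is forwarded to Logger.log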
Code example #9
    def serve(self):
        settings = dict(
            template_path=rel('./web/templates'),
            static_path=rel('./web'),
            debug=True
        )
        app = Application([
            (r"/", Homepage, dict(kiwi=self.kiwi)),
            (r"/emotion", Emotions, dict(kiwi=self.kiwi)),
            (r"/freehand", Freehand, dict(kiwi=self.kiwi)),
            (r"/independant", Independant, dict(kiwi=self.kiwi)),
            (r"/settings", Settings, dict(kiwi=self.kiwi)),
        ], **settings)

        http_server = tornado.httpserver.HTTPServer(app)
        # config.get() returns a string; cast to int for the port.
        port = int(config.get('server', 'port'))
        http_server.listen(port=port)
        Logger.log("... kiwi GUI is serving under 127.0.0.1:{}.".format(port))
Code example #10
File: starter.py Project: rusko124/portf_view
class ProcWrapper:
    def __init__(self, args, id, logger_name, sleep_timeout=80):
        self.__args = args
        self.__id = id
        self.__name = logger_name
        self.__sleep_time = 0
        self.__last_sleep_time = 0
        self.__sleep_timeout = sleep_timeout
        self.proc_logger = Logger(name=self.__name, color=True)

    def run(self, shell=True):
        self.__process = psutil.Popen(self.__args, shell=shell)
        self.proc_logger.log("%s proc start with pid %s" % (self.__name, self.__process.pid))

    @property
    def is_run(self):
        status = self.__process.status()
        if status == psutil.STATUS_SLEEPING:
            if self.__last_sleep_time:
                self.__sleep_time += time() - self.__last_sleep_time
            self.__last_sleep_time = time()
            # self.proc_logger.log_console("Sleep time: %s" % self.__sleep_time, status='!')
        else:
            self.__last_sleep_time = 0
            self.__sleep_time = 0
        # if self.__name == "Consumer" and status != psutil.STATUS_SLEEPING:
        #    self.proc_logger.log_console("Status %s" % status, status='!')
        if status in (psutil.STATUS_STOPPED, psutil.STATUS_DEAD, psutil.STATUS_ZOMBIE) \
                or self.__sleep_time > self.__sleep_timeout:
            return False
        return True

    def stop(self):
        try:
            self.__process.terminate()
        except OSError:
            pass
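A minimal supervision loop over `ProcWrapper`; the worker script path here is hypothetical:

import time

wrapper = ProcWrapper(["./worker.sh"], 0, "Worker")
wrapper.run(shell=False)

# Poll until the process stops, dies, or sleeps past its timeout.
while wrapper.is_run:
    time.sleep(1)
wrapper.stop()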
Code example #11
File: bearychat.py Project: choldrim/bearychat_irc
 def __checking_live_thread(self):
     while True:
         time.sleep(20)
         Logger.log("checking server live...")
         if self.bc_server.connect_live:
             Logger.log("server is alive")
             self.bc_server.connect_live = False
         else:
             Logger.log("server is not alive, restart server...")
             self.__restart()
Code example #12
File: starter.py Project: rusko124/portf_view
class RunManager:
    def __init__(self, consumer_sh=None, producer_sh=None):
        self.run_logger = Logger(name="RunManager", logfile="data/logs/runmanager.log")
        self.__producers_list = []
        self.__consumers_list = []
        self.__consumer_sh_path = consumer_sh
        self.__producer_sh_path = producer_sh

    def start(self, producers_count, consumers_count, from_systemd=False):
        self.run_logger.log("Start producers. Count of producers %s" % producers_count)
        if from_systemd:
            with open("data/systemd/consumer.service") as fd:
                consumer_data = fd.read()
            with open("data/systemd/producer.service") as fd:
                producer_data = fd.read()

            for index in range(producers_count):
                with open("/etc/systemd/system/producer%s.service" % (index + 1), "w") as fd:
                    fd.write(producer_data)
                os.system("systemctl start producer%s.service" % (index + 1))
            for index in range(consumers_count):
                with open("/etc/systemd/system/consumer%s.service" % (index + 1), "w") as fd:
                    fd.write(consumer_data)
                os.system("systemctl start consumer%s.service" % (index + 1))
            return

        if self.__consumer_sh_path is None or self.__producer_sh_path is None:
            self.run_logger.log_console("Shell script does not set", status='~')
            return

        for index in range(producers_count):
            pr = ProcWrapper([self.__producer_sh_path], index, "Producer")
            pr.run(shell=False)
            self.__producers_list.append(pr)

        for index in range(consumers_count):
            pr = ProcWrapper([self.__consumer_sh_path], index, "Consumer")
            pr.run(shell=True)
            self.__consumers_list.append(pr)

        while True:
            delete_list = []
            for proc in self.__consumers_list:
                if not proc.is_run:
                    proc.proc_logger.log("Proc end")
                    proc.stop()
                    delete_list.append(proc)

            for proc in delete_list:
                self.__consumers_list.remove(proc)

            to_run = consumers_count - len(self.__consumers_list)
            for index in range(to_run):
                pr = ProcWrapper([self.__consumer_sh_path], index, "Consumer")
                pr.run(shell=True)
                self.__consumers_list.append(pr)

    def stop(self, producers_count, consumers_count, from_systemd=True):
        self.run_logger.log("Stop producers. Count of producers %s" % producers_count)
        if from_systemd:
            for index in range(producers_count):
                os.system("systemctl stop producer%s.service" % (index + 1))

            for index in range(consumers_count):
                os.system("systemctl stop consumer%s.service" % (index + 1))

    def delete(self, producers_count, consumers_count, from_systemd=True):
        self.run_logger.log("Delete producers and counters")
        if from_systemd:
            for index in range(producers_count):
                os.system("rm /etc/systemd/system/producer%s.service" % (index + 1))

            for index in range(consumers_count):
                os.system("rm /etc/systemd/system/consumer%s.service" % (index + 1))
Code example #13
File: main.py Project: abhay-venkatesh/f1-cv
class MNISTF1Trainer:
    def __init__(self, config):
        self.config = config
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        self.logger = Logger(config["stats folder"])

    def _load_checkpoint(self, model):
        start_epochs = 0
        if self.config["checkpoint path"]:
            start_epochs = int(Path(self.config["checkpoint path"]).stem)
            model.load_state_dict(
                torch.load(Path(self.config["checkpoint path"])))
        return start_epochs

    def _save_checkpoint(self, epoch, model, retain=False):
        checkpoint_filename = str(epoch + 1) + ".ckpt"
        checkpoint_path = Path(self.config["checkpoints folder"],
                               checkpoint_filename)

        torch.save(model.state_dict(), checkpoint_path)

        if not retain:
            prev_checkpoint_filename = str(epoch) + ".ckpt"
            prev_checkpoint_path = Path(self.config["checkpoints folder"],
                                        prev_checkpoint_filename)
            if os.path.exists(prev_checkpoint_path):
                os.remove(prev_checkpoint_path)

    def run(self):
        # Training set
        trainset = MNISTF1(
            self.config["dataset path"], train=True, download=True)
        train_loader = DataLoader(
            trainset, shuffle=True, batch_size=self.config["batch size"])

        # Validation set
        valset = MNISTF1(
            self.config["dataset path"], train=False, download=True)
        val_loader = DataLoader(valset, batch_size=self.config["batch size"])

        # Model
        model = FFNetF1().to(self.device)

        # Load checkpoint if exists
        start_epochs = self._load_checkpoint(model)

        # Constants
        num_positives = train_loader.dataset.num_positives

        # Primal variables
        tau = torch.rand(
            len(train_loader.dataset), device=self.device, requires_grad=True)
        eps = torch.rand(1, device=self.device, requires_grad=True)
        w = torch.rand(1, device=self.device, requires_grad=True)

        # Dual variables
        lamb = torch.zeros(len(train_loader.dataset), device=self.device)
        lamb.fill_(0.001)
        mu = torch.zeros(1, device=self.device)
        mu.fill_(0.001)
        gamma = torch.zeros(len(train_loader.dataset), device=self.device)
        gamma.fill_(0.001)

        # Primal Optimization
        var_list = [{
            "params": model.parameters(),
            "lr": self.config["learning rate"]
        }, {
            "params": tau,
            "lr": self.config["eta_tau"]
        }, {
            "params": eps,
            "lr": self.config["eta_eps"]
        }, {
            "params": w,
            "lr": self.config["eta_w"]
        }]
        optimizer = torch.optim.SGD(var_list)

        # Dataset iterator
        train_iter = iter(train_loader)

        # Count epochs and steps
        epochs = 0
        step = 0

        # Cache losses
        total_loss = 0
        total_t1_loss = 0
        total_t2_loss = 0

        # Train
        for outer in tqdm(range(start_epochs, self.config["n_outer"])):
            model.train()

            for inner in tqdm(range(self.config["n_inner"])):
                step += 1

                # Sample
                try:
                    X, Y = next(train_iter)
                except StopIteration:
                    train_iter = iter(train_loader)
                    X, Y = next(train_iter)

                # Forward computation
                X, Y = X.to(self.device), Y.to(self.device)
                y0_, y1_ = model(X)
                y0 = Y[:, 0]
                y1 = Y[:, 1]
                i = Y[:, 2]

                # Compute loss
                t1_loss = F.cross_entropy(y0_, y0)
                t2_loss = lagrange(num_positives, y1_, y1, w, eps, tau[i],
                                   lamb[i], mu, gamma[i])
                loss = t1_loss + (self.config["beta"] * t2_loss)

                # Store losses for logging
                total_loss += loss.item()
                total_t1_loss += t1_loss.item()
                total_t2_loss += t2_loss.item()

                # Backpropagate
                loss.backward()
                optimizer.step()
                optimizer.zero_grad()

                # Project eps to ensure non-negativity
                eps.data = torch.max(
                    torch.zeros(1, dtype=torch.float, device=self.device),
                    eps.data)

                # Log and validate per epoch
                if (step + 1) % len(train_loader) == 0:
                    epochs += 1

                    # Log loss
                    avg_loss = total_loss / len(train_loader)
                    avg_t1_loss = total_t1_loss / len(train_loader)
                    avg_t2_loss = total_t2_loss / len(train_loader)
                    total_loss = 0
                    total_t1_loss = 0
                    total_t2_loss = 0
                    self.logger.log("epochs", epochs, "loss", avg_loss)
                    self.logger.log("epochs", epochs, "t1loss", avg_t1_loss)
                    self.logger.log("epochs", epochs, "t2loss", avg_t2_loss)

                    # Validate
                    model.eval()
                    total = 0
                    correct = 0
                    with torch.no_grad():
                        for X, Y in val_loader:
                            X, Y = X.to(self.device), Y.to(self.device)
                            y0_, y1_ = model(X)
                            y0 = Y[:, 0]
                            y1 = Y[:, 1]
                            _, predicted = torch.max(y0_.data, 1)
                            total += y0.size(0)
                            correct += (predicted == y0).sum().item()
                    accuracy = 100. * correct / total
                    self.logger.log("epochs", epochs, "accuracy", accuracy)

                    # Graph
                    self.logger.graph()

                    # Checkpoint
                    self._save_checkpoint(epochs, model)

            # Dual Updates
            with torch.no_grad():
                mu_cache = 0
                lamb_cache = torch.zeros_like(lamb)
                gamma_cache = torch.zeros_like(gamma)
                for X, Y in tqdm(train_loader):
                    # Forward computation
                    X, Y = X.to(self.device), Y.to(self.device)
                    _, y1_ = model(X)
                    y1 = Y[:, 1]
                    i = Y[:, 2]

                    # Cache for mu update
                    mu_cache += tau[i].sum()

                    # Lambda and gamma updates
                    y1 = y1.float()
                    y1_ = y1_.view(-1)

                    lamb_cache[i] += (
                        self.config["eta_lamb"] * (y1 * (tau[i] - (w * y1_))))
                    gamma_cache[i] += (
                        self.config["eta_gamma"] * (y1 * (tau[i] - eps)))

                # Update data
                mu.data += self.config["eta_mu"] * (mu_cache - 1)
                lamb.data += lamb_cache
                gamma.data += gamma_cache
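Every config key the trainer reads appears in the code above; a sketch of a matching `config` dict (the values are placeholders, not taken from the project):

config = {
    "stats folder": "stats",
    "checkpoint path": "",  # empty: train from scratch
    "checkpoints folder": "checkpoints",
    "dataset path": "data",
    "batch size": 64,
    "learning rate": 1e-3,
    "eta_tau": 1e-3, "eta_eps": 1e-3, "eta_w": 1e-3,
    "eta_lamb": 1e-3, "eta_gamma": 1e-3, "eta_mu": 1e-3,
    "beta": 1.0,
    "n_outer": 10,
    "n_inner": 1000,
}
trainer = MNISTF1Trainer(config)
trainer.run()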
Code example #14
File: QPidController.py Project: Vivero/KerbalPie
 def _log(self, log_message, log_type='info', log_data=None):
     Logger.log(QPidController.subsys, log_message, log_type, log_data)
Code example #15
class ProxyManager(Cacheable):
    def __init__(self, config, timeout=None, cache_url=None):
        self.__config = config
        self.__timeout = timeout if timeout else 5
        self.__logger = Logger(name="ProxyManager", color=True)
        self.__check_counter = 0
        self.__check_progress_bar = None
        self.__check_proxy_count = 0
        self.__proxy_load_time = 0
        self.__pool = []
        super().__init__(cache_url)

    def crawl_proxy(self, no_check=False):
        db = DBWorker(self.__config)
        db.run_sql("TRUNCATE TABLE proxy;")
        self.__logger.log_console("Fetch proxy from %s" %
                                  self.__config["producer"]["proxy_url"],
                                  status='+')
        proxy_data = None
        try:
            proxy_data = [
                x.rstrip("\n\r") for x in requests.get(
                    self.__config["producer"]["proxy_url"]).text.split("\n")
            ]
        except requests.exceptions.ReadTimeout:
            self.__logger.log(
                "Proxy url request. Timeout",
                status="~",
                mformat=["%s" % self.__config["producer"]["proxy_url"]])
            return False

        except requests.exceptions.ConnectionError:
            self.__logger.log(
                "Connection Error to proxy url. Down",
                status="~",
                mformat=["%s" % self.__config["producer"]["proxy_url"]])
            return False

        except requests.exceptions.InvalidHeader:
            self.__logger.log(
                "Invalid header from proxy server. Invalid header return",
                status="~",
                mformat=["%s" % self.__config["producer"]["proxy_url"]])
            return False
        except requests.exceptions.ContentDecodingError:
            self.__logger.log(
                "Proxy url request. Decode error",
                status="~",
                mformat=["%s" % self.__config["producer"]["proxy_url"]])
            # Bail out here too, otherwise proxy_data stays None and len() below fails.
            return False
        except IOError:
            self.__logger.log(
                "Can't open proxy fetch file",
                status="~",
                mformat=["%s" % self.__config["producer"]["proxy_url"]])
            return False
        self.__logger.log_console("Fetch %s proxies" % len(proxy_data),
                                  status='+')

        if no_check:
            self.__check_proxy_count = len(proxy_data)
            workers = []
            for chunk in chunks(proxy_data, 10000):
                pr = Process(target=to_db, args=(
                    chunk,
                    self.__config,
                ))
                pr.start()
                workers.append(pr)
            self.__logger.log_console("Processes created. Count %s" %
                                      (len(workers)),
                                      status='+')
            self.__check_progress_bar = self.__logger.progress_bar(status='?')
            self.__check_proxy_count = len(proxy_data)
            next(self.__check_progress_bar)
            for worker in workers:
                worker.join()
                self.__check_counter += 1000
                if self.__check_counter % math.floor(
                        self.__check_proxy_count / 100) == 0:
                    next(self.__check_progress_bar)
            try:
                next(self.__check_progress_bar)
            except StopIteration:
                pass
            self.__logger.log_console("No check data saved")
            return True

        self.__logger.log_console("Check proxies", status='+')
        checked_proxy = 0
        self.__check_progress_bar = self.__logger.progress_bar(status='?')
        self.__check_proxy_count = len(proxy_data)
        next(self.__check_progress_bar)

        loop = asyncio.get_event_loop()
        for chunk in chunks(proxy_data, 100):
            results = loop.run_until_complete(self._check_many(chunk, loop))
            for item in results[0]:
                result_check_record = item.result()
                if result_check_record["result"]:
                    db.run_sql(sql_insert_proxy, [
                        result_check_record["ip"], result_check_record["port"],
                        result_check_record["elapsed"]
                    ])
                    checked_proxy += 1

        try:
            next(self.__check_progress_bar)
        except StopIteration:
            pass
        self.__logger.log_console("Checked %s/%s" %
                                  (checked_proxy, len(proxy_data)))
        return True

    async def _check_many(self, proxies, loop):
        tasks = [
            loop.create_task(self.async_check(proxy)) for proxy in proxies
        ]
        return await asyncio.wait(tasks)

    async def async_check(self, proxy):
        start_time = time.time()
        try:
            ip, port = proxy.split(":")
            pr_connector = ProxyConnector(remote_resolve=True,
                                          verify_ssl=False)
            with aiohttp.ClientSession(connector=pr_connector, request_class=ProxyClientRequest) as session, \
                    aiohttp.Timeout(self.__timeout):
                async with session.get('http://www.httpbin.org/get?show_env=1',
                                       proxy="socks5://%s:%s" %
                                       (ip, port)) as resp:
                    await resp.json()

            result_dict = {
                'ip': ip,
                'port': port,
                'result': True,
                'elapsed': time.time() - start_time,
                'exc': None
            }
        except BaseException as exc:
            result_dict = {
                'ip': None,
                'port': None,
                'result': False,
                'elapsed': -1,
                'exc': exc
            }
        self.__check_counter += 1
        if self.__check_counter % math.floor(
                self.__check_proxy_count / 100) == 0:
            next(self.__check_progress_bar)
        return result_dict

    def __load_proxies(self):
        db = DBWorker(self.__config)
        with db.cursor() as cursor:
            cursor.execute("select*from proxy order by delay;")
            self.__proxy_load_time = time.time()
            return cursor.fetchall()

    def get(self):
        """
        Returns random proxy from the pool.
        """
        if not self.__pool or time.time() - self.__proxy_load_time > 30 * ONE_MINUTE:
            while True:
                pool = self.__load_proxies()
                self.__logger.log('Load proxy complete . Length %s' %
                                  len(pool))
                if len(pool) > 1000:
                    self.__pool = pool
                    break
                time.sleep(ONE_MINUTE)

        result = random.choice(self.__pool)
        return result

    def remove_from_pool(self, proxy):
        self.__pool.remove(proxy)

    def get_all(self):
        """
        Returns proxy, real IP address, timezone & language from the pool.
        """
        while True:
            try:
                _, ip, port, delay = self.get()
                answer = requests.get('http://httpbin.org/get?show_env=1',
                                      proxies=dict(http="socks5://%s:%s" %
                                                   (ip, port)),
                                      timeout=self.__timeout).json()
                check_proxy = requests.get('http://www.iconspedia.com/',
                                           proxies=dict(http="socks5://%s:%s" %
                                                        (ip, port)),
                                           timeout=5)
                proxy = "%s:%s" % (ip, port)
                if "http" not in proxy:
                    proxy = "http://" + proxy
                ip_address = answer["origin"]
                timezone, country, language = self.get_timezone_and_language(
                    ip_address)
                if country in BAD_COUNTRIES:
                    raise BadProxy
                self.__logger.log('Chosen proxy: %s with external ip %s' %
                                  (proxy, ip_address))
                #open("/root/clickbot2/check_connect/good_"+str(os.getpid())+".txt","w")
                return proxy, ip_address, timezone, language
            except BadProxy:
                #open("/root/clickbot2/check_connect/bad_proxy_error_"+str(os.getpid())+".txt","w")
                print("Bad proxy")
            except requests.exceptions.ReadTimeout:
                #open("/root/clickbot2/check_connect/timeout_error_"+str(os.getpid())+".txt","w")
                print("Timeout")
            except requests.exceptions.ConnectionError:
                #open("/root/clickbot2/check_connect/connection_error_"+str(os.getpid())+".txt","w")
                print("Connection error")
            except requests.exceptions.InvalidHeader:
                #open("/root/clickbot2/check_connect/header_error_"+str(os.getpid())+".txt","w")
                print("InvalidHeader")
            except requests.exceptions.ContentDecodingError:
                #open("/root/clickbot2/check_connect/decoding_error_"+str(os.getpid())+".txt","w")
                print("Decoding error")

    @cache()
    def get_timezone_and_language(self,
                                  ip_address,
                                  default_time_zone_offset=0,
                                  default_language='en'):
        """
        Returns timezone for IP address or default if nothing was found.
        """
        db = DBWorker(self.__config)
        value = convert_ip_address(ip_address)
        with db.cursor() as cursor:
            cursor.execute(
                'SELECT time_zone_offset, country_code, language FROM ip_data WHERE ip_range @> %s::BIGINT',
                [value])
            try:
                time_zone_offset, country, language = cursor.fetchone()
                if time_zone_offset is None:
                    time_zone_offset = default_time_zone_offset
                if language is None:
                    language = default_language
                self.__logger.log('Timezone offset for %s: %s' %
                                  (ip_address, time_zone_offset))
                self.__logger.log('Language for %s: %s' %
                                  (ip_address, language))
                return time_zone_offset, country, language
            except (ProgrammingError, TypeError):
                # No results to fetch.
                return default_time_zone_offset, None, default_language
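`crawl_proxy` depends on a `chunks` helper that is not part of this snippet; a minimal sketch consistent with how it is called above:

def chunks(items, size):
    # Yield successive slices of at most `size` elements.
    for start in range(0, len(items), size):
        yield items[start:start + size]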
Code example #16
 def _log(self, log_message, log_type='info', log_data=None):
     Logger.log(KPSerialInterface.subsys, log_message, log_type, log_data)
Code example #17
 def operate_independant(self):
     Logger.log("Alright, then i'll do what i want :-P")
     return 500
Code example #18
File: hdWE.py Project: enzyx/hdWE
        iterations[-1].target_number_of_segments  = INITIAL_TARGET_NUMBER_OF_SEGMENTS
        for this_bin in iterations[-1].bins:
            this_bin.target_number_of_segments = iterations[-1].target_number_of_segments                
            this_bin.sample_region = iterations[-1].isInSampleRegion(this_bin.getCoordinateIds()) 

    #TODO: check if all files are present
else:
    iterations.append(initiate.createInitialIteration(STARTING_STRUCTURES,
                                                      WORKDIR, 
                                                      JOBNAME,
                                                      INITIAL_TARGET_NUMBER_OF_SEGMENTS, 
                                                      INITIAL_BOUNDARIES, 
                                                      INITIAL_SAMPLE_REGION,
                                                      md_module,
                                                      START_BIN_COORDINATE_IDS))
    logger.log(iterations[0], CONFIGFILE)  

# Create an instance of the resampling module
resampler = resampling.Resampling(md_module, RESAMPLING_MODE, CLOSEST_MERGE_THRESHOLD, 
                                  PRIMARY_COORDINATE, SPLIT_FORWARD_NUMBER_OF_CHILDREN, 
                                  SPLIT_REGION, FRONT_INTERVAL)

# Handle the deletion/compression of MD output files 
cleaner = cleanup.Cleanup(md_module, NUMBER_OF_THREADS, COMPRESS_ITERATION, 
                          COMPRESS_CLOSEST_MASK, KEEP_COORDS_FREQUENCY, 
                          KEEP_COORDS_SEGMENTS, DEBUG)

#############################
#         Main Loop         #
#############################
for iteration_counter in range(iterations[-1].getId() + 1, MAX_ITERATIONS + 1):
Code example #19
import os

from lib.logger import Logger
from lib.gps import GPSReader
# The module paths below are assumptions; the original snippet does not show
# where ImuReader, Lidar, ImageManager, and DatabaseManager are defined.
from lib.imu import ImuReader
from lib.lidar import Lidar
from lib.image_manager import ImageManager
from lib.database_manager import DatabaseManager

def drone_signal():
	input()
	return True

def drone_next_position():
	print("Going to next position")


if __name__ == '__main__':
	file_path = os.path.dirname(__file__)
	file_path = os.path.abspath(os.path.join(file_path, "log.log"))
	log = Logger(file_path)
	log.log('Initializing objects', level=1, days_to_remain=5)
	imu = ImuReader()
	lidar = Lidar()
	image_manager = ImageManager()
	db_manager = DatabaseManager()
	gps = GPSReader()
	savePath = os.path.dirname(__file__)
	picID = "test_"
	iteration = 1
	log.log('Objects successfully initialized', level=1, days_to_remain=5)

	while True:

		if drone_signal() is True:
			log.log('Drone signal received', level=1, days_to_remain=5)
			status = image_manager.aquire_picture(picID, savePath, iteration)['status']
Code example #20
File: main.py Project: gormux/merlin8282
#!/usr/bin/env python3

from lib.checks import Checker
from lib.logger import Logger
from lib.system_functions import System

# Initialize Checker
checker = Checker()
# Check python version
checker.check_python_version()

# Initialize Logger
logger = Logger()

# Initialize System Functions
system = System()

# Start the bot
if __name__ == "__main__":
    logger.log()
    system.quit()
Code example #21
 def _log(self, log_message, log_type='info', log_data=None):
     Logger.log(KPMissionProgramsDatabase.subsys, log_message, log_type,
                log_data)
Code example #22
 def _log(self, log_message, log_type='info', log_data=None):
     Logger.log(KPSerialInterface.subsys, log_message, log_type, log_data)
Code example #23
 def __init__(self, start_mode):
     Logger.log('Initializing the kiwi system, press Ctrl-C to quit...')
     SoundEffect.play('hello')
     self.operation_mode = config.get('kiwi', 'start_mode')
Code example #24
 def set_operation_mode(self, mode, command=None):
     Logger.log('Received order for operation-mode change. Going to: ' +
                mode + ' mode...')
     self.operation_mode = mode
     self.command_to_execute = command
Code example #25
 def __init__(self):
     self.logger = Logger()
     self.main_loop = MainLoop()
     self.usb = Usb()
     self.action = Action()
     Logger.log('Init RemoteApp', True)
Code example #26
 def _log(self, log_message, log_type='info', log_data=None):
     Logger.log(KPFlightController.subsys, log_message, log_type, log_data)
Code example #27
 def operate_on_command(self):
     Logger.log("Yes, master. I'm executing your order: " +
                self.command_to_execute)
     self.operation_mode = 'waiting_for_command'
     return 500
Code example #28
	def __create_checkpoint(self, epoch):
		filepath = os.path.join(self.save_dir, "checkpoint_{}.pth".format(epoch))

		Checkpoint.create(self.model, filepath)

		Logger.log('Model saved.', color='r')
Code example #29
 def _log(self, log_message, log_type='info', log_data=None):
     Logger.log(KPMissionProgramsDatabase.subsys, log_message, log_type, log_data)