Example #1
def main():
    print("\n\n")
    args = init_args()
    if args.host is None:
        dev = discover.discover(args.device, timeout=args.timeout)
    else:
        if isinstance(args.type, str):
            dev_type = int(args.type, 0)
        else:
            dev_type = args.type
        dev = discover.gendevice(dev_type, args.host,
                                 bytearray.fromhex(args.mac))

    if dev is None:
        print('No device found')
        exit(1)

    # print(dev.host)
    # print(''.join('{:02x}'.format(x) for x in dev.mac))

    if args.timezone:
        utils.set_default_timezone(timezone(args.timezone))

    if network_tools.check_network_connection(url=args.network_test_url,
                                              timeout=args.network_timeout):
        print("{} Network status is good!".format(utils.get_datetime_string()))
    else:
        print("{} Network status is disconnected\nreset cable modem".format(
            utils.get_datetime_string()))
        dev.auth()
        print("Power Off")
        dev.set_power(False)
        time.sleep(2)
        print("Power On")
        dev.set_power(True)

        print('wait for network connected')
        for i in range(0, 300, 5):
            print('check network connection status')
            args.network_test_url = 'http://clients3.google.com/generate_204'
            if network_tools.check_network_connection(
                    url=args.network_test_url, timeout=args.network_timeout):
                print('network connected')
                targets = {}
                if args.slack:
                    targets['slack'] = args.slack

                status_notifier.notify('Network recovered', args.name, targets)
                exit(0)
            else:
                time.sleep(5)
        print('network recovery failed')
        exit(1)
Example #2
    def get_pending_reminders(self, count, timestamp):
        log.debug("Fetching pending reminders")
        c = self.dbConn.cursor()
        results = []
        for row in c.execute(
                '''
			SELECT rm.ID, rm.Source, rm.RequestedDate, rm.TargetDate, rm.Message, rm.User, rm.Defaulted, us.TimeZone
			FROM reminders rm
				LEFT JOIN user_settings us
					ON us.User = rm.User
			WHERE rm.TargetDate < ?
			ORDER BY rm.TargetDate
			LIMIT ?
			''', (utils.get_datetime_string(timestamp), count)):
            reminder = Reminder(
                source=row[1],
                target_date=utils.parse_datetime_string(row[3]),
                message=row[4],
                user=row[5],
                db_id=row[0],
                requested_date=utils.parse_datetime_string(row[2]),
                defaulted=row[6] == 1,
                timezone=row[7])
            results.append(reminder)

        log.debug(f"Found reminders: {len(results)}")
        return results
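Note: the reminder and cakeday snippets on this page all lean on a small set of utils datetime helpers that are never shown. A minimal sketch of what they could look like follows; the signatures and the default format string are assumptions inferred from how the helpers are called in these examples, not the project's actual code.

from datetime import datetime

DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"  # assumed default; callers pass format_string when they need another one

def get_datetime_string(date_time, format_string=DATETIME_FORMAT):
    # Render a datetime as a sortable string for storage and SQL comparisons.
    if date_time is None:
        return ""
    return date_time.strftime(format_string)

def parse_datetime_string(date_time_string, format_string=DATETIME_FORMAT):
    # Inverse of get_datetime_string; tolerate empty values from the database.
    if not date_time_string:
        return None
    return datetime.strptime(date_time_string, format_string)

def datetime_now():
    # Current time truncated to whole seconds, so round trips through the
    # string form stay lossless (assumption).
    return datetime.utcnow().replace(microsecond=0)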
Example #3
    def save_reminder(self, reminder):
        if not isinstance(reminder, Reminder):
            return False

        c = self.dbConn.cursor()
        if reminder.db_id is not None:
            log.debug(f"Updating reminder: {reminder.db_id}")
            try:
                c.execute(
                    '''
					UPDATE reminders
					SET Source = ?,
						RequestedDate = ?,
						TargetDate = ?,
						Message = ?,
						User = ?,
						Defaulted = ?
					WHERE ID = ?
				''', (reminder.source, utils.get_datetime_string(reminder.requested_date),
                utils.get_datetime_string(reminder.target_date), reminder.message,
                reminder.user, reminder.defaulted, reminder.db_id))
            except sqlite3.IntegrityError as err:
                log.warning(f"Failed to update reminder: {err}")
                return False
        else:
            log.debug("Saving new reminder")
            try:
                c.execute(
                    '''
					INSERT INTO reminders
					(Source, RequestedDate, TargetDate, Message, User, Defaulted)
					VALUES (?, ?, ?, ?, ?, ?)
				''', (reminder.source, utils.get_datetime_string(reminder.requested_date),
                utils.get_datetime_string(reminder.target_date), reminder.message,
                reminder.user, reminder.defaulted))
            except sqlite3.IntegrityError as err:
                log.warning(f"Failed to save reminder: {err}")
                return False

            if c.lastrowid is not None:
                reminder.db_id = c.lastrowid
                log.debug(f"Saved to: {reminder.db_id}")

        self.dbConn.commit()

        return True
Example #4
def test(args):
    if not args.model:
        print('Need a pretrained model!')
        return

    if not args.color_labels:
        print('Need to specify color labels')
        return

    resize_img = args.image_width is not None and args.image_height is not None

    # check if output dir exists
    output_dir = args.output_dir if args.output_dir else 'test-{}'.format(utils.get_datetime_string())
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # load model
    if args.network == 'unet':
        network = networks.UNet
    elif args.network == 'triangle':
        network = networks.TriangleNet
    else:
        print('Unknown network type: {}'.format(args.network))
        return

    model = network(args.unet_layers, 3, len(args.color_labels), groups=args.groups)
    model.load_state_dict(torch.load(args.model))
    model = model.eval()

    if not args.cpu:
        model.cuda()

    # iterate all images with one by one
    transform = torchvision.transforms.ToTensor()
    for filename in os.listdir(args.dataroot):
        filepath = os.sep.join([args.dataroot, filename])
        with open(filepath, 'rb') as f:
            img = Image.open(f)
            if resize_img:
                img = img.resize((args.image_width, args.image_height))
            img = transform(img)
            img = img.view(1, *img.shape)
            img = Variable(img)
        if not args.cpu:
            img = img.cuda()
        output = model(img)
        _, c, h, w = output.data.shape
        output_numpy = output.data.numpy()[0] if args.cpu else output.data.cpu().numpy()[0]
        output_argmax = numpy.argmax(output_numpy, axis=0)
        out_img = numpy.zeros((h, w, 3), dtype=numpy.uint8)
        for i, color in enumerate(args.color_labels):
            out_img[output_argmax == i] = numpy.array(args.color_labels[i], dtype=numpy.uint8)
        out_img = Image.fromarray(out_img)
        seg_filepath = os.sep.join([output_dir, filename[:filename.rfind('.')]+'.png'])
        out_img.save(seg_filepath)
        print('{} is exported!'.format(seg_filepath))
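Note: args.color_labels is indexed by class id and each entry is written into the segmentation image as an RGB triple. A hypothetical two-class setup (values purely illustrative, not from the original project) would look like:

# Illustrative only: class 0 rendered as black, class 1 as white.
args.color_labels = [(0, 0, 0), (255, 255, 255)]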
Example #5
def check_network_connection(url='http://clients3.google.com/generate_204',
                             timeout=5):
    try:
        print('{} Sending request to {}'.format(utils.get_datetime_string(),
                                                url))
        response = urllib.request.urlopen(url, timeout=timeout)

        status = response.status
    except KeyboardInterrupt:
        sys.exit(0)
    except Exception as e:
        # Generally using a catch-all is a bad practice but
        # I think it's ok in this case
        print(e)
        print('{} Request to {} failed'.format(utils.get_datetime_string(),
                                               url))
        status = 0

    return status == 204
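Note: a minimal way to exercise this helper on its own (hypothetical standalone usage; assumes the same urllib/sys/utils imports as the function above):

if __name__ == '__main__':
    # clients3.google.com/generate_204 answers with an empty HTTP 204 response
    # when the network path is up, so a 204 status is treated as "connected".
    connected = check_network_connection(timeout=5)
    print('connected' if connected else 'disconnected')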
Example #6
def test(args):
    if not args.model:
        print('Need a pretrained model!')
        return

    if not args.color_labels:
        print('Need to specify color labels')
        return

    resize_img = args.image_width is not None and args.image_height is not None

    # check if output dir exists
    output_dir = args.output_dir if args.output_dir else 'test-{}'.format(
        utils.get_datetime_string())
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)

    # load model
    model = networks.UNet(args.unet_layers, 3, len(args.color_labels))
    model.load_state_dict(torch.load(args.model))
    model = model.eval()

    if not args.cpu:
        model.cuda()

    # iterate all images with one by one
    transform = torchvision.transforms.ToTensor()
    for filename in os.listdir(args.dataroot):
        filepath = os.sep.join([args.dataroot, filename])
        with open(filepath, 'rb') as f:
            img = Image.open(f)
            if resize_img:
                img = img.resize((args.image_width, args.image_height))
            img = transform(img)
            img = img.view(1, *img.shape)
            img = Variable(img)
        if not args.cpu:
            img = img.cuda()
        output = model(img)
        _, c, h, w = output.data.shape
        output_numpy = output.data.numpy()[0] if args.cpu else output.data.cpu(
        ).numpy()[0]
        output_argmax = numpy.argmax(output_numpy, axis=0)
        out_img = numpy.zeros((h, w, 3), dtype=numpy.uint8)
        for i, color in enumerate(args.color_labels):
            out_img[output_argmax == i] = numpy.array(args.color_labels[i],
                                                      dtype=numpy.uint8)
        out_img = Image.fromarray(out_img)
        seg_filepath = os.sep.join(
            [output_dir, filename[:filename.rfind('.')] + '.png'])
        out_img.save(seg_filepath)
        print('{} is exported!'.format(seg_filepath))
Example #7
    def ban_subreddit(self, subreddit):
        log.debug(f"Banning subreddit: {subreddit}")
        c = self.dbConn.cursor()
        c.execute(
            '''
			SELECT Banned
			FROM subreddits
			WHERE Subreddit = ?
			''', (subreddit, ))

        result = c.fetchone()
        if result is None or len(result) == 0:
            try:
                c.execute(
                    '''
					INSERT INTO subreddits
					(Subreddit, Banned, BanChecked)
					VALUES (?, ?, ?)
				''', (subreddit, True, utils.get_datetime_string(utils.datetime_now())))
            except sqlite3.IntegrityError as err:
                log.warning(f"Failed to ban subreddit: {err}")
                return False
        else:
            try:
                c.execute(
                    '''
					UPDATE subreddits
					SET Banned = ?
						,BanChecked = ?
					WHERE Subreddit = ?
				''', (True, utils.get_datetime_string(utils.datetime_now()), subreddit))
            except sqlite3.IntegrityError as err:
                log.warning(f"Failed to update subreddit ban: {err}")
                return False

        self.dbConn.commit()
        return True
Example #8
    def get_count_pending_reminders(self, timestamp):
        log.debug("Fetching count of pending reminders")
        c = self.dbConn.cursor()
        c.execute(
            '''
			SELECT COUNT(*)
			FROM reminders
			WHERE TargetDate < ?
			''', (utils.get_datetime_string(timestamp), ))

        result = c.fetchone()
        if result is None or len(result) == 0:
            log.debug("No pending reminders")
            return 0

        log.debug(f"Count reminders: {result[0]}")
        return result[0]
Example #9
    def get_subreddit_banned(self, subreddit):
        log.debug(f"Getting subreddit ban: {subreddit}")
        c = self.dbConn.cursor()
        c.execute(
            '''
			SELECT Banned
			FROM subreddits
			WHERE Subreddit = ?
				AND BanChecked > ?
			''', (subreddit,
         utils.get_datetime_string(utils.datetime_now() - timedelta(days=30))))

        result = c.fetchone()
        if result is None or len(result) == 0:
            log.debug("Not banned")
            return False

        log.debug(f"Value: {result[0] == 1}")
        return result[0] == 1
Example #10
    def get_pending_cakedays(self, count, timestamp):
        log.debug("Fetching pending cakedays")
        c = self.dbConn.cursor()
        results = []
        for row in c.execute(
                '''
			SELECT ID, CakedayDate, User
			FROM cakedays
			WHERE CakedayDate < ?
			ORDER BY CakedayDate ASC
			LIMIT ?
			''', (utils.get_datetime_string(timestamp), count)):
            cakeday = Cakeday(user=row[2],
                              date_time=utils.parse_datetime_string(row[1]),
                              db_id=row[0])
            results.append(cakeday)

        log.debug(f"Found cakedays: {len(results)}")
        return results
Example #11
    def bump_cakeday(self, cakeday):
        if cakeday.db_id is None:
            log.warning(f"This cakeday doesn't exist: {cakeday.user}")
            return False

        c = self.dbConn.cursor()
        log.debug("Bumping cakeday one year")
        try:
            c.execute(
                '''
				UPDATE cakedays
				SET CakedayDate = ?
				WHERE ID = ?
			''', (utils.get_datetime_string(utils.add_years(cakeday.date_time,
                                                   1)), cakeday.db_id))
        except sqlite3.IntegrityError as err:
            log.warning(f"Failed to bump cakeday: {err}")
            return False

        self.dbConn.commit()

        return True
Example #12
    def init(self, debug, publish, clone):
        if debug:
            if clone:
                if os.path.exists(static.DATABASE_DEBUG_NAME):
                    os.remove(static.DATABASE_DEBUG_NAME)
                copyfile(static.DATABASE_NAME, static.DATABASE_DEBUG_NAME)

            self.dbConn = sqlite3.connect(static.DATABASE_DEBUG_NAME)
        else:
            self.dbConn = sqlite3.connect(static.DATABASE_NAME)

        c = self.dbConn.cursor()
        if publish:
            for table in Database.tables:
                c.execute(f"DROP TABLE IF EXISTS {table}")

        for table in Database.tables:
            c.execute(Database.tables[table])

        if self.get_keystore("remindme_comment") is None:
            self.save_keystore("remindme_comment",
                               utils.get_datetime_string(utils.datetime_now()))

        self.dbConn.commit()
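Note: init() iterates a class-level Database.tables mapping of table name to CREATE statement, which is not shown on this page. A minimal sketch of the reminders entry, with column names taken from the INSERT/SELECT statements above and the types guessed, might be:

class Database:
    tables = {
        "reminders": '''
            CREATE TABLE IF NOT EXISTS reminders (
                ID INTEGER PRIMARY KEY AUTOINCREMENT,
                Source VARCHAR(400) NOT NULL,
                RequestedDate VARCHAR(80) NOT NULL,
                TargetDate VARCHAR(80) NOT NULL,
                Message VARCHAR(500),
                User VARCHAR(80) NOT NULL,
                Defaulted BOOLEAN NOT NULL
            )
        ''',
        # ...plus similar entries for subreddits, cakedays, user_settings and the keystore.
    }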
Example #13
    def add_cakeday(self, cakeday):
        if cakeday.db_id is not None:
            log.warning(f"This cakeday already exists: {cakeday.db_id}")
            return False

        c = self.dbConn.cursor()
        log.debug("Saving new cakeday")
        try:
            c.execute(
                '''
				INSERT INTO cakedays
				(CakedayDate, User)
				VALUES (?, ?)
			''', (utils.get_datetime_string(cakeday.date_time), cakeday.user))
        except sqlite3.IntegrityError as err:
            log.warning(f"Failed to save cakeday: {err}")
            return False

        if c.lastrowid is not None:
            cakeday.db_id = c.lastrowid
            log.debug(f"Saved to: {cakeday.db_id}")

        self.dbConn.commit()

        return True
Example #14
		"--no_backup", help="Don't backup the database", action='store_const', const=True, default=False)
	parser.add_argument(
		"--reset_comment", help="Reset the last comment read timestamp", action='store_const', const=True,
		default=False)
	parser.add_argument("--debug", help="Set the log level to debug", action='store_const', const=True, default=False)
	args = parser.parse_args()

	if args.debug:
		discord_logging.set_level(logging.DEBUG)

	discord_logging.init_discord_logging(args.user, logging.WARNING, 1)
	reddit = reddit_class.Reddit(args.user, args.no_post)
	database = Database(debug=args.debug_db, clone=args.clone_db)
	if args.reset_comment:
		log.info("Resetting comment processed timestamp")
		database.save_keystore("remindme_comment", utils.get_datetime_string(utils.datetime_now()))

	last_backup = None
	last_comments = None
	while True:
		startTime = time.perf_counter()
		log.debug("Starting run")

		actions = 0
		errors = 0

		try:
			actions += messages.process_messages(reddit, database)
		except Exception as err:
			log.warning(f"Error processing messages: {err}")
			log.warning(traceback.format_exc())
Example #15
			count_default_comment += 1
			reminder.message = None

		if isinstance(reminder.source, (bytes, bytearray)):
			reminder.source = reminder.source.decode("utf-8")
		if reminder.source == info_page:
			count_info_page += 1
			reminder.source = "Unfortunately I couldn't find a source for this reminder. " \
				"This happens sometimes with really old reminders"

		new_c.execute('''
			INSERT INTO reminders
			(Source, RequestedDate, TargetDate, Message, User, Defaulted)
			VALUES (?, ?, ?, ?, ?, 0)
		''', (
			reminder.source,
			utils.get_datetime_string(reminder.requested_date),
			utils.get_datetime_string(reminder.target_date),
			reminder.message,
			reminder.user))
	except Exception as err:
		log.info(err)
		log.info(reminder)
	if loop % 10000 == 0:
		log.info(f"{loop}: {int(time.perf_counter() - startTime)}s : {count_default_comment} : {count_info_page}")

new_db_conn.commit()
new_db_conn.close()
old_db_conn.close()
log.info(f"{loop}: {int(time.perf_counter() - startTime)}s : {count_default_comment} : {count_info_page}")
Example #16
            count_default_comment += 1
            reminder.message = None

        if isinstance(reminder.source, (bytes, bytearray)):
            reminder.source = reminder.source.decode("utf-8")
        if reminder.source == info_page:
            count_info_page += 1
            reminder.source = "Unfortunately I couldn't find a source for this reminder. " \
             "This happens sometimes with really old reminders"

        new_c.execute(
            '''
			INSERT INTO reminders
			(Source, RequestedDate, TargetDate, Message, User, Defaulted)
			VALUES (?, ?, ?, ?, ?, 0)
		''', (reminder.source, utils.get_datetime_string(reminder.requested_date),
        utils.get_datetime_string(
            reminder.target_date), reminder.message, reminder.user))
    except Exception as err:
        log.info(err)
        log.info(reminder)
    if loop % 10000 == 0:
        log.info(
            f"{loop}: {int(time.perf_counter() - startTime)}s : {count_default_comment} : {count_info_page}"
        )

new_db_conn.commit()
new_db_conn.close()
old_db_conn.close()
log.info(
    f"{loop}: {int(time.perf_counter() - startTime)}s : {count_default_comment} : {count_info_page}"
)
Example #17
    elif args.pushshift == "auto":
        pushshift_client = PushshiftType.AUTO
    else:
        log.warning(f"Invalid pushshift client: {args.pushshift}")
        sys.exit(1)

    reddit = praw_wrapper.Reddit(args.user,
                                 args.no_post,
                                 user_agent=static.USER_AGENT,
                                 pushshift_client=pushshift_client)
    static.ACCOUNT_NAME = reddit.username
    database = Database(debug=args.debug_db)
    if args.reset_comment:
        log.info("Resetting comment processed timestamp")
        database.save_keystore("comment_timestamp",
                               utils.get_datetime_string(utils.datetime_now()))

    last_backup = None
    last_comments = None
    while True:
        startTime = time.perf_counter()
        log.debug("Starting run")

        actions = 0
        errors = 0

        counters.objects.labels(type="reminders").set(
            database.get_count_all_reminders())
        counters.objects.labels(type="comments").set(
            database.get_count_all_comments())
    counters.objects.labels(type="users").set(
        database.get_count_all_users())
Example #18
def train(args):
    # set logger
    logging_dir = args.output_dir if args.output_dir else 'train-{}'.format(utils.get_datetime_string())
    os.mkdir('{}'.format(logging_dir))
    logging.basicConfig(
        level=logging.INFO,
        filename='{}/log.txt'.format(logging_dir),
        format='%(asctime)s %(message)s',
        filemode='w'
    )

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    logging.info('=========== Task {} started! ==========='.format(args.output_dir))
    for arg in vars(args):
        logging.info('{}: {}'.format(arg, getattr(args, arg)))
    logging.info('========================================')

    # initialize loader
    multi_scale = len(args.layers) if args.network != 'unet' else 0
    train_set = utils.SegmentationImageFolder(os.sep.join([args.dataroot, 'train']),
                                              image_folder=args.img_dir,
                                              segmentation_folder=args.seg_dir,
                                              labels=args.color_labels,
                                              image_size=(args.image_width, args.image_height),
                                              random_horizontal_flip=args.random_horizontal_flip,
                                              random_rotation=args.random_rotation,
                                              random_crop=args.random_crop,
                                              random_square_crop=args.random_square_crop,
                                              label_regr=args.regression,
                                              multi_scale=multi_scale)
    val_set = utils.SegmentationImageFolder(os.sep.join([args.dataroot, 'val']),
                                            image_folder=args.img_dir,
                                            segmentation_folder=args.seg_dir,
                                            labels=args.color_labels,
                                            image_size=(args.image_width, args.image_height),
                                            random_square_crop=args.random_square_crop,
                                            label_regr=args.regression)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=args.batch_size, shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_set, batch_size=args.val_batch_size)

    # initialize model, input channels need to be calculated by hand
    n_classes = len(args.color_labels)

    if args.network == 'unet':
        network = networks.UNet
        criterion = nn.MSELoss() if args.regression else utils.CrossEntropyLoss2D()
    elif args.network == 'triangle':
        network = networks.TriangleNet
        criterion = utils.MSCrossEntropyLoss2D([0.15]+[0.85/float(multi_scale)]*multi_scale)
    else:
        logging.error('Unknown network type: {}'.format(args.network))
        return
    val_criterion = utils.CrossEntropyLoss2D()

    if args.regression:
        model = network(args.layers, 3, 1, groups=args.groups)
    else:
        model = network(args.layers, 3, n_classes, groups=args.groups)
    if not args.cpu:
        model.cuda()

    # train
    iterations = 0
    for epoch in range(args.epochs):
        model.train()
        # update lr according to lr policy
        if epoch in args.lr_policy:
            lr = args.lr_policy[epoch]
            optimizer = utils.get_optimizer(args.optimizer, model.parameters(),
                                            lr=lr, momentum=args.momentum, nesterov=args.nesterov)
            if epoch > 0:
                logging.info('| Learning Rate | Epoch: {: >3d} | Change learning rate to {}'.format(epoch+1, lr))
            else:
                logging.info('| Learning Rate | Initial learning rate: {}'.format(lr))

        # iterate all samples
        losses = utils.AverageMeter()
        for i_batch, (img, seg) in enumerate(train_loader):

            img = Variable(img)
            seg = Variable(seg) if not multi_scale else [Variable(x) for x in seg]

            if not args.cpu:
                img = img.cuda()
                seg = seg.cuda() if not multi_scale else [x.cuda() for x in seg]

            # compute output
            output = model(img)
            loss = criterion(output, seg)
            losses.update(loss.data[0])

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # logging training curve
            if iterations % args.print_interval == 0:
                logging.info(
                    '| Iterations: {: >6d} '
                    '| Epoch: {: >3d}/{: >3d} '
                    '| Batch: {: >4d}/{: >4d} '
                    '| Training loss: {:.6f}'.format(
                        iterations, 
                        epoch+1, args.epochs,
                        i_batch, len(train_loader)-1,
                        losses.avg
                    )
                )
                losses = utils.AverageMeter()

            # validation on all val samples
            if iterations % args.validation_interval == 0:
                model.eval()
                val_losses = utils.AverageMeter()
                gt_pixel_count = [0] * n_classes
                pred_pixel_count = [0] * n_classes
                intersection_pixel_count = [0] * n_classes
                union_pixel_count = [0] * n_classes

                for img, seg in val_loader:

                    img = Variable(img)
                    seg = Variable(seg)

                    if not args.cpu:
                        img = img.cuda()
                        seg = seg.cuda()

                    # compute output
                    output = model(img)
                    loss = val_criterion(output, seg)
                    val_losses.update(loss.data[0], float(img.size(0))/float(args.batch_size))
                    output_numpy = output.data.numpy() if args.cpu else output.data.cpu().numpy()
                    pred_labels = numpy.argmax(output_numpy, axis=1)
                    gt_labels = seg.data.numpy() if args.cpu else seg.data.cpu().numpy()

                    pred_labels = pred_labels.flatten()
                    gt_labels = gt_labels.flatten()

                    for i in range(n_classes):
                        pred_pixel_count[i] += (pred_labels == i).sum()
                        gt_pixel_count[i] += (gt_labels == i).sum()
                        gt_dumb = numpy.full(gt_labels.shape, -1, dtype=numpy.int)
                        pred_dumb = numpy.full(pred_labels.shape, -2, dtype=numpy.int)
                        gt_dumb[gt_labels == i] = 0
                        pred_dumb[pred_labels == i] = 0
                        intersection_pixel_count[i] += (gt_dumb == pred_dumb).sum()
                        pred_dumb[gt_labels == i] = 0
                        union_pixel_count[i] += (pred_dumb == 0).sum()

                # calculate mPA & mIOU
                mPA = 0
                mIOU = 0
                for i in range(n_classes):
                    mPA += float(intersection_pixel_count[i]) / float(gt_pixel_count[i])
                    mIOU += float(intersection_pixel_count[i]) / float(union_pixel_count[i])
                mPA /= float(n_classes)
                mIOU /= float(n_classes)

                logging.info(
                    '| Iterations: {: >6d} '
                    '| Epoch: {: >3d}/{: >3d} '
                    '| Average mPA: {:.4f} '
                    '| Average mIOU: {:.4f} '
                    '| Validation loss: {:.6f} '.format(
                        iterations, 
                        epoch+1, args.epochs,
                        mPA,
                        mIOU,
                        val_losses.avg
                    )
                )

                model.train()

            if iterations % args.checkpoint_interval == 0 and iterations > 0:
                model_weights_path = '{}/iterations-{:0>6d}-epoch-{:0>3d}.pth'.format(logging_dir, iterations, epoch+1)
                torch.save(model.state_dict(), model_weights_path)
                logging.info('| Checkpoint | {} is saved!'.format(model_weights_path))

            iterations += 1
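Note: the loop above treats args.lr_policy as a mapping from epoch index to the learning rate to switch to at that epoch; epoch 0 has to be present so an optimizer exists before the first backward pass. A hypothetical schedule (values purely illustrative) would be:

# Start at 1e-2, then decay at epochs 30 and 60.
args.lr_policy = {0: 1e-2, 30: 1e-3, 60: 1e-4}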
Example #19
def train(args):
    # set logger
    logging_dir = args.output_dir if args.output_dir else 'train-{}'.format(
        utils.get_datetime_string())
    os.mkdir('{}'.format(logging_dir))
    logging.basicConfig(level=logging.INFO,
                        filename='{}/log.txt'.format(logging_dir),
                        format='%(asctime)s %(message)s',
                        filemode='w')

    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s %(message)s')
    console.setFormatter(formatter)
    logging.getLogger('').addHandler(console)

    logging.info('=========== Task {} started! ==========='.format(
        args.output_dir))
    for arg in vars(args):
        logging.info('{}: {}'.format(arg, getattr(args, arg)))
    logging.info('========================================')

    # initialize loader
    multi_scale = len(args.layers) if args.network != 'unet' else 0
    train_set = utils.SegmentationImageFolder(
        os.sep.join([args.dataroot, 'train']),
        image_folder=args.img_dir,
        segmentation_folder=args.seg_dir,
        labels=args.color_labels,
        image_size=(args.image_width, args.image_height),
        random_horizontal_flip=args.random_horizontal_flip,
        random_rotation=args.random_rotation,
        random_crop=args.random_crop,
        random_square_crop=args.random_square_crop,
        label_regr=args.regression,
        multi_scale=multi_scale)
    val_set = utils.SegmentationImageFolder(
        os.sep.join([args.dataroot, 'val']),
        image_folder=args.img_dir,
        segmentation_folder=args.seg_dir,
        labels=args.color_labels,
        image_size=(args.image_width, args.image_height),
        random_square_crop=args.random_square_crop,
        label_regr=args.regression)
    train_loader = torch.utils.data.DataLoader(train_set,
                                               batch_size=args.batch_size,
                                               shuffle=True)
    val_loader = torch.utils.data.DataLoader(val_set,
                                             batch_size=args.val_batch_size)

    # initialize model, input channels need to be calculated by hand
    n_classes = len(args.color_labels)

    if args.network == 'unet':
        network = networks.UNet
        criterion = nn.MSELoss(
        ) if args.regression else utils.CrossEntropyLoss2D()
    elif args.network == 'triangle':
        network = networks.TriangleNet
        criterion = utils.MSCrossEntropyLoss2D([0.15] +
                                               [0.85 / float(multi_scale)] *
                                               multi_scale)
    else:
        logging.error('Unknown network type: {}'.format(args.network))
        return
    val_criterion = utils.CrossEntropyLoss2D()

    if args.regression:
        model = network(args.layers, 3, 1, groups=args.groups)
    else:
        model = network(args.layers, 3, n_classes, groups=args.groups)
    if not args.cpu:
        model.cuda()

    # train
    iterations = 0
    for epoch in range(args.epochs):
        model.train()
        # update lr according to lr policy
        if epoch in args.lr_policy:
            lr = args.lr_policy[epoch]
            optimizer = utils.get_optimizer(args.optimizer,
                                            model.parameters(),
                                            lr=lr,
                                            momentum=args.momentum,
                                            nesterov=args.nesterov)
            if epoch > 0:
                logging.info(
                    '| Learning Rate | Epoch: {: >3d} | Change learning rate to {}'
                    .format(epoch + 1, lr))
            else:
                logging.info(
                    '| Learning Rate | Initial learning rate: {}'.format(lr))

        # iterate all samples
        losses = utils.AverageMeter()
        for i_batch, (img, seg) in enumerate(train_loader):

            img = Variable(img)
            seg = Variable(seg) if not multi_scale else [
                Variable(x) for x in seg
            ]

            if not args.cpu:
                img = img.cuda()
                seg = seg.cuda() if not multi_scale else [
                    x.cuda() for x in seg
                ]

            # compute output
            output = model(img)
            loss = criterion(output, seg)
            losses.update(loss.data[0])

            # compute gradient and do SGD step
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # logging training curve
            if iterations % args.print_interval == 0:
                logging.info('| Iterations: {: >6d} '
                             '| Epoch: {: >3d}/{: >3d} '
                             '| Batch: {: >4d}/{: >4d} '
                             '| Training loss: {:.6f}'.format(
                                 iterations, epoch + 1, args.epochs, i_batch,
                                 len(train_loader) - 1, losses.avg))
                losses = utils.AverageMeter()

            # validation on all val samples
            if iterations % args.validation_interval == 0:
                model.eval()
                val_losses = utils.AverageMeter()
                gt_pixel_count = [0] * n_classes
                pred_pixel_count = [0] * n_classes
                intersection_pixel_count = [0] * n_classes
                union_pixel_count = [0] * n_classes

                for img, seg in val_loader:

                    img = Variable(img)
                    seg = Variable(seg)

                    if not args.cpu:
                        img = img.cuda()
                        seg = seg.cuda()

                    # compute output
                    output = model(img)
                    loss = val_criterion(output, seg)
                    val_losses.update(
                        loss.data[0],
                        float(img.size(0)) / float(args.batch_size))
                    output_numpy = output.data.numpy(
                    ) if args.cpu else output.data.cpu().numpy()
                    pred_labels = numpy.argmax(output_numpy, axis=1)
                    gt_labels = seg.data.numpy() if args.cpu else seg.data.cpu(
                    ).numpy()

                    pred_labels = pred_labels.flatten()
                    gt_labels = gt_labels.flatten()

                    for i in range(n_classes):
                        pred_pixel_count[i] += (pred_labels == i).sum()
                        gt_pixel_count[i] += (gt_labels == i).sum()
                        gt_dumb = numpy.full(gt_labels.shape,
                                             -1,
                                             dtype=numpy.int)
                        pred_dumb = numpy.full(pred_labels.shape,
                                               -2,
                                               dtype=numpy.int)
                        gt_dumb[gt_labels == i] = 0
                        pred_dumb[pred_labels == i] = 0
                        intersection_pixel_count[i] += (
                            gt_dumb == pred_dumb).sum()
                        pred_dumb[gt_labels == i] = 0
                        union_pixel_count[i] += (pred_dumb == 0).sum()

                # calculate mPA & mIOU
                mPA = 0
                mIOU = 0
                for i in range(n_classes):
                    mPA += float(intersection_pixel_count[i]) / float(
                        gt_pixel_count[i])
                    mIOU += float(intersection_pixel_count[i]) / float(
                        union_pixel_count[i])
                mPA /= float(n_classes)
                mIOU /= float(n_classes)

                logging.info('| Iterations: {: >6d} '
                             '| Epoch: {: >3d}/{: >3d} '
                             '| Average mPA: {:.4f} '
                             '| Average mIOU: {:.4f} '
                             '| Validation loss: {:.6f} '.format(
                                 iterations, epoch + 1, args.epochs, mPA, mIOU,
                                 val_losses.avg))

                model.train()

            if iterations % args.checkpoint_interval == 0 and iterations > 0:
                model_weights_path = '{}/iterations-{:0>6d}-epoch-{:0>3d}.pth'.format(
                    logging_dir, iterations, epoch + 1)
                torch.save(model.state_dict(), model_weights_path)
                logging.info(
                    '| Checkpoint | {} is saved!'.format(model_weights_path))

            iterations += 1
Example #20
		pushshift_client = PushshiftType.PROD
	elif args.pushshift == "beta":
		pushshift_client = PushshiftType.BETA
	elif args.pushshift == "auto":
		pushshift_client = PushshiftType.AUTO
	else:
		log.warning(f"Invalid pushshift client: {args.pushshift}")
		sys.exit(1)

	reddit = praw_wrapper.Reddit(
		args.user, args.no_post, user_agent=static.USER_AGENT, pushshift_client=pushshift_client)
	static.ACCOUNT_NAME = reddit.username
	database = Database(debug=args.debug_db)
	if args.reset_comment:
		log.info("Resetting comment processed timestamp")
		database.save_keystore("comment_timestamp", utils.get_datetime_string(utils.datetime_now()))

	last_backup = None
	last_comments = None
	while True:
		startTime = time.perf_counter()
		log.debug("Starting run")

		actions = 0
		errors = 0

		counters.objects.labels(type="reminders").set(database.get_count_all_reminders())
		counters.objects.labels(type="comments").set(database.get_count_all_comments())
		counters.objects.labels(type="users").set(database.get_count_all_users())
		counters.objects.labels(type="subreddits").set(database.get_count_all_subreddits())
		counters.objects.labels(type="subreddits_banned").set(database.get_count_banned_subreddits())
Example #21
        previousEpoch = comment['created_utc'] - 1
        regex_string = r'(?:remindme.? )(.*?)(?:\[|\n|\"|$)'
        times = re.findall(regex_string, comment['body'], flags=re.IGNORECASE)
        if len(times) > 0:
            time_string = times[0]

            try:
                cal = pdt.Calendar()
                holdTime = cal.parse(time_string, current)
                old_date = time.strftime('%Y-%m-%d %H:%M:%S', holdTime[0])
            except Exception:
                old_date = "None"

            try:
                new_date = utils.get_datetime_string(
                    utils.parse_time(time_string, current, None),
                    format_string='%Y-%m-%d %H:%M:%S')
            except Exception:
                new_date = "None"

            if old_date != new_date and old_date != utils.get_datetime_string(
                    current, format_string='%Y-%m-%d %H:%M:%S'):
                old_date_time = utils.parse_datetime_string(old_date)
                new_date_time = utils.parse_datetime_string(new_date)
                if old_date_time is None or new_date_time is None or \
                  not (old_date_time.replace(year=1, month=1, day=1) == current_hour and
                  new_date_time.replace(year=1, month=1, day=1) == zero_hour):
                    log.info(
                        f"{old_date.ljust(19)} | {new_date.ljust(19)} | {time_string}"
                    )
#{utils.reddit_link(comment['permalink']).ljust(120)} |