def load_sites_feeds():
    from tech_rss.models import Site

    fix_multiprocessing()
    clf = Classifier()

    for site in Site.objects.all():
        print('Starting {}'.format(site.domain))

        # Skip sites whose feeds produced nothing new.
        news = site.get_new_news()
        if not news:
            continue

        # Predict a category for every new page in one batch.
        categories = clf.predict(news)

        for category, page in zip(categories, news):
            print(CATEGORIES_SHORT[category])
            print(page['title'], '\n')

            url, title = save_post(category, page, site)

            # Notify only the users subscribed to this category.
            users = site.users.filter(categories__contains=[category])
            users_id = [user.id for user in users]
            send_post_to_subscribers(TelegramBot, users_id, url, title)
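
# Usage sketch: the loader is presumably driven by a periodic job (cron,
# Celery beat, or similar). The minimal entry point below is illustrative
# and not part of the original code.
if __name__ == '__main__':
    load_sites_feeds()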
import argparse
import os
import sys

import torch

# Project-local helpers (Classifier, get_random_image_from_dir, load_json)
# are assumed to be imported at module level.


def main(*args):
    """Predict the top K classes of an image.

    Args:
        *args: args to be parsed by the ArgumentParser

    Returns:
        None
    """
    # Instantiating with the formatter_class argument makes default values
    # print in the help message.
    parser = argparse.ArgumentParser(
        description='Process an image & report results.',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument(
        'image_path', type=str,
        help=('path to the image to process or to a dataset '
              'directory with images to choose randomly from. '
              'Ex: flowers/test/1/image_06743.jpg or flowers/test'))
    parser.add_argument('checkpoint', type=str,
                        help='path to the model checkpoint to load')
    parser.add_argument('--top_k', type=int, default=1,
                        help='return top K most likely classes')
    parser.add_argument('--category_names', type=str,
                        help='use a mapping of categories to real names')
    parser.add_argument('--gpu', action='store_true',
                        help=('if available, use gpu to process the image '
                              'instead of the cpu'))
    args = parser.parse_args(args)

    if os.path.isdir(args.image_path):
        print(f'{args.image_path} is a directory.',
              'Choosing a random image to process.')
        image_path = get_random_image_from_dir(args.image_path)
        print(f'Using image: {image_path}')
    else:
        image_path = args.image_path

    if not os.path.isfile(args.checkpoint):
        print(f'ERROR: {args.checkpoint} is not a file.', file=sys.stderr)
        sys.exit(-1)

    if args.category_names:
        cat_to_name = load_json(args.category_names)
    else:
        cat_to_name = None

    if args.gpu:
        device = 'cuda'
        if not torch.cuda.is_available():
            print('ERROR: cuda is not available on this machine.',
                  'Use cpu for prediction instead.', file=sys.stderr)
            sys.exit(-1)
    else:
        device = 'cpu'

    classifier = Classifier(checkpoint=args.checkpoint)
    probs, classes = classifier.predict(image_path, topk=args.top_k,
                                        device=device)

    if cat_to_name is not None:
        classes = [cat_to_name[c] for c in classes]
        # Pad column 1 to the longest class name so the columns align.
        class_len = len(max(cat_to_name.values(), key=len))
    else:
        class_len = 10  # padding needed to space column 1 title 'Class' below

    print(f'{"Class":{class_len}}{"Probability"}')
    for prob, class_ in zip(probs, classes):
        print(f'{class_:{class_len}}{prob:4.2f}')
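
# Example invocation (the script name `predict.py` and the file names
# `checkpoint.pth` and `cat_to_name.json` are assumptions; the image path
# comes from the argparse help text above):
#   python predict.py flowers/test/1/image_06743.jpg checkpoint.pth \
#       --top_k 3 --category_names cat_to_name.json --gpu
if __name__ == '__main__':
    main(*sys.argv[1:])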
import argparse

import cv2

# Project-local helpers (Conf, Classifier, HOG, HOGParam, ImageReader,
# FrameDetectByOneImage, basics, h5_load_dataset, imgDiffRatio,
# pacasl_voc_reader, searchImageByHOGFeature) are assumed to be imported
# at module level.


def main():
    ap = argparse.ArgumentParser()
    ap.add_argument("-v", "--video", required=True,
                    help="Path to the video file")
    ap.add_argument("-m", "--main", required=True,
                    help="json file with the main menu configuration")
    ap.add_argument("-i", "--item", required=True,
                    help="json file with the menu item configuration")
    ap.add_argument("-w", "--write", required=False,
                    help="output path for the result video")
    args = vars(ap.parse_args())

    mainMenuConf = Conf(args['main'])
    menuItemConf = Conf(args['item'])

    (mainMenufeatureList, mainMenulabels) = h5_load_dataset(
        mainMenuConf['feature_file'], mainMenuConf['dataset_feature_name'])

    # Read the main menu class list; fall back to 'mainMenu' if none is given.
    classInfo = []
    mainMenuClassName = None
    if mainMenuConf['class'] is not None:
        for name in open(mainMenuConf['class']).read().split("\n"):
            classInfo.append(name)
        if len(classInfo) != 0:
            mainMenuClassName = classInfo[0]
    else:
        mainMenuClassName = 'mainMenu'

    # Locate the annotated main menu bounding box in the VOC dataset.
    voc = pacasl_voc_reader(mainMenuConf['dataset_xml'])
    objectList = voc.getObjectList()
    for (className, mainMenuBox) in objectList:
        if className == mainMenuClassName:
            break

    # Read the menu item class list and its annotated bounding box.
    itemClassInfo = []
    if menuItemConf['class'] is not None:
        for name in open(menuItemConf['class']).read().split("\n"):
            itemClassInfo.append(name)
    voc = pacasl_voc_reader(menuItemConf['dataset_xml'])
    objectList = voc.getObjectList()
    for (className, itemBox) in objectList:
        if className == itemClassInfo[0]:
            break

    itemClassifier = Classifier(menuItemConf['classifier_path'], "SVC")
    itemHOG = HOG(menuItemConf['orientations'],
                  menuItemConf['pixels_per_cell'],
                  menuItemConf['cells_per_block'],
                  menuItemConf['transform_sqrt'] == 1,
                  menuItemConf['normalize'])

    imgReader = ImageReader(args['video'], True)

    hogParam = HOGParam(orientations=mainMenuConf['orientations'],
                        pixels_per_cell=mainMenuConf['pixels_per_cell'],
                        cells_per_block=mainMenuConf['cells_per_block'],
                        transform_sqrt=mainMenuConf['transform_sqrt'] == 1,
                        block_norm=mainMenuConf['normalize'])

    if args['write'] is not None:
        # Output is fixed to XVID at 1280x720, 30 fps.
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(args['write'], fourcc, 30.0, (1280, 720))

    mainMenuLoc = None
    mainMenuImg = None
    bFound = False
    frameCnt = 0
    searchRegion = None

    while True:
        (ret, frame, fname) = imgReader.read()
        if not ret:
            break

        templateShape = [mainMenuBox[3] - mainMenuBox[1] + 1,
                         mainMenuBox[2] - mainMenuBox[0] + 1]

        frameOrigin = frame.copy()
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        key = ' '

        if bFound:
            # The menu was found in the previous frame: run a cheap pixel-diff
            # check against the cached menu image instead of a full HOG search.
            testImg = frame[mainMenuLoc[1]:mainMenuLoc[3],
                            mainMenuLoc[0]:mainMenuLoc[2]]
            e1 = cv2.getTickCount()
            (diff, ratio) = imgDiffRatio(testImg, mainMenuImg)
            e2 = cv2.getTickCount()
            time = (e2 - e1) / cv2.getTickFrequency()
            # print('[{}] ratio {}'.format(frameCnt, ratio))
            if ratio < 0.1:  # menu still on screen
                bFound = True
                (x, y, w, h) = (mainMenuLoc[0], mainMenuLoc[1],
                                mainMenuLoc[2] - mainMenuLoc[0],
                                mainMenuLoc[3] - mainMenuLoc[1])
            else:
                bFound = False
        else:
            # Full HOG-feature search, restricted to the configured region.
            if searchRegion is None:
                searchRegion = tuple(mainMenuConf['mainMenuSearchRegion'])
            e1 = cv2.getTickCount()
            (bFound, val, (x, y, w, h)) = searchImageByHOGFeature(
                mainMenufeatureList[0], templateShape, frame, searchRegion,
                mainMenuConf['mainMenuHOGDistanceThreshold'], hogParam,
                (10, 10), bMP=False, bVisualize=False)
            e2 = cv2.getTickCount()
            time = (e2 - e1) / cv2.getTickFrequency()
            if bFound:
                frameDetectImg = frame[y:y + h, x:x + w]

        if bFound:
            print('[{}] search result time {}, loc = {}'.format(
                frameCnt, time, (x, y, w, h)))
            # Narrow the search region to the detected menu for later frames.
            searchRegion = (x, y, x + w, y + h)
            mainMenuLoc = (x, y, x + w, y + h)
            mainMenuImg = frame[y:y + h, x:x + w]
            frameDetectImg = mainMenuImg
            cv2.rectangle(frameOrigin, (x, y), (x + w - 1, y + h - 1),
                          (0, 255, 0), 2)

            # Find the highlighted (selected) rectangle inside the menu.
            e1 = cv2.getTickCount()
            (rtn, (fx, fy, fw, fh)) = FrameDetectByOneImage(
                frameDetectImg, frameDetectImg, minW=200, minH=60,
                frameRatio=mainMenuConf['mainMenuFrameRectRatio'])
            e2 = cv2.getTickCount()
            time = (e2 - e1) / cv2.getTickFrequency()

            if rtn:
                # Convert the selection box to frame coordinates and draw it.
                fx = fx + x
                fy = fy + y
                cv2.rectangle(frameOrigin, (fx - 5, fy - 5),
                              (fx + fw + 5, fy + fh + 5), (255, 0, 0), 2)

                # Classify the selected menu item with HOG features + SVC,
                # using a ROI sized from the annotated item box.
                bh = itemBox[3] - itemBox[1] + 1
                bw = itemBox[2] - itemBox[0] + 1
                roi = frame[fy:fy + bh, fx:fx + bw]
                e1 = cv2.getTickCount()
                (feature, _) = itemHOG.describe(roi)
                predictIdx = itemClassifier.predict(feature)
                e2 = cv2.getTickCount()
                time = (e2 - e1) / cv2.getTickFrequency()
                print('    predict {} takes {}'.format(predictIdx, time))
                cv2.putText(frameOrigin, str(predictIdx),
                            (fx + fw + 10, fy + fh),
                            cv2.FONT_HERSHEY_SIMPLEX, 2, (0, 0, 255), 3,
                            cv2.LINE_AA)

            key = basics.showResizeImg(frameOrigin, 'result', 1)
            if args['write'] is not None:
                out.write(frameOrigin)
        else:
            print('[{}] Not found, takes {}'.format(frameCnt, time))
            key = basics.showResizeImg(frameOrigin, 'result', 1)
            if args['write'] is not None:
                out.write(frameOrigin)

        if key == ord('q'):
            break
        frameCnt = frameCnt + 1

    if args['write'] is not None:
        out.release()
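
# Example invocation (the script name and all file names are illustrative;
# only the flags are taken from the argparse definitions above):
#   python menu_detect.py -v capture.avi -m main_menu.json -i menu_item.json \
#       -w result.avi
if __name__ == '__main__':
    main()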