Code example #1
File: freshness_bot.py Project: ar4ik33/yolov3
def handle(message):

    # DownloadFile(message.Photo[message.Photo.Length - 1].FileId, @ "c:\photo.jpg")

    file_info = bot.get_file(message.photo[-1].file_id)

    downloaded_file = bot.download_file(file_info.file_path)
    # save the photo under the same file_id that was downloaded
    src = ('C:\\home\\artem_kug\\freshness\\images\\'
           + message.photo[-1].file_id + '.jpg')
    with open(src, 'wb') as new_file:
        new_file.write(downloaded_file)
    opt = {
        'weights': 'runs/train/exp/new.pt',
        'source': 'images',
        'img_size': 640,
        'conf_thres': 0.25,
        'iou_thres': 0.45,
        'device': '',
        'view_img': False,
        'save_txt': False,
        'save_conf': False,
        'nosave': False,
        'classes': None,
        'agnostic_nms': False,
        'augment': False,
        'update': False,
        'project': r'runs/detect',
        'name': 'exp',
        'exist_ok': False
    }
    detect.detect(opt)
    bot.send_photo(
        message.chat.id,
        open('runs\\detect\\exp\\' + message.photo[-1].file_id + '.jpg', 'rb'))
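A note on the path literals above: a minimal standalone sketch (all names are placeholders) of how os.path.join avoids the backslash-escape pitfalls of Windows literals like 'runs\detect\exp\\':

import os

# '\d' and '\e' only survive in a plain string because they happen not to be
# valid escape sequences; joining path parts needs no escapes and is portable.
out_dir = os.path.join('runs', 'detect', 'exp')
file_id = 'example_file_id'  # placeholder for message.photo[-1].file_id
print(os.path.join(out_dir, file_id + '.jpg'))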
Code example #2
def findElement(img, element):
    global net_door, net_ball, meta_door, meta_ball, cam_param, output_dir_door, output_dir_ball
    img_np = img
    t = time.time() - start_time
    if element == 'door':
        result_dict = detect(net_door, meta_door, cam_param, img_np, isshow=False,
                             issave=True, name=str(t), output_dir=output_dir_door)
    elif element == 'ball':
        result_dict = detect(net_ball, meta_ball, cam_param, img_np, isshow=False,
                             issave=True, name=str(t), output_dir=output_dir_ball)
    return result_dict
Code example #3
def on_message(client, userdata, msg):
    myval = msg.payload.decode('utf-8')
    if myval == "shot":

        camera.capture('/home/pi/project/uploads/image.jpg', resize=(640, 640))
        sleep(2)
        detect.detect("image.jpg")

    else:
        print("wrong")
Code example #4
File: app.py Project: harikishore96/codeTreasure
def upload_file():
    if request.method == 'POST':
        file = request.files['file']
        filename = secure_filename(file.filename)
        file.save(os.path.join(app.config['UPLOAD_FOLDER'], 'input.jpg'))
        print(str(filename))
        filename = os.path.join(app.config['UPLOAD_FOLDER'], 'input.jpg')
        detect(filename)
        return redirect(url_for('display'))
    return render_template('index.html')
Code example #5
def dequeue_wave():
    time.sleep(DETECT_PARAMETER.RECORD_SECONDS)
    while True:
        try:
            now, wave_bytes = WAVE_QUEUE.get(
                block=True, timeout=DETECT_PARAMETER.RECORD_SECONDS)
            detect.detect(now, wave_bytes)
        except queue.Empty as err:
            logger.warning('cannot get microphone input')
            logger.exception(err)
            break
Code example #6
def main():
    images_dir = 'images'
    results_dir = 'results'
    images = os.listdir(images_dir)
    names = [os.path.splitext(img)[0] for img in images]
    images = [os.path.join(images_dir, img) for img in images]
    results = [os.path.join(results_dir, name + '.txt') for name in names]

    t0 = time.time()
    for image, result in zip(images, results):
        detect.detect(image, result)
    print('Elapsed Time: {}s'.format(time.time() - t0))
Code example #7
def dl_and_cut(annot, vid):

    d_set_dir = vid.clips[0].d_set_dir

    frame_dir = d_set_dir.replace("videos", "frames") + vid.yt_id + "\\"
    check_call(['if', 'not', 'exist', frame_dir, 'mkdir', frame_dir],
               shell=True)

    # Use youtube_dl to download the video
    FNULL = open(os.devnull, 'w')
    check_call(['youtube-dl',
                # '--no-progress',
                '-f', 'best[ext=mp4]',
                '-o', d_set_dir + '/' + vid.yt_id + '_temp.mp4',
                'youtu.be/' + vid.yt_id],
               stdout=FNULL, stderr=subprocess.STDOUT)

    for clip in vid.clips:
        # Verify that the video has been downloaded. Skip otherwise
        if os.path.exists(d_set_dir + '/' + vid.yt_id + '_temp.mp4'):
            # Cut out the clip within the downloaded video and save the clip
            # in the correct class directory. Full re-encoding is used to maintain
            # frame accuracy. See here for more detail:
            # http://www.markbuckler.com/post/cutting-ffmpeg/

            if debug:
                check_call(['ffmpeg',
                            '-i', 'file:' + d_set_dir + '\\' + vid.yt_id + '_temp.mp4',
                            '-ss', str(float(clip.start) / 1000 - 0.5),
                            '-t', str((float(clip.stop) - float(clip.start)) / 1000 + 0.5),
                            '-threads', '1',
                            '-vf', 'fps=1',
                            frame_dir + '\\' + clip.name + '_%04d.jpg'])
            else:
                # If not debugging, hide the error output from failed downloads
                check_call(['ffmpeg',
                            '-i', 'file:' + d_set_dir + '\\' + vid.yt_id + '_temp.mp4',
                            '-ss', str(float(clip.start) / 1000 - 0.5),
                            '-t', str((float(clip.stop) - float(clip.start) + 1) / 1000 + 0.5),
                            '-threads', '1',
                            '-vf', 'fps=1',
                            frame_dir + '\\' + clip.name + '_%04d.jpg'],
                           stdout=FNULL, stderr=subprocess.STDOUT)

    # Remove the temporary video
    os.remove(d_set_dir + '/' + vid.yt_id + '_temp.mp4')
    frame_dir = frame_dir.replace("\\", "/")
    with torch.no_grad():
        detect(frame_dir, vid.yt_id)
    print(frame_dir)
    shutil.rmtree(frame_dir)
    print('end video ' + vid.yt_id)
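A hedged, portable alternative to the `if not exist ... mkdir` shell call above; os.makedirs creates intermediate directories and is a no-op when the target already exists:

import os

frame_dir = os.path.join('frames', 'example_yt_id')  # placeholder video id
os.makedirs(frame_dir, exist_ok=True)  # safe to call repeatedly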
Code example #8
def crop(img_loc_gen):

    count = 1
    while True:
        try:
            detect(str(next(img_loc_gen)))
            print(count)
            count += 1
        except StopIteration:
            print("end of images")
            break
        except ValueError:
            time.sleep(0.005)
Code example #9
def time_detect(image_size):
    t1 = time()
    detect(
        "coors_boa_dataset/custom_yolov3.cfg",
        "coors_boa_dataset/dataset.data",
        "weights/best.pt",
        "W:/",
        "W:/inference_out" + str(image_size),
        img_size=image_size,
        conf_thres=0.4,
        nms_thres=0.4,
    )
    t2 = time()
    return t2 - t1
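time() can be coarse for short runs; a minimal generic variant of the timing wrapper above using time.perf_counter (the timed function here is a stand-in, not the YOLO detect):

from time import perf_counter

def timed(fn, *args, **kwargs):
    """Return (elapsed_seconds, result) for a single call to fn."""
    t0 = perf_counter()
    result = fn(*args, **kwargs)
    return perf_counter() - t0, result

elapsed, total = timed(sum, range(1_000_000))
print(elapsed, total)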
Code example #10
File: runMe.py Project: lironzi/CV_project
def run(myAnnFileName, buses):
    
    # if the weights file is in the project folder
    script_dir = os.path.dirname(__file__)
    rel_path = 'best_yolo5_2.pt'
    weights_path = os.path.join(script_dir, rel_path)

    rel_path = buses
    detect_path = os.path.join(script_dir, rel_path)

    # detect(save_img=False, weights='yolov5s.pt', source='data/images', imgsz=512, conf_thres=0.25, iou_thres=0.45)

    detect.detect(False, weights_path, detect_path, 512, 0.25, 0.45, myAnnFileName)
Code example #11
    def openFile(self):
        fileName, _ = QFileDialog.getOpenFileName(self, "Open Video",
                                                  QDir.homePath())

        if fileName != '':
            self.mediaPlayer.setMedia(
                QMediaContent(QUrl.fromLocalFile(fileName)))
            loadFrames.loadFrame(fileName)  # video to frame
            labels = detect.detect()  # detect objects
            item1 = None
            item2 = None
            try:
                item1 = labels[0]
                print(item1)
            except IndexError:
                pass
            try:
                item2 = labels[1]
                print(item2)
            except IndexError:
                pass
            sqlite_test.insert_emp(
                str(fileName), item1,
                item2)  # store the directory and detected objects in the database
            print(sqlite_test.get_emps_by_directory(fileName))
            self.playButton.setEnabled(
                True)  # after choosing, enable the play button
Code example #12
    def test_detect_greyscale(self):
        """Uses grayscale image with one face to test response of detect method from facelook

        """
        rv = detect(self, 'grayscale1.jpg')
        rv_as_list = ast.literal_eval(rv.data)
        self.assertEqual(len(rv_as_list), 1)
Code example #13
    def test_detect_6faces(self):
        """Uses image that has 6 faces to test response of detect method from facelook

        """
        rv = detect(self, '6faces.jpg')
        rv_as_list = ast.literal_eval(rv.data)
        self.assertEqual(len(rv_as_list), 6)
Code example #14
    def test_detect_png(self):
        """Uses png image that has one face to test response of detect method from facelook

        """
        rv = detect(self, '1.png')
        rv_as_list = ast.literal_eval(rv.data)
        self.assertEqual(len(rv_as_list), 1)
Code example #15
def detectByPathVideo(path, writer):

    video = cv2.VideoCapture(path)
    check, frame = video.read()
    if not check:
        print('Video not found. Please enter a valid path '
              '(the full path of the video should be provided).')
        return

    print('Detecting people...')
    while video.isOpened():
        # check is True if reading was successful
        check, frame = video.read()

        if check:
            frame = imutils.resize(frame, width=min(800, frame.shape[1]))
            frame = detect.detect(frame)

            if writer is not None:
                writer.write(frame)

            key = cv2.waitKey(1)
            if key == ord('q'):
                break
        else:
            break
    video.release()
    cv2.destroyAllWindows()
Code example #16
File: tracker.py Project: ThalesII/mn2-tracker
def track(img, cmat, dist, rvec, tvec, flag):
    # Find the positions of all the points
    keyp = detect(img)

    for p in keyp:
        p = tuple(p.astype(np.int32))
        img = cv2.drawMarker(img, p, (255, 255, 0))

    if len(keyp) < len(objp):
        return False, img, rvec, tvec

    imgps = []

    # Reuse the previous solution if one exists
    if flag:
        imgp = order(keyp, rvec, tvec, cmat, dist)
        imgps.append(imgp)

    # Also pick `num_guess` random associations
    for _ in range(num_guess):
        i = np.random.choice(len(keyp), len(objp))
        imgp = keyp[i]
        imgps.append(imgp)

    # Discard invalid associations (those containing points that are too close together)
    imgps = [p for p in imgps if valid(p)]

    for p in imgps:
        ret, rv, tv = solvepnp(objp, p, cmat, dist, rvec, tvec, flag)

        if ret:
            return True, img, rv, tv

    return False, img, rvec, tvec
Code example #17
File: InferAPI.py Project: pooja-bs-3003/twentyone
def infer():
    # in the response you get task id and data, convert data to right format
    request_method = request.method

    res = None
    if request_method == "GET":
        file = request.files['file']
        # task_id = request.files['task_id']
        foo = file.filename
        print("File Name : ", foo)
        ext = foo.split(".")[-1]
        if ext == "xlsx":
            unparsedFile = file.read()
            dframe = pd.read_excel(file, index_col="date")
            data_config = jsonify(request.data)
            file_data = unparsedFile

            model = Model()
            location = config["model"]["save_location"]
            res = model.infer(location, "okP0KEPL", dframe)
        elif ext.lower() == "jpg":
            with torch.no_grad():
                res, text = detect(foo)

    return str(res)
Code example #18
File: main.py Project: lkct/ADA-DD
def recog(img, oriimg, resname):
    script_dir = os.path.dirname(__file__)
    CASCADE_FILE = os.path.join(script_dir, '../classifier/cascade.xml')

    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    im = Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB))
    im = im.convert('L')
    digits = detect(gray, CASCADE_FILE)
    digits = np.array(digits)
    if digits.shape[0] == 0:
        return digits, np.array([]), np.array([])
    results = crop_detection(im.copy(), digits)
    test = np.float32([np.float32(i.resize(SAMPLE_SIZE)) for i in results])
    test = np.tile(test.reshape((-1, 1, 28, 28)), (1, 3, 1, 1))

    # yhat: list of strings formatted as "label,prob"
    lab, prob = pred.main(test)
    digits, lab, prob = choose(digits, lab, prob)
    yhat = ['%d,%.2f' % (lab[i], prob[i]) for i in range(lab.size)]

    digits[:, 1] += 525 / 2
    oriimg = cv2.resize(oriimg, None, fx=0.5, fy=0.5)

    im = Image.fromarray(cv2.cvtColor(oriimg, cv2.COLOR_BGR2RGB))
    im = im.convert('L')
    font = ImageFont.truetype(FONT_FILE, FONT_SIZE)
    detected = annotate_detection(im.copy(), digits)
    recognized = annotate_recognition(detected, digits, yhat, font)
    recognized.show()
    recognized.save(resname)

    return digits, lab, prob
Code example #19
def extract(filename, flag):
    ##
    # Finds the keywords and their patterns and extracts the parameters.
    # \param[in] filename : the name of the (GAMESS output) file whose lines are parsed
    # \param[in] flag : a flag for debugging the detect module
    # This function returns the coordinates of atoms, gradients, atomic orbital basis,
    # and molecular orbitals extracted from the file, in the form of a dictionary.
    #
    # Used in: gamess_to_libra.py/gamess_to_libra and main.py/main

    f = open(filename, "r")
    A = f.readlines()
    f.close()

    # detect the lines containing information in the gamess output file
    info = detect.detect(A, flag)

    # extract information from gamess output file
    label, Q, R = extract_coordinates(
        A[info["coor_start"]:info["coor_end"] + 1], flag)
    grad = extract_gradient(A[info["grad_start"]:info["grad_end"] + 1], flag)
    E, C = extract_mo(A[info["mo_start"]:info["mo_end"] + 1], info["Ngbf"],
                      flag)
    ao = extract_ao_basis(A[info["ab_start"]:info["ab_end"] + 1], label, R,
                          flag)

    if flag == 1:
        print("********************************************")
        print("extract program ends")
        print("********************************************\n")

    return label, Q, R, grad, E, C, ao, info["tot_ene"]
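A minimal sketch of the index-dictionary pattern used above: detect.detect returns start/end line indices and the caller slices the raw lines, treating the end index as inclusive (hence the + 1). The keys and values here are hypothetical miniatures:

lines = ['header', 'coord 1', 'coord 2', 'footer']
info = {'coor_start': 1, 'coor_end': 2}  # hypothetical indices
coords = lines[info['coor_start']:info['coor_end'] + 1]  # inclusive end
print(coords)  # ['coord 1', 'coord 2']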
Code example #20
File: tracking.py Project: Nathan-Burgess/plateGuard
    def start(self, buff, d_counter):
        print(".")
        self.length['new'] = 0
        d_counter.counter = 0
        # Calls openalpr and receives results
        results = detect.detect(buff.frames[self.frame_counter])
        # Calculates KNN or initializes car objects
        processing.calculate_knn(buff, results, self.frame_counter)

        # Set up trackers
        self.tracker = [cv2.TrackerKCF_create() for i in range(10)]

        # Initialize bbox variables
        for i, car in enumerate(buff.cars):
            if car.coords[self.frame_counter] != -1:
                self.bbox[i] = self.convert_to_hw(
                    car.coords[self.frame_counter])
                self.tracker[i].init(buff.frames[self.frame_counter],
                                     self.bbox[i])
                self.length['new'] += 1

        if self.length['new'] != self.length['old']:
            d_counter.max = 1

        self.length['old'] = self.length['new']

        self.frame_counter += 1
        d_counter.counter += 1
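A minimal sketch of the identity-vs-equality pitfall fixed above: `is` compares object identity, and the original checks only appeared to work because CPython caches small integers such as -1:

x = int('1000')          # built at runtime, outside CPython's small-int cache
print(x == 1000)         # True: value equality, what a sentinel check needs
print(x is int('1000'))  # False in CPython: two distinct int objects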
Code example #21
def main():
    print("Welcome to Fish Classification System...")

    # Clear previously identified fish images result from Test_Classification, Test_Image_Detection_Results and Result folder.
    print(">>>>>>>>>>>>>>>>>>>")
    print("Cleaning previous result...")
    clear_directory(fp.detection_results_folder)
    clear_directory(fp.image_classification_source)

    # Find the Images path for the object detection using YOLOv3.
    print(">>>>>>>>>>>>>>>>>>>")
    print("Managing photos for detection...")
    test_image_YOLOv3 = find_test_image_YOLOv3()

    # Use YOLOv3 for object detection and return path.
    print(">>>>>>>>>>>>>>>>>>>")
    print("Running YOLOv3 for detection...")
    detection_results_folder = dt.detect(test_image_YOLOv3)

    # Function to crop the detected image from the YOLOv3 prediction.
    print(">>>>>>>>>>>>>>>>>>>")
    print("Cropping detected images from.." + detection_results_folder)
    crop_detected_image(detection_results_folder)

    # classify images using tiny VGGNet
    print(">>>>>>>>>>>>>>>")
    print("Running Smaller VGGNet for classification...")
    cs.classify(fp.vggnet_model_path, fp.pickle_path,
                fp.image_classification_source, fp.image_classification_output)
Code example #22
File: main.py Project: wenyalintw/Nodule-CADx
    def on_detectButton_clicked(self):
        # show status on statusbar
        self.statusBar().showMessage(
            'Model predicting, please wait for a while ...')
        self.statusBar().repaint()
        QApplication.instance().processEvents()

        # TODO Check if selected scan is already detected
        index_member = self.treeWidget.currentIndex().row()
        index_scan = self.noduletreeWidget.currentIndex().row()

        if use_win10:
            csv_data = detect(
                self.data['members'][index_member]['scans'][index_scan]
                ['scan_path'], self.nodulenet_model, self.classification_model,
                self.data['preferences'])
        else:
            csv_data = None

        self.update_data(index_member, index_scan, csv_data)
        self.data['members'][index_member]['scans'][index_scan][
            'updated'] = True
        self.refresh_scan_list(self.data['members'][index_member])
        status = [
            scan['updated']
            for scan in self.data['members'][index_member]['scans']
        ]
        if False not in status:
            self.data['members'][index_member]['updated'] = True
            self.refresh_patient_list()
        self.statusBar().showMessage('Done.', msecs=5000)
        self.management(index_member)
Code example #23
File: maps_server.py Project: jasonshere/MAPS
    def ValidatePatient(self, request, context):
        if detect(request.name) is True:
            sense_hat = SenseHat()
            text = 'The Doctor office number is {}'.format(request.office_no)
            sense_hat.show_message(text)
            return maps_pb2.MAPSReply(message='1')
        return maps_pb2.MAPSReply(message='2')
Code example #24
File: invoice_ocr.py Project: Intsigstephon/invoice
def invoice_ocr(img, lib, basemodel):
    # 1. detect the vertices
    rslt = []

    dst, boxes, flag = detect(img, lib)

    if flag < 0:
        return rslt

    if debug:
        img_show = dst
        for tmp in boxes:
            cv2.rectangle(img_show, (tmp[0], tmp[1]), (tmp[2], tmp[3]),
                          (0, 0, 255), 2)
        cv2.imwrite("./tmp/a.jpg", img_show)

    # 2. crop the image
    imgs = crop(dst, boxes)

    # 3. preprocess the crops
    imgs = preprocess(imgs)

    if debug:
        for i in range(len(boxes)):
            cv2.imwrite("./tmp/" + str(i) + ".jpg", imgs[i])

    # 4. recognize
    rslt = forward(imgs, basemodel)

    return rslt
Code example #25
def minePositiveNegtiveData(clf, negImages, labels, trainData, trainTarget,
                            miningTime, w, h, n, scale, step):
    """
    Mine positive/negative data
    Parameters:
        - clf: current classifier
        - negImages: list of negative images
        - labels: labels of these negative images
        - trainData: old train data
        - trainTarget: old train target
        - miningTime: mining time
        - w: width of template
        - h: height of template
        - n: pyramid level number
        - scale: pyramid scale
        - step: sliding step
    Return:
        - nTrainData: new train data
        - nTrainTarget: new train target
    """
    print("...Mining Positive Negative Data:")
    t = 0
    for i in range(miningTime):
        # Randomly choose a negative image
        iNegImage = random.randint(0, len(negImages) - 1)
        ngI = negImages[iNegImage]

        # Detect it using the current classifier
        detectionResult = detect(clf, ngI, w, h, n, scale, step)

        # Get all wrong answers
        for result in detectionResult:
            correctResult = list(filter(lambda x: x[0] == iNegImage, labels))
            intersection = False
            for c in correctResult:
                if isIntersect([c[1], c[2], c[3], c[4]], result):
                    intersection = True
                    break
            # Wrong answer
            if not intersection:
                t = t + 1
                # get feature vector of this wrong answer
                p = ngI[round(result[1]):round(result[1] + result[3]),
                        round(result[0]):round(result[0] + result[2])]
                io.imsave(
                    'C:\\Yuan\\CS216\\FinalProject\\images\\Wrong\\{}.png'.
                    format(time.time()), p)
                # scale the patch to the template size
                p = ski.transform.resize(p, [h, w])
                feature = hog(p,
                              orientations=8,
                              pixels_per_cell=(8, 8),
                              cells_per_block=(1, 1))
                # add new negative data into old list
                trainData = trainData + [feature]
                trainTarget = trainTarget + [1]

    print("...{}, {}".format(len(trainData), len(trainTarget)))
    print("...Got {}".format(t))
    return trainData, trainTarget
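The miner above relies on an isIntersect helper that is not shown; a minimal sketch of one plausible axis-aligned overlap test, assuming (not confirmed by the source) that boxes are [x, y, w, h]:

def isIntersect(a, b):
    """Return True if axis-aligned boxes a and b overlap; boxes are [x, y, w, h]."""
    ax, ay, aw, ah = a
    bx, by, bw, bh = b
    return ax < bx + bw and bx < ax + aw and ay < by + bh and by < ay + ah

print(isIntersect([0, 0, 10, 10], [5, 5, 10, 10]))  # True: boxes overlap
print(isIntersect([0, 0, 10, 10], [20, 20, 5, 5]))  # False: boxes are disjoint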
Code example #26
def extract(filename, flag):
    ##
    # Finds the keywords and their patterns and extracts the parameters.
    # \param[in] filename : the name of the (GAMESS output) file whose lines are parsed
    # \param[in] flag : a flag for debugging the detect module
    # This function returns the coordinates of atoms, gradients, atomic orbital basis,
    # and molecular orbitals extracted from the file, in the form of a dictionary.
    #
    # Used in: gamess_to_libra.py/gamess_to_libra and main.py/main

    f = open(filename, "r")
    A = f.readlines()
    f.close()

    # detect the lines containing information in the gamess output file
    info = detect.detect(A, flag)

    # extract information from the gamess output file
    label, Q, R = extract_coordinates(A[info["coor_start"]:info["coor_end"] + 1], flag)
    grad = extract_gradient(A[info["grad_start"]:info["grad_end"] + 1], flag)
    E, C = extract_mo(A[info["mo_start"]:info["mo_end"] + 1], info["Ngbf"], flag)
    ao = extract_ao_basis(A[info["ab_start"]:info["ab_end"] + 1], label, R, flag)

    if flag == 1:
        print("********************************************")
        print("extract program ends")
        print("********************************************\n")

    return label, Q, R, grad, E, C, ao, info["tot_ene"]
Code example #27
    def __init__(self, url, debug=False, angle=0, init=False):
        if init:
            self.player = None
            self.cam = camera(url=url, angle=angle, debug=debug)
            self.cam.start()
            self.det = detect(self.cam, debug=debug)
            self.first_action = True
Code example #28
def run_model_for_references():
    global target
    global source_references

    mask_refs = Path(target, 'refs')
    mask_refs.mkdir(exist_ok=True)

    ref_files = list(source_references.glob('**/ref_*.png'))

    for f in tqdm(ref_files):

        img = cv2.imread(str(f))
        references = detect(img, detection_mode='references')

        for idx, roi in enumerate(references['rois']):

            class_id = references["class_ids"][idx]

            if class_id != 1:
                continue

            mask = references['masks'][:, :, idx]
            masked = get_masked_img(img, mask, roi)

            mask_file = Path(
                mask_refs,
                f'{f.parent.name}_{f.stem}_{str(idx)}_mask_sign.png')
            cv2.imwrite(str(mask_file), masked)
Code example #29
def call_detect(buff):
    # Gets results from openALPR
    for i in range(len(buff.frames)):
        result = detect.detect(buff.frames[i])
        if result:
            # Saves results to car per frame
            calculate_knn(buff, result, i)
Code example #30
File: video.py Project: joeyism/deep_sort_mask_rcnn
def main(mask_rcnn):

    filename = sys.argv[1]
    # Definition of the parameters
    max_cosine_distance = 0.01
    nn_budget = None

    # deep_sort
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    reader = imageio.get_reader(filename, "ffmpeg")
    fps = reader.get_meta_data()['fps']
    N = len(reader) - 1

    writer = imageio.get_writer("output/" + get_filename(filename), fps=fps)

    try:
        for i, frame in tqdm(enumerate(reader), desc="Frames ", total=N):
            frame, tracker, encoder = detect(frame,
                                             tracker,
                                             encoder,
                                             mask_rcnn,
                                             threshold=0.95)
            writer.append_data(frame)
    finally:
        writer.close()
Code example #31
def image_callback(msg):

    global start_t, bottom_bdict, bottom_net, bottom_meta, bottom_cam_param, bottom_net_init, bottom_call_count
    global bottom_wf
    print "Bootom Call back ...", bottom_call_count
    if bottom_net_init:
        model_name = 'all_640_xy'
        steps = '160000'
        base_dir = '/home/momenta/mc3/python/weight/weights-'+model_name
        bottom_net = dn.load_net(os.path.join(base_dir,"yolov3-tiny-"+model_name+".cfg"),  
                    os.path.join(base_dir,"yolov3-tiny-"+model_name+"_"+steps+".weights"), 0)
        bottom_meta = dn.load_meta(os.path.join(base_dir,"voc-"+model_name+".data"))
        filename="cam_top.yaml"
        f = open(filename)
        bottom_cam_param = yaml.load(f)
        bottom_net_init = False

    bottom_call_count += 1
    if bottom_call_count%5 != 0:
        return
    np_arr = np.fromstring(msg.data, np.uint8)
    image_np = cv2.imdecode(np_arr, cv2.IMREAD_COLOR)

    t = str(time.time()-start_t)
    bottom_bdict = detect(bottom_net,bottom_meta,bottom_cam_param,image_np,isshow=False,issave=True,output_dir=bottom_out_dir,name= t)
    bottom_info = bottom_bdict
    bottom_info['filename'] = t
    bottom_wf.write(json.dumps(bottom_info,indent=2))
    bottom_wf.write("\n")
Code example #32
def validate(val_loader):
    for i, (input, target, path) in enumerate(val_loader):
        obj_id_batch = []
        for j in range(len(path)):
            objects, class_names = detect(args.cfg, args.weight, path[j],
                                          args.namesfile)
            obj_hot_vector = get_hot_vector(objects, class_names)
            obj_id_batch.append(obj_hot_vector)
Code example #33
File: start.py Project: lepisma/Sche
def checkUser(img):
    gray = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    gray = cv2.equalizeHist(gray)
    eyes = detect.detect(gray)
    if len(eyes) <= 0:
        return 0
    else:
        return 1
Code example #34
File: automated_tester.py Project: edgelord/JPL
def test_site4():
    print "Testing for Set4!"
    dir_site = "resources/Set4/solution.pgm"
    our_np_array = dt.detect(dt.surf4)
    their_np_array = io.read_pgm(dir_site)
    
    testing(our_np_array, their_np_array)
    io.write_pgm(our_np_array, "results/test4_our.pgm")
    io.write_pgm(their_np_array, "results/test4_their.pgm")
Code example #35
File: ui.py Project: guanw/GP
    def OnDetect(self, event):
        issmile = detect.detect()
        if issmile == 'false':
            robot_voice.robot_voice("I think you are not in a good mood. Let me play something for you.")
            play_music.play_music()
        elif issmile == 'true':
            robot_voice.robot_voice("That's a big nice smile of you! Did you have a good day?")
        elif issmile == -1:
            print("don't even know if this person is the master")
Code example #36
File: Main.py Project: abhilashr1/IntranetChat
    def run(self):
        if self.name == 'ui':
            print('>> Loading...')

            # Wake up the Neighbours
            list_of_neighbours = neighbour.neighbours()
            print(">> Detected list of Neighbours")

            # Load up the UI
            ui = UI_Loader()
            ui.load()

        elif self.name == 'network':
            server = detect.detect('server')
            notifier = detect.detect('client')

            server.start()
            notifier.start()
Code example #37
def main():
    images, labels, num, rows, cols = get_data(LABEL_FILE, IMAGE_FILE)
    print('Training OpenCV SVM...')
    svc1 = cvtrain(images[:TRAIN_SIZE], labels[:TRAIN_SIZE], num, rows, cols)

    print('Training sklearn SVM...')
    svc2 = sktrain(images[:TRAIN_SIZE], labels[:TRAIN_SIZE])

    filenames = glob(TEST_FILES + "/*.jpg")
    for filename in filenames:
        print('Processing', filename)
        img = cv2.imread(filename)
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        im = Image.open(filename)
        digits = detect(gray, CASCADE_FILE)
        results = crop_detection(im.copy(), digits)
        test = [np.float32(i.resize(SAMPLE_SIZE)).ravel() for i in results]

        testdata = preprocess(test, rows, cols).reshape(-1, bin_n * 4)
        yhat1 = svc1.predict_all(testdata)
        yhat1 = yhat1.astype(np.uint8).ravel()
        yhat2 = svc2.predict(test)

        font = ImageFont.truetype(FONT_FILE, FONT_SIZE)
        detected = annotate_detection(im.copy(), digits)

        basename = os.path.basename(filename)
        resultname = RESULT_FILES + '/' + basename

        print('OpenCV results')
        recognized = annotate_recognition(detected, digits, yhat1, font)
        recognized.show()
        recognized.save(resultname.replace('.jpg', '-cv.jpg'))

        print('sklearn results')
        recognized = annotate_recognition(detected, digits, yhat2, font)
        recognized.show()
        recognized.save(resultname.replace('.jpg', '-sk.jpg'))
Code example #38
File: test.py Project: sanskrit/detect.py
def test_basic(data):
    text, scheme = data
    detection = detect(text)
    assert detection == scheme, u'%s == %s (%s)' % (detection, scheme, text)
Code example #39
File: test.py Project: sanskrit/detect.py
def test_noisy(data):
    noise = ' \t\n 1234567890 !@#$%^&*(),.<>\'\"-_[]{}\\|;:`~ ΣД あア'
    text, scheme = data
    text = ''.join([noise, text, noise])
    assert detect(text) == scheme
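These tests are parametrized with (text, scheme) pairs; a hypothetical sketch of such a fixture (the sample strings and scheme names are illustrative, not taken from the project):

DATA = [
    ('narah gacchati', 'hk'),      # hypothetical romanized sample
    ('नरः गच्छति', 'devanagari'),  # hypothetical Devanagari sample
]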
Code example #40

# update the number of runs of the program
def updateconfig(runs):
    f = fileinput.FileInput("config.txt", inplace=True)
    for line in f:
        sys.stdout.write(line.replace("run {}".format(runs).rstrip(), "run {}".format(runs+1).rstrip()))
    f.close()

def givevars():
    global runs,host,port
    return runs,host,port

if __name__ == "__main__":

    from detect import detect
    from multiprocessing import Process

    global directory, N, MAXREQUESTS, DETECTLOADS, host, port, runs
    initparams()

    client = InfluxDBClient(host, port, database='Traffic_{}'.format(runs))
    client.create_database('Traffic_{}'.format(runs))
    monitor.readpcaps(N,directory,runs,client)

    p = Process(target=detect, args=(runs, host, port))
    p.start()

    updateconfig(runs)
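
A minimal sketch of the Process pitfall fixed above: `target=detect(runs, host, port)` would call detect in the parent and hand its return value to Process, so the callable and its arguments must be passed separately:

from multiprocessing import Process

def work(n):
    print('child got', n)

if __name__ == '__main__':
    p = Process(target=work, args=(7,))  # pass the callable and its args separately
    p.start()
    p.join()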

Code example #41
File: test.py Project: sanskrit/detect.py
def test_decoded(data):
    text, scheme = data
    text = text.decode('utf-8')
    detection = detect(text)
    assert detection == scheme, u'%s == %s (%s)' % (detection, scheme, text)