Example No. 1
def prepare_training_data():
    word_ranks = get_word_ranks()
    corpus = get_corpus()
    segmenter = Segmenter()
    with codecs.open('./data/training.txt', 'w', 'utf-8') as fout:
        for poem in corpus:
            poem['keyword'] = []
            # Keep only quatrains in which every sentence yields at least
            # one segment that appears in word_ranks.
            skip = False
            for sentence in poem['sentence']:
                segs = [seg for seg in segmenter.segment(sentence)
                        if seg in word_ranks]
                if not segs:
                    skip = True
                    break
            if len(poem['sentence']) != 4 or skip:
                continue
            for sentence in poem['sentence']:
                segs = [seg for seg in segmenter.segment(sentence)
                        if seg in word_ranks]
                # The highest-ranked segment becomes the sentence's keyword.
                keyword = reduce(
                    lambda x, y: x if word_ranks[x] > word_ranks[y] else y,
                    segs)
                poem['keyword'].append(keyword)
                fout.write(sentence + '\t' + keyword + '\n')
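Since word_ranks maps each segment to a score, the reduce above is simply an argmax over the sentence's segments. An equivalent one-liner (a sketch, not the original code; note that max keeps the first maximum on ties while the reduce keeps the last):

keyword = max(segs, key=lambda seg: word_ranks[seg])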
Example No. 2
def fetch_labeled_data():
    str_old_time = "2015-08-01 00:00:00"
    str_new_time = "2016-11-31 00:00:00"
    proj_name = "article_cat"

    LabeledCrawler(proj_name=proj_name).rebuild_table()
    LabeledCrawlerIheima(proj_name=proj_name).crawl(str_old_time, str_new_time)
    LabeledCrawlerKanchai(proj_name=proj_name).crawl(str_old_time,
                                                     str_new_time)
    LabeledCrawlerLeiphone(proj_name=proj_name).crawl(str_old_time,
                                                      str_new_time)
    LabeledCrawlerLieyun(proj_name=proj_name).crawl(str_old_time, str_new_time)
    LabeledCrawlerSootoo(proj_name=proj_name).crawl(str_old_time, str_new_time)
    LabeledCrawlerYiou(proj_name=proj_name).crawl(str_old_time, str_new_time)
    LabeledCrawler7tin(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                  str_new_time)
    LabeledCrawlerAilab(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                   str_new_time)
    LabeledCrawlerBaidu(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                   str_new_time)
    LabeledCrawlerSinaVR(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                    str_new_time)
    LabeledCrawlerVarkr(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                   str_new_time)

    seg = Segmenter(proj_name=proj_name)
    seg.seg(skip_exist=True)
    seg.join_segfile()
Example No. 3
def fetch_nonlabeled_data():
    proj_name = "article150801160830"
    str_old_time = "2015-08-01 00:00:00"
    str_new_time = "2016-12-31 00:00:00"

    # Crawler(proj_name=proj_name).rebuild_table()
    # Crawler163(proj_name=proj_name).crawl(str_old_time, str_new_time)
    # Crawler36Kr(proj_name=proj_name).crawl(str_old_time, str_new_time)
    # CrawlerGeekPark(proj_name=proj_name).crawl(str_old_time, str_new_time)
    # CrawlerLeiphone(proj_name=proj_name).crawl(str_old_time, str_new_time)
    # CrawlerKanchai(proj_name=proj_name).crawl(str_old_time, str_new_time)
    # CrawlerHuxiu(proj_name=proj_name).crawl(str_old_time, str_new_time)

    LabeledCrawlerIheima(proj_name=proj_name).crawl(str_old_time, str_new_time)
    LabeledCrawlerKanchai(proj_name=proj_name).crawl(str_old_time,
                                                     str_new_time)
    LabeledCrawlerLeiphone(proj_name=proj_name).crawl(str_old_time,
                                                      str_new_time)
    LabeledCrawlerLieyun(proj_name=proj_name).crawl(str_old_time, str_new_time)
    LabeledCrawlerSootoo(proj_name=proj_name).crawl(str_old_time, str_new_time)
    LabeledCrawlerYiou(proj_name=proj_name).crawl(str_old_time, str_new_time)
    LabeledCrawler7tin(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                  str_new_time)
    LabeledCrawlerAilab(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                   str_new_time)
    LabeledCrawlerBaidu(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                   str_new_time)
    LabeledCrawlerSinaVR(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                    str_new_time)
    LabeledCrawlerVarkr(proj_name=proj_name).crawl("2000-08-01 00:00:00",
                                                   str_new_time)

    seg = Segmenter(proj_name=proj_name)
    seg.seg(skip_exist=True)
    seg.join_segfile()
Example No. 4
def get_text_ranks():
    segmenter = Segmenter()
    stopwords = get_stopwords()
    print("Start TextRank over the selected quatrains ...")
    corpus = get_corpus()
    adjlist = dict()
    for idx, poem in enumerate(corpus):
        if (idx + 1) % 10000 == 0:
            print("[TextRank] Scanning %d/%d poems ..." %
                  (idx + 1, len(corpus)))
        for sentence in poem['sentence']:
            segs = list(
                filter(lambda word: word not in stopwords,
                       segmenter.segment(sentence)))
            for seg in segs:
                if seg not in adjlist:
                    adjlist[seg] = dict()

            # count co-occurrences for every unordered pair of segments
            # in the sentence
            for i, seg in enumerate(segs):
                for other in segs[i + 1:]:
                    if seg != other:
                        adjlist[seg][other] = adjlist[seg].get(other, 0.0) + 1.0
                        adjlist[other][seg] = adjlist[other].get(seg, 0.0) + 1.0

    # normalize each word's outgoing weights so they sum to 1
    for word in adjlist:
        w_sum = sum(adjlist[word].values())
        for other in adjlist[word]:
            adjlist[word][other] /= w_sum
    print("[TextRank] Weighted graph has been built.")
    _text_rank(adjlist)
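For context, _text_rank itself is not shown in this listing; the following is a minimal, hypothetical sketch of the damped power iteration TextRank typically runs over such a normalized adjacency list (the damping factor and iteration count are assumptions, not values from the original):

def text_rank_sketch(adjlist, damping=0.85, n_iter=30):
    # start every word with an equal score
    scores = {word: 1.0 for word in adjlist}
    for _ in range(n_iter):
        new_scores = {}
        for word in adjlist:
            # each neighbour votes with its score, weighted by the
            # normalized co-occurrence weight pointing back at this word
            rank_sum = sum(scores[other] * adjlist[other].get(word, 0.0)
                           for other in adjlist[word])
            new_scores[word] = (1.0 - damping) + damping * rank_sum
        scores = new_scores
    return sorted(scores.items(), key=lambda kv: -kv[1])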
Example No. 5
def applySegmentation(metafilename,img,device='/gpu:0',conf=None):
    '''
    Loads the graph from the meta file `metafilename' and predicts segmentations for the image stack `img'. The graph is
    expected to have a collection "endpoints" storing the x,y_,y,ypred list of tensors where x is the input and ypred the
    predicted segmentation. The first 2 dimensions of `img' must be the XY dimensions, other dimensions are flattened out.
    The values of `img' are also expected to be normalized. Returns a stack of binary masks with the same shape as `img'.
    '''
    seg=Segmenter(metafilename,device,conf)
    
    origshape=tuple(img.shape)
    tf.logging.info('Input dimensions: %r'%(origshape,))
    
    shape=origshape+(1,1,1) # pad with singleton dimensions so there are at least 5
    shape=shape[:5] # clip extra dimensions off so that this is a 5D shape description
    width,height,slices,timesteps,depth=shape
    
    img=img.astype(np.dtype('<f8')).reshape(shape) # convert input into a 5D image of shape XYZTC
    img=rescaleArray(img)
    
    for s in range(slices):
        tf.logging.info('Segmenting slice %s'%s)
        for t in range(timesteps):
            st=img[...,s,t,:]
            
            if st.max()>st.min(): # segment if the image is not blank
                pred=seg.apply(st)
                img[...,s,t,:]=0
                img[...,s,t,0]=pred
                
    return img.reshape(origshape) 
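A hypothetical call, assuming a trained graph saved at 'model.meta' and an already-normalized 2D stack (both the file name and the array are illustrative):

import numpy as np

volume = np.random.rand(256, 256, 10)            # X, Y, 10 slices
masks = applySegmentation('model.meta', volume)  # same shape as the input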
Example No. 6
    def __init__(self, tagger_pickle=None):
        if tagger_pickle is None:
            path = os.path.abspath(__file__)\
                    .replace(os.path.basename(__file__), '')
            tagger_pickle = \
                '%smodels/sinica_treebank_brill_aubt.pickle' % path
        # pickles should be opened in binary mode
        with open(tagger_pickle, 'rb') as f:
            self.tagger = pickle.load(f)
        self.segmenter = Segmenter()
Example No. 7
    def __init__(self, app, config=None):

        self.app = app

        ngrams_file = config.get('segmenter', 'ngrams_file')
        self.app.log.debug(f'Trying to load ngrams file {ngrams_file}')
        ngrams = None  # fallback so Segmenter gets a defined value below
        try:
            with open(ngrams_file, 'r') as nf:
                ngrams = json.load(nf)
                self.app.log.debug(f'Loaded ngrams file {ngrams_file}.')
        except FileNotFoundError:
            # without the fallback above, `ngrams` would be undefined here
            self.app.log.info(
                f'Ngrams file {ngrams_file} not found. Using default configuration.'
            )

        self._ws = Segmenter(ngrams)
Example No. 8
def main():
    img_size = 505

    gpu_id, net_path, model_path, img_paths = process_arguments(sys.argv)
    palette = pascal_palette_invert()
    net = Segmenter(net_path, model_path, gpu_id)

    for img_path in img_paths:
        img, cur_h, cur_w = preprocess_image(img_path, img_size)
        segm_result = net.predict([img])
        segm_post = postprocess_segmentation(segm_result, cur_h, cur_w,
                                             palette)

        concatenate = True
        segm_name = os.path.basename(img_path).split('.')[0] + '-label.png'
        save_result(segm_post, segm_name, concatenate, img_path)
Example No. 9
    def load_segmenter(self):
        training_file_name = os.path.splitext(
            self.settings['training_file'])[0]
        outpath = training_file_name + '.segmenter.pickle'
        segmenter = None
        if os.path.exists(outpath):
            # reuse the cached, pickled segmenter
            with open(outpath, 'rb') as handle:
                segmenter = pickle.load(handle)
        else:
            # train a new segmenter and cache it for next time
            segmenter = Segmenter(self.settings['training_file'])
            with open(outpath, 'wb') as f:
                pickle.dump(segmenter, f)
            print("Segmenter model written out to {}".format(outpath))

        return segmenter
Example No. 10
    def run(self):
        base_preprocessor = Preprocessor(self.basefile)
        base_preprocessed_image_stack = base_preprocessor.preprocess_basefile()
        arc_preprocessor = Preprocessor(self.arcfile)
        arc_preprocessed_image_stack = arc_preprocessor.preprocess_arcfile()

        x, y, z = self.cell_aggragation_shape
        segmenter = Segmenter(base_preprocessed_image_stack,
                              self.create_np_ellipsoid(x, y, z),
                              "WS",
                              generate_debugging=self.enable_debugging)
        segmented_image_stack = segmenter.run_segmentation()

        analysis = Analysis(segmented_image_stack,
                            arc_preprocessed_image_stack)
        analysis.generate_report()
        colorized_image_stack = analysis.colorize_overlapping_cells()
        self.save_image_stack(colorized_image_stack)
Example No. 11
    def __init__(self, model_file):
        self.max_success_count = 0
        self.success_count = 0
        self.dropbox_dict = {}
        self.dict_list = []
        self.object_list = None
        self.color_list = []
        self.yaml_saved = False
        self.collision_map_complete = False
        self.goal_positions = [0]  #[0, -1.0, 1.0] uncomment to rotate robot

        self.table_cloud = None
        self.collision_map_base_list = []
        self.detected_objects = None

        #load SVM object classifier
        self.model = pickle.load(open(model_file, 'rb'))

        #initialize object segmenter
        self.segmenter = Segmenter(self.model)

        #initialize point cloud publishers
        self.objects_pub = rospy.Publisher("/pcl_objects",
                                           PointCloud2,
                                           queue_size=1)
        #self.table_pub = rospy.Publisher("/pcl_table", PointCloud2, queue_size=1)
        #self.colorized_cluster_pub = rospy.Publisher("/colorized_clusters", PointCloud2, queue_size=1)
        self.object_markers_pub = rospy.Publisher("/object_markers",
                                                  Marker,
                                                  queue_size=1)
        self.detected_objects_pub = rospy.Publisher("/detected_objects",
                                                    DetectedObjectsArray,
                                                    queue_size=1)
        self.denoised_pub = rospy.Publisher("/denoised_cloud",
                                            PointCloud2,
                                            queue_size=1)
        #self.reduced_cloud_pub = rospy.Publisher("/decimated_cloud", PointCloud2, queue_size = 1)

        self.collision_cloud_pub = rospy.Publisher("/pr2/3d_map/points",
                                                   PointCloud2,
                                                   queue_size=1)

        print('PR2 object initialized.')
Example No. 12
    def segment(self, image, slow=False):
        def predictor(X):
            X = np.expand_dims(X, axis=0)
            y = self.predict(X)
            y = np.squeeze(y, axis=0)
            return y

        if slow:
            input_height = self.S.image_height
            input_width = self.S.image_width
        else:
            _, input_height, input_width = image.shape

        segmenter = Segmenter(predictor,
                              self.S.image_depth,
                              input_height,
                              input_width,
                              image)
        return segmenter.predict()
Example No. 13
def main(input_path, output_path):
    imgs_path = sorted(glob(f'{input_path}/*'))
    for img_path in imgs_path:
        img_name = img_path.split('/')[-1].split('.')[0]
        out_file = open(f'{output_path}/{img_name}.txt', "w")
        print(f"Processing new image {img_name}...")
        img = io.imread(img_path)
        img = gray_img(img)
        horizontal = IsHorizontal(img)
        if not horizontal:
            theta = deskew(img)
            img = rotation(img, theta)
            img = get_gray(img)
            img = get_thresholded(img, threshold_otsu(img))
            img = get_closer(img)
            horizontal = IsHorizontal(img)

        original = img.copy()
        gray = get_gray(img)
        bin_img = get_thresholded(gray, threshold_otsu(gray))

        segmenter = Segmenter(bin_img)
        imgs_with_staff = segmenter.regions_with_staff
        most_common = segmenter.most_common

        # imgs_without_staff = segmenter.regions_without_staff

        imgs_spacing = []
        imgs_rows = []
        coord_imgs = []
        for staff_img in imgs_with_staff:
            spacing, rows, no_staff_img = coordinator(staff_img, horizontal)
            imgs_rows.append(rows)
            imgs_spacing.append(spacing)
            coord_imgs.append(no_staff_img)

        print("Recognize...")
        recognize(out_file, most_common, coord_imgs,
                  imgs_with_staff, imgs_spacing, imgs_rows)
        out_file.close()
        print("Done...")
Example No. 14
def main():

    # default fpaths
    ngrams1 = "1grams.txt"
    ngrams2 = "2grams.txt"

    # parse arguments
    args = sys.argv[1:]
    for arg in args:
        if arg == "-simple":
            ngrams2 = None

    # check existence of 1grams
    if not os.path.isfile(ngrams1):
        print("Error: could not find %s." % ngrams1)
        print("Exiting....")
        sys.exit(1)

    # check existence of 2grams
    if ngrams2 and not os.path.isfile(ngrams2):
        print("Could not find %s." % ngrams2)
        print("Segmenter will run without use of 2grams.")
        ngrams2 = None

    # make segmenter
    global __seg
    __seg = Segmenter(ngrams1, fpath_2grams=ngrams2)

    # show intro msg
    clear()
    global __INTRO
    print(__INTRO)
    print("hello, world!")

    # read-eval loop (note: eval() executes whatever the user types)
    while True:
        user_input = input("> ")
        eval(user_input)
Example No. 15
def test_net(net_path, model_path, images, labels, lut, gpu_id):
  net = Segmenter(net_path, model_path, gpu_id)

  mean_vec = np.array([103.939, 116.779, 123.68], dtype=np.float32)
  reshaped_mean_vec = mean_vec.reshape(1, 1, 3)

  pa_list    = []
  ma_list    = []
  m_IU_list  = []
  fw_IU_list = []

  pb = ProgressBar(len(images))

  for img_path, label_path in zip(images, labels):
    im, cur_h, cur_w = preprocess_image(img_path, reshaped_mean_vec)
    label = imread(label_path)
    label = lut[label]

    segmentation = net.predict([im])
    pred = segmentation[0:cur_h, 0:cur_w]
   
    pa = pixel_accuracy(pred, label)
    ma = mean_accuracy(pred, label)
    m_IU = mean_IU(pred, label)
    fw_IU = frequency_weighted_IU(pred, label)

    pa_list.append(pa)
    ma_list.append(ma)
    m_IU_list.append(m_IU)
    fw_IU_list.append(fw_IU)

    pb.print_progress()

  print("pixel_accuracy: " + str(np.mean(pa_list)))
  print("mean_accuracy: " + str(np.mean(ma_list)))
  print("mean_IU: " + str(np.mean(m_IU_list)))
  print("frequency_weighted: " + str(np.mean(fw_IU_list)))
Example No. 16
    def __init__(self, config, nodeName, loadFromFile=False):
        self.node = config.GetChild(nodeName)
        self.segmenter = Segmenter(config, "__segmenter__")
        self.trained = loadFromFile
        PyMining.Init(config, "__global__", loadFromFile)
Example No. 17
def main():
    # construct the argument parser and parse the arguments
    ap = argparse.ArgumentParser()
    ap.add_argument("-m", "--draw-markers", action="store_true", default=False,
                    help="draw the detected facial landmarks")
    ap.add_argument("-c", "--draw-confidence", action="store_true", default=False,
                    help="draw the face-detection result and confidence")
    ap.add_argument("-t", "--confidence-threshold", type=float, default=0.9,
                    help="minimum face-detection confidence")
    ap.add_argument("-p", "--draw-pose", action="store_false", default=True,
                    help="draw the stabilized pose annotation box")
    ap.add_argument("-u", "--draw-unstable", action="store_true", default=False,
                    help="draw the unstabilized pose annotation box")
    ap.add_argument("-s", "--draw-segmented", action="store_true", default=False,
                    help="draw the segmented face regions")
    args = vars(ap.parse_args())

    confidence_threshold = args["confidence_threshold"]

    """MAIN"""
    # Video source from webcam or video file.
    video_src = 0
    cam = cv2.VideoCapture(video_src)
    _, sample_frame = cam.read()

    # Introduce mark_detector to detect landmarks.
    mark_detector = MarkDetector()

    # Setup process and queues for multiprocessing.
    img_queue = Queue()
    box_queue = Queue()
    img_queue.put(sample_frame)

    if isWindows():
        thread = threading.Thread(target=get_face, args=(mark_detector, confidence_threshold, img_queue, box_queue))
        thread.daemon = True
        thread.start()
    else:
        box_process = Process(target=get_face,
                              args=(mark_detector, confidence_threshold, img_queue, box_queue))
        box_process.start()

    # Introduce pose estimator to solve pose. Get one frame to setup the
    # estimator according to the image size.
    height, width = sample_frame.shape[:2]
    pose_estimator = PoseEstimator(img_size=(height, width))

    # Introduce scalar stabilizers for pose.
    pose_stabilizers = [Stabilizer(
        state_num=2,
        measure_num=1,
        cov_process=0.1,
        cov_measure=0.1) for _ in range(6)]

    while True:
        # Read frame, crop it, flip it, suits your needs.
        frame_got, frame = cam.read()
        if not frame_got:
            break

        # Crop it if frame is larger than expected.
        # frame = frame[0:480, 300:940]

        # If frame comes from webcam, flip it so it looks like a mirror.
        if video_src == 0:
            frame = cv2.flip(frame, 1)  # flipCode 1 = horizontal (mirror) flip

        # Pose estimation by 3 steps:
        # 1. detect face;
        # 2. detect landmarks;
        # 3. estimate pose

        # Feed frame to image queue.
        img_queue.put(frame)

        # Get face from box queue.
        result = box_queue.get()

        if result is not None:
            if args["draw_confidence"]:
                mark_detector.face_detector.draw_result(frame, result)
            # unpack result
            facebox, confidence = result
            # fix facebox if needed
            if facebox[1] > facebox[3]:
                facebox[1] = 0
            if facebox[0] > facebox[2]:
                facebox[0] = 0
            # Detect landmarks from image of 128x128.
            face_img = frame[facebox[1]: facebox[3],
                             facebox[0]: facebox[2]]
            face_img = cv2.resize(face_img, (CNN_INPUT_SIZE, CNN_INPUT_SIZE))
            face_img = cv2.cvtColor(face_img, cv2.COLOR_BGR2RGB)
            marks = mark_detector.detect_marks(face_img)

            # Convert the marks locations from local CNN to global image.
            marks *= (facebox[2] - facebox[0])
            marks[:, 0] += facebox[0]
            marks[:, 1] += facebox[1]

            # segment the image based on markers and facebox
            seg = Segmenter(facebox, marks, frame.shape[1], frame.shape[0])
            if args["draw_segmented"]:
                mark_detector.draw_box(frame, seg.getSegmentBBs())
                cv2.imshow("fg", seg.getSegmentJSON()["faceGrid"])

            if args["draw_markers"]:
                mark_detector.draw_marks(
                    frame, marks, color=(0, 255, 0))

            # Try pose estimation with 68 points.
            pose = pose_estimator.solve_pose_by_68_points(marks)

            # Stabilize the pose.
            stable_pose = []
            pose_np = np.array(pose).flatten()
            for value, ps_stb in zip(pose_np, pose_stabilizers):
                ps_stb.update([value])
                stable_pose.append(ps_stb.state[0])
            stable_pose = np.reshape(stable_pose, (-1, 3))

            if args["draw_unstable"]:
                pose_estimator.draw_annotation_box(
                    frame, pose[0], pose[1], color=(255, 128, 128))

            if args["draw_pose"]:
                pose_estimator.draw_annotation_box(
                    frame, stable_pose[0], stable_pose[1], color=(128, 255, 128))

        # Show preview.
        cv2.imshow("Preview", frame)
        if cv2.waitKey(10) == 27:
            break

    # Clean up the multiprocessing process.
    if not isWindows():
        box_process.terminate()
        box_process.join()
Example No. 18
def thread_func(args, detector, predictor, img_queue, result_queue):
    """Get face from image queue. This function is used for multiprocessing"""

    # Introduce mark_detector to detect landmarks.
    gaze_model = args["gaze_net"]
    eye_size = args["eye_size"]
    face_size = args["face_size"]
    inputs = args["inputs"]
    outputs = args["outputs"]
    print("[INFO] loading gaze predictor...")
    gaze_detector = GazeEstimator(gaze_model=gaze_model,
                                  eye_image_size=eye_size,
                                  face_image_size=face_size,
                                  inputs=inputs,
                                  outputs=outputs)

    # init variables
    detectorWidth = 400
    faceBoxScale = 0.15

    while True:
        # get the image
        try:
            frame = img_queue.get(timeout=1)
        except Q.Empty:
            print("Image Q empty, thread exiting!")
            return
        # update factors
        originalWidth = frame.shape[1]
        factor = originalWidth / detectorWidth
        # resize for face detection
        image = imutils.resize(frame, width=detectorWidth)
        # convert to grayscale for face detection
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        # actually run face detection
        faceboxes, scores, idx = detector.run(image, 0)
        if faceboxes is not None and len(faceboxes) > 0:
            facebox = faceboxes[0]
            confidence = scores[0]
            # get 5 landmarks
            marks = predictor(gray, facebox)
            # convert marks to np array
            marks = face_utils.shape_to_np(marks)
            leftEyeMarks = []
            rightEyeMarks = []
            # pull out left and right eye marks
            for (i, (x, y)) in enumerate(marks):
                [x, y] = [int(x * factor), int(y * factor)]
                if i == 0 or i == 1:
                    leftEyeMarks.append([x, y])
                if i == 2 or i == 3:
                    rightEyeMarks.append([x, y])

            # convert the facebox from dlib format to regular BB and
            # rescale it back to original image size
            facebox = utils.dlib_to_box(facebox, factor, faceBoxScale)
            # segment the image based on markers and facebox
            seg = Segmenter(facebox, leftEyeMarks, rightEyeMarks,
                            frame.shape[1], frame.shape[0])
            segments = seg.getSegmentJSON()
            # detect gaze
            gaze = gaze_detector.detect_gaze(frame, segments["leftEye"],
                                             segments["rightEye"],
                                             segments["face"],
                                             segments["faceGrid"])
            # pack result
            result = [gaze, frame]
            result_queue.put(result)
        else:
            result_queue.put(None)
Example No. 19
                                                  "cache_dir", "../../models"))
model = Wav2Vec2ForCTC.from_pretrained("facebook/wav2vec2-base-960h",
                                       cache_dir=os.getenv(
                                           "cache_dir", "../../models"))

audio_ds = [
    os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data',
                 'sample.mp3'),
    os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data',
                 'long_sample.mp3')
]

# create speech segmenter
seg = Segmenter(model_path=os.path.join(
    os.path.dirname(os.path.abspath(__file__)), 'speech_segmenter_models'),
                vad_engine='smn',
                detect_gender=True,
                ffmpeg='ffmpeg',
                batch_size=32)

# it holds audio segmentations
segmentations = []
for audio in audio_ds:
    # [('noEnergy', 0.0, 0.8), ('male', 0.8, 9.84), ('music', 9.84, 10.96), ('male', 10.96, 14.98)]
    # segmentation = seg(audio, start_sec=0, stop_sec=30)
    s = seg(audio)

    res = {}
    res['segmentation'] = s
    res['audio'] = audio
    segmentations.append(res)
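Illustrative only: given the (label, start_sec, stop_sec) tuples shown in the comment above, the collected results can be summarized per label like this (hypothetical post-processing, not part of the original):

for res in segmentations:
    totals = {}
    for label, start, stop in res['segmentation']:
        totals[label] = totals.get(label, 0.0) + (stop - start)
    print(res['audio'], totals)  # e.g. total seconds of 'male', 'music', ...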
Example No. 20
from neuralNetworkTester import NeuralNetworkTester
from remover import Remover
import subprocess
import config
import utils
from datetime import datetime
from logger import Logger

with open(config.CATEGORY_LIST, "r") as input_file:
    categories = input_file.read().splitlines()

generator = LinkGen()
generator.run(categories)
downloader = Downloader(categories, config.BATCH_SIZE)
segmenter = Segmenter()
remover = Remover()
logger = Logger()
processed_video_count = utils.get_processed_video_count()

while processed_video_count < config.NUMBER_OF_LINKS:
    processed_video_count = utils.get_processed_video_count()
    load = utils.get_checkpoints_flag()

    downloader.run()

    segmenter.run()

    source = config.NEURAL_NETWORK_PATH

    tfrecords_command = 'python2.7 %s/utils/generate_tfrecords_dataset.py' \
Example No. 21
import glob

from PIL import Image

from segmenter import Segmenter

image_paths = glob.glob("input/*.png")  # get all images in the input folder

segmenter = Segmenter()  # create an instance of the classifier
predictions = segmenter.run_images(
    image_paths)  # run the classifier to get predictions

segmenter.save_predictions(predictions,
                           image_paths)  # save the results to output folder

Image.fromarray(
    predictions[0]).show()  # show a single prediction (the first image)
Example No. 22
#! python3
#! PY_PYTHON=3

import sys, os
from segmenter import Segmenter

if __name__ == "__main__":
    seg = Segmenter(sys.argv)
Example No. 23
def optimize(avg_speed=130, length=1055):
    simulator = Jam_Simulator()
    segmenter = Segmenter()
    # the route comes back as a list of segments, some carrying jam info
    segments = segmenter.get_segmented_route(
        length, simulator.generate_traffic_jams(route_length=length))
    predictor = Predictor()

    print(segments)
    jams = []

    for segment in segments:
        if segment.jam_info is not None:
            jam = segment.jam_info
            jam.number = segment.number
            jams.append(jam)
            jam.end = round(
                predictor.predict(
                    np.array([
                        jam.length, jam.avg_speed, jam.reason, jam.start_time
                    ]).reshape((1, 4)))[0][0], 2)
            print(segment.number)
            print("******")

    for jam in reversed(jams):

        # distance from the route start to the jammed segment
        # ("Strecke" is German for stretch/distance)
        strecke = segments[jam.number].start

        avg_speed = min(avg_speed, round(strecke / jam.end, 2))

        if avg_speed < 40:
            stri = "roads are bad. stay at home."
            with open("../templates/tmp.txt", "w") as f:
                f.write(stri)
            return stri

        for i in range(jam.number):
            segments[i].avg_speed = avg_speed

    time = 0
    benzin = 0  # petrol consumption ("Benzin" is German for petrol)
    strom = 0  # electricity consumption ("Strom" is German for electricity)

    stri = "length, speed\n"

    for segment in segments:
        print(segment.number)
        print(round(segment.length, 2))
        print()

        stri += "[" + str(round(segment.length, 2)) + ", " + str(
            round(segment.avg_speed, 2)) + "]\n"
        time += segment.length / segment.avg_speed
        # empirical consumption curves as polynomials in the average speed;
        # segment.length * 0.01 converts the length into units of 100 km
        benzin += (3.4 + 0.00966667 * segment.avg_speed - 0.00024 * (segment.avg_speed**2) \
                  + 2.53333*(10**(-6))*(segment.avg_speed**3)) * (segment.length * 0.01)
        strom += (-1.1 + 0.06 * segment.avg_speed - 0.00025 *
                  (segment.avg_speed**2)) * (segment.length * 0.01)

    print("time:")
    print(round(time, 2))
    print("benzin")
    print(round(benzin, 2))
    print("strom")
    print(round(strom, 2))
    stri += "Time: " + str(round(time, 2)) + "\n"
    stri += "Benzin: " + str(round(benzin, 2)) + "\n"
    stri += "Strom: " + str(round(strom, 2)) + "\n"
    f = open("../templates/tmp.txt", "w")
    f.write(stri)
    f.close()
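As a sanity check on the petrol polynomial above (illustrative arithmetic only): at the default 130 km/h it evaluates to about 6.17, which the segment.length * 0.01 factor then scales into a per-100 km consumption figure.

v = 130.0
petrol_per_100km = 3.4 + 0.00966667 * v - 0.00024 * v**2 + 2.53333e-6 * v**3
print(round(petrol_per_100km, 2))  # 6.17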