Example #1
def leaveOneOutCrossValidation(radiographs):
    print("\nSTARTING LEAVE ONE OUT CROSS VALIDATION.\n")
    accuracy = 0
    precision = 0
    recall = 0

    for i in range(len(radiographs)):
        trainSet = radiographs[:i] + radiographs[i + 1:]
        testSet = [radiographs[i]]

        with util.Timer("Building initialization active shape models, leaving radiograph {} out".format(i)):
            initModels = InitializationModel.buildModels(trainSet, PCAComponents, sampleAmount)

        with util.Timer("Building tooth active shape models, leaving radiograph {} out".format(i)):
            models = ToothModel.buildModels(trainSet, PCAComponents, sampleAmount)

        with util.Timer("Generating segmentation masks and comparing to ground truth".format(i)):
            gen = MaskGenerator(testSet[0], models, initModels)
            acc, prec, rec = gen.doSearchAndCompareSegmentations()

        print()
        accuracy += acc
        precision += prec
        recall += rec

    accuracy /= len(radiographs)
    precision /= len(radiographs)
    recall /= len(radiographs)
    print("Averaged results after LOO cross-validation:")
    print("Accuracy = {:.2f}%, Precision = {:.2f}%, Recall = {:.2f}%"
          .format(accuracy * 100, precision * 100, recall * 100))
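Each example on this page comes from a different project, so util.Timer is not a single library. Examples #1, #2, and #4 (and several later ones) use it as a labeled context manager that reports elapsed time when the block exits. The following is only a minimal sketch consistent with those call sites, not any project's actual implementation:

import time

class Timer:
    # Hypothetical minimal sketch: a labeled context manager that prints
    # elapsed time on exit. out=None falls back to stdout; Example #7 below
    # passes a StringIO and checks that the label is written first.
    def __init__(self, label='', out=None):
        self.label = label
        self.out = out

    def __enter__(self):
        self._t0 = time.time()
        return self

    def __exit__(self, *exc_info):
        print('{}: {:.3f} s'.format(self.label, time.time() - self._t0),
              file=self.out)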
Example #2
def buildModel(radiographs, resolutionLevels=5, maxLevelIterations=20, grayLevelModelSize=7, sampleAmount=3,
               pcaComponents=25):
    mouthLandmarks = []  # list of landmarks that contain all 8 teeth

    for rad in radiographs:
        mouthLandmark = Landmark(np.asarray([]), radiographFilename=None, toothNumber=-1)
        mouthLandmark.radiograph = rad

        for toothNumber, landmark in sorted(rad.landmarks.items(), key=lambda i: i[0]):
            mouthLandmark.points = np.concatenate((mouthLandmark.points, landmark.points))

        mouthLandmarks.append(mouthLandmark)

    model = TeethActiveShapeModel(
        mouthLandmarks=mouthLandmarks,
        resolutionLevels=resolutionLevels,
        maxLevelIterations=maxLevelIterations,
        grayLevelModelSize=grayLevelModelSize,
        sampleAmount=sampleAmount,
        pcaComponents=pcaComponents
    )

    with util.Timer("Building multi resolution active shape model: Gray level models"):
        model.buildGrayLevelModels()

    with util.Timer("Building multi resolution active shape model: Procrustes analysis"):
        model.doProcrustesAnalysis()

    with util.Timer("Building multi resolution active shape model: PCA"):
        model.doPCA()

    return model
Example #3
    def predict_single(self, filename, clf_id, gpu_option='0'):
        # TODO add a line of code to stack labels to create y_test, store in classifier
        graph = tf.Graph()
        probs = []

        path = "./" + "_".join(
            (self.tag, str(self.B), str(self.R), "time")) + ".out"
        log = open(path, 'a')
        predict_timer = util.Timer()

        with graph.as_default():
            # build input pipeline
            labels, indices, values = self.load_sparse(filename, graph)
            X = (indices, values)
            y = labels
            W = tf.constant(self.weights[clf_id])
            b = tf.constant(self.bias[clf_id])
            # build graph
            y_p = tf.nn.softmax(matmul(X, W) + b)
            init_op = tf.group(tf.global_variables_initializer(),
                               tf.local_variables_initializer())
            sess = None
            if gpu_option == '1' or gpu_option == '0':
                print("single gpu")
                config = tf.ConfigProto()
                config.gpu_options.visible_device_list = gpu_option
                config.gpu_options.allow_growth = True
                sess = tf.Session(config=config)
            else:
                sess = tf.Session()
            sess.run(init_op)
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                while not coord.should_stop():
                    probs.append(sess.run(y_p))
            except tf.errors.OutOfRangeError:
                print("Done predicting")
            finally:
                coord.request_stop()
            coord.join(threads)
            sess.close()

            log.write("PREDICT B={} R={} time={}\n".format(
                self.B, self.R, predict_timer.elapsed()))
            log.close()

            return np.concatenate(probs, axis=0)
Example #4
    def match(self, image):
        dims = len(np.shape(image))
        if dims == 3:
            self.logger.info("Converting image to GRAY before matching")
            image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        with util.Timer("extract corners"):
            corners = util.get_corners(image, self._max_match_corners)

        image = cv2.GaussianBlur(image, (7, 7), 25)

        key_points_trained = []
        key_points_matched = []
        key_points_pairs = []

        for corner in util.iter_timer(corners,
                                      title="Matching corners",
                                      print_iterations=False):
            probs = np.zeros((self._classes_count, ))

            patch = util.generate_patch(image, corner, self._patch_size)
            for fern_idx, fern in enumerate(self._ferns):
                k = fern.calculate(patch)
                probs += self._fern_p[fern_idx, :, k]

            most_probable_class = np.argmax(probs)
            best_key_point = self.key_points[most_probable_class]

            key_points_trained.append(best_key_point)
            key_points_matched.append(corner)
            key_points_pairs.append((best_key_point, corner))

        return util.flip_points(key_points_trained), \
               util.flip_points(key_points_matched), \
               key_points_pairs
Example #5
def run_tests(connection, iterations=1):
    c = connection.cursor()

    # Implicit tests
    c.execute('SELECT target FROM targets')
    res = c.fetchall()
    test_urls = ['http://%s/' % (t[0]) for t in res]
    logger.info('Running %d implicit tests, %d iterations' %
                (len(test_urls), iterations))

    # Explicit tests
    c.execute('SELECT test FROM tests')
    res = c.fetchall()
    test_urls.extend([t[0] for t in res])
    logger.info('Running %d explicit tests, %d iterations' %
                (len(res), iterations))

    for i in range(0, iterations):
        for test_url in test_urls:
            timer = util.Timer()
            timer.start_timer('Applying rules for %s' % test_url)
            result_url = cache.cache.apply_rules(test_url)
            timer.end_timer('Done. %s -> %s (took {:.10f} sec)' %
                            (test_url, result_url))

    c.close()
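run_tests relies on a start_timer()/end_timer() pair rather than a context manager, with the elapsed time substituted into a '{:.10f}' placeholder. A hedged sketch of what such a Timer might look like (the real util module is not shown here):

import time

class Timer:
    # Hypothetical sketch of the start_timer()/end_timer() interface above.
    def start_timer(self, message):
        print(message)
        self._t0 = time.time()

    def end_timer(self, message):
        # The caller's message carries a '{:.10f}' placeholder for the seconds.
        print(message.format(time.time() - self._t0))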
Example #6
    def update_network(self, minibatchs):
        indices_all, priorities_all, losses = [], [], []

        with util.Timer("Learner update:"):
            for (indices, weights, samples) in minibatchs:
                samples = [pickle.loads(lz4f.decompress(s)) for s in samples]
                priorities, loss_info = self.update(weights, samples)
                indices_all += indices
                priorities_all += priorities
                losses.append(loss_info)

        current_weights = self.get_weights()

        total_loss = sum([l[0] for l in losses]) / len(losses)
        policy_loss = sum([l[1] for l in losses]) / len(losses)
        value_loss = sum([l[2] for l in losses]) / len(losses)
        reward_loss = sum([l[3] for l in losses]) / len(losses)

        losses_mean = (total_loss, policy_loss, value_loss, reward_loss)

        return (current_weights, indices_all, priorities_all, losses_mean)
Example #7
    def test_Timer(self):
        sysout = StringIO()
        with util.Timer('test', out=sysout):
            pass
        output = sysout.getvalue()
        # not really much we can test for here, without being too brittle
        assert output.startswith('test')
Example #8
    def remote_alias(self):
        alias = None
        try:
            ns = int(self.NameSelect)
        except Exception:
            ns = -1

        if 1 <= ns <= 3:
            alias = [self.DisplayName, self.IMName, self.NickName][ns - 1]

        if not alias:
            # does this protocol have enough aliases?
            alias = self.RealName or self.DisplayName or self.UserName or self.IMName or self.NickName

        if not alias and not getattr(self, '_pending_info_request', False):
            util.Timer(
                common.pref('msim.contact_info.request_timeout',
                            type=int,
                            default=5),
                lambda: setattr(self, '_pending_info_request', False)).start()
            self._pending_info_request = True
            self.protocol.request_buddy_info(self.id)

        if isinstance(alias, bytes):
            return alias.decode('utf8')

        return alias
Example #9
    def next_file(self):
        if self.cancelling:
            return

        if sys.DEV and not sys.opts.force_update:
            self.unchecked_files = []
            self.delete_files, self.update_files = dev_integrity_check(
                self.local_dir)
            self.files_processed()
            return

        if self.unchecked_files:
            if self.fast_mode:
                while self.fast_mode and self.unchecked_files and not self.cancelling:
                    self._check_file()
            else:
                self._check_file()

            import common
            interval = common.pref('digsby.updater.file_integrity_interval',
                                   type=int,
                                   default=0.01)
            if interval == 0:
                interval = 0.01

            t = util.Timer(interval, self.next_file)
            t._verbose = False
            t.start()
        else:
            self.files_processed()
Example #10
    def queue_next_file(self):
        interval = common.pref('digsby.updater.file_download_interval', type = int, default = 0.1)
        if interval == 0:
            interval = 0.1

        t = util.Timer(interval, self.next_file)
        t._verbose = False
        t.start()
Example #11
    def set_timeout(self, msgobj):
        timeout = getattr(msgobj, 'timeout', None)
        if timeout is not None and common.pref(
                'msn.socket.use_timeout', type=bool, default=False):
            log.info('Starting timeout for %r', msgobj)
            timer = util.Timer(timeout, self.timeout_handler(msgobj))
            self.timeouts[msgobj.trid] = timer
            timer.start()
Example #12
def update_check_later(*a):
    '''
    The app performs an initial update check 5 minutes after launching.
    (After that, it's every 6 hours - see UpdateManager.setup)
    '''
    util.Timer(
        common.pref('digsby.updater.initial_delay', type=int, default=5 * 60),
        update_check).start()
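Examples #8 through #12 (and #26 below) use a different util.Timer: util.Timer(delay, func, *args) is a one-shot scheduler armed with start(). That is the same shape as threading.Timer, so, assuming that contract holds, a thin stand-in could be:

import threading

def Timer(delay_secs, func, *args, **kwargs):
    # One-shot delayed call: runs func(*args, **kwargs) after delay_secs.
    # threading.Timer already has this signature and provides start()/cancel().
    return threading.Timer(delay_secs, func, args=args, kwargs=kwargs)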
Example #13
    def reset(self):
        self._success = False
        self._failure = False
        self._complete = False
        self._signal = None
        self._started = False
        self._timer = util.Timer()
        self._startFrame = None
Example #14
    def __init__(self, states):
        if not states:
            raise Exception("State machine can't be empty")
        self.states = list(states)
        self.istates = range(len(states))
        self.initial = states[0]
        self._timer = util.Timer()
        self.transition(self.initial)
        for i in range(len(states)):
            setattr(self, states[i], i)
Example #15
    def deserialize(file: IO):
        module_logger.info("Deserialiazing FernDetector from {}".format(
            file.name))
        version = int(file.readline().strip())

        if version != 1:
            msg = "Can't deserialize FernDetector from {}. Incorrect version of model. Expected 1, found {}"\
                .format(file.name, version)
            module_logger.error(msg)
            raise AssertionError(msg)

        num_ferns = int(file.readline().strip())
        ph, pw = map(int, file.readline().strip().split(","))

        with util.Timer("Deserializing ferns"):
            ferns = [Fern.deserialize(file) for _ in range(num_ferns)]

        fern_bits, max_train, max_match = map(
            int,
            file.readline().strip().split(","))

        with util.Timer("Deserializing fern_p"):
            F, C, K = map(int, file.readline().strip().split(","))
            fern_p = np.zeros((F, C, K), dtype=float)
            for fern_idx in range(F):
                for class_idx in range(C):
                    line = list(map(float, file.readline().strip().split(",")))
                    fern_p[fern_idx, class_idx, :] = line

        line = file.readline().strip().split(",")
        key_points = list(util.grouper(map(int, line), 2))

        module_logger.info("Creating FernDetector")
        detector = FernDetector(patch_size=(ph, pw),
                                max_train_corners=max_train,
                                max_match_corners=max_match,
                                ferns=ferns,
                                ferns_p=fern_p,
                                classes_cnt=C,
                                key_points=key_points,
                                fern_bits=fern_bits)
        module_logger.info("Deserialization complete.")
        return detector
Example #16
def merge_all():
    mdf = pd.DataFrame()
    num_per_run = get_num_per_run()
    timer = util.Timer(total=num_jobs)
    for start in range(0, len(fns), num_per_run):
        end = start + num_per_run
        df = pd.read_csv(out_dir + f'features-{start}-{end}.csv', index_col=0)
        mdf = mdf.append(df)
        timer.update()
    mdf.to_csv(out_dir + 'features.csv')
    return
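merge_all (like Examples #18 and #21 below) uses a progress-counter flavor of util.Timer: construct with total=n, then call update() once per iteration, similar in spirit to tqdm. A minimal sketch under that assumption:

import sys
import time

class Timer:
    # Hypothetical progress-style timer: Timer(total=n) plus update() per step.
    def __init__(self, total=None):
        self.total = total
        self.count = 0
        self._t0 = time.time()

    def update(self):
        self.count += 1
        sys.stderr.write('\r{}/{} done, {:.1f} s elapsed'.format(
            self.count, self.total, time.time() - self._t0))
        sys.stderr.flush()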
Example #17
    def __init__(self,
                 dest=115 * core.DEG_T_RAD,
                 stepSize=26 * core.DEG_T_RAD,
                 skipFirstPause=False):
        Task.__init__(self)
        self.dest = dest
        self.stepSize = stepSize
        self.pauseTime = 0.2083
        self.skipFirstPause = skipFirstPause
        self.isPaused = True
        self.timer = util.Timer()
Example #18
def filter_and_save(name, all_data, mdf):
    # Filter, then convert to dict
    all_d = {}
    print(f'Filtering {name} ...')
    timer = util.Timer(total=len(mdf))
    for idx, row in mdf.iterrows():
        all_d[row['Name (unique)']] = all_data[row['Stepchart index']]
        timer.update()
    with open(out_dir + f'{name}.pkl', 'wb') as f:
        pickle.dump(all_d, f)
    return
Example #19
    def reset(self):
        if not self._initializing:
            self.trace("Reset")
        self._started = False
        self._finished = False
        self._aborted = False
        self._timer = util.Timer()
        self._iterations = 0
        self._frames = 0
        for k, v in self._kwargs.items():
            if k == 'name':
                continue
            setattr(self, k, v)
Example #20
def leaveOneOutCrossValidation(radiographs, resolutionLevels,
                               maxLevelIterations, grayLevelModelSize,
                               sampleAmount, PCAComponents):
    print("\nSTARTING LEAVE ONE OUT CROSS VALIDATION.\n")
    accuracy = 0
    precision = 0
    recall = 0

    mirroredRadiographs = [r for r in radiographs if r.mirrored]
    radiographs = [r for r in radiographs if not r.mirrored]

    for i in range(len(radiographs)):
        trainSet = mirroredRadiographs + radiographs[:i] + radiographs[i + 1:]
        testSet = [radiographs[i]]

        with util.Timer(
                "Building multi resolution active shape model, leaving radiograph {} out"
                .format(i)):
            model = MultiResolutionASM.buildModel(trainSet, resolutionLevels,
                                                  maxLevelIterations,
                                                  grayLevelModelSize,
                                                  sampleAmount, PCAComponents)

        with util.Timer(
                "Generating segmentation masks and comparing to ground truth"):
            gen = MaskGenerator(testSet[0], model)
            acc, prec, rec = gen.doSearchAndCompareSegmentations()

        print()
        accuracy += acc
        precision += prec
        recall += rec

    accuracy /= len(radiographs)
    precision /= len(radiographs)
    recall /= len(radiographs)
    print("Averaged results after LOO cross-validation:")
    print("Accuracy = {:.2f}%, Precision = {:.2f}%, Recall = {:.2f}%".format(
        accuracy * 100, precision * 100, recall * 100))
Example #21
def merge(start, end):
    start, end = int(start), int(end)

    sub_fns = fns[start:end]

    timer = util.Timer(total=len(sub_fns))
    mdf = pd.DataFrame()
    for fn in sub_fns:
        df = pd.read_csv(inp_dir + fn, index_col=0)
        mdf = mdf.append(df)
        timer.update()

    mdf.to_csv(out_dir + f'features-{start}-{end}.csv')
    return
Example #22
def restart_and_update(tempdir):
    '''
    Registers _launch_updater as an atexit function, attempts to get the app to quit, and in the event the interpreter
    is still running in 7 seconds, calls _launch_updater anyway.

    Special care is also taken to make sure we're not still in the OnInit handler of the wxApp.
    '''
    import atexit
    atexit.register(_launch_updater, tempdir)
    force_restart_timer = util.Timer(7, _launch_updater, tempdir)
    app = wx.GetApp()
    # Don't call DigsbyCleanupAndQuit if we're not yet out of OnInit - the startup sequence will do it for us.
    if app.IsMainLoopRunning() and app.DigsbyCleanupAndQuit():
        force_restart_timer.start()
Example #23
    def encode_points_mtx_nd(self, pts_nd, axis=1, returnSparse=False):
        t = util.Timer()
        # Flatten the N-d point array so each row is a single point.
        pts_flt = util.flatten_nd_array(pts_nd, axis=axis)
        P = pts_flt.shape[0]

        # Look up the nearest cluster centers for every point.
        (dists, inds) = self.nbrs.kneighbors(pts_flt)

        # Soft encoding: Gaussian-weight each neighbor by its distance and
        # normalize so the weights of each point sum to 1.
        pts_enc_flt = np.zeros((P, self.K))
        wts = np.exp(-dists**2 / (2 * self.sigma**2))
        wts = wts / np.sum(wts, axis=1)[:, util.na()]

        # Scatter the weights into the (P, K) encoding and restore the
        # original N-d layout.
        pts_enc_flt[np.arange(0, P, dtype='int')[:, util.na()], inds] = wts
        pts_enc_nd = util.unflatten_2d_array(pts_enc_flt, pts_nd, axis=axis)

        return pts_enc_nd
Example #24
    def multiResolutionSearch(self):
        with util.Timer("Multi resolution search"):
            self.setCurrentResolutionLevel(self.model.resolutionLevels)
            self.initializeLandmark()

            for level in range(self.model.resolutionLevels - 1, -1, -1):
                self.setCurrentResolutionLevel(level)
                self.drawLandMarkWithNormals(self.currentLandmark)
                cv2.imshow(self.name, self.img)
                cv2.waitKey(1)

                self.currentLandmark = self.model.improveLandmarkForResolutionLevel(
                    resolutionLevel=level,
                    img=self.currentRadiograph.imgPyramid[level].copy(),
                    landmark=self.currentLandmark)
Example #25
    def __init__(self, *states):
        if not states:
            raise Exception("State machine can't be empty")
        self.states = dict()
        self.stateValues = set()
        for i in range(len(states)):
            state = states[i]
            self.states[i] = state
            self.stateValues.add(state)
            setattr(self, state, i)
        self.initial = states[0]
        self.state = self.initial
        self._timer = util.Timer()
        self.debug = False
        self.transition(self.initial)
Example #26
    def set_status_message(self, message, callback=None):
        def on_error(e):
            e_msg = getattr(e, "headers", {}).get("x-opensocial-error", e)
            callback.error(e_msg)
            self.update()

        callback.success += lambda *a: util.Timer(2, self.update).start()
        import hooks
        callback.success += lambda *a: hooks.notify(
            'digsby.myspace.status_updated', *a)
        self.api.call('statusmood/@me/@self',
                      OpenSocial=True).PUT(data=json.dumps(
                          dict(status=message.encode('utf8'), moodName=None)),
                                           retries=1,
                                           success=callback.success,
                                           error=on_error)
Example #27
    def sync_weights_and_rollout(self, current_weights, T):

        #: sync to the latest network weights
        if current_weights:
            self.sync_weights(current_weights)

        with util.Timer("Actor:"):

            if self.lives == 0:
                self.reset_env()

            #: roll out one episode
            game_history = self.rollout(T)

        samples, priorities = self.make_samples(game_history)

        return self.pid, samples, priorities
Example #28
    def evaluate(self,
                 filename,
                 chunck_size=10000,
                 gpu_option='0',
                 load_probs=False):
        if load_probs is False:
            self.predict(filename, gpu_option=gpu_option)
        path = "./" + "_".join((self.tag, str(self.B), str(self.R), "time")) + ".out"
        log = open(path, 'a')
        eval_timer = util.Timer()
        P = self.get_complete_probs()
        y_test = self.get_test_labels(filename, gpu_option=gpu_option)
        k = self.num_classes
        num_test = P[0].shape[0]
        print("num_test", num_test)
        size = chunck_size
        print("start merging")

        pool = ProcessPoolExecutor()
        futures = []

        for i in range(num_test // size):
            x_batch = P[:, i*size:(i+1)*size, :]
            y_batch = y_test[i*size:(i+1)*size]
            futures.append(pool.submit(self.evaluate_chunck, i, x_batch, y_batch))

        if (num_test // size) * size < num_test:
            i = num_test // size
            x_batch = P[:, i*size:, :]
            y_batch = y_test[i*size:]
            futures.append(pool.submit(self.evaluate_chunck, i, x_batch, y_batch))

        wait(futures)

        correct = 0
        for fut in futures:
            correct += fut.result()

        print("total accuracy is", str(correct / num_test))

        log.write("EVALUATE B={} R={} time={}\n".format(self.B, self.R,
                                                eval_timer.elapsed()))
        log.write("ACCURACY={}\n".format(correct / num_test))
        log.close()
Example #29
    def decode(self, rdata, file=sys.stdout):
        org_tokens = rdata.orgdata[0]
        org_attrs = rdata.orgdata[1] if len(rdata.orgdata) > 1 else None
        n_ins = len(org_tokens)

        timer = util.Timer()
        timer.start()
        inputs = self.gen_inputs(rdata, evaluate=False)
        self.decode_batch(*inputs,
                          org_tokens=org_tokens,
                          org_attrs=org_attrs,
                          file=file)
        timer.stop()

        print(
            'Parsed %d sentences. Elapsed time: %.4f sec (total) / %.4f sec (per sentence)'
            % (n_ins, timer.elapsed, timer.elapsed / n_ins),
            file=sys.stderr)
Example #30
    def decode(self, rdata, file=sys.stdout):
        n_ins = len(rdata.inputs[0])
        org_tokens = rdata.orgdata[0]

        timer = util.Timer()
        timer.start()
        for ids in trainer.batch_generator(n_ins,
                                           batch_size=self.args.batch_size,
                                           shuffle=False):
            inputs = self.gen_inputs(rdata, ids, evaluate=False)
            ot = [org_tokens[j] for j in ids]
            self.decode_batch(*[inputs], org_tokens=ot, file=file)
        timer.stop()

        print(
            'Parsed {} sentences. Elapsed time: {:.4f} sec (total) / {:.4f} sec (per sentence)'
            .format(n_ins, timer.elapsed, timer.elapsed / n_ins),
            file=sys.stderr)
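Examples #29 and #30 call an explicit start()/stop() pair and then read elapsed as an attribute (Examples #3 and #28 instead call an elapsed() method). A sketch covering the attribute variant, again as an assumption rather than the real util module:

import time

class Timer:
    # Hypothetical start/stop timer; elapsed holds seconds after stop().
    def __init__(self):
        self.elapsed = 0.0

    def start(self):
        self._t0 = time.time()

    def stop(self):
        self.elapsed = time.time() - self._t0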