Example #1
def run(x, inner_A=__cheat_A, inner_B=__cheat_B):
    """
    Run the neural network forward on the input x using the matrix A,B.
    
    Log the result as having happened so that we can debug errors and
    improve query efficiency.
    """
    Tracker().query_count += x.shape[0]
    assert len(x.shape) == 2

    orig_x = x

    x = model(torch.tensor(orig_x)).detach().numpy()

    Tracker().save_queries(zip(orig_x, x))

    if TRACK_LINES:
        for line in traceback.format_stack():
            if 'repeated' in line: continue
            line_no = int(line.split("line ")[1].split()[0][:-1])
            if line_no not in Tracker().query_count_at:
                Tracker().query_count_at[line_no] = 0
            Tracker().query_count_at[line_no] += x.shape[0]

    return x
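All of the extraction-attack snippets below share state through Tracker(): they read query_count, query_count_at, saved_queries, and nr_of_queries from whatever instance they construct, which only works if the class behaves as a singleton. Here is a minimal sketch of the pattern these examples appear to assume; the implementation and the nr_of_queries alias are guesses, only the attribute names come from the snippets. Note that Tracker also names unrelated classes in other examples on this page (video tracker, torrent tracker, Discord cog); this sketch covers only the query-counting variant.

class Tracker:
    """Process-wide query bookkeeping (hypothetical sketch)."""
    _instance = None

    def __new__(cls):
        # Always return the same instance so counts accumulate globally.
        if cls._instance is None:
            cls._instance = super().__new__(cls)
            cls._instance.query_count = 0
            cls._instance.query_count_at = {}   # source line number -> query count
            cls._instance.saved_queries = []    # list of (input, output) pairs
        return cls._instance

    def save_queries(self, pairs):
        self.saved_queries.extend(pairs)

    @property
    def nr_of_queries(self):
        # Assumed alias; some snippets read nr_of_queries, others query_count.
        return len(self.saved_queries)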
Example #2
    def __init__(self, cfg, src, n, LED_pos, LED_thresholds):
        threading.current_thread().name = 'HexTrack'

        self.cfg = cfg
        self.frame_idx = 0
        self.n = n
        self.mask_init = True
        self.made_mask = None

        # Create path to csv log file for tracking mouse position and LED-light state
        path = pkg_resources.resource_filename(__name__, "/data/interim/position_log_files/{}".format(src[len(src)-29:len(src)-10]))
        if not os.path.exists(path):
            try:
                os.mkdir(path)
            except OSError:
                print("Creation of the directory %s failed, this path probably already exists" % path)
        self.path = pkg_resources.resource_filename(__name__, '/data/interim/Position_log_files/{}/pos_log_file_{}.csv'
                                                    .format(src[len(src)-29:len(src)-10], n))

        # Initiation of the Grabbers and Trackers and creation of csv log file
        self.grabber = Grabber(src)
        self.tracker = Tracker(cfg, pos_log_file=open(self.path, 'w'), name=__name__, LED_pos=LED_pos, LED_thresholds=LED_thresholds)

        logging.debug('HexTrack initialization done!')

        self.vid = VideoFileClip(src)
        self.duration = self.vid.duration*15  # assumes a 15 fps recording; duration is in seconds
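The try/except around os.mkdir only catches the failure after the exists-check has already raced, and it cannot create missing parent directories. On Python 3.2+ the whole block collapses into one call; a sketch assuming the same path variable:

import os

# Creates missing parents too, and is a no-op if the directory already exists.
os.makedirs(path, exist_ok=True)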
Example #3
def sweep_for_critical_points(std=1, known_T=None):
    while True:
        logger.log("Start another sweep", level=Logger.INFO)
        qs = Tracker().query_count
        sweep = do_better_sweep(offset=np.random.normal(0,
                                                        np.random.uniform(
                                                            std / 10, std),
                                                        size=DIM),
                                known_T=known_T,
                                low=-std * 1e3,
                                high=std * 1e3,
                                debug=False)
        logger.log("Total intersections found", len(sweep), level=Logger.INFO)
        logger.log("delta queries",
                   Tracker().query_count - qs,
                   level=Logger.INFO)
        for point in sweep:
            yield point
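sweep_for_critical_points never returns on its own; it is an infinite generator that keeps sweeping random offsets and yielding intersection points until the caller stops pulling. A usage sketch that bounds it with itertools.islice (handle_point is a placeholder for the caller's own processing):

import itertools

# Take only the first 100 critical points from the endless sweep.
for point in itertools.islice(sweep_for_critical_points(std=1), 100):
    handle_point(point)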
Example #4
def main(input, project, skip_tracking=False):
    if not skip_tracking:
        # TODO check if input is a csv or a folder
        df = pd.read_csv(input)
        tr = Tracker(project=project)

        all_results = []
        v = None
        old = None
        for i, x in tqdm(df.iterrows(), total=len(df)):
            start = int(x['start']) if 'start' in x else None
            end = int(x['end']) if 'end' in x else None
        fragment = f'{start},{end + 1}' if start is not None and end is not None else None
            media = x['media']
        if media != old:
            v, metadata = uri_utils.uri2video(media)
            old = media

            res = tr.run(v,
                         export_frames=True,
                         fragment=fragment,
                         video_id=x['media'],
                         verbose=False)
            all_results.append(res)
        with open(f'results_{project}.json', 'w') as f:
            json.dump(all_results, f)

    else:
        with open(f'results_{project}.json', 'r') as f:
            all_results = json.load(f)

    clusters = []
    for r in all_results:
        c = clusterize.main(clusterize.from_dict(r),
                            dominant_ratio=0.6,
                            weighted_dominant_ratio=0.4,
                            confidence_threshold=0.6,
                            merge_cluster=True,
                            min_length=1)
        clusters.append(c)

    with open(f'results_{project}_clusters.json', 'w') as f:
        json.dump(clusters, f)
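main() expects the input CSV to carry a media column plus optional start/end columns delimiting the fragment to track. A sketch of a compatible input file, with made-up values:

import pandas as pd

# Hypothetical rows matching the columns main() reads.
df = pd.DataFrame({
    'media': ['http://example.org/a.mp4', 'http://example.org/a.mp4'],
    'start': [0, 120],
    'end':   [100, 240],
})
df.to_csv('input.csv', index=False)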
Example #5
def main():
    TIME = 60 * 60  # one hour, in seconds
    # get Twitter API keys from the environment
    CONSUMER_KEY = environ['CONSUMER_KEY']
    CONSUMER_SECRET = environ['CONSUMER_SECRET']
    ACCESS_TOKEN = environ['ACCESS_KEY']
    ACCESS_TOKEN_SECRET = environ['ACCESS_SECRET']

    # init bot
    bot = Bot(CONSUMER_KEY=CONSUMER_KEY, CONSUMER_SECRET=CONSUMER_SECRET,
              ACCESS_TOKEN=ACCESS_TOKEN, ACCESS_TOKEN_SECRET=ACCESS_TOKEN_SECRET)
    # init tracker (database api call)
    tracker = Tracker()
    # tweet init 
    tweet = Tweet(totalDeaths=(tracker.getTotalDeaths()),
                  totalInfected=(tracker.getTotalInfected()))

    while True:
        # Get latest data from Tracker
        tracker.update()
        # Generate tweet with latest data
        tweet.update(totalDeaths=(tracker.totalDeaths),
                     totalInfected=(tracker.totalInfected))
        
        # Get old tweets
        oldTweets = bot.getOldTweets()
        # Check if tweet is not duplicated
        if not tweet.isDuplicated(oldTweets=oldTweets):
            bot.postTweet(text=(tweet.text))

        time.sleep(TIME)  # sleep one hour between updates
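Note the asymmetry in the names: the code reads ACCESS_KEY and ACCESS_SECRET from the environment even though it stores them as ACCESS_TOKEN and ACCESS_TOKEN_SECRET. A sketch of the environment the bot expects, with placeholder values for local testing:

import os

# For local testing only; in production these come from the deployment environment.
os.environ.setdefault('CONSUMER_KEY', '<consumer-key>')
os.environ.setdefault('CONSUMER_SECRET', '<consumer-secret>')
os.environ.setdefault('ACCESS_KEY', '<access-token>')
os.environ.setdefault('ACCESS_SECRET', '<access-token-secret>')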
Example #6
	def __init__(self):

		# load the serialized model from disk
		self.net = cv2.dnn.readNet(car_detection_bin_model, car_detection_xml_model)
		self.net2 = cv2.dnn.readNet(car_classification_bin_model, car_classification_xml_model)
		self.net3 = cv2.dnn.readNet(plate_detection_bin_model, plate_detection_xml_model)


		# initialize the tracker and frame dimensions
		self.tr = Tracker(df)

		self.last_ids = []
		self.last_positions = []
		self.dt = 0
		self.frame_num = 1
		self.start = 0
		self.ppm = 0
		self.fn = 1
		self.ids = []
		self.fns = []
		self.ids2 = []
		self.sp = []
		self.cnt = 0
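Each readNet call here pairs a .bin weights file with a .xml topology file, which is OpenVINO's IR format; cv2.dnn.readNet infers each file's role from its extension, so the argument order does not matter. A minimal standalone sketch with placeholder file names:

import cv2

# OpenVINO IR: .bin holds the weights, .xml the topology.
net = cv2.dnn.readNet('model.bin', 'model.xml')
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)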
Example #7
def gather_ratios(critical_points, known_T, check_fn, LAYER, COUNT):
    this_layer_critical_points = []
    logger.log("Gathering", COUNT, "critical points", level=Logger.INFO)
    for point in critical_points:
        if LAYER > 0:
            if any(np.any(np.abs(x) < 1e-5) for x in known_T.get_hidden_layers(point)):
                continue
            if CHEATING:
                if np.any(np.abs(cheat_get_inner_layers(point)[0]) < 1e-10):
                    logger.log(cheat_get_inner_layers(point), level=Logger.INFO)
                    logger.log("Looking at one I don't need to", level=Logger.INFO)

        if LAYER > 0 and np.sum(known_T.forward(point) != 0) <= 1:
            logger.log("Not enough hidden values are active to get meaningful data", level=Logger.INFO)
            continue

        if not check_fn(point):
            # print("Check function rejected it")
            continue
        if CHEATING:
            logger.log("What layer is this neuron on (by cheating)?",
                       [(np.min(np.abs(x)), np.argmin(np.abs(x))) for x in cheat_get_inner_layers(point)],
                       level=Logger.INFO)

        tmp = Tracker().query_count
        for EPS in [GRAD_EPS, GRAD_EPS / 10, GRAD_EPS / 100]:
            try:
                normal = get_ratios_lstsq(LAYER, [point], [range(DIM)], known_T, eps=EPS)[0].flatten()
                # normal = get_ratios([point], [range(DIM)], eps=EPS)[0].flatten()
                break
            except AcceptableFailure:
                logger.log("Try again with smaller eps", level=Logger.INFO)
                pass
        # print("LSTSQ Delta queries", query_count-tmp)

        this_layer_critical_points.append((normal, point))

        # coupon collector: we need ~n log n points; see the sketch below.
        logger.log("Up to", len(this_layer_critical_points), 'of', COUNT, level=Logger.INFO)
        if len(this_layer_critical_points) >= COUNT:
            break

    return this_layer_critical_points
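The coupon-collector comment refers to the classic bound: to see each of n neurons hit at least once by random critical points, you expect to need about n ln n draws, which is why COUNT is chosen superlinear in the layer width. A quick sketch of the estimate:

import math

def expected_draws(n):
    """Expected draws to collect all n coupons: n * H_n, roughly n ln n."""
    return n * sum(1.0 / k for k in range(1, n + 1))

# expected_draws(100) -> ~519, versus 100 * math.log(100) -> ~461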
Example #8
async def download(torrent_file: str, download_location: str, loop=None):
    # Parse torrent file
    torrent = Torrent(torrent_file)
    LOG.info('Torrent: {}'.format(torrent))

    torrent_writer = FileSaver(download_location, torrent)
    session = DownloadSession(torrent,
                              torrent_writer.get_received_blocks_queue())

    # Instantiate tracker object
    tracker = Tracker(torrent)

    peers_info = await tracker.get_peers()

    seen_peers = set()
    peers = [Peer(session, host, port) for host, port in peers_info]
    seen_peers.update([str(p) for p in peers])

    LOG.info('[Peers] {}'.format(seen_peers))

    await asyncio.gather(*[peer.download() for peer in peers])
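download is a coroutine, so it has to be driven by an event loop; the unused loop parameter hints at an older asyncio style, but on Python 3.7+ a single asyncio.run suffices. A usage sketch with placeholder paths:

import asyncio

asyncio.run(download('example.torrent', './downloads'))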
Example #9
def run_full_attack():
    extracted_normals = []
    extracted_biases = []

    known_T = KnownT(extracted_normals, extracted_biases)

    for layer_num in range(0, len(A) - 1):
        # For each layer of the network ...

        # First setup the critical points generator
        critical_points = sweep_for_critical_points(PARAM_SEARCH_AT_LOCATION,
                                                    known_T)

        # Extract weights corresponding to those critical points
        extracted_normal, extracted_bias, mask = layer_recovery.compute_layer_values(
            critical_points, known_T, layer_num)

        # Report how well we're doing
        check_quality(layer_num, extracted_normal, extracted_bias)

        # Now, make them more precise
        extracted_normal, extracted_bias = refine_precision.improve_layer_precision(
            layer_num, known_T, extracted_normal, extracted_bias)
        logger.log("Query count", Tracker().query_count, level=Logger.INFO)

        # And print how well we're doing
        check_quality(layer_num, extracted_normal, extracted_bias)

        # New generator
        critical_points = sweep_for_critical_points(1e1)

        # Solve for signs
        if layer_num == 0 and sizes[1] <= sizes[0]:
            extracted_sign = sign_recovery.solve_contractive_sign(
                known_T, extracted_normal, extracted_bias, layer_num)
        elif layer_num > 0 and sizes[1] <= sizes[0] and all(
                sizes[x + 1] <= sizes[x] / 2 for x in range(1,
                                                            len(sizes) - 1)):
            try:
                extracted_sign = sign_recovery.solve_contractive_sign(
                    known_T, extracted_normal, extracted_bias, layer_num)
            except AcceptableFailure as e:
                logger.log(
                    "Contractive solving failed; fall back to noncontractive method",
                    level=Logger.INFO)
                if layer_num == len(A) - 2:
                    logger.log("Solve final two", level=Logger.INFO)
                    break

                extracted_sign, _ = sign_recovery.solve_layer_sign(
                    known_T,
                    extracted_normal,
                    extracted_bias,
                    critical_points,
                    layer_num,
                    l1_mask=np.int32(np.sign(mask)))

        else:
            if layer_num == len(A) - 2:
                logger.log("Solve final two", level=Logger.INFO)
                break

            extracted_sign, _ = sign_recovery.solve_layer_sign(
                known_T,
                extracted_normal,
                extracted_bias,
                critical_points,
                layer_num,
                l1_mask=np.int32(np.sign(mask)))

        logger.log("Extracted", extracted_sign, level=Logger.INFO)
        logger.log('real sign', np.int32(np.sign(mask)), level=Logger.INFO)

        logger.log("Total query count",
                   Tracker().query_count,
                   level=Logger.INFO)

        # Correct signs
        extracted_normal *= extracted_sign
        extracted_bias *= extracted_sign
        extracted_bias = np.array(extracted_bias, dtype=np.float64)

        # Report how we're doing
        extracted_normal, extracted_bias = check_quality(layer_num,
                                                         extracted_normal,
                                                         extracted_bias,
                                                         do_fix=True)

        extracted_normals.append(extracted_normal)
        extracted_biases.append(extracted_bias)

    known_T = KnownT(extracted_normals, extracted_biases)

    for a, b in sorted(Tracker().query_count_at.items(), key=lambda x: -x[1]):
        logger.log('count',
                   b,
                   '\t',
                   'line:',
                   a,
                   ':',
                   self_lines[a - 1].strip(),
                   level=Logger.INFO)

    # And then finish up
    if len(extracted_normals) == len(sizes) - 2:
        logger.log("Just solve final layer", level=Logger.INFO)
        N = int(Tracker().nr_of_queries / 1000) or 1
        ins, outs = zip(*Tracker().saved_queries[::N])
        solve_final_layer(known_T, np.array(ins), np.array(outs))
    else:
        logger.log("Solve final two", level=Logger.INFO)
        solve_final_two_layers(known_T, extracted_normal, extracted_bias)
Example #10
def solve_final_two_layers(known_T, known_A0, known_B0):
    ## Recover the final two layers by brute-forcing signs, then least squares.
    ## Yes, this is mostly a copy of solve_layer_sign. I am repeating myself. Sorry.
    LAYER = len(sizes) - 2

    # How many unique points to use. This seems to work. Tweak if needed...
    # (In checking consistency of the final layer signs)
    N = int(Tracker().nr_of_queries / 100) or 1
    filtered_inputs, filtered_outputs = zip(*Tracker().saved_queries[::N])
    logger.log('Total query count', Tracker().nr_of_queries, level=Logger.INFO)
    logger.log("Solving on", len(filtered_inputs), level=Logger.INFO)

    inputs, outputs = np.array(filtered_inputs), np.array(filtered_outputs)
    known_hidden_so_far = known_T.forward(inputs, with_relu=True)

    K = sizes[LAYER]
    logger.log("K IS", K, level=Logger.INFO)
    shuf = list(range(1 << K))[::-1]

    logger.log("Here before start",
               known_hidden_so_far.shape,
               level=Logger.INFO)

    start_time = time.time()

    extra_args_tup = (known_A0, known_B0, LAYER - 1, known_hidden_so_far, K,
                      -outputs)

    def shufpp(s):
        for elem in s:
            yield elem, extra_args_tup

    # Brute force all sign assignments...
    all_res = pool[0].map(sign_recovery.is_solution, shufpp(shuf))

    end_time = time.time()

    scores = [r[0] for r in all_res]
    solution_attempts = sum([r[1] for r in all_res])
    total_attempts = len(all_res)

    logger.log("Attempts at solution:", (solution_attempts),
               'out of',
               level=Logger.INFO)
    logger.log("Took", end_time - start_time, 'seconds', level=Logger.INFO)

    std = np.std([x[0] for x in scores])
    logger.log('std', std, level=Logger.INFO)
    logger.log('median', np.median([x[0] for x in scores]), level=Logger.INFO)
    logger.log('min', np.min([x[0] for x in scores]), level=Logger.INFO)

    score, recovered_signs, final = min(scores, key=lambda x: x[0])
    logger.log('recover', recovered_signs, level=Logger.INFO)

    known_A0 *= recovered_signs
    known_B0 *= recovered_signs

    out = known_T.extend_by(known_A0, known_B0)

    return solve_final_layer(out, inputs, outputs)
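The brute force enumerates every integer in [0, 2**K) and treats its bits as one candidate sign assignment for the K hidden units, presumably decoded inside sign_recovery.is_solution. A sketch of that decoding (the helper name is hypothetical):

import numpy as np

def int_to_signs(s, K):
    """Decode integer s in [0, 2**K) into a vector of +/-1 signs (hypothetical helper)."""
    return np.array([1.0 if (s >> i) & 1 else -1.0 for i in range(K)])

# e.g. int_to_signs(0b101, 3) -> array([ 1., -1.,  1.])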
Example #11
class OfflineHextrack:
    def __init__(self, cfg, src, n, LED_pos, LED_thresholds):
        threading.current_thread().name = 'HexTrack'

        self.cfg = cfg
        self.frame_idx = 0
        self.n = n
        self.mask_init = True
        self.made_mask = None

        # Create path to csv log file for tracking mouse position and LED-light state
        path = pkg_resources.resource_filename(__name__, "/data/interim/position_log_files/{}".format(src[len(src)-29:len(src)-10]))
        if not os.path.exists(path):
            try:
                os.mkdir(path)
            except OSError:
                print("Creation of the directory %s failed, this path probably already exists" % path)
        self.path = pkg_resources.resource_filename(__name__, '/data/interim/Position_log_files/{}/pos_log_file_{}.csv'
                                                    .format(src[len(src)-29:len(src)-10], n))

        # Initiation of the Grabbers and Trackers and creation of csv log file
        self.grabber = Grabber(src)
        self.tracker = Tracker(cfg, pos_log_file=open(self.path, 'w'), name=__name__, LED_pos=LED_pos, LED_thresholds=LED_thresholds)

        logging.debug('HexTrack initialization done!')

        self.vid = VideoFileClip(src)
        self.duration = self.vid.duration*15  # assumes a 15 fps recording

    # Loops through grabbing and tracking each frame of the video file
    def loop(self):
        pbar = tqdm(range(int(self.duration)))
        # pbar = tqdm(range(2000))
        for i in pbar:
            frame = self.grabber.next()
            if frame is None:
                break

            # Checks if the frame has a mask already, if not, it creates a new mask
            if self.mask_init:
                self.tracker.apply(frame, self.frame_idx, n=self.n)
            else:
                self.tracker.apply(frame, self.frame_idx, mask_frame=self.made_mask, n=self.n)

            # At the second frame, show computer-generated mask
            # If not sufficient, gives possibility to input user-generated mask
            # if self.frame_idx == 1:
            #     path = pkg_resources.resource_filename(__name__, '/output/Masks/mask_{}.png'.format(n))
            #     mask = cv2.imread(path)
            #     plt.figure('Mask check')
            #     plt.imshow(mask)
            #     plt.show()
            #     mask_check = input("If the mask is sufficient, enter y: ")
            #     if mask_check != 'y':
            #         input('Please upload custom mask under the name new_mask.png to the output folder and press enter')
            #         self.made_mask = cv2.imread('new_mask.png', 0)
            #         self.mask_init = False
            self.frame_idx += 1
        self.tracker.close()
        pbar.close()
        self.vid.reader.close()

    # Redundant, might be deleted later
    def process_events(self, display=False):
        if not display:
            return

        # Event loop call
        key = cv2.waitKey(1)

        # Process Keypress Events
        if key == ord('q'):
            self.stop()

    def stop(self):
        self.tracker.close()
        cv2.destroyAllWindows()
        raise SystemExit
Example #12
class OfflineHextrack:
    def __init__(self, cfg, src, n, LED_pos, LED_thresholds, sources):

        # Video frame sources (top and bottom)
        self.sources = sources

        self.cfg = cfg
        self.frame_idx = 0
        self.n = n
        self.mask_init = True
        self.made_mask = None

        # Create path to csv log file for tracking mouse position and LED-light state
        path = pkg_resources.resource_filename(__name__, "/data/interim/position_log_files/{}".format(src[len(src)-29:
                                                                                                          len(src)-10]))
        if not os.path.exists(path):
            try:
                os.mkdir(path)
            except OSError:
                print("Creation of the directory %s failed, this path probably already exists" % path)
        self.path = pkg_resources.resource_filename(__name__, '/data/interim/Position_log_files/{}/pos_log_file_{}.csv'
                                                    .format(src[len(src)-29:len(src)-10], n))

        # Initiation of the Grabbers and Trackers and creation of csv log file
        self.grabber = Grabber(src)
        self.tracker = Tracker(cfg, pos_log_file=open(self.path, 'w'), name=__name__, LED_pos=LED_pos,
                               LED_thresholds=LED_thresholds)

        logging.debug('HexTrack initialization done!')

        # Video reader used to infer amount of frames
        self.vid = VideoFileClip(src)
        self.duration = self.vid.duration*15
        self.src = src

    # Loops through grabbing and tracking each frame of the video file
    def loop(self):
        """"Loop through all frames in video and track mouse positions"""

        # tqdm package used to monitor tracking progress
        pbar = tqdm(range(int(self.duration)))
        for i in pbar:

            # Grab next frame, stops loop if no new frame is present (happens when all frames in video tracked)
            frame = self.grabber.next()
            if frame is None:
                break

            # Checks if the frame has a mask already, if not, it creates a new mask
            if self.mask_init:
                self.tracker.apply(frame, self.frame_idx, n=self.n, src=self.src)
            else:
                self.tracker.apply(frame, self.frame_idx, mask_frame=self.made_mask, n=self.n, src=self.src)

            if Mask_check:

                # At the second frame, show computer-generated mask
                # If not sufficient, gives possibility to input user-generated mask
                if self.frame_idx == 0:
                    path = pkg_resources.resource_filename(__name__, "/data/raw/{}/Masks/mask_{}.png"
                                                           .format(self.sources[0][len(self.sources[0])-29:
                                                                                   len(self.sources[0])-10], n))
                    mask = cv2.imread(path)
                    plt.figure('Mask check')
                    plt.imshow(mask)
                    plt.show()
                    mask_check = input("If the mask is sufficient, enter y: ")
                    if mask_check != 'y':
                        input('Please upload custom mask under the name new_mask.png to the output folder'
                              ' and press enter')
                        mask_path = pkg_resources.resource_filename(__name__, "/Input_mask/new_mask.png")
                        self.made_mask = cv2.imread(mask_path, 0)
                        self.mask_init = False

            self.frame_idx += 1

        # Close down tracker position log file, tqdm progress bar and video reader
        self.tracker.close()
        pbar.close()
        self.vid.reader.close()

    def stop(self):
        """Closes the position log files for following steps to be used"""

        self.tracker.close()
        cv2.destroyAllWindows()
        raise SystemExit
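Both HexTrack variants compute the frame count as duration*15, hard-coding a 15 fps recording. If the source videos can vary, reading the rate from the clip avoids the magic number; a sketch using the clip's own metadata (classic moviepy.editor import, matching the VideoFileClip usage above):

from moviepy.editor import VideoFileClip

vid = VideoFileClip(src)                # `src` as in the constructors above
n_frames = int(vid.duration * vid.fps)  # use the clip's actual frame rate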
Example #13
def start(sequence_path, detection_path, output_path, min_conf,
          nms_thresh, min_detect_height_thresh, cosine_thresh, nn_budget, disp):
    image_dir = os.path.join(sequence_path, "img1")
    images = {
        int(os.path.splitext(f)[0]): os.path.join(image_dir, f)
        for f in os.listdir(image_dir)}
    gt_file = os.path.join(sequence_path, "gt/gt.txt")
    detections = None
    if detection_path is not None:
        detections = np.load(detection_path)
    gt = None
    if os.path.exists(gt_file):
        gt = np.loadtxt(gt_file, delimiter=',')
    if len(images) > 0:
        image = cv2.imread(next(iter(images.values())), cv2.IMREAD_GRAYSCALE)
        image_shape = image.shape
    else:
        image_shape = None
    if len(images) > 0:
        min_idx = min(images.keys())
        max_idx = max(images.keys())
    else:
        min_idx = int(detections[:, 0].min())
        max_idx = int(detections[:, 0].max())
    info_filename = os.path.join(sequence_path, "seqinfo.ini")
    if os.path.exists(info_filename):
        with open(info_filename, "r") as f:
            line_splits = [l.split('=') for l in f.read().splitlines()[1:]]
            info_dict = dict(
                s for s in line_splits if isinstance(s, list) and len(s) == 2)
        update_ms = 1000 / int(info_dict["frameRate"])
    else:
        update_ms = None
    if detections is not None:
        print(len(detections))
    feature_dim = detections.shape[1] - 10 if detections is not None else 0
    info = {
        "sequence_name": os.path.basename(sequence_path),
        "images": images,
        "detections": detections,
        "gt": gt,
        "image_shape": image_shape,
        "min_idx": min_idx,
        "max_idx": max_idx,
        "feature_dim": feature_dim,
        "update_ms": update_ms
    }
    metric = NNDistanceMetric(cosine_thresh, nn_budget) 
    trker = Tracker(metric)   
    outputs = []
    def process_frames(visualizer, index_frame):
        detection_list = []
        for row in info["detections"][info["detections"][:, 0].astype(int) == index_frame]:
            bbox, confidence, feature = row[2:6], row[6], row[10:]
            if bbox[3] < min_detect_height_thresh:
                continue
            detection_list.append(Detection(bbox, confidence, feature))
        detections = [d for d in detection_list if d.score >= min_conf]
        indices = nms(
            np.array([d.bbox for d in detections]), nms_thresh, 
            np.array([d.score for d in detections]))
        detections = [detections[i] for i in indices]
        trker.predict_tracker()
        trker.update_tracker(detections)
        if disp=="True":
            img = cv2.imread(info["images"][index_frame], cv2.IMREAD_COLOR)
            visualizer.image = img
            visualizer.detections(detections)
            visualizer.trackers(trker.track_list)
        for trk in trker.track_list:
            if trk.state != 2 or trk.last_update > 1:
                continue
            bbox = trk.to_bbox()
            outputs.append([
                index_frame, trk.id, bbox[0], bbox[1], bbox[2], bbox[3]])

    if disp =="False":
        while info['min_idx'] <= info['max_idx']:
            process_frames(None,info['min_idx'])
            info['min_idx'] += 1
    else:
        vis = visualize.Visualization(info, time_to_update_in_ms=15)
		vis.start_viewer_(process_frames)
Example #14
logging.getLogger('src.discord_handler').addHandler(handler)

logging.getLogger('src.epicinium_client').setLevel(log_level)
logging.getLogger('src.epicinium_client').addHandler(handler)

epicinium_application_id = config['application-id']
guild_id = config['guild-id']
listen_to_dm = config['listen-to-dm']

intents = discord.Intents.default()
intents.members = True
intents.presences = True

bot = commands.Bot(command_prefix='!', help_command=None, intents=intents)
bot.add_cog(State())
bot.add_cog(Tracker(bot))
bot.add_cog(DiscordManager(bot, guild_id))
bot.add_cog(BotData(bot))
bot.add_cog(DynoPlaceholder())
bot.add_cog(DiscordHandler(bot))
bot.add_cog(EpiciniumClient(bot, config))


@bot.event
async def on_ready():
    log.info("Logged in as {0.user}".format(bot))
    print("Logged in as {0.user}".format(bot))
    tracker = cast(Tracker, bot.get_cog('Tracker'))
    tracker.go_online()
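The snippet registers the cogs and the on_ready hook but stops before starting the client, so somewhere after it there must be a blocking run call. A sketch, assuming the bot token lives in the same config mapping (the 'token' key is a guess):

bot.run(config['token'])  # blocking: logs in and runs the event loop until shutdown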

Example #15
    def get_more_points(NUM):
        """
        Gather more points. This procedure is really kind of ugly and should probably be fixed.
        We want to find points that are near where we expect them to be.

        So begin by finding preimages to points that are on the line with gradient descent.
        This should be completely possible, because we have d_0 input dimensions but 
        only want to control one inner layer.
        """
        logger.log("Gather some more actual critical points on the plane",
                   level=Logger.INFO)
        stepsize = .1
        critical_points = []
        while len(critical_points) <= NUM:
            logger.log("On this iteration I have ",
                       len(critical_points),
                       "critical points on the plane",
                       level=Logger.INFO)
            points = np.random.normal(0, 1e3, size=(
                100,
                DIM,
            ))

            lr = 10
            for step in range(5000):
                # Use JaX's built in optimizer to do this.
                # We want to adjust the LR so that we get a better solution
                # as we optimize. Probably there is a better way to do this,
                # but this seems to work just fine.

                # No queries involved here.
                if step % 1000 == 0:
                    lr *= .5
                    init, opt_update, get_params = jax.experimental.optimizers.adam(
                        lr)

                    @jax.jit
                    def update(i, opt_state, batch):
                        params = get_params(opt_state)
                        return opt_update(i, loss_grad(batch, row), opt_state)

                    opt_state = init(points)

                if step % 100 == 0:
                    ell = loss(points, row)
                    if CHEATING:
                        # This isn't cheating, but makes things prettier
                        print(ell)
                    if ell < 1e-5:
                        break
                opt_state = update(step, opt_state, points)
                points = opt_state.packed_state[0][0]

            for point in points:
                # For each point, try to see where it actually is.

                # First, if optimization failed, then abort.
                if loss(point, row) > 1e-5:
                    continue

                if LAYER > 0:
                    # If we're on a deeper layer, and a prior layer is zero, then abort
                    if min(
                            np.min(np.abs(x))
                            for x in known_T.get_hidden_layers(point)) < 1e-4:
                        logger.log("is on prior", level=Logger.INFO)
                        continue

                # print("Stepsize", stepsize)
                tmp = Tracker().query_count
                solution = do_better_sweep(offset=point,
                                           low=-stepsize,
                                           high=stepsize,
                                           known_T=known_T)
                # print("qs", query_count-tmp)
                if len(solution) == 0:
                    stepsize *= 1.1
                elif len(solution) > 1:
                    stepsize /= 2
                elif len(solution) == 1:
                    stepsize *= 0.98
                    potential_solution = solution[0]

                    hiddens = extended_T.get_hidden_layers(potential_solution)

                    this_hidden_vec = extended_T.forward(potential_solution)
                    this_hidden = np.min(np.abs(this_hidden_vec))
                    if min(np.min(np.abs(x)) for x in
                           this_hidden_vec) > np.abs(this_hidden) * 0.9:
                        critical_points.append(potential_solution)
                    else:
                        logger.log("Reject it", level=Logger.INFO)
        logger.log("Finished with a total of",
                   len(critical_points),
                   "critical points",
                   level=Logger.INFO)
        return critical_points
Example #16
def main(track_alg):
    warnings.filterwarnings('ignore')
    path = get_path()
    beetle_tracker = Tracker(video_path=path, track_alg=track_alg)
    # read video
    video = cv2.VideoCapture(find_data_file(beetle_tracker._video))
    out = cv2.VideoWriter(
        "tracked_%s" % beetle_tracker.file_name, beetle_tracker.fourcc,
        beetle_tracker.fps,
        (beetle_tracker.resolution[0], beetle_tracker.resolution[1] + 80))
    # exit if video not opened
    if not video.isOpened():
        beetle_tracker.alert(
            'Could not open video: %s \n %s' %
            (beetle_tracker._video, find_data_file(beetle_tracker._video)))
        sys.exit()
    # store the length of frame and read the first frame
    beetle_tracker._frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
    ok, frame = video.read()

    # setup up the window and mouse callback
    cv2.namedWindow(beetle_tracker.window_name, cv2.WINDOW_AUTOSIZE)
    cv2.setMouseCallback(beetle_tracker.window_name, beetle_tracker._mouse_ops)
    while True:
        # Read a new frame and wait for a keypress
        video.set(cv2.CAP_PROP_POS_FRAMES, beetle_tracker.count - 1)
        ok, beetle_tracker.frame = video.read()
        beetle_tracker._fix_target = False
        key = cv2.waitKey(1)
        # check if we have reached the end of the video
        if not ok:
            break
        # resize the frame into 960 x 720
        beetle_tracker._init_frame()
        beetle_tracker.detect_rat_contour()

        if len(beetle_tracker._roi) > 0:
            beetle_tracker._roi = [
                convert(a[0], a[1], a[2], a[3]) for a in beetle_tracker._bboxes
            ]

        # if this is init mode, let the user target the beetles
        if beetle_tracker._add_box:
            time.sleep(0.2)
            beetle_tracker._add_bboxes()
        if beetle_tracker.count > 1:
            beetle_tracker._start = time.perf_counter()  # time.clock() was removed in Python 3.8

        # run stop model
        if len(beetle_tracker._bboxes) > 0 and beetle_tracker._run_model:
            beetle_tracker._is_stop, beetle_tracker._stop_obj = beetle_tracker.detect_and_auto_update(
                RESIZE, N_MAX)

        if beetle_tracker._run_motion:
            beetle_tracker._motion_detector(RESIZE, N_MAX)
            beetle_tracker._draw_bbox()
            cv2.imshow(beetle_tracker.window_name, beetle_tracker.frame)
            beetle_tracker._ask_add_box()

        # if 'r' was pressed or stop model return True, enter to retarget mode
        if key == KEY_RETARGET or beetle_tracker._is_stop:

            if len(beetle_tracker._bboxes) > 0:
                beetle_tracker._retarget_bboxes()
            else:
                beetle_tracker._add_bboxes()
        # if 'a' was pressed, enter add bounding box mode
        elif key == KEY_ADD:
            beetle_tracker._add_bboxes()
        # if 'd' was pressed, enter delete bounding box mode
        elif key == KEY_DELETE:
            beetle_tracker._delete_bboxes()
        elif key == KEY_CONTINUE:
            beetle_tracker._pause_frame()
        elif key == KEY_MODEL:
            beetle_tracker._run_model = not beetle_tracker._run_model
        elif key == KEY_MOTION:
            beetle_tracker._run_motion = not beetle_tracker._run_motion
        elif key == KEY_HELP:
            beetle_tracker.help()
        # elif key == KEY_UPDATE:
        #     beetle_tracker._update = not beetle_tracker._update
        elif key == KEY_JUMP:
            beetle_tracker._jump_frame()
        # restart the program
        elif key == KEY_CHANGE:
            cv2.destroyAllWindows()
            main(track_alg=TRACK_ALGORITHM)
        # friendly on/off switch for the detector
        elif key in [ord('1'), ord('2'), ord('3'), ord('4')]:
            beetle_tracker.switch(key)
        elif key == KEY_RAT:
            beetle_tracker._show_rat = not beetle_tracker._show_rat
        # otherwise, update bounding boxes from tracker
        else:
            ok, beetle_tracker._bboxes = beetle_tracker.tracker.update(
                beetle_tracker.frame)
            beetle_tracker._roi = [
                convert(a[0], a[1], a[2], a[3]) for a in beetle_tracker._bboxes
            ]

        if key == KEY_ESC or (cv2.getWindowProperty(beetle_tracker.window_name,
                                                    0) < 0):
            # draw current frame
            beetle_tracker._draw_bbox()
            cv2.imshow(beetle_tracker.window_name, beetle_tracker.frame)
            if beetle_tracker._ask_quit():
                break
        if ok:
            beetle_tracker.frame = beetle_tracker.orig_col.copy()
            # draw current frame
            beetle_tracker._draw_bbox()
            # append trace and img
            beetle_tracker._append_record()
            # save image inside the bounding boxes
            beetle_tracker._write_bboxes()
            if args['save_pos'] and len(beetle_tracker.object_name) > 0:
                beetle_tracker._save_pos()

            # write current frame to output video
            out.write(beetle_tracker.frame)
            beetle_tracker.count += 1
            beetle_tracker._n_pass_frame += 1
        else:
            break
        # Display result
        cv2.imshow(beetle_tracker.window_name, beetle_tracker.frame)

    video.release()
    out.release()
    cv2.destroyAllWindows()
Example #17
def solve_layer_sign(known_T, known_A0, known_B0, critical_points, LAYER,
                     already_checked_critical_points=False,
                     only_need_positive=False, l1_mask=None):
    """
    Compute the signs for one layer of the network.

    known_T is the transformation that computes up to layer K-1, with
    known_A and known_B being the layer K matrix up to sign.
    """

    def get_critical_points():
        logger.log("Init", level=Logger.INFO)
        logger.log(critical_points, level=Logger.INFO)
        for point in critical_points:
            logger.log("Tick", level=Logger.INFO)
            if already_checked_critical_points or is_on_following_layer(known_T, known_A0, known_B0, point):
                logger.log("Found layer N point at ", point, already_checked_critical_points, level=Logger.INFO)
                yield point

    get_critical_point = get_critical_points()

    logger.log("Start looking for critical point", level=Logger.INFO)
    MAX_POINTS = 200
    which_point = next(get_critical_point)
    logger.log("Done looking for critical point", level=Logger.INFO)

    initial_points = []
    history = []
    pts = []
    if already_checked_critical_points:
        for point in get_critical_point:
            initial_points.append(point)
            pts.append(point)
            which_polytope = get_polytope_at(known_T, known_A0, known_B0, point, False)  # [-1 1 -1]
            hidden_vector = get_hidden_at(known_T, known_A0, known_B0, LAYER, point, False)
            if CHEATING:
                layers = cheat_get_inner_layers(point)
                logger.log('have', [(np.argmin(np.abs(x)), np.min(np.abs(x))) for x in layers], level=Logger.INFO)
            history.append((which_polytope,
                            hidden_vector,
                            np.copy(point)))

    while True:
        if not already_checked_critical_points:
            history = []
            pts = []

        prev_count = -10
        good = False
        while len(pts) > prev_count + 2:
            logger.log("======" * 10, level=Logger.INFO)
            logger.log("RESTART SEARCH", len(pts), prev_count, level=Logger.INFO)
            logger.log(which_point, level=Logger.INFO)
            prev_count = len(pts)
            more_points, done = follow_hyperplane(LAYER, which_point,
                                                  known_T,
                                                  known_A0, known_B0,
                                                  history=history,
                                                  only_need_positive=only_need_positive)
            pts.extend(more_points)
            if len(pts) >= MAX_POINTS:
                logger.log("Have enough; break", level=Logger.INFO)
                break

            if len(pts) == 0:
                break

            neuron_values = known_T.extend_by(known_A0, known_B0).forward(pts)

            neuron_positive_count = np.sum(neuron_values > 1, axis=0)
            neuron_negative_count = np.sum(neuron_values < -1, axis=0)
            logger.log("Counts", level=Logger.INFO)
            logger.log(neuron_positive_count, level=Logger.INFO)
            logger.log(neuron_negative_count, level=Logger.INFO)

            logger.log("SHOULD BE DONE?", done, only_need_positive, level=Logger.INFO)
            if done and only_need_positive:
                good = True
                break
            if (np.all(neuron_positive_count > 0) and np.all(neuron_negative_count > 0)) or \
                    (only_need_positive and np.all(neuron_positive_count > 0)):
                logger.log("Have all the points we need (2)", level=Logger.INFO)
                good = True
                break

        if len(pts) < MAX_POINTS / 2 and not good:
            logger.log("=======" * 10, level=Logger.INFO)
            logger.log("Select a new point to start from", level=Logger.INFO)
            logger.log("=======" * 10, level=Logger.INFO)
            if already_checked_critical_points:
                logger.log("CHOOSE FROM", len(initial_points), initial_points, level=Logger.INFO)
                which_point = initial_points[np.random.randint(0, len(initial_points))]
            else:
                which_point = next(get_critical_point)
        else:
            logger.log("Abort", level=Logger.INFO)
            break

    critical_points = np.array(pts)  # sorted(list(set(map(tuple,pts))))

    logger.log("Now have critical points", len(critical_points), level=Logger.INFO)

    if CHEATING:
        layer = [[np.min(np.abs(x)) for x in cheat_get_inner_layers(x[np.newaxis, :])][LAYER + 1] for x in
                 critical_points]

        # print("Which layer is zero?", sorted(layer))
        layer = np.abs(cheat_get_inner_layers(np.array(critical_points))[LAYER + 1])

        logger.log(layer, level=Logger.INFO)

        which_is_zero = np.argmin(layer, axis=1)
        logger.log("Which neuron is zero?", which_is_zero, level=Logger.INFO)

        which_is_zero = which_is_zero[0]

    logger.log("Query count", Tracker().query_count, level=Logger.INFO)

    K = neuron_count[LAYER + 1]
    MAX = (1 << K)
    if already_checked_critical_points:
        bounds = [(MAX - 1, MAX)]
    else:
        bounds = []
        for i in range(1024):
            bounds.append(((MAX * i) // 1024, (MAX * (i + 1)) // 1024))

    logger.log("Created a list", level=Logger.INFO)

    known_hidden_so_far = known_T.forward(critical_points, with_relu=True)
    debug = False

    start_time = time.time()

    extra_args_tup = (known_A0, known_B0, LAYER, known_hidden_so_far, K, None)

    all_res = pool[0].map(is_solution_map, [(bound, extra_args_tup) for bound in bounds])

    end_time = time.time()

    logger.log("Done map, now collect results", level=Logger.INFO)
    logger.log("Took", end_time - start_time, 'seconds', level=Logger.INFO)

    all_res = [x for y in all_res for x in y]

    scores = [r[0] for r in all_res]
    solution_attempts = sum([r[1] for r in all_res])
    total_attempts = len(all_res)

    logger.log("Attempts at solution:", (solution_attempts), 'out of', total_attempts, level=Logger.INFO)

    std = np.std([x[0] for x in scores])
    logger.log('std', std, level=Logger.INFO)
    logger.log('median', np.median([x[0] for x in scores]), level=Logger.INFO)
    logger.log('min', np.min([x[0] for x in scores]), level=Logger.INFO)

    return min(scores, key=lambda x: x[0])[1], critical_points
Example #18
def follow_hyperplane(LAYER, start_point, known_T, known_A, known_B,
                      history=None, MAX_POINTS=1e3, only_need_positive=False):
    """
    This is the ugly algorithm that will let us recover sign for expansive networks.
    Assumes we have extracted up to layer K-1 correctly, and layer K up to sign.

    start_point is a neuron on layer K+1

    known_T is the transformation that computes up to layer K-1, with
    known_A and known_B being the layer K matrix up to sign.

    We're going to come up with a bunch of different inputs,
    each of which has the same critical point held constant at zero.
    """

    def choose_new_direction_from_minimize(previous_axis):
        """
        Given the current point which is at a critical point of the next
        layer neuron, compute which direction we should travel to continue
        with finding more points on this hyperplane.

        Our goal is going to be to pick a direction that lets us explore
        a new part of the space we haven't seen before.
        """

        logger.log("Choose a new direction to travel in", level=Logger.INFO)
        if len(history) == 0:
            which_to_change = 0
            new_perp_dir = perp_dir
            new_start_point = start_point
            initial_signs = get_polytope_at(known_T, known_A, known_B, start_point)

            # If we're in the 1 region of the polytope then we try to make it smaller
            # otherwise make it bigger
            fn = min if initial_signs[0] == 1 else max
        else:
            neuron_values = np.array([x[1] for x in history])

            neuron_positive_count = np.sum(neuron_values > 1, axis=0)
            neuron_negative_count = np.sum(neuron_values < -1, axis=0)

            mean_plus_neuron_value = neuron_positive_count / (neuron_positive_count + neuron_negative_count + 1)
            mean_minus_neuron_value = neuron_negative_count / (neuron_positive_count + neuron_negative_count + 1)

            # we want to find values that are consistently 0 or 1
            # So map 0 -> 0 and 1 -> 0 and the middle to higher values
            if only_need_positive:
                neuron_consistency = mean_plus_neuron_value
            else:
                neuron_consistency = mean_plus_neuron_value * mean_minus_neuron_value

            # Print out how much progress we've made.
            # This estimate is probably worse than Windows 95's estimated time remaining.
            # At least it's monotonic. Be thankful for that.
            logger.log("Progress", "%.1f" % int(np.mean(neuron_consistency != 0) * 100) + "%", level=Logger.INFO)
            logger.log("Counts on each side of each neuron", level=Logger.INFO)
            logger.log(neuron_positive_count, level=Logger.INFO)
            logger.log(neuron_negative_count, level=Logger.INFO)

            # Choose the smallest value, which is the most consistent
            which_to_change = np.argmin(neuron_consistency)

            logger.log("Try to explore the other side of neuron", which_to_change, level=Logger.INFO)

            if which_to_change != previous_axis:
                if previous_axis is not None and neuron_consistency[previous_axis] == neuron_consistency[
                    which_to_change]:
                    # If the previous axis we were working towards has the same value as this one,
                    # then don't change our mind and just keep going at that one
                    # (almost always--sometimes we can get stuck, let us get unstuck)
                    which_to_change = previous_axis
                    new_start_point = start_point
                    new_perp_dir = perp_dir
                else:
                    valid_axes = np.where(neuron_consistency == neuron_consistency[which_to_change])[0]

                    best = (np.inf, None, None)

                    for _, potential_hidden_vector, potential_point in history[-1:]:
                        for potential_axis in valid_axes:
                            value = potential_hidden_vector[potential_axis]
                            if np.abs(value) < best[0]:
                                best = (np.abs(value), potential_axis, potential_point)

                    _, which_to_change, new_start_point = best
                    new_perp_dir = perp_dir

            else:
                new_start_point = start_point
                new_perp_dir = perp_dir

            # If we're in the 1 region of the polytope then we try to make it smaller
            # otherwise make it bigger
            fn = min if neuron_positive_count[which_to_change] > neuron_negative_count[which_to_change] else max
            arg_fn = np.argmin if neuron_positive_count[which_to_change] > neuron_negative_count[
                which_to_change] else np.argmax
            logger.log("Changing", which_to_change, 'to flip sides because mean is',
                       mean_plus_neuron_value[which_to_change], level=Logger.INFO)

        val = matmul(known_T.forward(new_start_point, with_relu=True), known_A, known_B)[which_to_change]

        initial_signs = get_polytope_at(known_T, known_A, known_B, new_start_point)

        # Now we're going to figure out what direction makes this biggest/smallest
        # this doesn't take any queries
        # There's probably an analytical way to do this.
        # But thinking is hard. Just try 1000 random angles.
        # There are no queries involved in this process.

        choices = []
        for _ in range(1000):
            random_dir = np.random.normal(size=DIM)
            perp_component = np.dot(random_dir, new_perp_dir) / (np.dot(new_perp_dir, new_perp_dir)) * new_perp_dir
            parallel_dir = random_dir - perp_component

            # This is the direction we're going to travel in.
            go_direction = parallel_dir / np.sum(parallel_dir ** 2) ** .5

            try:
                a_bit_further, high = binary_search_towards(known_T,
                                                            known_A, known_B,
                                                            new_start_point,
                                                            initial_signs,
                                                            go_direction)
            except AcceptableFailure:
                continue
            if a_bit_further is None:
                continue

            # choose a direction that makes the Kth value go down by the most
            val = matmul(known_T.forward(a_bit_further[np.newaxis, :], with_relu=True), known_A, known_B)[0][
                which_to_change]

            # print('\t', val, high)

            choices.append([val,
                            new_start_point + high * go_direction])

        best_value, multiple_intersection_point = fn(choices, key=lambda x: x[0])

        logger.log('Value', best_value, level=Logger.INFO)
        return new_start_point, multiple_intersection_point, which_to_change

    ###################################################
    ### Actual code to do the sign recovery starts. ###
    ###################################################

    start_box_step = 0
    points_on_plane = []

    if CHEATING:
        layer = np.abs(cheat_get_inner_layers(np.array(start_point))[LAYER + 1])
        logger.log("Layer", layer, level=Logger.INFO)
        which_is_zero = np.argmin(layer)

    current_change_axis = 0

    while True:
        logger.log("\n\n", level=Logger.INFO)
        logger.log("-----" * 10, level=Logger.INFO)

        if CHEATING:
            layer = np.abs(cheat_get_inner_layers(np.array(start_point))[LAYER + 1])
            # print('layer',LAYER+1, layer)
            # print('all inner layers')
            # for e in cheat_get_inner_layers(np.array(start_point)):
            #    print(e)
            which_is_zero_2 = np.argmin(np.abs(layer))

            if which_is_zero_2 != which_is_zero:
                logger.log("STARTED WITH", which_is_zero, "NOW IS", which_is_zero_2, level=Logger.INFO)
                logger.log(layer, level=Logger.INFO)
                raise RuntimeError("the neuron at zero changed during hyperplane following")

        # Keep track of where we've been, so we can go to new places.
        which_polytope = get_polytope_at(known_T, known_A, known_B, start_point, False)  # [-1 1 -1]
        hidden_vector = get_hidden_at(known_T, known_A, known_B, LAYER, start_point, False)
        sign_at_init = sign_to_int(which_polytope)  # 0b010 -> 2

        logger.log("Number of collected points", len(points_on_plane), level=Logger.INFO)
        if len(points_on_plane) > MAX_POINTS:
            return points_on_plane, False

        neuron_values = np.array([x[1] for x in history])

        neuron_positive_count = np.sum(neuron_values > 1, axis=0)
        neuron_negative_count = np.sum(neuron_values < -1, axis=0)

        if (np.all(neuron_positive_count > 0) and np.all(neuron_negative_count > 0)) or \
                (only_need_positive and np.all(neuron_positive_count > 0)):
            logger.log("Have all the points we need (1)", level=Logger.INFO)
            logger.log(Tracker().query_count, level=Logger.INFO)
            logger.log(neuron_positive_count, level=Logger.INFO)
            logger.log(neuron_negative_count, level=Logger.INFO)

            neuron_values = np.array(
                [get_hidden_at(known_T, known_A, known_B, LAYER, x, False) for x in points_on_plane])

            neuron_positive_count = np.sum(neuron_values > 1, axis=0)
            neuron_negative_count = np.sum(neuron_values < -1, axis=0)

            logger.log(neuron_positive_count, level=Logger.INFO)
            logger.log(neuron_negative_count, level=Logger.INFO)

            return points_on_plane, True

        # 1. find a way to move along the hyperplane by computing the normal
        # direction using the ratios function. Then find a parallel direction.

        try:
            # perp_dir = get_ratios([start_point], [range(DIM)], eps=1e-4)[0].flatten()
            perp_dir = get_ratios_lstsq(0, [start_point], [range(DIM)], KnownT([], []), eps=1e-5)[0].flatten()

        except AcceptableFailure:
            logger.log("Failed to compute ratio at start point. Something very bad happened.", level=Logger.ERROR)
            return points_on_plane, False

        # Record these points.
        history.append((which_polytope,
                        hidden_vector,
                        np.copy(start_point)))

        # We can't just pick any parallel direction. If we did, then we would
        # not end up covering much of the input space.

        # Instead, we're going to figure out which layer-1 hyperplanes are "visible"
        # from the current point. Then we're going to try and go reach all of them.

        # This is the point at which the first and second layers intersect.
        start_point, multiple_intersection_point, new_change_axis = choose_new_direction_from_minimize(
            current_change_axis)

        if new_change_axis != current_change_axis:
            start_point, multiple_intersection_point, current_change_axis = choose_new_direction_from_minimize(None)

        # if CHEATING:
        #    print("INIT MULTIPLE", cheat_get_inner_layers(multiple_intersection_point))

        # Refine the direction we're going to travel in---stay numerically stable.
        towards_multiple_direction = multiple_intersection_point - start_point
        step_distance = np.sum(towards_multiple_direction ** 2) ** .5

        logger.log("Distance we need to step:", step_distance, level=Logger.INFO)

        if step_distance > 1 or True:  # 'or True': always refine the step below, regardless of distance
            mid_point = 1e-4 * towards_multiple_direction / np.sum(towards_multiple_direction ** 2) ** .5 + start_point

            random_dir = np.random.normal(size=DIM)

            mid_points = do_better_sweep(mid_point, perp_dir / np.sum(perp_dir ** 2) ** .5,
                                         low=-1e-3,
                                         high=1e-3,
                                         known_T=known_T)

            if len(mid_points) > 0:
                mid_point = mid_points[np.argmin(np.sum((mid_point - mid_points) ** 2, axis=1))]

                towards_multiple_direction = mid_point - start_point
                towards_multiple_direction = towards_multiple_direction / np.sum(towards_multiple_direction ** 2) ** .5

                initial_signs = get_polytope_at(known_T, known_A, known_B, start_point)
                _, high = binary_search_towards(known_T,
                                                known_A, known_B,
                                                start_point,
                                                initial_signs,
                                                towards_multiple_direction)

                multiple_intersection_point = towards_multiple_direction * high + start_point

        # Find the angle of the next hyperplane
        # First, take random steps away from the intersection point
        # Then run the search algorithm to find some intersections
        # what we find will either be a layer-1 or layer-2 intersection.

        logger.log("Now try to find the continuation direction", level=Logger.INFO)
        success = None
        while success is None:
            if start_box_step < 0:
                start_box_step = 0
                logger.log("VERY BAD FAILURE", level=Logger.INFO)
                logger.log("Choose a new random point to start from", level=Logger.INFO)
                which_point = np.random.randint(0, len(history))
                start_point = history[which_point][2]
                logger.log("New point is", which_point, level=Logger.INFO)
                current_change_axis = np.random.randint(0, sizes[LAYER + 1])
                logger.log("New axis to change", current_change_axis, level=Logger.INFO)
                break

            logger.log("\tStart the box step with size", start_box_step, level=Logger.INFO)
            try:
                success, camefrom, stepsize = find_plane_angle(known_T,
                                                               known_A, known_B,
                                                               multiple_intersection_point,
                                                               sign_at_init,
                                                               start_box_step)
            except AcceptableFailure:
                # Go back to the top and try with a new start point
                logger.log("\tOkay we need to try with a new start point", level=Logger.INFO)
                start_box_step = -10

            start_box_step -= 2

        if success is None:
            continue

        val = matmul(known_T.forward(multiple_intersection_point, with_relu=True), known_A, known_B)[new_change_axis]

        logger.log("Value at multiple:", val, level=Logger.INFO)
        val = matmul(known_T.forward(success, with_relu=True), known_A, known_B)[new_change_axis]

        logger.log("Value at success:", val, level=Logger.INFO)

        if stepsize < 10:
            new_move_direction = success - multiple_intersection_point

            # We don't want to be right next to the multiple intersection point.
            # So let's binary search to find how far away we can go while remaining in this polytope.
            # Then we'll go half as far as we can maximally go.

            initial_signs = get_polytope_at(known_T, known_A, known_B, success)
            logger.log("polytope at initial", sign_to_int(initial_signs), level=Logger.INFO)
            low = 0
            high = 1
            while high - low > 1e-2:
                mid = (high + low) / 2
                query_point = multiple_intersection_point + mid * new_move_direction
                next_signs = get_polytope_at(known_T, known_A, known_B, query_point)

                logger.log("polytope at", mid, sign_to_int(next_signs),
                           "%x" % (sign_to_int(next_signs) ^ sign_to_int(initial_signs)), level=Logger.INFO)
                if initial_signs == next_signs:
                    low = mid
                else:
                    high = mid

            logger.log("GO TO", mid, level=Logger.INFO)

            success = multiple_intersection_point + (mid / 2) * new_move_direction

            val = matmul(known_T.forward(success, with_relu=True), known_A, known_B)[new_change_axis]
            logger.log("Value at moved success:", val, level=Logger.INFO)

        logger.log("Adding the points to the set of known good points", level=Logger.INFO)

        points_on_plane.append(start_point)

        if camefrom is not None:
            points_on_plane.append(camefrom)
        # print("Old start point", start_point)
        # print("Set to success", success)
        start_point = success
        start_box_step = max(stepsize - 1, 0)

    return points_on_plane, False
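
The hyperplane-following loop above leans repeatedly on one primitive: a binary search along a direction for the largest step that keeps the current activation-sign pattern, i.e. stays inside one polytope. A minimal sketch of that pattern, assuming only a callable signs_at(x) that returns the sign pattern at x (both names are illustrative, not this project's API):

import numpy as np

def max_step_in_polytope(x0, direction, signs_at, hi=1.0, tol=1e-2):
    """Largest step along `direction` from `x0` that preserves the sign pattern."""
    initial = signs_at(x0)
    lo = 0.0
    while hi - lo > tol:
        mid = (hi + lo) / 2
        if signs_at(x0 + mid * direction) == initial:
            lo = mid  # still inside the same polytope: push further out
        else:
            hi = mid  # crossed a boundary: pull back
    return lo  # stepping lo / 2, as the code above does, keeps a safe margin
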
class Api():
	def __init__(self):

		# load the serialized models from disk
		self.net = cv2.dnn.readNet(car_detection_bin_model, car_detection_xml_model)
		self.net2 = cv2.dnn.readNet(car_classification_bin_model, car_classification_xml_model)
		self.net3 = cv2.dnn.readNet(plate_detecttion_bin_model, plate_detection_xml_model)


		# initialize the centroid tracker
		self.tr = Tracker(df)

		self.last_ids = []
		self.last_positions = []
		self.dt = 0
		self.frame_num = 1
		self.start = 0
		self.ppm = 0
		self.fn = 1
		self.ids = []
		self.fns = []
		self.ids2 = []
		self.sp = []
		self.cnt = 0





	def build_app(self, frame):

		# initialize the parameters
		fps0 = 25

		xmin = 0
		ymin = 0
		xmax = 0
		ymax = 0

		w_size = 600
		h_size = 400

		speed = 0
		sp = []
		idsp = []


		# border widths for padding the cropped image
		topBorderWidth = 300 
		bottomBorderWidth = 300 
		leftBorderWidth = 300 
		rightBorderWidth = 300 


		# resize the frame so inputs of different sizes share a common working size
		frame = resizeImg(frame, h_size, w_size)
		# keep an unannotated copy of the frame for cropping
		frame_copy = frame.copy()

		# this CNN requires fixed spatial dimensions for the input image(s)
		# so we need to ensure it is resized to (672, 384) 
		blob = cv2.dnn.blobFromImage(frame, size=(672, 384))
		# set the blob as input to the network and perform a forward-pass to
		# obtain our output classification
		self.net.setInput(blob)
		out = self.net.forward()

		rects = []

		# define the ROI (two horizontal lines); speed is estimated from the
		# number of frames a vehicle takes to travel between them
		pts = [(0,240), (420,240), (0,320), (440,320)]
		cv2.line(frame, pts[0],pts[1], (250,0,0), 2)
		cv2.line(frame, pts[2],pts[3], (250,0,0), 2)


		# loop over the predictions and display them
		for detection in out.reshape(-1, 7):
			confidence = float(detection[2])
			# only detections with confidence above the threshold are processed
			if confidence > thr_box:
				xmin = int(detection[3] * frame.shape[1])
				ymin = int(detection[4] * frame.shape[0])
				xmax = int(detection[5] * frame.shape[1])
				ymax = int(detection[6] * frame.shape[0])

				# skip bounding boxes that fall outside the frame
				if (xmin < 0 or ymin < 0 or xmax >= frame.shape[1] or ymax >= frame.shape[0]):
					continue
				
				# collect the high-confidence detection boxes
				object_box = (xmin, ymin, xmax, ymax)
				rects.append(object_box)


		# update our centroid tracker using the computed set of bounding
		# box rectangles
		objects = self.tr.update(rects)

		list1 = []
		ids = []
		positions = []

		frame_cnt = 1


		# loop over the tracked objects
		for (trackID, rect) in objects.items():	
			

			if (rect[0] < 0 or rect[1] < 0 or rect[2] >= frame.shape[1] or rect[3] >= frame.shape[0]):
				continue


			if (rect[2] - rect[0]) < 100 or (rect[3] - rect[1]) < 100:
				continue
				
			
			carBoxWidth = (rect[2] - rect[0])
			carBoxHeight = (rect[3] - rect[1])

			croped = frame_copy[rect[1]:rect[3], rect[0]:rect[2]] 


			# skip empty crops (slicing can return a zero-size array)
			if croped.size == 0:
				continue

          
			# resize the crop so it is suitable for classification
			resized_c = resizeImg(croped, h_size, w_size)

			# apply this function to get type and color of the vehicles
			# ["car", "bus", "truck", "van"] and ["white", "gray", "yellow", "red", "green", "blue", "black"]
			type_index, color_index = type_color(resized_c, self.net2)
			
			# find the centroid point of boxes for speed estimation 
			centroid = rect_point_center(rect)

			# check whether the vehicle has crossed the first ROI line
			# (its centroid lies between the two lines)
			if (centroid[1] <= pts[2][1] and centroid[1] > pts[0][1] and centroid[0] < pts[1][0]):
				# only record an id the first time it is seen
				if trackID not in self.ids:
					# record the id and the frame number at which it crossed
					self.ids.append(trackID)
					self.fns.append(self.frame_num)

			# check whether the vehicle has crossed the second line of the ROI
			if (centroid[1] <= pts[0][1] and centroid[0] < pts[1][0]):
				# check if the id is in the list
				if trackID in self.ids:
					fps = fps0
					# find the index of this id
					ind = self.ids.index(trackID)
					# number of frames the vehicle took to cross the ROI
					frame_cnt = np.abs(self.fns[ind] - self.frame_num)
					# estimate the speed of the vehicle from the crossing time
					speed0 = estimateSpeed(dst, frame_cnt, fps)
					speed = int(speed0)
					# store the speed together with its id
					self.sp.append(speed)
					self.ids2.append(trackID)
					# remove the entries that have already been used
					self.ids.pop(ind)
					self.fns.pop(ind)
					
			
			
			speed_ = 0
			
			# display the speed of vehicles whose speed has already been estimated
			if trackID in self.ids2:
				ind = self.ids2.index(trackID)
				speed_ = self.sp[ind]
				sc = ybg_b(frame, rect)
				cv2.putText(frame, str(self.sp[ind]), (rect[2]+5, rect[1]+20), cv2.FONT_HERSHEY_SIMPLEX, 2*sc, (0, 0, 0), 1)
				cv2.putText(frame, "Km/h", (rect[2]+5, rect[1]+40), cv2.FONT_HERSHEY_SIMPLEX, 2*sc, (0, 0, 0), 1)
				if self.cnt == 200:
					self.ids2.clear()
					self.sp.clear()
					self.cnt = 0 
				
			
			id_ = "{}".format(trackID)

			# draw the information computed above
			cv2.rectangle(frame, (rect[0], rect[1]), (rect[2], rect[3]), (0, 250, 0), 2)
			# draw a black background so the labels are clearly readable
			sc = bbg_b(frame, rect)
			cv2.putText(frame, (id_)+", "+(color_classes[color_index])+", "+(type_classes[type_index]), (rect[0], rect[1]-10), cv2.FONT_HERSHEY_SIMPLEX, 2*sc, (0, 0, 250), 1)
			# show the center of the boxes
			cv2.circle(frame, (centroid[0], centroid[1]), 4, (0, 255, 250), -1)


			# pad the crop with a constant border so it is suitable for licence plate detection
			resized = cv2.copyMakeBorder(croped, 
										topBorderWidth, 
										bottomBorderWidth, 
										leftBorderWidth, 
										rightBorderWidth, 
										cv2.BORDER_CONSTANT, 
										value=(0,0,0)
										)

		
			# this CNN requires fixed spatial dimensions for the input image(s)
			# so we need to ensure it is resized to (300, 300) 
			blob3 = cv2.dnn.blobFromImage(resized, size=(300, 300))
			# set the blob as input to the network and perform a forward-pass to
			# obtain our output classification
			self.net3.setInput(blob3)
			out3 = self.net3.forward() 

			for detection in out3.reshape(-1, 7):
				confidence = float(detection[2])
				xmin_ = int(detection[3] * resized.shape[1])
				ymin_ = int(detection[4] * resized.shape[0])
				xmax_ = int(detection[5] * resized.shape[1])
				ymax_ = int(detection[6] * resized.shape[0])                                

				# apply the confidence threshold and a maximum height for the detected plate box
				if confidence > thr_plate and (ymax_ - ymin_) < 50:
					# map plate coordinates back to the frame: both x offsets are set
					# by the left padding and both y offsets by the top padding
					# (all four border widths happen to be equal here)
					x1 = rect[0] + xmin_ - leftBorderWidth
					y1 = rect[1] + ymin_ - topBorderWidth
					x2 = rect[0] + xmax_ - leftBorderWidth
					y2 = rect[1] + ymax_ - topBorderWidth

					rect_ = (x1, y1, x2, y2)

					cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 250, 0), 2)

					
					croped_plate = resized[ymin_:ymax_, xmin_:xmax_]
					# read the plate number with Tesseract OCR
					text = build_tesseract_text(croped_plate)


					if text:
						sc = bbg_p(frame, rect_)
						cv2.putText(frame, text, (x1, y1-4), cv2.FONT_HERSHEY_SIMPLEX, 4*sc, (0, 0, 250), 1)
					else:
						text = "Not Recognized"

					# collect all parameters to be saved in the database
					list1.append((trackID, color_classes[color_index], type_classes[type_index], (speed_), text))

			

		self.frame_num += 1
		self.cnt += 1



		return list1, frame
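
estimateSpeed is defined elsewhere in this project; a minimal sketch of the usual computation, assuming dst is the real-world length of the ROI in metres (the names here are illustrative):

def estimate_speed_sketch(dst_meters, frame_cnt, fps):
    # crossing time in seconds, from the frame count and the capture rate
    seconds = frame_cnt / float(fps)
    # metres per second converted to km/h with the 3.6 factor
    return dst_meters / seconds * 3.6

For example, at 25 fps a vehicle that takes 18 frames to cross a 5 m ROI travels 5 / 0.72 ≈ 6.9 m/s, roughly 25 km/h.
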
def compute_layer_values(critical_points, known_T, LAYER):
    if LAYER == 0:
        COUNT = neuron_count[LAYER + 1] * 3
    else:
        COUNT = neuron_count[LAYER + 1] * np.log(sizes[LAYER + 1]) * 3

    # type: [(ratios, critical_point)]
    this_layer_critical_points = []

    partial_weights = None
    partial_biases = None

    def check_fn(point):
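        # Reject candidate points whose hidden activations, pushed through the
        # already-recovered partial weights, are nearly zero: such a point lies
        # on the hyperplane of a neuron we have already solved for.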
        if partial_weights is None:
            return True
        hidden = matmul(known_T.forward(point, with_relu=True), partial_weights.T, partial_biases)
        if np.any(np.abs(hidden) < 1e-4):
            return False

        return True

    logger.log("", level=Logger.INFO)
    logger.log("Start running critical point search to find neurons on layer", LAYER, level=Logger.INFO)
    while True:
        logger.log("At this iteration I have", len(this_layer_critical_points), "critical points", level=Logger.INFO)

        def reuse_critical_points():
            for witness in critical_points:
                yield witness

        this_layer_critical_points.extend(gather_ratios(reuse_critical_points(), known_T, check_fn,
                                                        LAYER, COUNT))

        logger.log("Query count after that search:", Tracker().query_count, level=Logger.INFO)
        logger.log("And now up to ", len(this_layer_critical_points), "critical points", level=Logger.INFO)

        ## filter out duplicates
        filtered_points = []

        # Don't add points that are (numerically) identical to ones we've already kept.
        for i, (ratio1, point1) in enumerate(this_layer_critical_points):
            for ratio2, point2 in this_layer_critical_points[i + 1:]:
                if np.sum((point1 - point2) ** 2) ** .5 < 1e-10:
                    break
            else:
                # for-else: runs only if no duplicate was found above
                filtered_points.append((ratio1, point1))

        this_layer_critical_points = filtered_points

        logger.log("After filtering duplicates we're down to ", len(this_layer_critical_points), "critical points",
                   level=Logger.INFO)

        logger.log("Start trying to do the graph solving", level=Logger.INFO)
        try:
            critical_groups, extracted_normals = graph_solve([x[0] for x in this_layer_critical_points],
                                                             [x[1] for x in this_layer_critical_points],
                                                             neuron_count[LAYER + 1],
                                                             LAYER=LAYER,
                                                             debug=True)
            break
        except GatherMoreData as e:
            logger.log("Graph solving failed because we didn't explore all sides of at least one neuron",
                       level=Logger.INFO)
            logger.log("Fall back to the hyperplane following algorithm in order to get more data", level=Logger.INFO)

            def mine(r):
                while len(r) > 0:
                    logger.log("Yielding a point", level=Logger.INFO)
                    yield r[0]
                    r = r[1:]
                logger.log("No more to give!", level=Logger.INFO)

            prev_T = KnownT(known_T.A[:-1], known_T.B[:-1])

            _, more_critical_points = sign_recovery.solve_layer_sign(prev_T, known_T.A[-1], known_T.B[-1], mine(e.data),
                                                                     LAYER - 1, already_checked_critical_points=True,
                                                                     only_need_positive=True)

            logger.log("Add more", len(more_critical_points), level=Logger.INFO)
            this_layer_critical_points.extend(gather_ratios(more_critical_points, known_T, check_fn,
                                                            LAYER, 1e6))
            logger.log("Done adding", level=Logger.INFO)

            COUNT = neuron_count[LAYER + 1]
        except AcceptableFailure as e:
            logger.log("Graph solving failed; get more points", level=Logger.INFO)
            COUNT = neuron_count[LAYER + 1]
            if hasattr(e, 'partial_solution'):

                if len(e.partial_solution[0]) > 0:
                    partial_weights, corresponding_examples = e.partial_solution
                    logger.log("Got partial solution with shape", partial_weights.shape, level=Logger.INFO)
                    if CHEATING:
                        logger.log("Corresponding to",
                                   np.argmin(
                                       np.abs(cheat_get_inner_layers([x[0] for x in corresponding_examples])[LAYER]),
                                       axis=1), level=Logger.INFO)

                    partial_biases = []
                    for weight, examples in zip(partial_weights, corresponding_examples):
                        hidden = known_T.forward(examples, with_relu=True)
                        logger.log("hidden", np.array(hidden).shape, level=Logger.INFO)
                        bias = -np.median(np.dot(hidden, weight))
                        partial_biases.append(bias)
                    partial_biases = np.array(partial_biases)

    logger.log("Number of critical points per cluster", [len(x) for x in critical_groups], level=Logger.INFO)

    point_per_class = [x[0] for x in critical_groups]

    extracted_normals = np.array(extracted_normals).T

    # Compute the bias because we know wx+b=0
    extracted_bias = [matmul(known_T.forward(point_per_class[i], with_relu=True), extracted_normals[:, i], c=None) for i
                      in range(neuron_count[LAYER + 1])]

    # Don't forget to negate it.
    # That's important.
    # No, I definitely didn't forget this line the first time around.
    extracted_bias = -np.array(extracted_bias)

    # For the failed-to-identify neurons, set the bias to zero
    extracted_bias *= np.any(extracted_normals != 0, axis=0)[:, np.newaxis]

    if CHEATING:
        # Compute how far off we are from the true matrix
        real_scaled = A[LAYER] / A[LAYER][0]
        extracted_scaled = extracted_normals / extracted_normals[0]

        mask = []
        reorder_rows = []
        for i in range(len(extracted_bias)):
            which_idx = np.argmin(np.sum(np.abs(real_scaled - extracted_scaled[:, [i]]), axis=0))
            reorder_rows.append(which_idx)
            mask.append((A[LAYER][0, which_idx]))

        logger.log('matrix norm difference', np.sum(np.abs(extracted_normals * mask - A[LAYER][:, reorder_rows])),
                   level=Logger.INFO)
    else:
        mask = [1] * len(extracted_bias)

    return extracted_normals, extracted_bias, mask
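
The bias recovery above follows directly from the critical-point condition: if a point x lies on neuron i's hyperplane, then normal_i . h(x) + bias_i = 0 for the hidden representation h. A minimal sketch (names illustrative):

import numpy as np

def recover_bias(hidden_at_critical, normal):
    # on the critical hyperplane, normal . hidden + bias = 0,
    # hence bias = -(normal . hidden)
    return -np.dot(hidden_at_critical, normal)
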
Exemplo n.º 21
0
    def __init__(self, init_thread):
        self.tracker = Tracker(self, init_thread)
        self.thread = create_and_start_thread(self.tracker.loop)
Exemplo n.º 22
0
})
db = firebase.database()

from src.detector import Detectors
from src.tracker import Tracker
# from src.counting import *

_lock = threading.Lock()
_server_ip = '192.168.192.50'
_port_number = '5000'
_phone_number = '123456789'

_detector = Detectors()
_tracker = Tracker(dist_thresh=80,
                   max_frames_to_skip=50,
                   max_trace_length=5,
                   axis='x',
                   direction=1)
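# (assumed semantics, inferred from the parameter names: dist_thresh is the
# maximum association distance, max_frames_to_skip drops stale tracks,
# max_trace_length bounds the stored trail, and axis/direction set the
# counting orientation)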
_diret_id = 0
_roi_line = []
_total_in, _total_ou = 0, 0

_banner_shape = [50, 150]
_spaces = [.1, .4, .8]
_s_point, _e_point = (0, 0), (0, 0)

track_colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0),
                (0, 255, 255), (255, 0, 255), (255, 127, 255), (127, 0, 255),
                (127, 0, 127)]

Exemplo n.º 23
0
import cv2
import numpy as np
import time
import imutils
from filterpy.kalman import KalmanFilter
from src.tracker import Tracker
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--file_name', '-f', type=str, default=0)
args = parser.parse_args()
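
# usage: run without arguments to capture from camera 0, or pass
# `-f video.mp4` to process a file instead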

track = Tracker()
# if a file name is provided, setupVideoStream will use the file;
# otherwise it defaults to 0, i.e. camera capture
track.setupVideoStream(args.file_name)
track.drawTrackbars()

while (True):
    track.setFrame()
    time.sleep(.01)

    ball_mask = track.applyMask(track.currentFrame, track.BALL_HSV[0],
                                track.BALL_HSV[1], "Mask")
    track.findContours(ball_mask)
    track.showFrame()
    track.returnTrackbarPosition()

    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
Exemplo n.º 24
0
from src.tracker import Tracker

if __name__ == "__main__":
    Tracker()