Example #1
def respond_role_menu(call, user):
    role = Role.get(Role.role_name == call.data)
    prev_value = RoleSelectionTracker.get(
        RoleSelectionTracker.user == user,
        RoleSelectionTracker.role == role).checked

    new_value = not prev_value
    RoleSelectionTracker.update(checked=new_value).where(
        RoleSelectionTracker.user == user,
        RoleSelectionTracker.role == role).execute()

    if new_value:
        # Persian: "Role {role_name} was added."
        answer_callback(f"نقش {role.role_name} اضافه شد.", call.id)
    else:
        # Persian: "Role {role_name} was removed."
        answer_callback(f"نقش {role.role_name} حذف شد.", call.id)

    # update the role-team keyboard ("مافیا" is the Mafia team)
    if role.team == "مافیا":
        message_id = Tracker.get(Tracker.id == user.id).mafia_message_id
    else:
        message_id = Tracker.get(Tracker.id == user.id).citizen_message_id

    # edit reply keyboard with new values
    edit_message_reply_markup(
        create_role_selection_menu(user, role.team),
        chat_id=user.id,
        message_id=message_id,
    )

    # send the new list of roles to every player
    players = get_players(user, include_god=True)
    num_players = len(players) - 1
    for player in players:
        send_current_roles(player, num_players, edit=True)
Example #2
async def add_shape(sid: int, data: Dict[str, Any]):
    pr: PlayerRoom = game_state.get(sid)

    if "temporary" not in data:
        data["temporary"] = False

    floor = pr.active_location.floors.select().where(
        Floor.name == data["shape"]["floor"])[0]
    layer = floor.layers.where(Layer.name == data["shape"]["layer"])[0]

    if pr.role != Role.DM and not layer.player_editable:
        logger.warning(
            f"{pr.player.name} attempted to add a shape to a dm layer")
        return
    if data["temporary"]:
        game_state.add_temp(sid, data["shape"]["uuid"])
    else:
        with db.atomic():
            data["shape"]["layer"] = layer
            data["shape"]["index"] = layer.shapes.count()
            # Shape itself
            shape = Shape.create(**reduce_data_to_model(Shape, data["shape"]))
            # Subshape
            type_table = get_table(shape.type_)
            type_table.create(
                shape=shape,
                **type_table.pre_create(
                    **reduce_data_to_model(type_table, data["shape"])),
            )
            # Owners
            for owner in data["shape"]["owners"]:
                ShapeOwner.create(
                    shape=shape,
                    user=User.by_name(owner["user"]),
                    edit_access=owner["edit_access"],
                    movement_access=owner["movement_access"],
                    vision_access=owner["vision_access"],
                )
            # Trackers
            for tracker in data["shape"]["trackers"]:
                Tracker.create(**reduce_data_to_model(Tracker, tracker),
                               shape=shape)
            # Auras
            for aura in data["shape"]["auras"]:
                Aura.create(**reduce_data_to_model(Aura, aura), shape=shape)

    for room_player in pr.room.players:
        is_dm = room_player.role == Role.DM
        for psid in game_state.get_sids(player=room_player.player,
                                        active_location=pr.active_location):
            if psid == sid:
                continue
            if not is_dm and not layer.player_visible:
                continue
            if not data["temporary"]:
                data["shape"] = shape.as_dict(room_player.player, is_dm)
            await sio.emit("Shape.Add",
                           data["shape"],
                           room=psid,
                           namespace=GAME_NS)
Example #3
def GetTracker(key):
    """Get a tracker, or create one if it doesn't exist."""
    tracker = Tracker.all().filter('name =', key).get()
    if tracker is None:
        tracker = Tracker(name=key)
        tracker.put()
    return tracker
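Side note: if Tracker is an App Engine db.Model and the tracker name can also serve as the datastore key_name (an assumption; the snippet above looks entities up by a name property instead), get_or_insert offers a race-free alternative to the query-then-put pattern. A minimal sketch under that assumption:

def GetTrackerByKeyName(key):
    # Hypothetical variant: the name is reused as the entity key_name, so
    # db.Model.get_or_insert can fetch-or-create the Tracker atomically.
    return Tracker.get_or_insert(key, name=key)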
Example #4
def next_tracking():
    global _tracker

    if not session.get('vid'):
        abort(401)

    previous_frame = request.args.get('previous_frame', 0, type=int)
    current_frame = request.args.get('current_frame', 0, type=int)
    left = request.args.get('left', 0, type=int)
    top = request.args.get('top', 0, type=int)
    width = request.args.get('width', 0, type=int)
    height = request.args.get('height', 0, type=int)
    is_new_box = request.args.get('new_box', 0, type=int)

    if not _tracker:
        _tracker = Tracker(Video.query.get(session.get('vid')).json,
                           previous_frame, left, top, width, height)
    elif is_new_box:
        _tracker.start_new_tracking(previous_frame, [(left, top, left+width, top+height)])

    new_left, new_top, new_width, new_height = _tracker.get_next_box(current_frame)
    return jsonify(left=new_left,
                   top=new_top,
                   width=new_width,
                   height=new_height)
Example #5
async def add_shape(sid, data):
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]
    location = sid_data["location"]

    if "temporary" not in data:
        data["temporary"] = False

    floor = location.floors.select().where(
        Floor.name == data["shape"]["floor"])[0]
    layer = floor.layers.where(Layer.name == data["shape"]["layer"])[0]

    if room.creator != user and not layer.player_editable:
        logger.warning(f"{user.name} attempted to add a shape to a dm layer")
        return
    if data["temporary"]:
        state.add_temp(sid, data["shape"]["uuid"])
    else:
        with db.atomic():
            data["shape"]["layer"] = layer
            data["shape"]["index"] = layer.shapes.count()
            # Shape itself
            shape = Shape.create(**reduce_data_to_model(Shape, data["shape"]))
            # Subshape
            type_table = get_table(shape.type_)
            type_table.create(shape=shape,
                              **reduce_data_to_model(type_table,
                                                     data["shape"]))
            # Owners
            ShapeOwner.create(shape=shape, user=user)
            # Trackers
            for tracker in data["shape"]["trackers"]:
                Tracker.create(**reduce_data_to_model(Tracker, tracker),
                               shape=shape)
            # Auras
            for aura in data["shape"]["auras"]:
                Aura.create(**reduce_data_to_model(Aura, aura), shape=shape)

    if layer.player_visible:
        for room_player in room.players:
            for psid in state.get_sids(user=room_player.player, room=room):
                if psid == sid:
                    continue
                if not data["temporary"]:
                    data["shape"] = shape.as_dict(room_player.player, False)
                await sio.emit("Shape.Add",
                               data["shape"],
                               room=psid,
                               namespace="/planarally")

    for csid in state.get_sids(user=room.creator, room=room):
        if csid == sid:
            continue
        if not data["temporary"]:
            data["shape"] = shape.as_dict(room.creator, True)
        await sio.emit("Shape.Add",
                       data["shape"],
                       room=csid,
                       namespace="/planarally")
Example #6
 def __init__(self,
              update_facebank=False,
              show_results=True,
              dataset_path=None,
              save=False):
     """
     :param update_facebank: Calculate new facebank or use old one
     :param show_results: Draw bboxes and ident info at frame
     :param videoPath: Path for processing the video
     :param save: Save processed video
     """
     self.dataset_path = dataset_path
     self.video_path = f'{self.dataset_path}/{self.find_video()}'
     self.face_identificator = FaceIdentificator(
         update_facebank, f'{os.getcwd()}/{self.dataset_path}')
     self.tracker = Tracker()
     self.unique_facebank_names = list(
         self.face_identificator.saved_embeddings.keys())
     # TODO: Create something smarter for scores initialization
     self.saved_scores = dict(
         zip(self.unique_facebank_names, [
             self.face_identificator.threshold
             for name in self.unique_facebank_names
         ]))
     self.mapped_tracks = {}
     self.show_results = show_results
     self.treshold = 0.4
     self.save = save
Example #7
    def get(self):
        if os.environ.get('HTTP_HOST'):
            url = os.environ['HTTP_HOST']
        else:
            url = os.environ['SERVER_NAME']

        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write(
"""OpenTrack\n
Extremely basic tracking system, for use with p2p programs

To use the tracker visit

http://%s/trk/<tracker name>

The tracker does not need to be set up beforehand.
It will return a list of IP addresses that have visited
that tracker and add yours to the list.

Optionally add "?tick=<seconds>" to the url, where seconds
is how recently a peer must have been seen; the default tick is 60 seconds.

NEW: http://%s/tick/<tracker name>
provides a way of refreshing yourself in a tracker without retrieving the list.

\nCurrent Trackers (activity last 24 hours)\n""" % (url, url))
        trackers = Tracker.all()
        for tracker in trackers:
            self.response.out.write(" - %s\n"%tracker.name)
Example #8
async def create_tracker(sid: str, data: TrackerDelta):
    pr: PlayerRoom = game_state.get(sid)

    shape = get_shape_or_none(pr, data["shape"], "Tracker.Create")
    if shape is None:
        return

    model = reduce_data_to_model(Tracker, data)
    tracker = Tracker.create(**model)
    tracker.save()

    owners = [*get_owner_sids(pr, shape, skip_sid=sid)]
    for psid in owners:
        await sio.emit(
            "Shape.Options.Tracker.Create", data, room=psid, namespace=GAME_NS,
        )
    if tracker.visible:
        for psid in game_state.get_sids(
            active_location=pr.active_location, skip_sid=sid
        ):
            if psid in owners:
                continue
            await sio.emit(
                "Shape.Options.Tracker.Create", data, room=psid, namespace=GAME_NS,
            )
Example #9
def register_user(message):
    user = User.get_or_none(User.id == message.chat.id)
    if user is not None:
        game = Game.get_or_none(Game.user == user)
        if game is None:
            send_message(message.chat.id,
                         f"Hi again <b>{user.name}</b>!",
                         reply_markup=keyboards.main)
        else:
            text = f"Hi again <b>{user.name}</b>!\n\n"
            text += ":game_die: Note that you're in the middle of a game."
            send_message(
                message.chat.id,
                text,
            )

        return False

    send_message(message.chat.id,
                 f"Hi <b>{message.chat.first_name}</b>!",
                 reply_markup=keyboards.main)

    # tracker and user information
    Tracker.replace(id=message.chat.id).on_conflict_replace().execute()
    User.replace(
        id=message.chat.id,
        name=message.chat.first_name,
        username=message.chat.username,
    ).on_conflict_replace().execute()

    # default game settings for each user
    user = User.get(User.id == message.chat.id)
    GameSettings.insert(user=user).execute()

    # insert selected roles tracker
    roles = Role.select().where(Role.is_default == True)
    data = []
    for r in roles:
        data.append((r, user, False))
    RoleSelectionTracker.insert_many(data,
                                     fields=[
                                         RoleSelectionTracker.role,
                                         RoleSelectionTracker.user,
                                         RoleSelectionTracker.checked
                                     ]).execute()

    return True
Example #10
 def get_user_location(self, request):
     """Get tracker location of user"""
     entity = User.get_by_id(request.id)
     if entity:
         entity = Tracker.get_trackers(request)
         return entity
     else:
         raise endpoints.NotFoundException()
Example #11
 def update_user_location(self, request):
     """Update current location of user"""
     entity = User.get_by_id(request.id)
     if entity:
         entity = Tracker.put_from_message(request)
         return entity
     else:
         raise endpoints.NotFoundException()
Example #12
async def update_tracker(sid: str, data: TrackerDelta):
    pr: PlayerRoom = game_state.get(sid)

    shape = get_shape_or_none(pr, data["shape"], "Tracker.Update")
    if shape is None:
        return

    tracker = Tracker.get_by_id(data["uuid"])
    changed_visible = tracker.visible != data.get("visible", tracker.visible)
    update_model_from_dict(tracker, data)
    tracker.save()

    owners = [*get_owner_sids(pr, shape, skip_sid=sid)]
    for psid in owners:
        await sio.emit(
            "Shape.Options.Tracker.Update",
            data,
            room=psid,
            namespace=GAME_NS,
        )
    for psid in game_state.get_sids(active_location=pr.active_location,
                                    skip_sid=sid):
        if psid in owners:
            continue
        if changed_visible:
            if tracker.visible:
                await sio.emit(
                    "Shape.Options.Tracker.Create",
                    {
                        "shape": shape.uuid,
                        **tracker.as_dict()
                    },
                    room=psid,
                    namespace=GAME_NS,
                )
            else:
                await sio.emit(
                    "Shape.Options.Tracker.Remove",
                    {
                        "shape": shape.uuid,
                        "value": tracker.uuid
                    },
                    room=psid,
                    namespace=GAME_NS,
                )
        else:
            await sio.emit(
                "Shape.Options.Tracker.Update",
                data,
                room=psid,
                namespace=GAME_NS,
            )
Example #13
async def remove_tracker(sid: str, data: ShapeSetStringValue):
    pr: PlayerRoom = game_state.get(sid)

    shape = get_shape_or_none(pr, data["shape"], "Tracker.Remove")
    if shape is None:
        return

    tracker: Tracker = Tracker.get_by_id(data["value"])
    tracker.delete_instance(True)

    await sio.emit(
        "Shape.Options.Tracker.Remove",
        data,
        skip_sid=sid,
        room=pr.active_location.get_path(),
        namespace=GAME_NS,
    )
Example #14
async def update_shape_tracker(sid: str, data: TrackerUpdateData):
    pr: PlayerRoom = game_state.get(sid)

    if data["_type"] == "tracker":
        tracker = Tracker.get_by_id(data["uuid"])
    else:
        tracker = Aura.get_by_id(data["uuid"])

    tracker.value = data["value"]
    tracker.save()

    await sio.emit(
        "Shapes.Trackers.Update",
        data,
        room=pr.active_location.get_path(),
        skip_sid=sid,
        namespace=GAME_NS,
    )
Example #15
async def move_tracker(sid: str, data: TrackerMove):
    pr: PlayerRoom = game_state.get(sid)

    new_shape = get_shape_or_none(pr, data["new_shape"],
                                  "Tracker.Options.Tracker.Move")
    if new_shape is None:
        return

    tracker = Tracker.get_by_id(data["tracker"])
    tracker.shape = new_shape
    tracker.save()

    await sio.emit(
        "Shape.Options.Tracker.Move",
        data,
        skip_sid=sid,
        room=pr.active_location.get_path(),
        namespace=GAME_NS,
    )
Example #16
    def get(self):
        ## remove any old peers
        plist = Peer.all().filter('datetime <', datetime.now() - timedelta(days=1))
        peercount = plist.count()
        for p in plist:
            p.delete()

        ## remove any empty trackers
        trackers = Tracker.all()
        trackercount = 0
        for tracker in trackers:
            p = Peer.all().filter('tracker =', tracker).order('-datetime').get()
            if p is None:
                tracker.delete()
                trackercount += 1

        ## some simple stats
        self.response.headers['Content-Type'] = 'text/plain'
        self.response.out.write("Cleaned %d trackers and %d peers" % (trackercount, peercount))
        if trackercount and peercount:
            logging.info("Cleaned %d trackers and %d peers" % (trackercount, peercount))
Example #17
    def get(self):
        user = User.get(id=int(get_jwt_identity()))
        if user.account_type != "admin":
            return {}, 403

        week_ago = datetime.date.today() - datetime.timedelta(days=7)
        base = Tracker.select().where((Tracker.timestamp >= week_ago)
                                      & (Tracker.page_type == "car"))
        last_7days_view = base.count()
        diff_users = base.select(Tracker.ip).group_by(Tracker.ip).count()
        top_10pages = base.select(Tracker.title, fn.Count(
            Tracker.id)).group_by(Tracker.title).order_by(
                fn.Count(Tracker.id).desc()).tuples()[:10]
        for track in top_10pages:
            print(track)
        return {
            "data": {
                "last_7days": last_7days_view,
                "top_10": top_10pages
            },
            "message": "",
            "status": "success"
        }
Example #18
def update_state(user, state):
    Tracker.update(state=state).where(Tracker.id == user.id).execute()
Example #19
async def update_shape(sid, data):
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]
    location = sid_data["location"]

    shape, layer = await _get_shape(data, location, user)

    if not await has_ownership(layer, room, data, user, shape):
        return

    # Overwrite the old data with the new data
    if not data["temporary"]:
        with db.atomic():
            data["shape"]["layer"] = Layer.get(location=location,
                                               name=data["shape"]["layer"])
            # Shape
            update_model_from_dict(shape,
                                   reduce_data_to_model(Shape, data["shape"]))
            shape.save()
            # Subshape
            type_table = get_table(shape.type_)
            type_instance = type_table.get(uuid=shape.uuid)
            # no backrefs on these tables
            update_model_from_dict(type_instance,
                                   data["shape"],
                                   ignore_unknown=True)
            type_instance.save()
            # Owners
            old_owners = {owner.user.name for owner in shape.owners}
            new_owners = set(data["shape"]["owners"])
            for owner in old_owners ^ new_owners:
                if owner == "":
                    continue
                delta_owner = User.by_name(owner)
                if owner in new_owners:
                    ShapeOwner.create(shape=shape, user=delta_owner)
                else:
                    ShapeOwner.get(shape=shape,
                                   user=delta_owner).delete_instance(True)
                await send_client_initiatives(room, location, delta_owner)
            # Trackers
            for tracker in data["shape"]["trackers"]:
                tracker_db = Tracker.get_or_none(uuid=tracker["uuid"])
                reduced = reduce_data_to_model(Tracker, tracker)
                reduced["shape"] = shape
                if tracker_db:
                    update_model_from_dict(tracker_db, reduced)
                    tracker_db.save()
                else:
                    Tracker.create(**reduced)
            # Auras
            for aura in data["shape"]["auras"]:
                aura_db = Aura.get_or_none(uuid=aura["uuid"])
                reduced = reduce_data_to_model(Aura, aura)
                reduced["shape"] = shape
                if aura_db:
                    update_model_from_dict(aura_db, reduced)
                    aura_db.save()
                else:
                    Aura.create(**reduced)

    await sync_shape_update(layer, room, data, sid, shape)
Example #20
    'init_beta':
    1,
    'init_v':
    np.diag([4, 2]),
    'init_nu':
    5,
}

from models import GgiwTracker as Tracker

#cell 8
### Run Tracker

#cell 9
# tracker definition
tracker = Tracker(dt=dt, **config)

pr = cProfile.Profile()
for i in range(steps):
    scan = measurements[measurements['ts'] == i]
    pr.enable()
    tracker.step(scan)
    pr.disable()

estimates, log_lik = tracker.extract()
bboxes = tracker.extrackt_bbox()

s = io.StringIO()
ps = pstats.Stats(pr, stream=s).sort_stats('cumulative')
ps.print_stats(10)
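The profiling output collected above is written into the StringIO buffer but never displayed in this fragment; assuming the intent is to show it, one more line would do:

print(s.getvalue())  # top-10 cumulative-time entries from the tracker.step() calls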
Example #21
def main(args):
    cap = cv2.VideoCapture(args.video_input)
    frame_rate = int(round(cap.get(cv2.CAP_PROP_FPS)))
    video_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    video_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

    demo_images_path = os.path.join(args.demo_output, 'demo_images')
    if not os.path.exists(demo_images_path):
        os.makedirs(demo_images_path)

    device = torch.device(args.device)
    model, _, postprocessors = build_tracktest_model(args)
    model.to(device)
    model.eval()
    tracker = Tracker(score_thresh=args.track_thresh)

    checkpoint = torch.load(args.resume, map_location='cpu')
    _, _ = model.load_state_dict(checkpoint['model'], strict=False)
    print("Model is loaded")

    mean = [0.485, 0.456, 0.406]
    std = [0.229, 0.224, 0.225]
    color_list = colormap()

    print("Starting inference")
    count = 0
    tracker.reset_all()
    pre_embed = None
    res, img = cap.read()

    while res:
        count += 1
        resized_img, nh, nw = resize(img)
        rgb_img = cv2.cvtColor(resized_img, cv2.COLOR_BGR2RGB)
        tensor_img = F.normalize(F.to_tensor(rgb_img), mean, std)
        samples = nested_tensor_from_tensor_list([tensor_img]).to(device)
        outputs, pre_embed = model(samples, pre_embed)

        orig_sizes = torch.stack(
            [torch.as_tensor([video_height, video_width])], dim=0).to(device)
        results = postprocessors['bbox'](outputs, orig_sizes)

        if count == 1:
            res_track = tracker.init_track(results[0])
        else:
            res_track = tracker.step(results[0])

        for ret in res_track:
            if ret['active'] == 0:
                continue
            bbox = ret['bbox']
            tracking_id = ret['tracking_id']

            cv2.rectangle(img, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])),
                          color_list[tracking_id % 79].tolist(),
                          thickness=2)
            cv2.putText(img, "{}".format(tracking_id),
                        (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_SIMPLEX,
                        0.8, color_list[tracking_id % 79].tolist(), 2)

        cv2.imwrite(
            os.path.join(demo_images_path, "demo{:0>6d}.png".format(count)),
            img)
        print('Frame {:d} of the video is done'.format(count))

        res, img = cap.read()

    print('Length of the video: {:d} frames'.format(count))

    print("Starting img2video")
    img_paths = gb.glob(os.path.join(demo_images_path, "*.png"))
    size = (video_width, video_height)
    videowriter = cv2.VideoWriter(
        os.path.join(args.demo_output, "demo_video.avi"),
        cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), frame_rate, size)

    for img_path in sorted(img_paths):
        img = cv2.imread(img_path)
        img = cv2.resize(img, size)
        videowriter.write(img)

    videowriter.release()
    print("img2video is done")
Example #22
 def post(self):
     data = parser.parse_args()
     data['request'] = request
     print "[*] Request: ", data["request"]
     Tracker.create_from_request(**data)
     return {}
Example #23
async def update_shape(sid: int, data: Dict[str, Any]):
    pr: PlayerRoom = game_state.get(sid)

    if data["temporary"] and not has_ownership_temp(data["shape"], pr):
        logger.warning(
            f"User {pr.player.name} tried to update a shape it does not own.")
        return

    # todo clean up this mess that deals with both temporary and non temporary shapes
    shape, layer = await _get_shape(data, pr)

    # Overwrite the old data with the new data
    if not data["temporary"]:
        if not has_ownership(shape, pr):
            logger.warning(
                f"User {pr.player.name} tried to update a shape it does not own."
            )
            return
        with db.atomic():
            # Shape
            update_model_from_dict(shape,
                                   reduce_data_to_model(Shape, data["shape"]))
            shape.save()
            # Subshape
            type_instance = shape.subtype
            # no backrefs on these tables
            type_instance.update_from_dict(data["shape"], ignore_unknown=True)
            type_instance.save()
            # Trackers
            old_trackers = {tracker.uuid for tracker in shape.trackers}
            new_trackers = {
                tracker["uuid"]
                for tracker in data["shape"]["trackers"]
            }
            for tracker_id in old_trackers | new_trackers:
                remove = tracker_id in old_trackers - new_trackers
                if not remove:
                    tracker = next(tr for tr in data["shape"]["trackers"]
                                   if tr["uuid"] == tracker_id)
                    reduced = reduce_data_to_model(Tracker, tracker)
                    reduced["shape"] = shape
                if tracker_id in new_trackers - old_trackers:
                    Tracker.create(**reduced)
                    continue
                tracker_db = Tracker.get(uuid=tracker_id)
                if remove:
                    tracker_db.delete_instance(True)
                else:
                    update_model_from_dict(tracker_db, reduced)
                    tracker_db.save()

            # Auras
            old_auras = {aura.uuid for aura in shape.auras}
            new_auras = {aura["uuid"] for aura in data["shape"]["auras"]}
            for aura_id in old_auras | new_auras:
                remove = aura_id in old_auras - new_auras
                if not remove:
                    aura = next(au for au in data["shape"]["auras"]
                                if au["uuid"] == aura_id)
                    reduced = reduce_data_to_model(Aura, aura)
                    reduced["shape"] = shape
                if aura_id in new_auras - old_auras:
                    Aura.create(**reduced)
                    continue
                aura_db = Aura.get_or_none(uuid=aura_id)
                if remove:
                    aura_db.delete_instance(True)
                else:
                    update_model_from_dict(aura_db, reduced)
                    aura_db.save()
            # Labels
            for label in data["shape"]["labels"]:
                label_db = Label.get_or_none(uuid=label["uuid"])
                reduced = reduce_data_to_model(Label, label)
                reduced["user"] = User.by_name(reduced["user"])
                if label_db:
                    update_model_from_dict(label_db, reduced)
                    label_db.save()
                else:
                    Label.create(**reduced)
            old_labels = {
                shape_label.label.uuid
                for shape_label in shape.labels
            }
            new_labels = set(label["uuid"]
                             for label in data["shape"]["labels"])
            for label in old_labels ^ new_labels:
                if label == "":
                    continue
                if label in new_labels:
                    ShapeLabel.create(shape=shape, label=Label.get(uuid=label))
                else:
                    ShapeLabel.get(label=Label.get(uuid=label),
                                   shape=shape).delete_instance(True)

    await sync_shape_update(layer, pr, data, sid, shape)
Example #24
async def update_shape(sid, data):
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]
    location = sid_data["location"]

    shape, layer = await _get_shape(data, location, user)

    if not await has_ownership(layer, room, data, user, shape):
        return

    # Overwrite the old data with the new data
    if not data["temporary"]:
        with db.atomic():
            data["shape"]["layer"] = Layer.get(location=location,
                                               name=data["shape"]["layer"])
            # Shape
            update_model_from_dict(shape,
                                   reduce_data_to_model(Shape, data["shape"]))
            shape.save()
            # Subshape
            type_instance = shape.subtype
            # no backrefs on these tables
            type_instance.update_from_dict(data["shape"], ignore_unknown=True)
            type_instance.save()
            # Owners
            old_owners = {owner.user.name for owner in shape.owners}
            new_owners = set(data["shape"]["owners"])
            for owner in old_owners ^ new_owners:
                if owner == "":
                    continue
                delta_owner = User.by_name(owner)
                if owner in new_owners:
                    ShapeOwner.create(shape=shape, user=delta_owner)
                else:
                    ShapeOwner.get(shape=shape,
                                   user=delta_owner).delete_instance(True)
                await send_client_initiatives(room, location, delta_owner)
            # Trackers
            old_trackers = {tracker.uuid for tracker in shape.trackers}
            new_trackers = {
                tracker["uuid"]
                for tracker in data["shape"]["trackers"]
            }
            for tracker_id in old_trackers | new_trackers:
                remove = tracker_id in old_trackers - new_trackers
                if not remove:
                    tracker = next(tr for tr in data["shape"]["trackers"]
                                   if tr["uuid"] == tracker_id)
                    reduced = reduce_data_to_model(Tracker, tracker)
                    reduced["shape"] = shape
                if tracker_id in new_trackers - old_trackers:
                    Tracker.create(**reduced)
                    continue
                tracker_db = Tracker.get(uuid=tracker_id)
                if remove:
                    tracker_db.delete_instance(True)
                else:
                    update_model_from_dict(tracker_db, reduced)
                    tracker_db.save()

            # Auras
            old_auras = {aura.uuid for aura in shape.auras}
            new_auras = {aura["uuid"] for aura in data["shape"]["auras"]}
            for aura_id in old_auras | new_auras:
                remove = aura_id in old_auras - new_auras
                if not remove:
                    aura = next(au for au in data["shape"]["auras"]
                                if au["uuid"] == aura_id)
                    reduced = reduce_data_to_model(Aura, aura)
                    reduced["shape"] = shape
                if aura_id in new_auras - old_auras:
                    Aura.create(**reduced)
                    continue
                aura_db = Aura.get_or_none(uuid=aura_id)
                if remove:
                    aura_db.delete_instance(True)
                else:
                    update_model_from_dict(aura_db, reduced)
                    aura_db.save()
            # Labels
            for label in data["shape"]["labels"]:
                label_db = Label.get_or_none(uuid=label["uuid"])
                reduced = reduce_data_to_model(Label, label)
                reduced["user"] = User.by_name(reduced["user"])
                if label_db:
                    update_model_from_dict(label_db, reduced)
                    label_db.save()
                else:
                    Label.create(**reduced)
                shape_label_db = ShapeLabel.get_or_none(shape=shape,
                                                        label=label_db)
            old_labels = {
                shape_label.label.uuid
                for shape_label in shape.labels
            }
            new_labels = set(label["uuid"]
                             for label in data["shape"]["labels"])
            for label in old_labels ^ new_labels:
                if label == "":
                    continue
                if label in new_labels:
                    ShapeLabel.create(shape=shape, label=Label.get(uuid=label))
                else:
                    ShapeLabel.get(label=Label.get(uuid=label),
                                   shape=shape).delete_instance(True)

    await sync_shape_update(layer, room, data, sid, shape)
Example #25
def respond_message(message):

    if message.text.startswith('/start'):
        register_user(message)

    message.text = emoji.demojize(message.text)
    t = Tracker.get_or_none(Tracker.id == message.chat.id)
    user = User.get_or_none(id=message.chat.id)

    if not t or not user:
        send_message(
            message.chat.id,
            ":cross_mark: Not a registered user. Please click on /start.")
        return

    # update the username with every message
    # this is important as it is the only way to find out the user identity
    User.update(username=message.chat.username).where(
        User.id == message.chat.id).execute()

    # ------------------------------------
    # HOST a game
    # ------------------------------------
    if t.state == 'start' and message.text == ":desktop_computer: Host a Game":
        host_start(message, user)

    # end game
    elif t.state == 'host_game' and message.text == ":cross_mark: Leave":
        host_leave(message, user)

    # select from a list of roles
    elif t.state == 'host_game' and message.text == ":right_arrow: Next":
        # FIXME: poll is deactivated. inline keyboard is used now.
        # host_select_roles_with_poll(message, user)
        host_select_roles(message, user)

    # select from a list of roles
    elif t.state == 'host_game' and message.text == ":envelope: Send Roles":
        host_send_roles(message, user)

    # ------------------------------------
    # New Game
    # ------------------------------------
    join_code_pattern = r"^/start (?P<code>\w{4})$"
    match = re.match(join_code_pattern, message.text)
    if match:
        code = match.group("code")
        message.text = code

    if t.state == 'start' and message.text == ":game_die: Join a Game":
        player_leave(message, user)

    elif t.state == 'join_game':
        player_start(message, user)

    elif t.state == 'start' and match:
        player_start(message, user)

    # ------------------------------------
    # Change Name
    # ------------------------------------
    if t.state == 'start' and message.text == ":bust_in_silhouette: Change Name":
        update_state(user, 'change_name')
        text = f":bust_in_silhouette: Current name: <b>{f2p(user.name)}</b>\n\n"
        text += ":input_latin_letters: Enter your new name:"
        send_message(user.id,
                     text,
                     reply_markup=create_keyboard([":cross_mark: Discard"]))

    elif t.state == 'change_name' and message.text == ":cross_mark: Discard":
        update_state(user, 'start')
        send_message(user.id,
                     ":cross_mark: Discard",
                     reply_markup=keyboards.main)

    elif t.state == 'change_name':
        if len(message.text) > 100:
            send_message(user.id,
                         "Name length must be less than 100 characters.")
            return

        User.update(name=message.text).where(User.id == user.id).execute()
        update_state(user, 'start')
        send_message(
            user.id,
            f":white_heavy_check_mark: Your Name is updated now to: <b>{message.text}</b>",
            reply_markup=keyboards.main)

    # ------------------------------------
    # Settings
    # ------------------------------------
    if t.state == 'start' and message.text == ":gear_selector: Settings":
        edit_game_settings(message, user)
Example #26
def main(args):
    utils.init_distributed_mode(args)
    print("git:\n  {}\n".format(utils.get_sha()))

    if args.frozen_weights is not None:
        assert args.masks, "Frozen training is meant for segmentation only"
    print(args)

    device = torch.device(args.device)

    # fix the seed for reproducibility
    seed = args.seed + utils.get_rank()
    torch.manual_seed(seed)
    np.random.seed(seed)
    random.seed(seed)

    scaler = torch.cuda.amp.GradScaler(enabled=args.fp16)
    if args.det_val:
        assert args.eval, 'only support eval mode of detector for track'
        model, criterion, postprocessors = build_model(args)
    elif args.eval:
        model, criterion, postprocessors = build_tracktest_model(args)
    else:
        model, criterion, postprocessors = build_tracktrain_model(args)

    model.to(device)

    model_without_ddp = model
    n_parameters = sum(p.numel() for p in model.parameters()
                       if p.requires_grad)
    print('number of params:', n_parameters)

    dataset_train = build_dataset(image_set=args.track_train_split, args=args)
    dataset_val = build_dataset(image_set=args.track_eval_split, args=args)

    if args.distributed:
        if args.cache_mode:
            sampler_train = samplers.NodeDistributedSampler(dataset_train)
            sampler_val = samplers.NodeDistributedSampler(dataset_val,
                                                          shuffle=False)
        else:
            sampler_train = samplers.DistributedSampler(dataset_train)
            sampler_val = samplers.DistributedSampler(dataset_val,
                                                      shuffle=False)
    else:
        sampler_train = torch.utils.data.RandomSampler(dataset_train)
        sampler_val = torch.utils.data.SequentialSampler(dataset_val)

    batch_sampler_train = torch.utils.data.BatchSampler(sampler_train,
                                                        args.batch_size,
                                                        drop_last=True)

    data_loader_train = DataLoader(dataset_train,
                                   batch_sampler=batch_sampler_train,
                                   collate_fn=utils.collate_fn,
                                   num_workers=args.num_workers,
                                   pin_memory=True)
    data_loader_val = DataLoader(dataset_val,
                                 args.batch_size,
                                 sampler=sampler_val,
                                 drop_last=False,
                                 collate_fn=utils.collate_fn,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

    # lr_backbone_names = ["backbone.0", "backbone.neck", "input_proj", "transformer.encoder"]
    def match_name_keywords(n, name_keywords):
        out = False
        for b in name_keywords:
            if b in n:
                out = True
                break
        return out

    for n, p in model_without_ddp.named_parameters():
        print(n)

    param_dicts = [{
        "params": [
            p for n, p in model_without_ddp.named_parameters()
            if not match_name_keywords(n, args.lr_backbone_names)
            and not match_name_keywords(n, args.lr_linear_proj_names)
            and p.requires_grad
        ],
        "lr":
        args.lr,
    }, {
        "params": [
            p for n, p in model_without_ddp.named_parameters() if
            match_name_keywords(n, args.lr_backbone_names) and p.requires_grad
        ],
        "lr":
        args.lr_backbone,
    }, {
        "params": [
            p for n, p in model_without_ddp.named_parameters()
            if match_name_keywords(n, args.lr_linear_proj_names)
            and p.requires_grad
        ],
        "lr":
        args.lr * args.lr_linear_proj_mult,
    }]
    if args.sgd:
        optimizer = torch.optim.SGD(param_dicts,
                                    lr=args.lr,
                                    momentum=0.9,
                                    weight_decay=args.weight_decay)
    else:
        optimizer = torch.optim.AdamW(param_dicts,
                                      lr=args.lr,
                                      weight_decay=args.weight_decay)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, args.lr_drop)

    if args.distributed:
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[args.gpu], find_unused_parameters=True)
        model_without_ddp = model.module

    if args.dataset_file == "coco_panoptic":
        # We also evaluate AP during panoptic training, on original coco DS
        coco_val = datasets.coco.build("val", args)
        base_ds = get_coco_api_from_dataset(coco_val)
    else:
        base_ds = get_coco_api_from_dataset(dataset_val)

    if args.frozen_weights is not None:
        checkpoint = torch.load(args.frozen_weights, map_location='cpu')
        model_without_ddp.detr.load_state_dict(checkpoint['model'])

    output_dir = Path(args.output_dir)
    if args.resume:
        if args.resume.startswith('https'):
            checkpoint = torch.hub.load_state_dict_from_url(args.resume,
                                                            map_location='cpu',
                                                            check_hash=True)
        else:
            checkpoint = torch.load(args.resume, map_location='cpu')
        missing_keys, unexpected_keys = model_without_ddp.load_state_dict(
            checkpoint['model'], strict=False)
        unexpected_keys = [
            k for k in unexpected_keys
            if not (k.endswith('total_params') or k.endswith('total_ops'))
        ]
        if len(missing_keys) > 0:
            print('Missing Keys: {}'.format(missing_keys))
        if len(unexpected_keys) > 0:
            print('Unexpected Keys: {}'.format(unexpected_keys))
        if not args.eval and 'optimizer' in checkpoint and 'lr_scheduler' in checkpoint and 'epoch' in checkpoint:
            import copy
            p_groups = copy.deepcopy(optimizer.param_groups)
            optimizer.load_state_dict(checkpoint['optimizer'])
            for pg, pg_old in zip(optimizer.param_groups, p_groups):
                pg['lr'] = pg_old['lr']
                pg['initial_lr'] = pg_old['initial_lr']
            print(optimizer.param_groups)
            lr_scheduler.load_state_dict(checkpoint['lr_scheduler'])
            # todo: this is a hack for doing experiment that resume from checkpoint and also modify lr scheduler (e.g., decrease lr in advance).
            args.override_resumed_lr_drop = True
            if args.override_resumed_lr_drop:
                print(
                    'Warning: (hack) args.override_resumed_lr_drop is set to True, so args.lr_drop would override lr_drop in resumed lr_scheduler.'
                )
                lr_scheduler.step_size = args.lr_drop
                lr_scheduler.base_lrs = list(
                    map(lambda group: group['initial_lr'],
                        optimizer.param_groups))
            lr_scheduler.step(lr_scheduler.last_epoch)
            args.start_epoch = checkpoint['epoch'] + 1
        # check the resumed model
#         if not args.eval:
#             test_stats, coco_evaluator, _ = evaluate(
#                 model, criterion, postprocessors, data_loader_val, base_ds, device, args.output_dir
#             )

    if args.eval:
        assert args.batch_size == 1, "Evaluation currently only supports batch size 1."
        tracker = Tracker(score_thresh=args.track_thresh)
        test_stats, coco_evaluator, res_tracks = evaluate(model,
                                                          criterion,
                                                          postprocessors,
                                                          data_loader_val,
                                                          base_ds,
                                                          device,
                                                          args.output_dir,
                                                          tracker=tracker,
                                                          phase='eval',
                                                          det_val=args.det_val,
                                                          fp16=args.fp16)
        if args.output_dir:
            #             utils.save_on_master(coco_evaluator.coco_eval["bbox"].eval, output_dir / "eval.pth")
            if res_tracks is not None:
                print("Creating video index for {}.".format(args.dataset_file))
                video_to_images = defaultdict(list)
                video_names = defaultdict()
                for _, info in dataset_val.coco.imgs.items():
                    video_to_images[info["video_id"]].append({
                        "image_id":
                        info["id"],
                        "frame_id":
                        info["frame_id"]
                    })
                    video_name = info["file_name"].split("/")[0]
                    if video_name not in video_names:
                        video_names[info["video_id"]] = video_name
                assert len(video_to_images) == len(video_names)
                # save mot results.
                save_track(res_tracks, args.output_dir, video_to_images,
                           video_names, args.track_eval_split)

        return

    print("Start training")
    start_time = time.time()
    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            sampler_train.set_epoch(epoch)
        train_stats = train_one_epoch(model,
                                      criterion,
                                      data_loader_train,
                                      optimizer,
                                      device,
                                      scaler,
                                      epoch,
                                      args.clip_max_norm,
                                      fp16=args.fp16)
        lr_scheduler.step()
        if args.output_dir:
            checkpoint_paths = [output_dir / 'checkpoint.pth']
            # extra checkpoint before LR drop and every 5 epochs
            if (epoch + 1) % args.lr_drop == 0 or (epoch + 1) % 5 == 0:
                checkpoint_paths.append(output_dir /
                                        f'checkpoint{epoch:04}.pth')
            for checkpoint_path in checkpoint_paths:
                utils.save_on_master(
                    {
                        'model': model_without_ddp.state_dict(),
                        'optimizer': optimizer.state_dict(),
                        'lr_scheduler': lr_scheduler.state_dict(),
                        'epoch': epoch,
                        'args': args,
                    }, checkpoint_path)

        log_stats = {
            **{f'train_{k}': v
               for k, v in train_stats.items()}, 'epoch': epoch,
            'n_parameters': n_parameters
        }

        if epoch % 10 == 0 or epoch > args.epochs - 5:
            test_stats, coco_evaluator, _ = evaluate(model,
                                                     criterion,
                                                     postprocessors,
                                                     data_loader_val,
                                                     base_ds,
                                                     device,
                                                     args.output_dir,
                                                     fp16=args.fp16)
            log_test_stats = {
                **{f'test_{k}': v
                   for k, v in test_stats.items()}
            }
            log_stats.update(log_test_stats)

        if args.output_dir and utils.is_main_process():
            with (output_dir / "log.txt").open("a") as f:
                f.write(json.dumps(log_stats) + "\n")

            # for evaluation logs


#             if coco_evaluator is not None:
#                 (output_dir / 'eval').mkdir(exist_ok=True)
#                 if "bbox" in coco_evaluator.coco_eval:
#                     filenames = ['latest.pth']
#                     if epoch % 50 == 0:
#                         filenames.append(f'{epoch:03}.pth')
#                     for name in filenames:
#                         torch.save(coco_evaluator.coco_eval["bbox"].eval,
#                                    output_dir / "eval" / name)

    total_time = time.time() - start_time
    total_time_str = str(datetime.timedelta(seconds=int(total_time)))
    print('Training time {}'.format(total_time_str))
Example #27
        """
        link = "https://github.com/patillacode/twallery"
        sys.stderr.write('\nerror: {0}\n\n'.format(message))
        self.print_help()
        sys.stderr.write('\nPlease check the README or go to {0}\n\n'.format(
            link))
        sys.exit(2)

if __name__ == '__main__':

    try:
        parser = TrackParser()
        mandatory = parser.add_argument_group("mandatory arguments")
        mandatory.add_argument('--hashtags',
                               required=True,
                               nargs='*',
                               help="")

        args = parser.parse_args()
        # Create Tracker with given hashtags
        tracker = Tracker(args.hashtags)
        stream = tracker.authenticate()
        # Capture data by the keywords
        stream.filter(track=tracker.hashtags)

    except (KeyboardInterrupt, SystemExit):
        logger.debug("Farewell my friend!")

    except:
        logger.error(traceback.format_exc())
Example #28
async def update_shape(sid, data):
    sid_data = state.sid_map[sid]
    user = sid_data["user"]
    room = sid_data["room"]
    location = sid_data["location"]

    # We're first gonna retrieve the existing server side shape for some validation checks
    if data["temporary"]:
        # This stuff is not stored so we cannot do any server side validation /shrug
        shape = data["shape"]
        layer = location.layers.where(Layer.name == data["shape"]["layer"])[0]
    else:
        # Use the server version of the shape.
        try:
            shape = Shape.get(uuid=data["shape"]["uuid"])
        except Shape.DoesNotExist:
            logger.warning(f"Attempt to update unknown shape by {user.name}")
            return
        layer = shape.layer

    # Ownership validation
    if room.creator != user:
        if not layer.player_editable:
            logger.warning(
                f"{user.name} attempted to move a shape on a dm layer")
            return

        if data["temporary"]:
            if user.name not in shape["owners"]:
                logger.warning(
                    f"{user.name} attempted to move asset it does not own")
                return
        else:
            if not ShapeOwner.get_or_none(shape=shape, user=user):
                logger.warning(
                    f"{user.name} attempted to move asset it does not own")
                return

    # Overwrite the old data with the new data
    if not data["temporary"]:
        with db.atomic():
            data["shape"]["layer"] = Layer.get(location=location,
                                               name=data["shape"]["layer"])
            # Shape
            update_model_from_dict(shape,
                                   reduce_data_to_model(Shape, data["shape"]))
            shape.save()
            # Subshape
            type_table = get_table(shape.type_)
            type_instance = type_table.get(uuid=shape.uuid)
            # no backrefs on these tables
            update_model_from_dict(type_instance,
                                   data["shape"],
                                   ignore_unknown=True)
            type_instance.save()
            # Owners
            old_owners = {owner.user.name for owner in shape.owners}
            new_owners = set(data["shape"]["owners"])
            for owner in old_owners ^ new_owners:
                if owner == "":
                    continue
                delta_owner = User.by_name(owner)
                if owner in new_owners:
                    ShapeOwner.create(shape=shape, user=delta_owner)
                else:
                    ShapeOwner.get(shape=shape,
                                   user=delta_owner).delete_instance(True)
                await send_client_initiatives(room, location, delta_owner)
            # Trackers
            for tracker in data["shape"]["trackers"]:
                tracker_db = Tracker.get_or_none(uuid=tracker['uuid'])
                reduced = reduce_data_to_model(Tracker, tracker)
                reduced['shape'] = shape
                if tracker_db:
                    update_model_from_dict(tracker_db, reduced)
                    tracker_db.save()
                else:
                    Tracker.create(**reduced)
            # Auras
            for aura in data["shape"]["auras"]:
                aura_db = Aura.get_or_none(uuid=aura['uuid'])
                reduced = reduce_data_to_model(Aura, aura)
                reduced['shape'] = shape
                if aura_db:
                    update_model_from_dict(aura_db, reduced)
                    aura_db.save()
                else:
                    Aura.create(**reduced)

    # Send to players
    if layer.player_visible:
        for room_player in room.players:
            for psid in state.get_sids(user=room_player.player, room=room):
                if psid == sid:
                    continue
                if not data["temporary"]:
                    data["shape"] = shape.as_dict(room_player.player, False)
                await sio.emit("Shape.Update",
                               data,
                               room=psid,
                               namespace="/planarally")

    # Send to DM
    for csid in state.get_sids(user=room.creator, room=room):
        if csid == sid:
            continue
        if not data["temporary"]:
            data["shape"] = shape.as_dict(room.creator, True)
        await sio.emit("Shape.Update",
                       data,
                       room=csid,
                       namespace="/planarally")