def milestones(self):

    j = self.provider.schedule(game_id=self.game_id)
    try:
        milestones = j["dates"][0]["games"][0]["content"]["media"]["milestones"]
    except (KeyError, IndexError):
        # No milestone data in the schedule for this game
        return AttrDict()

    start_timestamps = []

    start_time = next(
        m["timeAbsolute"]
        for m in milestones["items"]
        if m["type"] == "BROADCAST_START"
    )
    # start_timestamps.append(
    #     ("S", start_time)
    # )

    start_offset = next(
        m["timeOffset"]
        for m in milestones["items"]
        if m["type"] == "BROADCAST_START"
    )
    start_timestamps.append(("Start", int(start_offset)))

    timestamps = AttrDict(start_timestamps)
    # Label regulation periods "P1".."P3"; later periods keep their ordinal
    # (e.g. "OT")
    timestamps.update(AttrDict([
        (f"P{m['ordinalNum'][0]}" if int(m["period"]) <= 3 else m["ordinalNum"],
         int(m["timeOffset"]))
        for m in milestones["items"]
        if m["type"] == "PERIOD_START"
    ]))
    # raise Exception(timestamps)
    timestamps.update([("Live", None)])
    return timestamps
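# These snippets build on AttrDict, assumed here to be a dict with
# attribute-style access to its keys (e.g. the orderedattrdict package).
# A minimal sketch of that assumption, for reference only:
class AttrDict(dict):
    """Dict whose keys can also be read and written as attributes."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value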
def media_timestamps(self, game_id, media_id):

    j = self.schedule(game_id=game_id)
    try:
        milestones = j["dates"][0]["games"][0]["content"]["media"]["milestones"]
    except (KeyError, IndexError):
        return AttrDict()

    start_timestamps = []

    start_time = next(
        m["timeAbsolute"]
        for m in milestones["items"]
        if m["type"] == "BROADCAST_START"
    )
    start_timestamps.append(("S", start_time))

    start_offset = next(
        m["timeOffset"]
        for m in milestones["items"]
        if m["type"] == "BROADCAST_START"
    )
    start_timestamps.append(("SO", int(start_offset)))

    timestamps = AttrDict(start_timestamps)
    timestamps.update(AttrDict([
        (m["period"] if int(m["period"]) <= 3 else "O",
         int(m["timeOffset"]))
        for m in milestones["items"]
        if m["type"] == "PERIOD_START"
    ]))
    # raise Exception(timestamps)
    return timestamps
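# Self-contained illustration of the period-labelling rule above: periods 1-3
# keep their number, anything later collapses to "O" (overtime). The sample
# milestones and offsets are invented.
items = [
    {"type": "PERIOD_START", "period": "1", "timeOffset": "95"},
    {"type": "PERIOD_START", "period": "4", "timeOffset": "4050"},
]
labels = {
    (m["period"] if int(m["period"]) <= 3 else "O"): int(m["timeOffset"])
    for m in items if m["type"] == "PERIOD_START"
}
print(labels)  # -> {'1': 95, 'O': 4050}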
def media_timestamps(self, game_id, media_id):

    try:
        airing = next(
            a for a in self.airings(game_id)
            if a["mediaId"] == media_id
        )
    except StopIteration:
        raise MLBSessionException("No airing for media %s" % (media_id))

    broadcast_start = next(
        m for m in airing["milestones"]
        if m["milestoneType"] == "BROADCAST_START"
    )
    timestamps = AttrDict([
        ("S", next(
            t["startDatetime"]
            for t in broadcast_start["milestoneTime"]
            if t["type"] == "absolute"
        )),
        ("SO", next(
            t["start"]
            for t in broadcast_start["milestoneTime"]
            if t["type"] == "offset"
        )),
    ])

    timestamps.update(AttrDict([
        ("%s%s" % (
            "T" if next(
                k for k in m["keywords"] if k["type"] == "top"
            )["value"] == "true" else "B",
            int(next(
                k for k in m["keywords"] if k["type"] == "inning"
            )["value"])),
         next(t["start"] for t in m["milestoneTime"] if t["type"] == "offset"))
        for m in airing["milestones"]
        if m["milestoneType"] == "INNING_START"
    ]))
    return timestamps
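# Self-contained sketch of the inning-label rule used above: an INNING_START
# milestone becomes "T<inning>" (top half) or "B<inning>" (bottom half), keyed
# to its stream offset. The sample milestone is invented.
sample = {
    "milestoneType": "INNING_START",
    "keywords": [{"type": "top", "value": "true"},
                 {"type": "inning", "value": "3"}],
    "milestoneTime": [{"type": "offset", "start": 1830}],
}
half = "T" if next(
    k for k in sample["keywords"] if k["type"] == "top")["value"] == "true" else "B"
inning = int(next(k for k in sample["keywords"] if k["type"] == "inning")["value"])
offset = next(t["start"] for t in sample["milestoneTime"] if t["type"] == "offset")
print("%s%s" % (half, inning), offset)  # -> T3 1830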
def save_file(self, upload, filename, if_exists):
    original_name = upload.get('filename', None)
    filemeta = AttrDict(filename=original_name)
    filename = filename or original_name or 'data.bin'
    filepath = os.path.join(self.path, filename)
    # Security check: don't allow files to be written outside path:
    if not os.path.realpath(filepath).startswith(os.path.realpath(self.path)):
        raise HTTPError(
            FORBIDDEN,
            reason='FileUpload: filename %s is outside path: %s' % (filename, self.path))
    if os.path.exists(filepath):
        if if_exists == 'error':
            raise HTTPError(FORBIDDEN, reason='FileUpload: file exists: %s' % filename)
        elif if_exists == 'unique':
            # Rename to file.1.ext or file.2.ext etc -- whatever's available
            name, ext = os.path.splitext(filepath)
            name_pattern = name + '.%s' + ext
            i = 1
            while os.path.exists(name_pattern % i):
                i += 1
            filepath = name_pattern % i
        elif if_exists == 'backup':
            name, ext = os.path.splitext(filepath)
            backup = '{}.{:%Y%m%d-%H%M%S}{}'.format(name, datetime.now(), ext)
            shutil.copyfile(filepath, backup)
            filemeta['backup'] = os.path.relpath(backup, self.path).replace(
                os.path.sep, '/')
        elif if_exists != 'overwrite':
            raise HTTPError(INTERNAL_SERVER_ERROR,
                            reason='FileUpload: if_exists: %s invalid' % if_exists)
    # Create the directory to write in, if required
    folder = os.path.dirname(filepath)
    if not os.path.exists(folder):
        os.makedirs(folder)
    # Save the file
    with open(filepath, 'wb') as handle:
        handle.write(upload['body'])
    mime = upload['content_type'] or mimetypes.guess_type(filepath, strict=False)[0]
    filemeta.update(
        file=os.path.relpath(filepath, self.path).replace(os.path.sep, '/'),
        size=os.stat(filepath).st_size,
        mime=mime or 'application/octet-stream',
        created=time.time() * MILLISECONDS,     # JS parseable timestamp
    )
    return filemeta
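# A self-contained sketch of just the if_exists == 'unique' renaming rule used
# above: given "report.csv", try "report.1.csv", "report.2.csv", ... until a
# free name is found. (save_file also accepts 'error', 'backup' and
# 'overwrite' for if_exists.)
import os

def unique_path(filepath):
    name, ext = os.path.splitext(filepath)
    pattern = name + '.%s' + ext
    i = 1
    while os.path.exists(pattern % i):
        i += 1
    return pattern % i

print(unique_path('report.csv'))  # -> 'report.1.csv', if that name is free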
def media_timestamps(self, game_id, media_id):

    try:
        airing = next(
            a for a in self.airings(game_id)
            if a["mediaId"] == media_id
        )
    except StopIteration:
        raise MLBSessionException("No airing for media %s" % (media_id))

    start_timestamps = []

    try:
        start_time = next(
            t["startDatetime"]
            for t in next(
                m for m in airing["milestones"]
                if m["milestoneType"] == "BROADCAST_START"
            )["milestoneTime"]
            if t["type"] == "absolute"
        )
    except StopIteration:
        # Some streams don't have a "BROADCAST_START" milestone. We need
        # something, so we use the scheduled game start time, which is
        # probably wrong.
        start_time = airing["startDate"]
    start_timestamps.append(("S", start_time))

    try:
        start_offset = next(
            t["start"]
            for t in next(
                m for m in airing["milestones"]
                if m["milestoneType"] == "BROADCAST_START"
            )["milestoneTime"]
            if t["type"] == "offset"
        )
    except StopIteration:
        # Same as above. Missing BROADCAST_START milestone means we
        # probably don't get accurate offsets for inning milestones.
        start_offset = 0
    start_timestamps.append(("SO", start_offset))

    timestamps = AttrDict(start_timestamps)
    timestamps.update(AttrDict([
        ("%s%s" % (
            "T" if next(
                k for k in m["keywords"] if k["type"] == "top"
            )["value"] == "true" else "B",
            int(next(
                k for k in m["keywords"] if k["type"] == "inning"
            )["value"])),
         next(t["start"] for t in m["milestoneTime"] if t["type"] == "offset"))
        for m in airing["milestones"]
        if m["milestoneType"] == "INNING_START"
    ]))
    return timestamps
def milestones(self):

    try:
        # try to get the precise timestamps for this stream
        airing = next(
            a for a in self.provider.session.airings(self.game_id)
            if len(a["milestones"]) and a["mediaId"] == self.media_id
        )
    except StopIteration:
        # welp, no timestamps -- try to get them from whatever feed has them
        try:
            airing = next(
                a for a in self.provider.session.airings(self.game_id)
                if len(a["milestones"])
            )
        except StopIteration:
            logger.warning(
                SGStreamSessionException("No airing for media %s" % (self.media_id)))
            return AttrDict([("Start", 0)])

    start_timestamps = []

    try:
        start_time = next(
            t["startDatetime"]
            for t in next(
                m for m in airing["milestones"]
                if m["milestoneType"] == "BROADCAST_START"
            )["milestoneTime"]
            if t["type"] == "absolute"
        )
    except StopIteration:
        # Some streams don't have a "BROADCAST_START" milestone. We need
        # something, so we use the scheduled game start time, which is
        # probably wrong.
        start_time = airing["startDate"]
    # start_timestamps.append(
    #     ("Start", start_time)
    # )

    try:
        start_offset = next(
            t["start"]
            for t in next(
                m for m in airing["milestones"]
                if m["milestoneType"] == "BROADCAST_START"
            )["milestoneTime"]
            if t["type"] == "offset"
        )
    except StopIteration:
        # Same as above. Missing BROADCAST_START milestone means we
        # probably don't get accurate offsets for inning milestones.
        start_offset = 0
    start_timestamps.append(("Start", start_offset))

    timestamps = AttrDict(start_timestamps)
    timestamps.update(AttrDict([
        ("%s%s" % (
            "T" if next(
                k for k in m["keywords"] if k["type"] == "top"
            )["value"] == "true" else "B",
            int(next(
                k for k in m["keywords"] if k["type"] == "inning"
            )["value"])),
         next(t["start"] for t in m["milestoneTime"] if t["type"] == "offset"))
        for m in airing["milestones"]
        if m["milestoneType"] == "INNING_START"
    ]))

    # If we didn't get a BROADCAST_START timestamp but did get a timestamp
    # for the first inning, just use something reasonable (1st inning start
    # minus 15 minutes.)
    if timestamps.get("Start") == 0 and "T1" in timestamps:
        timestamps["Start"] = timestamps["T1"] - 900

    timestamps.update([("Live", None)])
    return timestamps
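# Self-contained illustration of the fallback above: when no BROADCAST_START
# milestone was found the start offset is 0, so if the top of the 1st inning
# ("T1") is known, the stream start is estimated as 15 minutes (900 seconds)
# before it. Offsets are invented.
timestamps = {"Start": 0, "T1": 1320}
if timestamps.get("Start") == 0 and "T1" in timestamps:
    timestamps["Start"] = timestamps["T1"] - 900
print(timestamps)  # -> {'Start': 420, 'T1': 1320}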
def _call(self, inp, inp_features, is_training, is_posterior=True, prop_state=None):
    print("\n" + "-" * 10
          + " ConvGridObjectLayer(is_posterior={}) ".format(is_posterior)
          + "-" * 10)

    # --- set up sub networks and attributes ---

    self.maybe_build_subnet("box_network", builder=cfg.build_conv_lateral, key="box")
    self.maybe_build_subnet("attr_network", builder=cfg.build_conv_lateral, key="attr")
    self.maybe_build_subnet("z_network", builder=cfg.build_conv_lateral, key="z")
    self.maybe_build_subnet("obj_network", builder=cfg.build_conv_lateral, key="obj")

    self.maybe_build_subnet("object_encoder")

    _, H, W, _, n_channels = tf_shape(inp_features)

    if self.B != 1:
        raise NotImplementedError()

    if not self.initialized:
        # Note this limits the re-usability of this module to images
        # with a fixed shape (the shape of the first image it is used on)
        self.batch_size, self.image_height, self.image_width, self.image_depth = tf_shape(inp)
        self.H = H
        self.W = W
        self.HWB = H * W
        # use the dynamic batch size rather than the static one captured above
        self.batch_size = tf.shape(inp)[0]
        self.is_training = is_training
        self.float_is_training = tf.to_float(is_training)

    inp_features = tf.reshape(inp_features, (self.batch_size, H, W, n_channels))

    # two-channel flag telling the networks whether this pass is
    # posterior ([1, 0]) or prior ([0, 1])
    is_posterior_tf = tf.ones_like(inp_features[..., :2])
    if is_posterior:
        is_posterior_tf = is_posterior_tf * [1, 0]
    else:
        is_posterior_tf = is_posterior_tf * [0, 1]

    objects = AttrDict()

    base_features = tf.concat([inp_features, is_posterior_tf], axis=-1)

    # --- box ---

    layer_inp = base_features
    n_features = self.n_passthrough_features
    output_size = 8

    network_output = self.box_network(layer_inp, output_size + n_features, self.is_training)
    rep_input, features = tf.split(network_output, (output_size, n_features), axis=-1)
    _objects = self._build_box(rep_input, self.is_training)
    objects.update(_objects)

    # --- attr ---

    if is_posterior:
        # --- Get object attributes using object encoder ---

        yt, xt, ys, xs = tf.split(objects['normalized_box'], 4, axis=-1)

        yt, xt, ys, xs = coords_to_image_space(
            yt, xt, ys, xs, (self.image_height, self.image_width),
            self.anchor_box, top_left=False)

        transform_constraints = snt.AffineWarpConstraints.no_shear_2d()
        warper = snt.AffineGridWarper(
            (self.image_height, self.image_width), self.object_shape,
            transform_constraints)

        _boxes = tf.concat([xs, 2 * xt - 1, ys, 2 * yt - 1], axis=-1)
        _boxes = tf.reshape(_boxes, (self.batch_size * H * W, 4))
        grid_coords = warper(_boxes)
        grid_coords = tf.reshape(
            grid_coords, (self.batch_size, H, W, *self.object_shape, 2))

        if self.edge_resampler:
            glimpse = resampler_edge.resampler_edge(inp, grid_coords)
        else:
            glimpse = tf.contrib.resampler.resampler(inp, grid_coords)
    else:
        glimpse = tf.zeros(
            (self.batch_size, H, W, *self.object_shape, self.image_depth))

    # Create the object encoder network regardless of is_posterior,
    # otherwise messes with ScopedFunction
    encoded_glimpse = apply_object_wise(
        self.object_encoder, glimpse, n_trailing_dims=3,
        output_size=self.A, is_training=self.is_training)

    if not is_posterior:
        encoded_glimpse = tf.zeros_like(encoded_glimpse)

    layer_inp = tf.concat(
        [base_features, features, encoded_glimpse, objects['local_box']], axis=-1)
    network_output = self.attr_network(layer_inp, 2 * self.A + n_features, self.is_training)
    attr_mean, attr_log_std, features = tf.split(
        network_output, (self.A, self.A, n_features), axis=-1)

    attr_std = self.std_nonlinearity(attr_log_std)

    attr = Normal(loc=attr_mean, scale=attr_std).sample()

    objects.update(attr_mean=attr_mean, attr_std=attr_std, attr=attr, glimpse=glimpse)

    # --- z ---

    layer_inp = tf.concat(
        [base_features, features, objects['local_box'], objects['attr']], axis=-1)
    n_features = self.n_passthrough_features
    network_output = self.z_network(layer_inp, 2 + n_features, self.is_training)
    z_mean, z_log_std, features = tf.split(network_output, (1, 1, n_features), axis=-1)
    z_std = self.std_nonlinearity(z_log_std)

    z_mean = self.training_wheels * tf.stop_gradient(z_mean) + (1 - self.training_wheels) * z_mean
    z_std = self.training_wheels * tf.stop_gradient(z_std) + (1 - self.training_wheels) * z_std
    z_logit = Normal(loc=z_mean, scale=z_std).sample()
    z = self.z_nonlinearity(z_logit)

    objects.update(z_logit_mean=z_mean, z_logit_std=z_std, z_logit=z_logit, z=z)

    # --- obj ---

    layer_inp = tf.concat(
        [base_features, features, objects['local_box'], objects['attr'], objects['z']],
        axis=-1)
    rep_input = self.obj_network(layer_inp, 1, self.is_training)
    _objects = self._build_obj(rep_input, self.is_training)
    objects.update(_objects)

    # --- final ---

    # flatten the spatial grid: (batch, H, W, ...) -> (batch, H*W*B, ...)
    _objects = AttrDict()
    for k, v in objects.items():
        _, _, _, *trailing_dims = tf_shape(v)
        _objects[k] = tf.reshape(v, (self.batch_size, self.HWB, *trailing_dims))
    objects = _objects

    if prop_state is not None:
        objects.prop_state = tf.tile(
            prop_state[0:1, None], (self.batch_size, self.HWB, 1))
        objects.prior_prop_state = tf.tile(
            prop_state[0:1, None], (self.batch_size, self.HWB, 1))

    # --- misc ---

    objects.n_objects = tf.fill((self.batch_size,), self.HWB)
    objects.pred_n_objects = tf.reduce_sum(objects.obj, axis=(1, 2))
    objects.pred_n_objects_hard = tf.reduce_sum(tf.round(objects.obj), axis=(1, 2))

    return objects
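# Sketch of the "training wheels" trick applied to z above: blending a tensor
# with a stop_gradient copy of itself leaves the forward value unchanged but
# scales its gradient by (1 - training_wheels), so gradients can be faded in
# during training. TF1-style, mirroring the lines above; `training_wheels` is
# assumed to be a scalar in [0, 1] supplied by the surrounding config.
import tensorflow as tf

def apply_training_wheels(x, training_wheels):
    # forward pass: training_wheels * x + (1 - training_wheels) * x == x
    # backward pass: gradient w.r.t. x is scaled by (1 - training_wheels)
    return training_wheels * tf.stop_gradient(x) + (1 - training_wheels) * x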