def cleanup(interval, verbose, is_dry_run):
    """Deactivate groups that have been idle for more than `interval` hours.

    A group counts as idle when its newest GroupEvent — or, if it has no
    events at all, its own creation time — is older than the threshold.
    Unless `is_dry_run` is set, idle groups are flagged inactive, automatic
    groups are detached from their cluster(s), and members are unlinked.

    Returns the number of groups deactivated (or that would be, in dry-run).
    """
    cutoff = datetime.datetime.fromtimestamp(time.time() - interval * 60 * 60)
    store = uutils.get_store()
    nb_deactivated = 0
    # Only groups currently flagged active are candidates.
    for group in store.find(Group, Group.is_active == True):
        latest = store.find(
            GroupEvent, GroupEvent.group == group
        ).order_by(Desc(GroupEvent.created)).first()
        # Group is old and has no event, or the last event is old.
        if latest is None:
            is_stale = group.created < cutoff
        else:
            is_stale = latest.created < cutoff
        if not is_stale:
            continue
        nb_deactivated += 1
        if verbose:
            print("deactivating group %d (%s)" % (group.id, group.name))
        if not is_dry_run:
            group.is_active = False
            if group.is_automatic:
                # There should be only one cluster.
                for cluster in group.clusters:
                    cluster.group = None
            for user in group.users:
                user.group = None
    store.commit()
    return nb_deactivated
def cleanup(interval, verbose):
    """Deactivate groups whose last activity is older than `interval` hours.

    A group with no GroupEvent at all is judged by its creation time.
    Members of a deactivated group are detached (their `group` is cleared).
    """
    cutoff = datetime.datetime.fromtimestamp(time.time() - interval * 60 * 60)
    store = uutils.get_store()
    # Walk over every group still marked active.
    for group in store.find(Group, Group.is_active == True):
        newest = store.find(
            GroupEvent, GroupEvent.group == group
        ).order_by(Desc(GroupEvent.created)).first()
        # Stale when there is no event and the group itself is old, or the
        # newest event predates the cutoff.
        stale = (group.created < cutoff if newest is None
                 else newest.created < cutoff)
        if stale:
            if verbose:
                print("deactivating group %d (%s)" % (group.id, group.name))
            group.is_active = False
            for member in group.users:
                member.group = None
    store.commit()
def cleanup(interval, verbose):
    """Flag long-idle groups as inactive and unlink their members.

    Idleness is measured against the newest GroupEvent, falling back to the
    group's creation time when it has no events.
    """
    limit = datetime.datetime.fromtimestamp(time.time() - interval * 60 * 60)
    store = uutils.get_store()
    for grp in store.find(Group, Group.is_active == True):
        evt = (store.find(GroupEvent, GroupEvent.group == grp)
               .order_by(Desc(GroupEvent.created))
               .first())
        last_seen = grp.created if evt is None else evt.created
        if last_seen >= limit:
            continue  # Still fresh enough; leave it alone.
        if verbose:
            print("deactivating group %d (%s)" % (grp.id, grp.name))
        grp.is_active = False
        for usr in grp.users:
            usr.group = None
    store.commit()
from libunison.models import User
from storm.locals import *


def _parse_args():
    """Build the CLI: explicit user ids, plus --null / --all selectors."""
    parser = argparse.ArgumentParser()
    parser.add_argument('user', nargs='*', type=int)
    parser.add_argument('--null', action='store_true')
    parser.add_argument('--all', action='store_true')
    return parser.parse_args()


if __name__ == '__main__':
    args = _parse_args()
    store = uutils.get_store()
    # Gather the target users without duplicates.
    targets = set()
    for uid in args.user:
        candidate = store.get(User, uid)
        if candidate is not None:
            targets.add(candidate)
    if args.all:
        targets.update(store.find(User))
    elif args.null:
        # Only users that do not have a model yet.
        targets.update(store.find(User, User.model == None))
    for usr in targets:
        print("updating model for user %s..." % usr.email)
        model = predict.Model(usr)
        model.generate(store)
        print(" ... %d components" % model.get_nb_components())
    store.commit()
# NOTE(review): the three statements below are the tail of a method whose
# `def` lies outside this chunk (they reference `self`, `track`, `info`,
# `meta`); kept verbatim rather than guessed at.
track.listeners = info['listeners']
self._store.commit()
self._logger.info("fetched track info for: %r" % meta)


def _parse_args():
    """CLI: a single --rate option controlling the fetch frequency."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--rate', type=float, default=DEFAULT_RATE)
    return parser.parse_args()


def _get_logger():
    """Return a DEBUG-level 'tagfetcher' logger that writes to stderr."""
    formatter = logging.Formatter('%(asctime)s: %(levelname)s - %(message)s')
    handler = logging.StreamHandler()  # Log to stderr.
    handler.setFormatter(formatter)
    logger = logging.getLogger('tagfetcher')
    logger.setLevel(logging.DEBUG)
    logger.addHandler(handler)
    return logger


if __name__ == '__main__':
    args = _parse_args()
    logger = _get_logger()
    # One fetcher per API key: the default key plus any additional keys.
    # NOTE(review): loop extent inferred — original indentation was lost in
    # this chunk; confirm store/fetcher creation really sits inside the loop.
    for key in [CONFIG['lastfm']['key']] + CONFIG['lastfm']['addkeys']:
        lfm = liblfm.LastFM(key)
        store = uutils.get_store(CONFIG['database']['string'])
        fetcher = Fetcher(args.rate, lfm, store, logger)
        # Launch the fetcher !
        fetcher.start()