Code Example #1
File: event.py Project: giacaglia/wigo2-server
    def get(self, headers):
        user = g.user
        group = g.group

        query = self.select().group(group)
        query = query.min(epoch(group.get_day_end() - timedelta(days=8)))
        query = query.max(epoch(group.get_day_end() + timedelta(hours=4)))
        count, page, events = query.execute()

        if count == 0 and group.status == 'initializing':
            tries = int(request.args.get('tries', 0))
            if tries < 5:
                sleep(1)  # clients seem to not obey Retry-After, so artificially delay
                request_arguments = request.args.copy().to_dict()
                request_arguments['tries'] = tries + 1  # bump the counter so the retry loop terminates
                response = redirect('%s?%s' % (request.path, url_encode(request_arguments)))
                response.headers.add('Retry-After', 1)
                return response
            else:
                # return without "headers" so this response isn't cached
                return self.serialize_list(self.model, events, count, page), 200

        attending_id = g.user.get_attending_id()
        if attending_id:
            try:
                attending = Event.find(attending_id)
                if attending in events:
                    events.remove(attending)
                if 'page' not in request.args:
                    events.insert(0, attending)
                    attending.current_user_attending = True
            except DoesNotExist:
                logger.warn('Event {} not found'.format(attending_id))

        return self.serialize_list(self.model, events, count, page), 200, headers
Code Example #2
File: event.py Project: giacaglia/wigo2-server
 def get(self, headers):
     user = g.user
     group = g.group
     query = self.select().group(group).order('asc')
     query = query.min(epoch(group.get_day_start()))
     query = query.max(epoch(group.get_day_end() + timedelta(days=10)))
     count, page, events = query.execute()
     return self.serialize_list(self.model, events, count, page), 200, headers
Code Example #3
File: query.py Project: giacaglia/wigo2-server
    def __get_page(self, key):
        start_page = self._page

        min = self._min
        max = self._max

        # for event queries scoped to a group, default to day-based bounds
        if self._model_class == Event and self._group:
            if min is None:
                min = epoch(self._group.get_day_start() - timedelta(days=7))
            if max is None:
                # add 1 hour to account for sub-scoring
                max = epoch(self._group.get_day_end() + timedelta(hours=1))

        # otherwise leave the range open ended
        min = '-inf' if min is None else min
        max = '+inf' if max is None else max

        count = self.db.get_sorted_set_size(key, min, max)
        if count == 0:
            return 0, start_page, []

        if self._order == 'desc':
            range_f = self.db.sorted_set_rrange_by_score
            rank_f = self.db.sorted_set_rrank
            min, max = max, min
        else:
            range_f = self.db.sorted_set_range_by_score
            rank_f = self.db.sorted_set_rank

        pages = int(math.ceil(float(count) / self._limit))

        if self._start:
            position = rank_f(key, self._start)
            if position is not None:
                start_page = int((float(position) / self._limit) + 1)

        collected = []
        page = start_page
        for page in range(start_page, pages + 1):
            start = (page - 1) * self._limit
            model_ids = range_f(key,
                                min,
                                max,
                                start,
                                self._limit,
                                withscores=True)
            instances = self._model_class.find([m[0] for m in model_ids])
            for index, instance in enumerate(instances):
                if instance is not None:
                    instance.score = model_ids[index][1]
            secured = self.__clean_results(instances)
            collected.extend(secured)

            # if we have page size results, break
            if len(collected) >= self._limit:
                break

        return count, page, collected
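The sorted_set_range_by_score / sorted_set_rrange_by_score wrappers used above are not included in this listing. Judging from the call sites, they likely delegate to redis-py's ZRANGEBYSCORE / ZREVRANGEBYSCORE with a LIMIT clause. A minimal sketch under that assumption (the wrapper names and the withscores pass-through come from the call above; everything else is assumed):

import redis

r = redis.StrictRedis()

def sorted_set_range_by_score(key, min, max, start, limit, withscores=False):
    # ascending page: ZRANGEBYSCORE key min max LIMIT start limit
    return r.zrangebyscore(key, min, max, start=start, num=limit,
                           withscores=withscores)

def sorted_set_rrange_by_score(key, max, min, start, limit, withscores=False):
    # descending page: redis-py's zrevrangebyscore takes max before min,
    # which matches the min/max swap done for 'desc' queries above
    return r.zrevrangebyscore(key, max, min, start=start, num=limit,
                              withscores=withscores)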
Code Example #4
File: event.py Project: giacaglia/wigo2-server
    def index(self):
        super(EventMessage, self).index()

        event = self.event

        # record to the global by_votes sort
        with self.db.transaction(commit_on_select=False):
            num_votes = self.db.get_sorted_set_size(skey(self, 'votes'))
            sub_sort = epoch(self.created) / epoch(event.expires + timedelta(days=365))
            by_votes_key = skey(event, 'messages', 'by_votes')
            self.db.sorted_set_add(by_votes_key, self.id, num_votes + sub_sort)
            self.db.expire(by_votes_key, self.ttl())

            self.record_for_user(self.user)
Code Example #5
File: event.py Project: giacaglia/wigo2-server
    def index(self):
        super(EventMessageVote, self).index()

        user = self.user
        message = self.message
        event = message.event

        # record the vote into the global "by_votes" sort order
        with self.db.transaction(commit_on_select=False):
            num_votes = self.db.get_sorted_set_size(skey(message, 'votes'))
            sub_sort = epoch() / epoch(event.expires + timedelta(days=365))
            by_votes = skey(event, 'messages', 'by_votes')
            self.db.sorted_set_add(by_votes, self.message_id, num_votes + sub_sort, replicate=False)
            self.db.expire(by_votes, self.ttl())
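The "by_votes" score packs two orderings into one number: the integer part is the vote count, and the fractional part is a recency tie-breaker, since epoch(now) divided by epoch(expires + 365 days) is always below 1 while the event is alive. A small self-contained illustration (the epoch helper here is a stand-in for the project's own, and the numbers are made up):

from datetime import datetime, timedelta

def epoch(dt=None):
    # stand-in for the project's epoch(): seconds since the Unix epoch
    dt = dt or datetime.utcnow()
    return (dt - datetime(1970, 1, 1)).total_seconds()

expires = datetime.utcnow() + timedelta(days=1)
sub_sort = epoch() / epoch(expires + timedelta(days=365))
assert 0 < sub_sort < 1          # never disturbs the integer vote count
score = 12 + sub_sort            # 12 votes; newer messages rank higher within a tie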
Code Example #6
File: event.py Project: giacaglia/wigo2-server
    def get(self, headers):
        group = g.group
        meta = {}

        dates = [(group.get_day_end() + timedelta(days=i)) for i in range(10)]

        p = wigo_db.redis.pipeline()
        for d in dates:
            p.zcount(skey(group, 'events'), epoch(d), epoch(d + timedelta(hours=3)))
        counts = p.execute()

        for index, d in enumerate(dates):
            meta[(d - timedelta(days=1)).date().isoformat()] = counts[index]

        return meta, 200, headers
Code Example #7
File: data.py Project: giacaglia/wigo2-server
def new_friend(user_id, friend_id):
    user = User.find(user_id)
    friend = User.find(friend_id)

    if not user.is_friend(friend):
        return

    min = epoch(datetime.utcnow() - timedelta(days=8))

    # tells each friend about the event history of the other
    def capture_history(u, f):
        # capture each of the users posted photos
        with wigo_db.transaction(commit_on_select=False):
            for message in EventMessage.select().key(skey(
                    u, 'event_messages')).min(min):
                if message.user and message.event:
                    message.record_for_user(f)

        # capture the events being attended
        for event in Event.select().user(u).min(min):
            if u.is_attending(event) and f.can_see_event(event):
                event.add_to_user_attending(f, u)

    capture_history(user, friend)
    capture_history(friend, user)
Code Example #8
File: event.py Project: giacaglia/wigo2-server
    def index(self):
        super(EventAttendee, self).index()

        user = self.user
        event = self.event

        # check if the user is switching events for the date the event is on
        current_event_id = user.get_attending_id(event)
        if current_event_id and current_event_id != event.id:
            EventAttendee({'event_id': current_event_id, 'user_id': user.id}).delete()

        with self.db.transaction(commit_on_select=False):
            # first update the global state of the event
            attendees_key = skey(event, 'attendees')
            self.db.sorted_set_add(attendees_key, user.id, epoch(self.created))
            self.db.expire(attendees_key, event.ttl())

            # now update the users view of the events
            # record the exact event the user is currently attending
            user.set_attending(event)

            # record current user as an attendee
            attendees_key = user_attendees_key(user, event)
            self.db.sorted_set_add(attendees_key, user.id, 'inf')
            self.db.expire(attendees_key, event.ttl())

        # record the event into the events the user can see
        with self.db.transaction(commit_on_select=False):
            event.update_global_events()
            event.update_user_events(user)
Code Example #9
File: db.py Project: giacaglia/wigo2-server
from contextlib import contextmanager

@contextmanager
def rate_limit(key, expires, lock_timeout=30):
    if Configuration.ENVIRONMENT in ('dev', 'test'):
        yield False
    else:
        # TODO this is legacy and can be removed at some point
        if redis.exists('rate_limit:{}'.format(key)):
            yield True
            return

        if isinstance(expires, datetime):
            expires = expires - datetime.utcnow()

        rate_limit_set = 'rate_limits_{}'.format(
            wigo_db.redis.get_server_name(key))
        score = wigo_db.sorted_set_get_score(rate_limit_set, key)

        if score and float(score) > time():
            yield True
        else:
            lock = redis.lock('locks:{}'.format(key), timeout=lock_timeout)
            if lock.acquire(blocking=False):
                score = wigo_db.sorted_set_get_score(rate_limit_set, key)
                if score and float(score) > time():
                    yield True
                else:
                    try:
                        yield False
                        wigo_db.sorted_set_add(
                            rate_limit_set, key,
                            epoch(datetime.utcnow() + expires))
                    finally:
                        lock.release()
            else:
                yield True
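rate_limit yields exactly once on every path, which is the contextlib.contextmanager pattern: the yielded boolean tells the caller whether the action is currently rate-limited. A hypothetical call site (the key format and the send_sms action are made up for illustration):

from datetime import timedelta

with rate_limit('sms:{}'.format(user.id), timedelta(minutes=10)) as limited:
    if not limited:
        send_sms(user)  # hypothetical action; skipped when rate-limited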
Code Example #10
File: db.py Project: giacaglia/wigo2-server
 def expire(self, key, expires, long_term_expires=None):
     redis = self.get_redis(True)
     expires = check_expires(expires)
     redis.zadd(self.get_expire_key(key), key,
                epoch(datetime.utcnow() + expires))
     if self.queued_db and long_term_expires:
         self.queued_db.expire(key, expires, long_term_expires)
Code Example #11
File: event.py Project: giacaglia/wigo2-server
    def record_for_user(self, user):
        event = self.event

        # record into users list of messages by time
        with self.db.transaction(commit_on_select=False):
            user_emessages_key = user_eventmessages_key(user, event)
            self.db.sorted_set_add(user_emessages_key, self.id, epoch(self.created))
            self.db.expire(user_emessages_key, self.ttl())

            # record into users list by vote count
            num_votes = self.db.get_sorted_set_size(skey(self, 'votes'))
            sub_sort = epoch() / epoch(event.expires + timedelta(days=365))
            by_votes_key = user_eventmessages_key(user, event, True)
            self.db.sorted_set_add(by_votes_key, self.id, num_votes + sub_sort, replicate=False)
            self.db.expire(by_votes_key, self.ttl())

            user.track_meta('last_event_change')
Code Example #12
File: event.py Project: giacaglia/wigo2-server
def get_score_key(time, distance, num_attending):
    if num_attending > 999:
        num_attending = 999

    targets = [0, 20, 50, 70, 100]
    distance_bucket = next(reversed([t for t in targets if t <= distance]), None) + 10
    adjustment = (1 - (distance_bucket / 1000.0)) + (num_attending / 10000.0)
    return str(Decimal(epoch(time)) + Decimal(adjustment))
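Worked through with made-up inputs: the event time dominates the score, while the fractional adjustment favors nearby, well-attended events among those at the same time.

from datetime import datetime

t = datetime(2015, 6, 1, 22, 0)   # hypothetical event time
print(get_score_key(t, 35, 42))
# distance 35 -> largest bucket target <= 35 is 20, +10 -> distance_bucket = 30
# adjustment = (1 - 30/1000.0) + 42/10000.0 = 0.9742
# so the key is epoch(t) + 0.9742: time dominates, and the fraction
# breaks ties in favor of closer, better-attended events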
Code Example #13
File: user.py Project: giacaglia/wigo2-server
    def get_tapped_ids(self):
        from server.db import wigo_db

        return wigo_db.sorted_set_range_by_score(
            skey(self, 'tapped'),
            epoch(self.group.get_day_start()),
            'inf',
            limit=5000)
Code Example #14
File: user.py Project: giacaglia/wigo2-server
 def index(self):
     super(Notification, self).index()
     key = skey('user', self.user_id, 'notifs')
     primitive = self.to_primitive()
     self.db.sorted_set_add(key,
                            primitive,
                            epoch(self.created),
                            dt=dict,
                            replicate=False)
     self.db.clean_old(key, self.TTL)
Code Example #15
File: user.py Project: giacaglia/wigo2-server
    def get(self, user_id, headers):
        user = g.user
        group = g.group

        query = self.select().user(user)
        query = query.min(epoch(group.get_day_end() - timedelta(days=15)))
        query = query.max(time())
        count, page, instances = query.execute()

        return self.serialize_list(self.model, instances, count,
                                   page), 200, headers
Code Example #16
File: event.py Project: giacaglia/wigo2-server
    def record_for_user(self, user):
        message = self.message
        event = message.event

        with self.db.transaction(commit_on_select=False):
            user_votes = user_votes_key(user, self.message)
            self.db.sorted_set_add(user_votes, self.user.id, epoch(self.created), replicate=False)
            self.db.expire(user_votes, self.ttl())

        # this forces the message to update its indexes for the user
        message.record_for_user(user)
Code Example #17
File: user.py Project: giacaglia/wigo2-server
 def setup(u1, u2):
     self.db.sorted_set_add(skey(u1, 'friends'), u2.id,
                            epoch(self.created))
     self.db.sorted_set_add(skey(u1, 'friends', 'top'), u2.id,
                            10000)
     self.db.sorted_set_add(skey(u1, 'friends', 'alpha'),
                            u2.id,
                            prefix_score(u2.full_name.lower()),
                            replicate=False)
     if u2.privacy == 'private':
         self.db.set_add(skey(u1, 'friends', 'private'),
                         u2.id,
                         replicate=False)
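prefix_score is not among the examples in this listing. A plausible sketch (not the project's actual code) maps the first few characters of the lowercased name to a number, so that ascending sorted-set score order matches alphabetical order:

def prefix_score(s, length=6):
    # treat the first `length` characters as digits of a base-256 number;
    # lexicographic order of strings then maps to numeric order of scores
    s = s[:length].ljust(length, '\0')
    score = 0
    for ch in s:
        score = score * 256 + ord(ch)
    return float(score)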
Code Example #18
File: db.py Project: giacaglia/wigo2-server
    def set(self, key, value, expires=None, long_term_expires=None):
        redis = self.get_redis(True)
        expires = check_expires(expires)
        result = redis.set(key, self.encode(value, dict))

        if expires:
            redis.zadd(self.get_expire_key(key), key,
                       epoch(datetime.utcnow() + expires))

        if self.queued_db and long_term_expires != 0:
            self.queued_db.set(key, value, expires, long_term_expires)

        return result
Code Example #19
File: event.py Project: giacaglia/wigo2-server
    def get(self, user_id, headers):
        user = g.user
        group = g.group

        query = self.select().user(user)
        query = query.min(epoch(group.get_day_end() - timedelta(days=8)))
        query = query.max(epoch(group.get_day_end() + timedelta(hours=4)))

        count, page, events = query.execute()

        attending_id = user.get_attending_id()
        if attending_id:
            try:
                attending = Event.find(attending_id)
                if attending in events:
                    events.remove(attending)
                if 'page' not in request.args:
                    events.insert(0, attending)
                    attending.current_user_attending = True
            except DoesNotExist:
                logger.warn('Event {} not found'.format(attending_id))

        return self.serialize_list(self.model, events, count, page), 200, headers
Code Example #20
 def notifications_model_listener(sender, instance, created):
     if isinstance(instance, User):
         if is_new_user(instance, created):
             new_user.delay(instance.id)
         if not created and (instance.was_changed('status')
                             and instance.get_previous_old_value('status') == 'waiting'):
             notify_unlocked.delay(instance.id)
     elif isinstance(instance, EventAttendee) and created:
         notify_on_attendee.delay(instance.event_id, instance.user_id)
     elif isinstance(instance, EventMessage) and created:
         notify_on_eventmessage.delay(instance.id)
     elif isinstance(instance, EventMessageVote) and created:
         notify_on_eventmessage_vote.delay(instance.user_id, instance.message_id)
     elif isinstance(instance, Message) and created:
         notify_on_message.delay(instance.id)
     elif isinstance(instance, Friend) and created:
         notify_on_friend.delay(instance.user_id, instance.friend_id, instance.accepted)
     elif isinstance(instance, Tap) and created:
         notify_on_tap.delay(instance.user_id, instance.tapped_id)
     elif isinstance(instance, Invite) and created:
         notify_on_invite.delay(instance.user_id, instance.invited_id, instance.event_id)
     elif isinstance(instance, Notification) and created:
         instance.user.track_meta('last_notification', epoch(instance.created))
Code Example #21
def save_data(data):
    observations = data["observations"]
    now_epoch = epoch(datetime.utcnow())
    out = []
    for i in observations:
        # NOTE: getting the user does not have to happen here (as the total response time for the json post should be < 500ms)
        # It could be off loaded to do a daily batch. This will be something like "user" below will be the email address, id, username, etc... of the device, or None
        device_owner = get_client_owner(i["clientMac"])
        user = device_owner["user"] if device_owner else None
        location = i.get("location")
        out.append({
            "ap": data["apMac"],
            "client_mac": i["clientMac"],
            "lat": location.get("lat") if location else None,
            "lng": location.get("lng") if location else None,
            "identity": user,
            "time_present": i["seenEpoch"],
            "time_now": now_epoch
        })

    # printing here - can send to api, log file, etc...
    # to_log_file(out)
    # to_firehose(out)
    pprint(out)
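to_firehose is left commented out above; a minimal sketch with boto3 (the delivery stream name is hypothetical) might look like:

import json
import boto3

firehose = boto3.client('firehose')

def to_firehose(out, stream='observation-stream'):
    # batch the observations into a single Firehose put; newline-delimited
    # JSON is a common convention for downstream consumers
    firehose.put_record_batch(
        DeliveryStreamName=stream,
        Records=[{'Data': (json.dumps(o) + '\n').encode()} for o in out])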
Code Example #22
File: data.py Project: giacaglia/wigo2-server
def new_group(group_id):
    group = Group.find(group_id)
    logger.info('new group {} created, importing events'.format(
        group.name.encode('utf-8')))
    num_imported = 0
    imported = set()

    min = epoch(group.get_day_end() - timedelta(days=7))

    with wigo_db.transaction(commit_on_select=False):
        for close_group in get_close_groups(group.latitude, group.longitude,
                                            100):
            if close_group.id == group.id:
                continue

            for event in Event.select().group(close_group).min(min):
                # only import the events the group actually owns
                if event.group_id != close_group.id:
                    continue
                # no double imports
                if event.id not in imported:
                    event.update_global_events(group=group)
                    imported.add(event.id)
                    num_imported += 1

        for event in Event.select().key(skey('global', 'events')).min(min):
            if event.id not in imported:
                event.update_global_events(group=group)
                imported.add(event.id)
                num_imported += 1

    logger.info('imported {} events into group {}'.format(
        num_imported, group.name.encode('utf-8')))
    group.track_meta('last_event_change', expire=None)
    group.status = 'active'
    group.save()
Code Example #23
def main():

    parser = argparse.ArgumentParser(description='Parameter Processing')
    parser.add_argument('--dataset',
                        type=str,
                        default='CIFAR10',
                        help='dataset')
    parser.add_argument('--model', type=str, default='ConvNet', help='model')
    parser.add_argument('--ipc',
                        type=int,
                        default=1,
                        help='image(s) per class')
    parser.add_argument(
        '--eval_mode', type=str, default='S', help='eval_mode'
    )  # S: the same to training model, M: multi architectures,  W: net width, D: net depth, A: activation function, P: pooling layer, N: normalization layer,
    parser.add_argument('--num_exp',
                        type=int,
                        default=5,
                        help='the number of experiments')
    parser.add_argument(
        '--num_eval',
        type=int,
        default=10,
        help='the number of evaluating randomly initialized models')
    parser.add_argument('--epoch_eval_train',
                        type=int,
                        default=300,
                        help='epochs to train a model with synthetic data')
    parser.add_argument('--Iteration',
                        type=int,
                        default=1000,
                        help='training iterations')
    parser.add_argument('--lr_img',
                        type=float,
                        default=0.1,
                        help='learning rate for updating synthetic images')
    parser.add_argument('--lr_net',
                        type=float,
                        default=0.01,
                        help='learning rate for updating network parameters')
    parser.add_argument('--batch_real',
                        type=int,
                        default=256,
                        help='batch size for real data')
    parser.add_argument('--batch_train',
                        type=int,
                        default=256,
                        help='batch size for training networks')
    parser.add_argument(
        '--init',
        type=str,
        default='noise',
        help=
        'initialization of synthetic data, noise/real: initialize from random noise or real images. The two initializations will get similar performances.'
    )
    parser.add_argument('--clip_syn',
                        type=str,
                        default='False',
                        help='clip the pixel values of synthetic data')
    parser.add_argument('--data_path',
                        type=str,
                        default='data',
                        help='dataset path')
    parser.add_argument('--save_path',
                        type=str,
                        default='result',
                        help='path to save results')
    parser.add_argument('--dis_metric',
                        type=str,
                        default='ours',
                        help='distance metric')
    # The experimental results in the paper were obtained with --init noise and --clip_syn False.
    # For speeding up, we can decrease the Iteration and epoch_eval_train, which will not cause significant performance decrease.

    args = parser.parse_args()
    args.clip_syn = True if args.clip_syn == 'True' else False
    args.outer_loop, args.inner_loop = get_loops(args.ipc)
    args.device = 'cuda' if torch.cuda.is_available() else 'cpu'
    print('Hyper-parameters: \n', args.__dict__)

    if not os.path.exists(args.data_path):
        os.mkdir(args.data_path)

    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)

    eval_it_pool = np.arange(
        0, args.Iteration + 1, 100).tolist() if args.eval_mode == 'S' else [
            args.Iteration
        ]  # The list of iterations when we evaluate models and record results.
    channel, im_size, num_classes, class_names, mean, std, dst_train, dst_test, testloader = get_dataset(
        args.dataset, args.data_path)
    model_eval_pool = get_eval_pool(args.eval_mode, args.model, args.model)
    print('Evaluation model pool: ', model_eval_pool)

    accs_all_exps = dict()  # record performances of all experiments
    for key in model_eval_pool:
        accs_all_exps[key] = []

    data_save = []

    for exp in range(args.num_exp):
        print('\n================== Exp %d ==================\n ' % exp)
        ''' organize the real dataset '''
        images_all = []
        labels_all = []
        indices_class = [[] for c in range(num_classes)]

        images_all = [
            torch.unsqueeze(dst_train[i][0], dim=0)
            for i in range(len(dst_train))
        ]
        labels_all = [dst_train[i][1] for i in range(len(dst_train))]
        for i, lab in enumerate(labels_all):
            indices_class[lab].append(i)
        images_all = torch.cat(images_all, dim=0).to(args.device)
        labels_all = torch.tensor(labels_all,
                                  dtype=torch.long,
                                  device=args.device)

        for c in range(num_classes):
            print('class c = %d: %d real images' % (c, len(indices_class[c])))

        def get_images(c, n):  # get random n images from class c
            idx_shuffle = np.random.permutation(indices_class[c])[:n]
            return images_all[idx_shuffle]

        for ch in range(channel):
            print('real images channel %d, mean = %.4f, std = %.4f' %
                  (ch, torch.mean(
                      images_all[:, ch]), torch.std(images_all[:, ch])))
        ''' initialize the synthetic data '''
        image_syn = torch.randn(size=(num_classes * args.ipc, channel,
                                      im_size[0], im_size[1]),
                                dtype=torch.float,
                                requires_grad=True,
                                device=args.device)
        label_syn = torch.tensor(
            [np.ones(args.ipc) * i for i in range(num_classes)],
            dtype=torch.long,
            requires_grad=False,
            device=args.device).view(-1)  # [0,0,0, 1,1,1, ..., 9,9,9]

        if args.init == 'real':
            print('initialize synthetic data from random real images')
            for c in range(num_classes):
                image_syn.data[c * args.ipc:(c + 1) * args.ipc] = get_images(
                    c, args.ipc).detach().data
        else:
            print('initialize synthetic data from random noise')
        ''' training '''
        optimizer_img = torch.optim.SGD(
            [
                image_syn,
            ], lr=args.lr_img,
            momentum=0.5)  # optimizer_img for synthetic data
        optimizer_img.zero_grad()
        criterion = nn.CrossEntropyLoss().to(args.device)
        print('%s training begins' % get_time())

        for it in range(args.Iteration + 1):
            ''' Evaluate synthetic data '''
            if it in eval_it_pool:
                for model_eval in model_eval_pool:
                    print(
                        '-------------------------\nEvaluation\nmodel_train = %s, model_eval = %s, iteration = %d'
                        % (args.model, model_eval, it))
                    param_augment = get_daparam(args.dataset, args.model,
                                                model_eval, args.ipc)
                    if param_augment['strategy'] != 'none':
                        epoch_eval_train = 1000  # More training epochs for augmentation would be better.
                        print('data augmentation = %s' % param_augment)
                    else:
                        epoch_eval_train = args.epoch_eval_train
                    accs = []
                    for it_eval in range(args.num_eval):
                        net_eval = get_network(
                            model_eval, channel, num_classes,
                            im_size).to(args.device)  # get a random model
                        image_syn_eval, label_syn_eval = copy.deepcopy(
                            image_syn.detach()), copy.deepcopy(
                                label_syn.detach()
                            )  # avoid any unaware modification
                        _, acc_train, acc_test = evaluate_synset(
                            it_eval, net_eval, image_syn_eval, label_syn_eval,
                            testloader, args.lr_net, args.batch_train,
                            param_augment, args.device, epoch_eval_train)
                        accs.append(acc_test)
                    print(
                        'Evaluate %d random %s, mean = %.4f std = %.4f\n-------------------------'
                        % (len(accs), model_eval, np.mean(accs), np.std(accs)))

                    if it == args.Iteration:  # record the final results
                        accs_all_exps[model_eval] += accs
                ''' visualize and save '''
                save_name = os.path.join(
                    args.save_path, 'vis_%s_%s_%dipc_exp%d_iter%d.png' %
                    (args.dataset, args.model, args.ipc, exp, it))
                image_syn_vis = copy.deepcopy(image_syn.detach().cpu())
                for ch in range(channel):
                    image_syn_vis[:,
                                  ch] = image_syn_vis[:,
                                                      ch] * std[ch] + mean[ch]
                image_syn_vis[image_syn_vis < 0] = 0.0
                image_syn_vis[image_syn_vis > 1] = 1.0
                save_image(image_syn_vis, save_name, nrow=args.ipc)
                # The generated images would be slightly different from the visualization results in the paper, because of the clip and normalization of pixels.
            ''' Train synthetic data '''
            net = get_network(args.model, channel, num_classes,
                              im_size).to(args.device)  # get a random model
            net.train()
            net_parameters = list(net.parameters())
            optimizer_net = torch.optim.SGD(
                net.parameters(), lr=args.lr_net,
                momentum=0.5)  # optimizer_net for network parameters
            optimizer_net.zero_grad()
            loss_avg = 0

            for ol in range(args.outer_loop):
                ''' freeze the running mu and sigma for BatchNorm layers '''
                # Synthetic data batch, e.g. only 1 image/batch, is too small to obtain stable mu and sigma.
                # So, we calculate and freeze mu and sigma for BatchNorm layer with real data batch ahead.
                # This would make the model with BatchNorm layers easier to train.
                BN_flag = False
                BNSizePC = 16  # for batch normalization
                for module in net.modules():
                    if 'BatchNorm' in module._get_name():  #BatchNorm
                        BN_flag = True
                if BN_flag:
                    img_real = torch.cat(
                        [get_images(c, BNSizePC) for c in range(num_classes)],
                        dim=0)
                    net.train()  # for updating the mu, sigma of BatchNorm
                    output_real = net(img_real)  # get running mu, sigma
                    for module in net.modules():
                        if 'BatchNorm' in module._get_name():  #BatchNorm
                            module.eval(
                            )  # fix mu and sigma of every BatchNorm layer
                ''' update synthetic data '''
                loss = torch.tensor(0.0).to(args.device)
                for c in range(num_classes):
                    img_real = get_images(c, args.batch_real)
                    lab_real = torch.ones((img_real.shape[0], ),
                                          device=args.device,
                                          dtype=torch.long) * c
                    output_real = net(img_real)
                    loss_real = criterion(output_real, lab_real)
                    gw_real = torch.autograd.grad(loss_real, net_parameters)
                    gw_real = list((_.detach().clone() for _ in gw_real))

                    img_syn = image_syn[c * args.ipc:(c + 1) *
                                        args.ipc].reshape(
                                            (args.ipc, channel, im_size[0],
                                             im_size[1]))
                    lab_syn = torch.ones(
                        (args.ipc, ), device=args.device, dtype=torch.long) * c
                    output_syn = net(img_syn)
                    loss_syn = criterion(output_syn, lab_syn)
                    gw_syn = torch.autograd.grad(loss_syn,
                                                 net_parameters,
                                                 create_graph=True)

                    loss += match_loss(gw_syn, gw_real, args)

                optimizer_img.zero_grad()
                loss.backward()
                optimizer_img.step()
                loss_avg += loss.item()

                if args.clip_syn:  # clip synthetic data
                    for ch in range(channel):
                        image_syn_ch = image_syn[:, ch]
                        clip_min, clip_max = (0.0 - mean[ch]) / std[ch], (
                            1.0 - mean[ch]) / std[ch]
                        image_syn_ch[
                            image_syn_ch > clip_max].data[:] = clip_max
                        image_syn_ch[
                            image_syn_ch < clip_min].data[:] = clip_min

                if ol == args.outer_loop - 1:
                    break
                ''' update network '''
                image_syn_train, label_syn_train = copy.deepcopy(
                    image_syn.detach()), copy.deepcopy(
                        label_syn.detach())  # avoid any unaware modification
                dst_train = TensorDataset(image_syn_train, label_syn_train)
                trainloader = torch.utils.data.DataLoader(dst_train,
                                                          batch_size=256,
                                                          shuffle=True,
                                                          num_workers=0)
                for il in range(args.inner_loop):
                    epoch('train', trainloader, net, optimizer_net, criterion,
                          None, args.device)

            loss_avg /= (num_classes * args.outer_loop)

            if it % 10 == 0:
                print('%s iter = %04d, loss = %.4f' %
                      (get_time(), it, loss_avg))

            if it == args.Iteration:  # only record the final results
                data_save.append([
                    copy.deepcopy(image_syn.detach().cpu()),
                    copy.deepcopy(label_syn.detach().cpu())
                ])
                torch.save(
                    {
                        'data': data_save,
                        'accs_all_exps': accs_all_exps,
                    },
                    os.path.join(
                        args.save_path, 'res_%s_%s_%dipc.pt' %
                        (args.dataset, args.model, args.ipc)))

    print('\n==================== Final Results ====================\n')
    for key in model_eval_pool:
        accs = accs_all_exps[key]
        print(
            'Run %d experiments, train on %s, evaluate %d random %s, mean  = %.2f%%  std = %.2f%%'
            % (args.num_exp, args.model, len(accs), key, np.mean(accs) * 100,
               np.std(accs) * 100))
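match_loss compares the per-layer gradients of the synthetic and real batches. It is not included in this listing; for --dis_metric ours, the Dataset Condensation paper uses a grouped cosine distance, of which a simplified per-layer version might look like this (a sketch, not the repository's exact implementation):

import torch

def match_loss(gw_syn, gw_real, args):
    # simplified flat cosine distance per layer, summed across layers;
    # the paper's 'ours' metric groups convolution filters before comparing
    dis = torch.tensor(0.0, device=args.device)
    for gs, gr in zip(gw_syn, gw_real):
        gs, gr = gs.reshape(-1), gr.reshape(-1)
        dis += 1 - torch.sum(gs * gr) / (gs.norm() * gr.norm() + 1e-6)
    return dis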
Code Example #24
File: train_MLP.py Project: ast0414/copra
def train_mlp(args,
              loaders,
              model,
              criterion,
              optimizer,
              scheduler,
              l1_factor=0.0,
              clr_factor=0.0,
              model_name='model'):
    train_loader = loaders['train_loader']
    valid_loader = loaders['valid_loader']
    test_loader = loaders['test_loader']

    if args.cuda:
        model = model.cuda()

    best_valid_loss = sys.float_info.max

    train_losses = []
    valid_losses = []

    if not os.path.exists(args.output_dir):
        os.makedirs(args.output_dir)

    for i_epoch in tqdm(range(args.epochs), desc='Epochs'):

        # Train
        train_labels, train_preds, train_loss = epoch(train_loader,
                                                      model,
                                                      train=True,
                                                      criterion=criterion,
                                                      optimizer=optimizer,
                                                      l1_factor=l1_factor,
                                                      clr_factor=clr_factor)
        train_losses.append(train_loss)

        # Validation
        valid_labels, valid_preds, valid_loss = epoch(valid_loader,
                                                      model,
                                                      criterion=criterion)

        # Learning rate decay
        if scheduler is not None:
            scheduler.step(valid_loss)

        valid_losses.append(valid_loss)

        # remember best valid loss and save checkpoint
        is_best = valid_loss < best_valid_loss

        if is_best:
            best_valid_loss = valid_loss

            # evaluate on test set
            test_labels, test_preds, test_loss = epoch(test_loader,
                                                       model,
                                                       criterion=criterion)

            with open(args.output_dir + model_name + '_result.txt', 'w') as f:
                f.write('Best Validation Epoch: {}\n'.format(i_epoch))
                f.write('Best Validation Loss: {}\n'.format(best_valid_loss))
                f.write('Train Loss: {}\n'.format(train_loss))
                f.write('Test Loss: {}\n'.format(test_loss))

            # Save entire model
            torch.save(model, args.output_dir + model_name + '.pth')
            # Save model params
            torch.save(model.state_dict(),
                       args.output_dir + model_name + '_params.pth')

    # plot
    plt.figure()
    plt.plot(np.arange(len(train_losses)),
             np.array(train_losses),
             label='Training Loss')
    plt.plot(np.arange(len(valid_losses)),
             np.array(valid_losses),
             label='Validation Loss')
    plt.xlabel('epoch')
    plt.ylabel('Loss')
    plt.legend(loc="best")
    plt.savefig(args.output_dir + model_name + '_loss.eps', format='eps')
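The epoch called here is not the timestamp helper from the earlier examples but a one-pass train/eval loop from the same project. Its body is not in this listing; reconstructed from the call sites above and in the test script below, a minimal sketch might be (signature and return tuple are inferred; the l1/clr regularization factors are left as no-ops):

import torch

def epoch(loader, model, train=False, criterion=None, optimizer=None,
          l1_factor=0.0, clr_factor=0.0, output_activation=None):
    # sketch inferred from call sites: runs one pass over `loader` and
    # returns (labels, outputs, mean loss); regularization terms omitted
    model.train() if train else model.eval()
    labels_all, outputs_all, total_loss, n = [], [], 0.0, 0
    with torch.set_grad_enabled(train):
        for inputs, labels in loader:
            outputs = model(inputs)
            if criterion is not None:
                loss = criterion(outputs, labels)
                total_loss += loss.item() * labels.size(0)
                n += labels.size(0)
                if train:
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()
            if output_activation is not None:
                outputs = output_activation(outputs, dim=1)
            labels_all.append(labels)
            outputs_all.append(outputs.detach())
    return torch.cat(labels_all), torch.cat(outputs_all), total_loss / max(n, 1)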
Code Example #25
def fetchData(requestContext, pathExpr):
    seriesList = []
    startTime = int(epoch(requestContext['startTime']))
    endTime = int(epoch(requestContext['endTime']))

    def _fetchData(pathExpr, startTime, endTime, requestContext, seriesList):
        matching_nodes = STORE.find(pathExpr,
                                    startTime,
                                    endTime,
                                    local=requestContext['localOnly'])
        fetches = [(node, node.fetch(startTime, endTime))
                   for node in matching_nodes if node.is_leaf]

        for node, results in fetches:
            if isinstance(results, FetchInProgress):
                results = results.waitForResults()

            if not results:
                log.info(
                    "render.datalib.fetchData :: no results for %s.fetch(%s, %s)"
                    % (node, startTime, endTime))
                continue

            try:
                (timeInfo, values) = results
            except ValueError as e:
                raise Exception(
                    "could not parse timeInfo/values from metric '%s': %s" %
                    (node.path, e))
            (start, end, step) = timeInfo

            series = TimeSeries(node.path, start, end, step, values)
            series.pathExpression = pathExpr  # hack to pass expressions through to render functions
            seriesList.append(series)

        # Prune empty series with duplicate metric paths to avoid showing empty graph elements for old whisper data
        names = set([s.name for s in seriesList])
        for name in names:
            series_with_duplicate_names = [
                s for s in seriesList if s.name == name
            ]
            empty_duplicates = [
                s for s in series_with_duplicate_names if not nonempty(s)
            ]

            if series_with_duplicate_names == empty_duplicates and len(
                    empty_duplicates) > 0:  # if they're all empty
                empty_duplicates.pop()  # make sure we leave one in seriesList

            for series in empty_duplicates:
                seriesList.remove(series)

        return seriesList

    retries = 1  # start counting at one to make log output and settings more readable
    while True:
        try:
            seriesList = _fetchData(pathExpr, startTime, endTime,
                                    requestContext, seriesList)
            return seriesList
        except Exception as e:
            if retries >= settings.MAX_FETCH_RETRIES:
                log.exception("Failed after %s retry! Root cause:\n%s" %
                              (settings.MAX_FETCH_RETRIES, format_exc()))
                raise e
            else:
                log.exception(
                    "Got an exception when fetching data! Try: %i of %i. Root cause:\n%s"
                    % (retries, settings.MAX_FETCH_RETRIES, format_exc()))
                retries += 1
Code Example #26
File: db.py Project: giacaglia/wigo2-server
 def clean_old(self, key, ttl=None):
     if ttl is None:
         ttl = timedelta(days=10)
     up_to = datetime.utcnow() - ttl
     self.sorted_set_remove_by_score(key, '-inf', epoch(up_to))
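sorted_set_remove_by_score is presumably a thin wrapper over redis-py's ZREMRANGEBYSCORE; the equivalent raw call (an assumption based on the method name, with redis_client standing in for the underlying connection) would be:

# assumed underlying redis-py call: drop every member scored before the cutoff
redis_client.zremrangebyscore(key, '-inf', epoch(up_to))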
Code Example #27
File: __init__.py Project: giacaglia/wigo2-server
 def get_index_score(self):
     return epoch(self.created)
Code Example #28
File: data.py Project: giacaglia/wigo2-server
    def data_save_listener(sender, instance, created):
        if isinstance(instance, User):
            if is_new_user(instance, created):
                new_user(instance.id)

            if instance.status == 'deleted':
                delete_user.delay(instance.id, instance.group_id)

            publish_model_change(instance)

        elif isinstance(instance, Group):
            if created:
                new_group.delay(instance.id)

            publish_model_change(instance)

        elif isinstance(instance, Event):
            event_related_change.delay(instance.group_id, instance.id,
                                       instance.is_global)

            if not created:
                publish_model_change(instance)

        elif isinstance(instance, Friend) and created:
            if instance.accepted:
                new_friend.delay(instance.user_id, instance.friend_id)
            else:
                instance.friend.track_meta('last_friend_request',
                                           epoch(instance.created))

            instance.user.track_meta('last_friend_change')
            instance.friend.track_meta('last_friend_change')

        elif isinstance(instance, Tap):
            instance.user.track_meta('last_tap_change')
        elif isinstance(instance, Block):
            instance.user.track_meta('last_block_change')
        elif isinstance(instance, Invite):
            user_invited.delay(instance.event_id, instance.user_id,
                               instance.invited_id)
        elif isinstance(instance, EventAttendee):
            event = instance.event
            event_related_change.delay(event.group_id, event.id,
                                       event.is_global)
            tell_friends_user_attending.delay(instance.user_id,
                                              instance.event_id)
        elif isinstance(instance, EventMessage):
            event = instance.event
            event_related_change.delay(event.group_id, event.id,
                                       event.is_global)
            tell_friends_event_message.delay(instance.id)
        elif isinstance(instance, EventMessageVote):
            event = instance.message.event
            event_related_change.delay(event.group_id, event.id,
                                       event.is_global)
            # tell_friends_about_vote.delay(instance.message_id, instance.user_id)
        elif isinstance(instance, Message):
            instance.user.track_meta('last_message_change')
            instance.to_user.track_meta('last_message_change')
            instance.to_user.track_meta('last_message_received',
                                        epoch(instance.created))
Code Example #29
File: test_MLP.py Project: ast0414/copra
    with open(args.csr_path, 'rb') as f:
        X_test = pickle.load(f)
        X_test = X_test.todense()

    test_set = TensorDataset(torch.from_numpy(X_test.astype('float32')),
                             torch.from_numpy(y_test.astype('int')))
    test_loader = DataLoader(dataset=test_set,
                             batch_size=args.eval_batch_size,
                             shuffle=False)

    model = torch.load(args.model_path)
    if args.cuda:
        model = model.cuda()

    _, scores, _ = epoch(test_loader, model, output_activation=F.softmax)

    if args.cuda:
        scores = scores.cpu()
    scores = scores.numpy()
    preds = np.argmax(scores, axis=1)

    y_scores = scores[:, 1]
    y_preds = preds

    auroc = roc_auc_score(y_test, y_scores)
    aupr = average_precision_score(y_test, y_scores)
    f1 = f1_score(y_test, y_preds)
    accuracy = accuracy_score(y_test, y_preds)

    print("AUROC: {}, AUPR: {}, F1: {}, ACC: {}".format(
Code Example #30
File: user.py Project: giacaglia/wigo2-server
    def index(self):
        super(Friend, self).index()

        with self.db.transaction(commit_on_select=False):
            if self.accepted:

                def setup(u1, u2):
                    self.db.sorted_set_add(skey(u1, 'friends'), u2.id,
                                           epoch(self.created))
                    self.db.sorted_set_add(skey(u1, 'friends', 'top'), u2.id,
                                           10000)
                    self.db.sorted_set_add(skey(u1, 'friends', 'alpha'),
                                           u2.id,
                                           prefix_score(u2.full_name.lower()),
                                           replicate=False)
                    if u2.privacy == 'private':
                        self.db.set_add(skey(u1, 'friends', 'private'),
                                        u2.id,
                                        replicate=False)

                setup(self.user, self.friend)
                setup(self.friend, self.user)

                for type in ('friend_requests', 'friend_requested'):
                    self.db.sorted_set_remove(skey('user', self.user_id, type),
                                              self.friend_id)
                    self.db.sorted_set_remove(
                        skey('user', self.friend_id, type), self.user_id)

            else:

                def teardown(u1, u2):
                    self.db.sorted_set_remove(skey(u1, 'friends'), u2.id)
                    self.db.sorted_set_remove(skey(u1, 'friends', 'top'),
                                              u2.id)
                    self.db.sorted_set_remove(skey(u1, 'friends', 'alpha'),
                                              u2.id,
                                              replicate=False)
                    self.db.set_remove(skey(u1, 'friends', 'private'),
                                       u2.id,
                                       replicate=False)

                teardown(self.user, self.friend)
                teardown(self.friend, self.user)

                f_reqed_key = skey('user', self.user_id, 'friend_requested')
                self.db.sorted_set_add(f_reqed_key, self.friend_id,
                                       epoch(self.created))

                f_req_key = skey('user', self.friend_id, 'friend_requests')
                self.db.sorted_set_add(f_req_key, self.user_id,
                                       epoch(self.created))

                f_req_in_common_key = skey('user', self.friend_id,
                                           'friend_requests', 'common')
                f_in_common = self.user.get_num_friends_in_common(
                    self.friend_id)
                self.db.sorted_set_add(f_req_in_common_key, self.user_id,
                                       f_in_common)

                # clean out old friend requests
                self.db.clean_old(f_reqed_key, timedelta(days=30))
                self.db.clean_old(f_req_key, timedelta(days=30))
                self.db.clean_old(f_req_in_common_key, timedelta(days=30))