def resolve_statstotal(self, info, **kwargs):
    initial_timestamp = kwargs.get('initial_timestamp')
    final_timestamp = kwargs.get('final_timestamp')

    # Fall back to the widest possible range when a bound is missing.
    if initial_timestamp is None:
        initial_timestamp = datetime.min
    if final_timestamp is None:
        final_timestamp = datetime.max

    # Count the visits whose timestamp falls inside the requested range
    # (assumes Visit has a `timestamp` field).
    visits = Visit.objects.filter(
        timestamp__range=(initial_timestamp, final_timestamp))
    return visits.count()
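# A minimal sketch (not from the source) of how this resolver might be wired
# to a graphene Query type; the field and argument names mirror the kwargs
# used above, everything else is an assumption.
import graphene

class Query(graphene.ObjectType):
    statstotal = graphene.Int(
        initial_timestamp=graphene.DateTime(),
        final_timestamp=graphene.DateTime(),
    )
    # resolve_statstotal (above) would be attached to this class as the
    # resolver for the statstotal field.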
from datetime import datetime, timedelta

from django.http import HttpRequest, HttpResponse
from django.shortcuts import get_object_or_404, redirect
from django.utils import timezone

# Memory, MemoryStatus and __get_statistics come from this app's own
# models/helpers (imports omitted here).


def aware(request: HttpRequest, memory_id: int) -> HttpResponse:
    memory = get_object_or_404(Memory, pk=memory_id)  # type: Memory

    # Answered correctly right away on the first test
    if memory.group_level <= 0:
        # Update the statistics
        statistics = __get_statistics(memory)
        statistics.aware_cnt += 1
        statistics.save()

        # Advance the step and move unlock_dt to the matching review interval
        if memory.step == 0:
            memory.unlock_dt = timezone.now() + timedelta(days=1)
        elif memory.step == 1:
            memory.unlock_dt = timezone.now() + timedelta(days=7)
        elif memory.step == 2:
            memory.unlock_dt = timezone.now() + timedelta(days=28)
        elif memory.step == 3:
            memory.unlock_dt = timezone.now() + timedelta(days=28 * 3)
        elif memory.step == 4:
            memory.unlock_dt = datetime.max  # effectively never again
        # Shift the unlock time back by 8 hours
        memory.unlock_dt = memory.unlock_dt - timedelta(hours=8)
        memory.step += 1

        # Answered in a single try, so the word can be considered known
        memory.status = MemoryStatus.Aware
        # Count how many times it was answered correctly in one try
        memory.aware_cnt += 1
    # Answered correctly only on the second or later attempt
    else:
        # Test it again tomorrow
        memory.unlock_dt = timezone.now() + timedelta(hours=16)
        # A single wrong answer restarts the word from step 0
        memory.step = 0
        memory.group_level = 0

    memory.save()
    return redirect('exam', book_id=memory.book.id, exam_type=memory.type)
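# The review schedule implemented by the branch above, condensed into a small
# sketch for readability; STEP_INTERVALS and next_unlock_dt are illustrative
# names and are not part of the original code.
from datetime import datetime, timedelta

STEP_INTERVALS = {
    0: timedelta(days=1),       # first correct answer: review tomorrow
    1: timedelta(days=7),       # then after a week
    2: timedelta(days=28),      # then after four weeks
    3: timedelta(days=28 * 3),  # then after roughly a quarter
}

def next_unlock_dt(step, now):
    # Step 4 and beyond never unlocks again (datetime.max in the view).
    base = now + STEP_INTERVALS[step] if step in STEP_INTERVALS else datetime.max
    # The view shifts every unlock time back by 8 hours.
    return base - timedelta(hours=8)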
def calc_similarity(self, session_items, sessions, dwelling_times, timestamp):
    '''
    Calculates the configured similarity between the items in session_items
    and each session in sessions.

    Parameters
    --------
    session_items: ordered list of item ids of the current session
    sessions: list of session ids of the candidate neighbor sessions
    dwelling_times: list of dwelling times for the current session's items
    timestamp: timestamp of the current session, used for time decay

    Returns
    --------
    out : list of tuple (session_id, similarity)
    '''
    # Position weighting: when a weighting scheme is configured, items later
    # in the current session receive a higher weight.
    pos_map = {}
    length = len(session_items)
    count = 1
    for item in session_items:
        if self.weighting is not None:
            pos_map[item] = getattr(self, self.weighting)(count, length)
            count += 1
        else:
            pos_map[item] = 1

    # Scale the position weights by the normalized dwelling time per item.
    if self.dwelling_time:
        dt = dwelling_times.copy()
        dt.append(0)
        dt = pd.Series(dt, index=session_items)
        dt = dt / dt.max()
        # dt[session_items[-1]] = dt.mean() if len(session_items) > 1 else 1
        dt[session_items[-1]] = 1
        for i in range(len(dt)):
            pos_map[session_items[i]] *= dt.iloc[i]

    # Replace the weights with the items' IDF scores if configured.
    if self.idf_weighting_session:
        for item in session_items:
            pos_map[item] = self.idf[item] if item in self.idf else 0

    items = set(session_items)
    neighbors = []
    for session in sessions:
        # Get the items of the candidate session (looks up the cache first).
        n_items = self.items_for_session(session)
        sts = self.session_time[session]

        # Weighted dot product between the two sessions.
        similarity = self.vec(items, n_items, pos_map)
        if similarity > 0:
            if self.weighting_time:
                # Decay the similarity by 7/8 per day of age of the neighbor.
                diff = timestamp - sts
                days = round(diff / 60 / 60 / 24)
                decay = pow(7 / 8, days)
                similarity *= decay
            neighbors.append((session, similarity))

    return neighbors
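# The position weighting looked up via getattr(self, self.weighting) and the
# vec() "dot product" are defined elsewhere in the class; below is a minimal
# sketch of plausible implementations, assuming a linear position decay. Both
# bodies are illustrative, not the original definitions.
def linear(self, i, length):
    # Items later in the session get a higher weight (decay of 0.1 per
    # position away from the end of the session).
    return 1 - (0.1 * (length - i)) if i <= length else 0

def vec(self, current_items, neighbor_items, pos_map):
    # Sum the weights of the items shared between the current session and
    # the neighbor session.
    intersection = current_items & neighbor_items
    return sum(pos_map[item] for item in intersection)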