Example 1
    def list_symbols(self, date_range):
        libraries = self._get_libraries(date_range)
        return sorted(
            list(
                set(
                    itertools.chain(
                        *[l.library.list_symbols() for l in libraries]))))
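
Example 1 flattens the per-library symbol lists with itertools.chain(*...), de-duplicates with set() and sorts the result. A minimal standalone sketch of the same flatten/dedupe/sort pattern, with made-up symbol lists in place of the library objects:

import itertools

per_library_symbols = [['AAPL', 'MSFT'], ['MSFT', 'GOOG'], ['AAPL']]

# chain.from_iterable is the star-free equivalent of chain(*list_of_lists)
all_symbols = sorted(set(itertools.chain.from_iterable(per_library_symbols)))
print(all_symbols)  # ['AAPL', 'GOOG', 'MSFT']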
Example 2
    def neighbors(self, node):
        assert node.startswith(u"n_")

        edges = self.bipartite_graph.neighbors(node)
        neighbors = map(lambda edge: self.bipartite_graph.neighbors(edge),
                        edges)
        return list(set(itertools.chain(*neighbors)) - set([node]))
Example 3
def student_notes(request, userid):

    try:
        student = Person.objects.get(find_userid_or_emplid(userid))
    except Person.DoesNotExist:
        student = get_object_or_404(NonStudent, slug=userid)

    if request.POST and 'note_id' in request.POST:
        # the "hide note" box was checked: process
        note = get_object_or_404(AdvisorNote,
                                 pk=request.POST['note_id'],
                                 unit__in=request.units)
        note.hidden = request.POST['hide'] == "yes"
        note.save()

    if isinstance(student, Person):
        notes = AdvisorNote.objects.filter(
            student=student, unit__in=request.units).order_by("-created_at")
        form_subs = FormSubmission.objects.filter(
            initiator__sfuFormFiller=student,
            form__unit__in=Unit.sub_units(request.units),
            form__advisor_visible=True)
        visits = AdvisorVisit.objects.filter(
            student=student, unit__in=request.units).order_by('-created_at')
        # decorate with .entry_type (and .created_at if not present so we can sort nicely)
        for n in notes:
            n.entry_type = 'NOTE'
        for fs in form_subs:
            fs.entry_type = 'FORM'
            fs.created_at = fs.last_sheet_completion()

        items = list(itertools.chain(notes, form_subs))
        items.sort(key=lambda x: x.created_at, reverse=True)
        nonstudent = False
    else:
        notes = AdvisorNote.objects.filter(
            nonstudent=student, unit__in=request.units).order_by("-created_at")
        visits = AdvisorVisit.objects.filter(
            nonstudent=student, unit__in=request.units).order_by('-created_at')
        for n in notes:
            n.entry_type = 'NOTE'
        items = notes
        nonstudent = True

    show_transcript = False
    # For demo purposes only.
    # if 'UNIV' in [u.label for u in request.units]:
    #    show_transcript = True

    template = 'advisornotes/student_notes.html'
    context = {
        'items': items,
        'student': student,
        'userid': userid,
        'nonstudent': nonstudent,
        'show_transcript': show_transcript,
        'units': request.units,
        'visits': visits
    }
    return render(request, template, context)
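
The view above merges two different querysets into one timeline: each object is tagged with .entry_type, the sequences are chained, and the combined list is sorted newest-first on the shared created_at attribute. A reduced sketch of that merge step, with plain dataclasses standing in for the Django models (Note and FormSub here are illustrative, not the app's models):

import itertools
from dataclasses import dataclass
from datetime import datetime

@dataclass
class Note:
    created_at: datetime
    entry_type: str = 'NOTE'

@dataclass
class FormSub:
    created_at: datetime
    entry_type: str = 'FORM'

notes = [Note(datetime(2024, 1, 3)), Note(datetime(2024, 1, 1))]
form_subs = [FormSub(datetime(2024, 1, 2))]

# chain the two sequences, then sort newest-first on the shared attribute
items = list(itertools.chain(notes, form_subs))
items.sort(key=lambda x: x.created_at, reverse=True)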
Example 4
def real():
  # imports made explicit for this snippet (np, plt and itertools are assumed
  # to come from the enclosing module in the original source)
  import itertools
  import numpy as np
  import pandas as pd
  import matplotlib.pyplot as plt

  df = pd.read_csv('one_gallon_on_burner.log')
  df.columns = ['time', 'T']
  df['time'] /= 1000.

  df = df.iloc[df['time'].values >= 50.]
  df = df.iloc[df['time'].values <= 100.]

  deg_inf = 25
  sig_T = 0.02
  t_0 = 30
  s_0 = 0
  s_max = 1

  observations = zip(df['time'].values, df['T'].values)


  x0 = np.array([30., 0.15, np.log(0.2), np.log(0.05)])
  ret = list(run_filter(x0, observations, t_0=t_0, s_0=s_0, s_max=s_max,
                        deg_inf=deg_inf, sig_T=sig_T))

  ts = df['time'].values


  def extend(cnt):
    t, x, P = ret[-1]
    dt = 1.
    for i in range(cnt):
      t += dt
      x_new, F, Q = process_model(x, dt, t, t_0=t_0, s_0=s_0, s_max=s_max,
                                  deg_inf=deg_inf, sig_T=sig_T)
      x, P = kalman_predict(x, P, (lambda xx: x_new, F), Q)
      x[2] = min(x[2], np.log(2))
      x[3] = min(x[3], np.log(0.01))
      yield t, x, P

  ret = itertools.chain(ret, list(extend(10)))

  ts, mus, Ps = zip(*list(ret))
  mus = np.array(mus)
  vars = np.array([np.diag(P) for P in Ps])

  fig, axes = plt.subplots(2, 2, sharex=True)

  for ax, mu, var, vname in zip(axes.reshape(-1), mus.T,
                                vars.T, ['T', 'm', 'H_max', 'k']):
    ax.set_title(vname)
    ax.plot(ts, mu, color='steelblue', lw=2)
    ax.fill_between(ts, mu + np.sqrt(var), mu - np.sqrt(var), alpha=0.8, color='steelblue')
    delta = np.max(mu) - np.min(mu)
    lim = [np.min(mu) - 0.3 * delta, np.max(mu) + 0.3 * delta]
    ax.set_ylim(lim)

  plt.show()
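
The line ret = itertools.chain(ret, list(extend(10))) appends a block of freshly generated predictions to the existing filter output, and the chained result is then unzipped once for plotting. A toy version of that append-via-chain pattern, with numbers in place of filter states (extrapolate is a made-up stand-in for extend):

import itertools

history = [1.0, 1.5, 2.0]

def extrapolate(last, count, step=0.5):
    # generate `count` further values past the end of the history
    for _ in range(count):
        last += step
        yield last

combined = list(itertools.chain(history, extrapolate(history[-1], 3)))
print(combined)  # [1.0, 1.5, 2.0, 2.5, 3.0, 3.5]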
Example 5
def student_notes(request, userid):

    try:
        student = Person.objects.get(find_userid_or_emplid(userid))
    except Person.DoesNotExist:
        student = get_object_or_404(NonStudent, slug=userid)

    if request.POST and 'note_id' in request.POST:
        # the "hide note" box was checked: process
        note = get_object_or_404(AdvisorNote, pk=request.POST['note_id'], unit__in=request.units)
        note.hidden = request.POST['hide'] == "yes"
        note.save()

    if isinstance(student, Person):
        notes = AdvisorNote.objects.filter(student=student, unit__in=request.units).order_by("-created_at")
        alerts = Alert.objects.filter(person=student, alerttype__unit__in=request.units, hidden=False).order_by("-created_at")
        form_subs = FormSubmission.objects.filter(initiator__sfuFormFiller=student, form__unit__in=Unit.sub_units(request.units),
                                                  form__advisor_visible=True)

        # decorate with .entry_type (and .created_at if not present so we can sort nicely)
        for n in notes:
            n.entry_type = 'NOTE'
        for a in alerts:
            a.entry_type = 'ALERT'
        for fs in form_subs:
            fs.entry_type = 'FORM'
            fs.created_at = fs.last_sheet_completion()

        items = list(itertools.chain(notes, alerts, form_subs))
        items.sort(key=lambda x: x.created_at, reverse=True)
        nonstudent = False
    else:
        notes = AdvisorNote.objects.filter(nonstudent=student, unit__in=request.units).order_by("-created_at")
        for n in notes:
            n.entry_type = 'NOTE'
        items = notes
        nonstudent = True
    
    show_transcript = False
    if 'UNIV' in [u.label for u in request.units]:
        show_transcript = True

    template = 'advisornotes/student_notes.html'
    if 'compact' in request.GET:
        template = 'advisornotes/student_notes_compact.html'
    context = {'items': items, 'student': student, 'userid': userid, 'nonstudent': nonstudent,
               'show_transcript': show_transcript, 'units': request.units}
    return render(request, template, context)
Example 6
    def quality(sketch_matrix, train_sketch, test_sketch, train_targets,
                test_targets):
        k = sketch_matrix.k
        L = sketch_matrix.L
        train_cols_count = np.shape(train_sketch)[1]
        if multilabel:
            test_targets_pred = []
        else:
            test_targets_proba = np.empty(len(test_targets))
        for i in range(np.shape(test_sketch)[1]):
            col_i = test_sketch[:, i:i + 1]
            similar_cols = SketchMatrix._get_similar_columns(
                col_i, train_sketch, k, L, train_cols_count)
            similar_targets = itertools.chain(
                *map(lambda c: train_targets[c], similar_cols))
            #             similar_targets = list(itertools.chain(*map(lambda c: train_targets[c], similar_cols)))
            if multilabel:
                target_proportions = statistics.get_multilabel_target_proportions(
                    similar_targets,
                    np.shape(similar_cols)[0])
                targets_pred = filter(
                    lambda target: target_proportions[target] >
                    multilabel_prediction_threshold, target_proportions)
                test_targets_pred.append(targets_pred)
#                 print "Col:", i, ", Target:", test_targets[i], ", Est. target: ", targets_pred
#                 print "Similar cols:", similar_cols
#                 print "Similar targets:", similar_targets
#                 print "--------------------------------------"
#                 _acc, _prec, _recall, _f1 = statistics.multi_label_scores([test_targets[i]], [targets_pred])
#                 fp = open(output_dir + "classification_sketch", "a")
#                 fp.write("Col: {0}, Target: {1}, Est. target: {2}\n".format(i, test_targets[i], targets_pred))
#                 fp.write("Accuracy: {0}, Precision: {1}, Recall: {2}, F1: {3}\n".format(_acc, _prec, _recall, _f1))
#                 fp.write("Similar cols: {0}\n".format(list(similar_cols)))
#                 fp.write("Similar targets: {0}\n".format(similar_targets))
#                 fp.write("--------------------------------------\n")
#                 fp.close()
            else:
                estimated_target_proba_i = statistics.predict_binary_target_proba(
                    similar_targets)
                test_targets_proba[i] = estimated_target_proba_i

        if multilabel:
            # TODO: compute also AUC?
            acc, prec, recall, f1 = statistics.multi_label_scores(
                test_targets, test_targets_pred)
            return -1., acc, prec, recall, f1
        else:
            return all_scores(test_targets, test_targets_proba)
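
Note that similar_targets is a chain iterator and can be consumed only once, which is presumably why the commented-out list(...) variant exists for the debugging code that reads it a second time. A small sketch of the difference, using made-up data:

import itertools

groups = {0: ['a'], 1: ['b', 'c']}
cols = [0, 1]

lazy = itertools.chain(*map(lambda c: groups[c], cols))
print(list(lazy))  # ['a', 'b', 'c']
print(list(lazy))  # [] -- the chain iterator is exhausted

eager = list(itertools.chain.from_iterable(groups[c] for c in cols))
print(eager)       # ['a', 'b', 'c'], safe to iterate repeatedly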
Example 7
	def get(self):
		if(not isUserAdmin(self)):
			self.session[LOGIN_NEXT_PAGE_KEY] = self.URL
			self.redirect("/")
			return
		today = datetime.datetime.now(timeZone)
		twoWeeksAgo = today + datetime.timedelta(days = - 14)
		unseenUsers = User.all().filter("lastOrder < ", twoWeeksAgo)
		unseenUsersOrdered = sorted(unseenUsers, key=getLastOrderDate)
		taskedUsers = User.all().filter("taskList >= ", None)
		
		template_values={
			'users': itertools.chain(taskedUsers, unseenUsersOrdered),
		}
		template = jinja_environment.get_template('templates/crm/crmTaskList.html')
		self.printPage("Felhaszn&aacute;l&oacute;k", template.render(template_values), False, False)
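
Passing the chain object straight into template_values works because the template iterates the combined user list exactly once; anything that needs the length or a second pass would have to materialise it with list() first. A minimal sketch with made-up names:

import itertools

tasked_users = ['alice', 'bob']
unseen_users = ['carol']

# a chain object is enough for a single pass, e.g. one template render
for name in itertools.chain(tasked_users, unseen_users):
    print(name)

# len() or repeated iteration needs a real list
users = list(itertools.chain(tasked_users, unseen_users))
print(len(users))  # 3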
Example 8
    def quality(sketch_matrix, train_sketch, test_sketch, train_targets, test_targets):
        k = sketch_matrix.k
        L = sketch_matrix.L
        train_cols_count = np.shape(train_sketch)[1]
        if multilabel:
            test_targets_pred = []
        else:
            test_targets_proba = np.empty(len(test_targets))
        for i in range(np.shape(test_sketch)[1]):
            col_i = test_sketch[:, i : i + 1]
            similar_cols = SketchMatrix._get_similar_columns(col_i, train_sketch, k, L, train_cols_count)
            similar_targets = itertools.chain(*map(lambda c: train_targets[c], similar_cols))
#             similar_targets = list(itertools.chain(*map(lambda c: train_targets[c], similar_cols)))
            if multilabel:
                target_proportions = statistics.get_multilabel_target_proportions(similar_targets, np.shape(similar_cols)[0])
                targets_pred = filter(lambda target: target_proportions[target] > multilabel_prediction_threshold, target_proportions)
                test_targets_pred.append(targets_pred)
#                 print "Col:", i, ", Target:", test_targets[i], ", Est. target: ", targets_pred
#                 print "Similar cols:", similar_cols
#                 print "Similar targets:", similar_targets
#                 print "--------------------------------------"
#                 _acc, _prec, _recall, _f1 = statistics.multi_label_scores([test_targets[i]], [targets_pred])
#                 fp = open(output_dir + "classification_sketch", "a")
#                 fp.write("Col: {0}, Target: {1}, Est. target: {2}\n".format(i, test_targets[i], targets_pred))
#                 fp.write("Accuracy: {0}, Precision: {1}, Recall: {2}, F1: {3}\n".format(_acc, _prec, _recall, _f1))
#                 fp.write("Similar cols: {0}\n".format(list(similar_cols)))
#                 fp.write("Similar targets: {0}\n".format(similar_targets))
#                 fp.write("--------------------------------------\n")
#                 fp.close()
            else:
                estimated_target_proba_i = statistics.predict_binary_target_proba(similar_targets)
                test_targets_proba[i] = estimated_target_proba_i
        
        if multilabel:
            # TODO: compute also AUC?
            acc, prec, recall, f1 = statistics.multi_label_scores(test_targets, test_targets_pred)
            return -1., acc, prec, recall, f1
        else:
            return all_scores(test_targets, test_targets_proba)
Example 9
def get_successful_analyze_task_results(group_result, chunked = False):
    '''
    Get the results of the successful tasks in `group_result`, ignoring revoked ones.

    Parameters
    ----------
    group_result : GroupResult
    chunked : bool, optional (default is False)
        If work has been divided into chunks.

    Returns
    -------
    list< tuple<id, gridfs (bool)>>
    '''

    results = []

    def check_n_add(res):
        ''' Check if `res` is ready and has not been revoked etc. '''
        try:
            if res is not None:
                result = res.get(propagate = False)
                # if chunked, result is list of multiple tasks -> unpack results
                if chunked:
                    result = Util.flatten(result)
                # no result available if e.g. exception raised
                if result is not None:
                    results.append(result)
        # TaskRevokedError
        except Exception:
            pass

    for res in group_result:
        check_n_add(res)

    # single result is of type list< tuple<id, gridfs (bool)>>
    # so flatten the result!
    return list(itertools.chain(*results))
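
The final chain(*results) turns the list of per-task result lists into the flat list of tuples promised by the docstring. A sketch of just that flattening step, with made-up result tuples:

import itertools

# each task contributed a list of (id, gridfs) tuples
results = [[('id1', True)], [('id2', False), ('id3', True)]]

flat = list(itertools.chain(*results))
print(flat)  # [('id1', True), ('id2', False), ('id3', True)]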
Example 10
    def list_symbols(self, date_range):
        libraries = self._get_libraries(date_range)
        return sorted(list(set(itertools.chain(*[l.library.list_symbols() for l in libraries]))))
Example 11
def main():
	global opts, args
	parser = OptionParser( version="%prog 2.0", usage='Usage: %prog [options] <dirs>...' )

	parser.set_defaults( **{
		'verbose': False,
		'dir_mode': False
	} )

	parser.add_option( '-d', '--dir-mode', action='store_true' )
	parser.add_option( '-v', '--verbose', action='store_true' )

	opts, args = parser.parse_args()

	dirs = args

	if not dirs:
		dirs.append( '*' )

	hive = {}

	if opts.verbose:
		print( "SCAN PHASE..." )


	if opts.dir_mode:
		files = set( x for x in itertools.chain( *( iglob( x ) for x in dirs ) ) if os.path.isdir( x ) )
	else:
		files = set( x for x in itertools.chain( *( iglob( x ) for x in dirs ) ) if os.path.isfile( x ) )

	if opts.verbose:
		print( "FOUND:", len( files ) )


	for path in files:

		entry = make_entry( os.path.basename( path ) )

		if not entry:
			continue

		entry.add_path( path )

		ean = entry.get_ean()
		id_date = entry.get_id_date()
		id_vol = entry.get_id_vol()

		new_entry = reduce( lambda x, y: x.absorb( y ), set( filter( None, [ ean in hive and hive[ean], id_date in hive and hive[id_date], id_vol in hive and hive[id_vol], entry] ) ) )

		info = {}

		if new_entry.get_ean():
			info[new_entry.get_ean()] = new_entry
		if new_entry.get_id_date():
			info[new_entry.get_id_date()] = new_entry
		if new_entry.get_id_vol():
			info[new_entry.get_id_vol()] = new_entry

		hive.update( info )



	if opts.verbose:
		print( 'FILTER & SORT PHASE...' )

	for entry in set( x for x in hive.values() if len( x.paths ) > 1 ):
		paths = sorted( entry.paths, key=( opts.dir_mode and dirsize or ( lambda x: os.stat( x ).st_size ) ), reverse=True )

		retain = paths.pop( 0 )
		if opts.verbose:
			print( '----:', retain )

		for path in paths:
			if opts.verbose:
				print( '   -:', path )
			reclaim( path )
		else:
			if opts.verbose:
				print()


		updated_path = reduce( lambda x, y: y.sub( '', x ), [make_entry.re_isbn, make_entry.re_maz], os.path.basename( retain ) )


		if entry.ean:
			updated_path = '[#{}]{}'.format( entry.ean, updated_path )
		if entry.id:
			updated_path = '[@{} {}#{:03d}]{}'.format( entry.id, entry.date or '0000-00', int( entry.vol or 0 ), updated_path )

		updated_path = os.path.join( os.path.dirname( retain ), updated_path )

		if retain != updated_path:
			try:
				os.renames( retain, updated_path )
			except:
				pass
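
The scan phase expands every pattern with iglob() and chains the resulting iterators into a single stream before filtering by type and de-duplicating. The same idea as a standalone sketch (the patterns are placeholders):

import itertools
import os
from glob import iglob

patterns = ['*.py', 'docs/*.md']

# one lazy stream of matches across all patterns, filtered and de-duplicated
files = set(p for p in itertools.chain.from_iterable(iglob(pat) for pat in patterns)
            if os.path.isfile(p))
print(len(files))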
Example 12
    for size, car in data:
        print car, size


test = "google"
compress_ex(test)
ret = compress(test)
r1, r2 = itertools.tee(ret)
r3, r4 = itertools.tee(r1)
print list(r2)
print ''.join(decompress(r3))
decompress_ex(r4)

a = 'google'
b = 'yahoo'
iterm = itertools.chain(a, b)
for i in iterm:
    print i
iterm = itertools.count(11)
for i in iterm:
    if i == 21:
        break
    print i

iterm = itertools.cycle(a)
for i, n in enumerate(iterm):
    if i == 21:
        break
    print n
L = [
    "Marc Bolan", "David Bowie", "Mick Ronson", "Ian Hunter", "Morgan Fisher",
Example 13
import itertools
'''
Method 2
'''
list1 = ['a', 'b', 'c']
list2 = [1, 2]
[zip(x, list2) for x in itertools.permutations(list1, len(list2))]



def sumOfTwo(a, b, v):
    '''
    Method 3
    '''
    combined = [zip(i,a) for i in itertools.permutations(b,len(a))]
    cc = map(sum,itertools.chain(*combined))
    if v in cc:
        return True
    else:
        return False


def sumOfTwo(a, b, v):
    '''
    Method 4
    '''
    #if len(a) == 0 or len(b) == 0:
    #    return False
    s = map(sum,itertools.product(a, b))
    #s = [sum(x) for x in list(itertools.product(a, b))]
    print s
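
Method 4 maps sum over the Cartesian product of a and b and then, presumably, checks whether v appears among the sums. The same check can be written directly with any(); this is an alternative sketch (the sum_of_two name is illustrative), not the original author's code:

import itertools

def sum_of_two(a, b, v):
    # True if some x in a and y in b satisfy x + y == v
    return any(x + y == v for x, y in itertools.product(a, b))

print(sum_of_two([1, 2, 3], [10, 20], 21))  # True (1 + 20)
print(sum_of_two([1, 2, 3], [10, 20], 99))  # False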
Example 14
    def neighbors(self, node):
        assert node.startswith(u"n_")

        edges = self.bipartite_graph.neighbors(node)
        neighbors = map(lambda edge: self.bipartite_graph.neighbors(edge), edges)
        return list(set(itertools.chain(*neighbors)) - set([node]))
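
As a standalone illustration of the neighbors() snippet above, here is the same two-hop lookup on a plain adjacency dict (the toy bipartite graph is made up):

import itertools

# 'n_*' nodes connect only to 'e_*' edge nodes and vice versa
adj = {
    'n_a': ['e_1'], 'n_b': ['e_1', 'e_2'], 'n_c': ['e_2'],
    'e_1': ['n_a', 'n_b'], 'e_2': ['n_b', 'n_c'],
}

def neighbors(node):
    assert node.startswith('n_')
    edges = adj[node]
    two_hop = itertools.chain(*(adj[e] for e in edges))
    return list(set(two_hop) - {node})

print(sorted(neighbors('n_b')))  # ['n_a', 'n_c']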