Example #1
def copy_static_files():
    static_func = info("Copy static files")

    # Everything under static/ except static/css, which is built and copied
    # separately (see the compress_css example below).
    folders = [
        x for x in glob('static/*') if os.path.join("static", "css") not in x
    ]

    os.mkdir('build/static')

    for f in folders:
        cp.cp(f, 'build/' + f)

    static_func("Done", True)
Example #2
def copy_template_files():
    template_func = info("Copying Template Files")

    template_files = itertools.chain.from_iterable(
        [glob(g) for g in template_file_globs])

    for f in template_files:
        dir_name = os.path.dirname(f)
        if dir_name != '':
            os.makedirs(os.path.join('build/', dir_name), exist_ok=True)
        cp.cp(f, 'build/' + f)

    template_func("Done", True)
Example #3
def copy_python_files():
    python_func = info("Copying Python Files")

    python_files = itertools.chain.from_iterable(
        [glob(g) for g in python_file_globs])

    for f in python_files:
        if f == 'build.py':
            continue

        dir_name = os.path.dirname(f)
        if dir_name != '':
            os.makedirs(os.path.join('build/', dir_name), exist_ok=True)
        cp.cp(f, 'build/' + f)

    python_func("Done", True)
Example #4
def mv(src, dst):
    import cp
    import rm
    cp.cp(src, dst)
    rm.rm(src)
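
A usage sketch for this mv, assuming cp.cp(src, dst) copies a single file and rm.rm(src) removes it (note that the Termulator example further down passes argv-style lists to cp.cp and mv.mv instead, so the signatures differ between projects):

mv('notes.txt', 'archive/notes.txt')  # copy, then remove the original
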
Example #5
    batchsize = 40

    with open('ktensor_noise_1em3.pickle', 'rb') as f:
        p = pickle.load(f)

    size = p['size']
    rank = p['rank']
    train_input = torch.LongTensor(p['train']['indexes']).t()
    train_value = torch.Tensor(p['train']['values'])
    #train_norm = torch.norm(train_value).item()
    test_input = torch.LongTensor(p['test']['indexes']).t()
    test_value = torch.Tensor(p['test']['values'])
    #test_norm = torch.norm(test_value).item()

    model = cp(size, rank)
    train_loader = DataLoader(TensorDataset(train_input, train_value),
                              batch_size=batchsize,
                              shuffle=True)
    test_loader = DataLoader(TensorDataset(test_input, test_value),
                             batch_size=batchsize,
                             shuffle=True)

    use_cuda = False
    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    #    class MNIST_index(datasets.MNIST):
    #        def __getitem__(self, index):
    #            img, target = super(MNIST_index, self).__getitem__(index)
    #            return (img, target, index)

    criterion = nn.MSELoss()
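
The excerpt stops right after constructing the loss. A minimal training-loop continuation using the excerpt's own names (assuming the cp model is an nn.Module whose forward maps index rows to predicted tensor entries; the optimizer choice is an assumption):

import torch.optim as optim

# Hypothetical continuation of the example above.
optimizer = optim.Adam(model.parameters(), lr=1e-3)
for epoch in range(10):
    model.train()
    for indexes, values in train_loader:
        optimizer.zero_grad()
        pred = model(indexes)            # reconstructed entries at these indexes
        loss = criterion(pred, values)   # MSE against the observed values
        loss.backward()
        optimizer.step()
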
Example #6
File: analyze.py Project: kenjsc/CAMpping
	def get_cp(self, tabfile):
		"Function returns the cp object generated from the passed in tab file."
		return cp.cp(tabfile)
Example #7
            if_e()
        elif send == 'z':
            if_z()
        elif send == '1':
            if_1()
        elif send == '2':
            if_2()
        elif send == '3':
            if_3()
        elif send == '4':
            if_4()
        elif send == '5':
            if_5()
        else:
            if_else()


if os_type().upper() == 'WINDOWS':
    screensize()
    features()
    download()
    cp()
    main()
elif os_type().upper() == 'LINUX':
    print(c.END + c.BLUE + 'supports only' + c.GREEN + ' Windows' + c.RED +
          ' , ' + c.GREEN + 'GNU/Linux' + c.BLUE + ' version coming soon ' +
          c.RED + '.' + c.GREEN + '.' + c.BLUE + '.')
    xinput('')
else:
    pass
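
The long elif chain above can be collapsed into a table-driven dispatch; a sketch using only names from the excerpt (the surrounding input loop is assumed):

# Equivalent dict dispatch for the chain above.
handlers = {'e': if_e, 'z': if_z, '1': if_1, '2': if_2,
            '3': if_3, '4': if_4, '5': if_5}
handlers.get(send, if_else)()
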
Example #8
if os.name != 'nt':
    print("You are not using Windows. Termulator is for Windows systems only.")
    exit(1)
user = os.getlogin()
while True:
    try:
        q = input(user + '$')
        q = q.split()
        print(q)
    except KeyboardInterrupt:
        exit(1)
    try:
        if q[0] == 'pwd':
            pwd.pwd()
        elif q[0] == "ls":
            ls.ls()
        elif q[0] == "clear":
            clear.clear()
        elif q[0] == "cd":
            cd.cd(q)
        elif q[0] == "cp":
            cp.cp(q)
        elif q[0] == "mv":
            mv.mv(q)
        elif q[0] == "touch":
            touch.touch(q)
        else:
            print(q[0] + " is not a recognizable command")  # default
    except IndexError:
        continue
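
One robustness note on q = q.split() above: plain str.split breaks on quoted arguments, while the standard library's shlex.split handles them:

import shlex

shlex.split('cp "my file.txt" backup/')  # ['cp', 'my file.txt', 'backup/']
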
Example #9
        print('this is the train loop:')
        print(i)
        ## what sess.run receives are tensors, not ndarrays
        Xtemp1a, Ytemp1a = sess.run([featuretrain, labeltrain])
        ### here we have to concatenate all the Xtemp1/Ytemp1 batches into Xtrain/Ytrain
        ## how to concatenate c1,c2,...,c5 into a dense feature (named featuretrain/test)?
        ## coding here

        # convert the np.ndarray into a string so that it can be fed into the SVM
        # does Y need a reshape?
        #print('Xtemp1a and its shape')
        #print(Xtemp1a.shape)

        ## here d=64, s=14
        ## note that for AlexNet, the conv5 layer outputs a tensor of 14*14*256
        Xtemp1 = cp(Xtemp1a, 64, 14, batch_size)

        Ytemp1 = np.array(Ytemp1a)

        #print('after cp, Xtemp1 and its shape:')
        #print(Xtemp1.shape)

        ### if else
        if i == 0:
            #print('this is for the loop i=0')
            Xtrain = Xtemp1
            Ytrain = Ytemp1
        else:
            ## horizontal: np.hstack; vertical:np.vstack
            #print('in the loop, the shape of Xtemp:')
            #print(Xtemp1.shape)
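
The excerpt is cut off inside the else branch; given the "horizontal: np.hstack; vertical: np.vstack" comment, the intended accumulation was presumably along these lines (the stand-in arrays are hypothetical, only the two stacking calls matter):

import numpy as np

Xtrain, Ytrain = np.ones((4, 8)), np.ones(4)    # stand-ins for earlier batches
Xtemp1, Ytemp1 = np.zeros((4, 8)), np.zeros(4)  # stand-ins for the new batch
Xtrain = np.vstack((Xtrain, Xtemp1))  # feature rows accumulate along axis 0
Ytrain = np.hstack((Ytrain, Ytemp1))  # labels are 1-D, so hstack appends them
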
Example #10
def compress_css(npm_module_root: str):
    sumfile_func = info("Creating sum.css")

    css = glob('static/css/*')

    sumfile = ""

    for c in css:
        with open(c, 'r') as f:
            sumfile += f.read()

    os.mkdir('build-staging/')
    with open('build-staging/sum.css', 'w') as f:
        f.write(sumfile)

    cp.cp('build-staging/sum.css', 'build-staging/sum-dce.css')

    sumfile_func("Done", True)
    '''purgecss
       --css build-staging/sum-dce.css
       --content templates/**/* templates/*.html
       -o build-staging/'''

    html = [
        val for val in glob("templates/**/*", recursive=True)
        if not os.path.isdir(val)
    ]

    purge_func = info("CSS Dead Code Elimination")

    purge_args = [
        'purgecss', '--css', 'build-staging/sum-dce.css', '--content', *html,
        '-o', 'build-staging/'
    ]
    purge_result = subprocess.run(purge_args,
                                  stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT,
                                  shell=windows)

    if purge_result.returncode != 0:
        command = " ".join(purge_args)
        purge_func(
            f"error while running \'{command}\':\n{purge_result.stdout.decode('utf8')}",
            False)

    purge_func("Done", True)
    '''NODE_PATH=/usr/lib/node_modules
       postcss
       build-staging/sum-dce.css
       --config postcss.config.js
       --no-map
       -o
       build-staging/sum-min.css'''

    minify_func = info("CSS Minification")

    minify_args = [
        'postcss', 'build-staging/sum-dce.css', '--config',
        'postcss.config.js', '--no-map', '-o', 'build-staging/sum-min.css'
    ]

    current_environ = os.environ.copy()
    if windows:
        current_environ.update(
            {'NODE_PATH': os.path.join(npm_module_root, 'node_modules')})
    else:
        current_environ.update(
            {'NODE_PATH': os.path.join(npm_module_root, 'lib/node_modules')})
    minify_result = subprocess.run(minify_args,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   env=current_environ,
                                   shell=windows)

    if minify_result.returncode != 0:
        command = " ".join(minify_args)
        minify_func(
            f"error while running \'{command}\':\n{minify_result.stderr.decode('utf8')}",
            False)

    minify_func("Done", True)

    #
    # copy css
    #

    css_copy_func = info("Copy CSS")

    os.makedirs('build/static/css', exist_ok=True)
    cp.cp('build-staging/sum-min.css', 'build/static/css/sum.css')

    css_copy_func("Done", True)
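
Neither windows nor npm_module_root is defined in this excerpt. A sketch of plausible definitions (assumptions, not the project's code): the function joins node_modules or lib/node_modules onto the root, which matches npm's global prefix as reported by npm prefix -g.

import os
import subprocess

# windows: purgecss/postcss are .cmd shims on Windows, hence shell=windows above.
windows = os.name == 'nt'
npm_module_root = subprocess.run(['npm', 'prefix', '-g'],
                                 capture_output=True, text=True,
                                 shell=windows).stdout.strip()
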
Example #11
	def all_chem_adjust(self, mz_heat, cp_heat, output):
		"""Does vector compression, ignoring biological activity. Uses all baskets from a run, not
		just those that are within a biological cutoff range.
		"""
		new_cp = cp.cp(None)
		for feat in cp_heat.features():
			new_cp[str(feat)] = cp.feature(str(feat))
		
		runcount = 0
		n = 0
		av_dist = 0
			
		widgets = ['VectorMove: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ',\
		ETA(), ' ', FileTransferSpeed()]
		
		pbar = ProgressBar(widgets=widgets, maxval=len(cp_heat.fingerprints())).start()
		
		largest_scaler = -numpy.inf  # running max of the log-scale scaler
		#This is a dictionary of run (as string) - [run_vector, add_vector] pairs
		add_vectors = dict()
		for run in cp_heat.fingerprints():
			output.write("\t" + str(run) + "\n")
			runcount += 1
			pbar.update(runcount)
			
			labels = run.keys()
			#This is the original vector fingerprint in log scale
			run_vec = numpy.log(run.values(), dtype=float)
			print run_vec
			#This is going to be a list of vectors, one from each basket, in log scale
			bask_vectors = []
			#This is the number of vectors, one for each connection, with multiple connections per basket
			vector_num = 0
			for bask in mz_heat.grab_basks(str(run)):
				#If there's only one run, then the vector has nothing to connect to
				if len(bask.keys()) <= 1:
					continue
				#This is the average value of the scaler, for use in line plots later
				av_scaler = 0.0
				for connect_run in bask.keys():
					cprun = connect_run.replace("_", "")
					#Don't connect the query run to itself, that's not useful
					if cprun == str(run):
						continue
					#If the run isn't in the cp_heatmap, just continue; that means it also wasn't used for creating synthetic fingerprints
					if cprun not in cp_heat.map:
						continue
					#Get the vector difference between the target and the source, but it's in log scale. Also, make sure label values are
					# in the same order between the vectors. Remember log scale!
					vec_dif = numpy.log(numpy.array([cp_heat[cprun][val] for val in labels]) - run_vec)
					scaler = self.bask_prob(bask, cp_heat[cprun]) + self.bask_prob(bask, run)
					print scaler
					if scaler >= largest_scaler:
						largest_scaler = scaler
					av_scaler = numpy.logaddexp(av_scaler, scaler)
					bask_vectors.append(vec_dif + scaler)
					print "bask_ind", vec_dif+scaler
					vector_num += 1
				if not vector_num == 0:
					output.write("\t\t\t{}; {}\n".format(str(bask), numpy.exp(av_scaler) / vector_num))
			#This is the sum of the basket vectors, still in log scale
			print "all", bask_vectors
			add_vector = logsumexp(bask_vectors, axis=0)
			print "summed", add_vector
			if not vector_num == 0:
				add_vector -= numpy.log(vector_num)
			print "averaged", add_vector
			print 'large', largest_scaler
#			if not largest_scaler == 0:
#				add_vector /= 2 * largest_scaler
			add_vectors[str(run)] = [run_vec, add_vector]

		for run, (run_vec, add_vector) in add_vectors.items():
			add_vector = numpy.exp(add_vector - (numpy.log(2) + largest_scaler))
			run_vec = numpy.exp(run_vec) + add_vector
			new_cp[str(run)] = cp.fingerprint(str(run))
			for param, value in zip(labels, run_vec):
				new_cp[param][str(run)] = new_cp[str(run)][param] = value
				
			output.write("\t\tRun Movement: {}\n".format(numpy.sqrt(add_vector.dot(add_vector))))
				
			av_dist += numpy.sqrt(add_vector.dot(add_vector))
			n += 1
		pbar.finish()

		output.write("Average Movement: " + str(av_dist / n) + "\n")			
		return new_cp
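
The log-space bookkeeping above is an average computed in log scale: logsumexp(vs, axis=0) - log(n) equals log(mean(exp(vs))), which is exactly what the add_vector arithmetic does. A small self-contained check:

import numpy as np
from scipy.special import logsumexp

vs = np.log(np.array([[1.0, 2.0], [3.0, 4.0]]))  # two log-scale vectors
avg = np.exp(logsumexp(vs, axis=0) - np.log(2))  # back to linear scale
print(np.allclose(avg, [2.0, 3.0]))              # True
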
Example #12
	def chem_adjust(self, mz_heat, cp_heat, output):
		new_cp = cp.cp(None)
		for feat in cp_heat.features():
			new_cp[str(feat)] = cp.feature(str(feat))
		
		runcount = 0
		n = 0
		av_dist = 0
			
		widgets = ['VectorMove: ', Percentage(), ' ', Bar(marker=RotatingMarker()), ' ',\
		ETA(), ' ', FileTransferSpeed()]
		
		pbar = ProgressBar(widgets=widgets, maxval=len(cp_heat.fingerprints())).start()

		for run in cp_heat.fingerprints():
			output.write("\t" + str(run) + "\n")
			runcount += 1
			pbar.update(runcount)

			all_basks = [bask for bask in mz_heat.grab_basks(str(run))]

			inruns = set([inrun + "_" for inrun in cp_heat.cluster(str(run), max_tolerance=0.5, min_tolerance=0.65)])
#			antiruns = set([antirun + "_" for antirun in cp_heat.anticluster(str(run), pmax=-0.2, fraction=1)])
			
			basks = []
			for bask in all_basks:
				bruns = set(bask.keys())
#				if len(bruns & antiruns) > 0:
#					continue
				if len(bruns & inruns) < 2:
					continue
				basks.append(bask)
			
			labels = run.keys()
			run_vec = numpy.array(run.values(), dtype=float)
			add_vector = numpy.zeros(len(labels))
			vector_num = 0
			largest_scaler = 0.0
			for bask in basks:
				av_scaler = 0.0
				for connect_run in bask.keys():
					cprun = connect_run.replace("_", "")
					if cprun == str(run):
						continue
					if cprun not in cp_heat.map:
						continue
					vec_dif = numpy.array([cp_heat[cprun][val] for val in labels]) - run_vec
					scaler = self.bask_prob(bask, cp_heat[cprun]) * self.bask_prob(bask, run)
					if scaler >= largest_scaler:
						largest_scaler = scaler
					av_scaler += scaler
					add_vector += vec_dif * scaler
					vector_num += 1
				if not vector_num == 0:
					av_scaler /= vector_num
					output.write("\t\t\t{}; {}\n".format(str(bask), av_scaler))
			if not vector_num == 0:
				add_vector /= vector_num
			if not largest_scaler == 0:
				add_vector /= 2 * largest_scaler

			run_vec += add_vector
			new_cp[str(run)] = cp.fingerprint(str(run))
			for param, value in zip(labels, run_vec):
				new_cp[param][str(run)] = new_cp[str(run)][param] = value
				
			output.write("\t\tRun Movement: {}\n".format(numpy.sqrt(add_vector.dot(add_vector))))
				
			av_dist += numpy.sqrt(add_vector.dot(add_vector))
			n += 1
		pbar.finish()

		output.write("Average Movement: " + str(av_dist / n) + "\n")			
		return new_cp
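
A toy illustration of the vector move in chem_adjust: each connected run pulls the query fingerprint toward itself, weighted by the basket-probability product; the net move is averaged over the connections and damped by 2 * largest_scaler (all values below are made up):

import numpy as np

run_vec = np.array([1.0, 1.0])
neighbors = [np.array([3.0, 1.0]), np.array([1.0, 5.0])]  # connected fingerprints
scalers = [0.5, 0.25]                                     # probability weights
move = sum((n - run_vec) * s for n, s in zip(neighbors, scalers))
move = move / len(neighbors) / (2 * max(scalers))
print(run_vec + move)  # [1.5 1.5]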