Example #1
    def write_to_file(self, codelisting):
        self.assertEqual(
            type(codelisting), CodeListing,
            "passed a non-Codelisting to write_to_file:\n%s" % (codelisting,)
        )
        print('writing to file', codelisting.filename)
        write_to_file(codelisting, os.path.join(self.tempdir, 'superlists'))
Example #2
def generateRSA():
    try:
        print('Generating RSA Key...')
        bits = 0

        while bits < 32 or bits > 2048 or (math.log(bits, 2) + 1) % 1 != 0:

            bits = inp()

        p = generateLargePrime(int(bits / 2))
        q = generateLargePrime(int(bits / 2))

        n = int(p * q)  #key length

        O = (p - 1) * (q - 1)  #Euler's totient function

        e = 2**16 + 1  #Default value -- a well known prime that works well most of the time

        if gcd(e, O) != 1:  #must be coprime
            e = generateLargePrime(17)

        d = EEA(O, e, 1, 0, 0, 1)

        #prevent d with negative value
        if d < 0:
            d += (1 + abs(d) // O) * O

        write_to_file(n, e, d, p1, n1, p2, n2)
        print("Key 생성 완료!")
        return [n, e, d]
    except FileNotFoundError:
        print('Directory does not exist or file not found!')
        return False
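A note on the d = EEA(O, e, 1, 0, 0, 1) step above: d is the modular inverse of e modulo O, so on Python 3.8+ the same value can be obtained (or cross-checked) with the three-argument pow built-in. A minimal sketch, with small hypothetical numbers standing in for the generated key material:

# Cross-check of the private exponent via the built-in modular inverse (Python 3.8+).
# e and O are small hypothetical values standing in for the ones computed above.
e = 2**16 + 1
O = 3120
d = pow(e, -1, O)        # modular inverse: (d * e) % O == 1
assert (d * e) % O == 1  # holds because gcd(e, O) == 1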
Example #3
    def test_simple_case(self):
        # done
        listing = CodeListing(filename='foo.py', contents='abc\ndef')
        write_to_file(listing, self.tempdir)
        with open(os.path.join(self.tempdir, listing.filename)) as f:
            self.assertEqual(f.read(), listing.contents + '\n')
        self.assertTrue(listing.was_written)
Example #4
    def test_simple_case(self):
        # done
        listing = CodeListing(filename='foo.py', contents='abc\ndef')
        write_to_file(listing, self.tempdir)
        with open(os.path.join(self.tempdir, listing.filename)) as f:
            self.assertEqual(f.read(), listing.contents + '\n')
        self.assertTrue(listing.was_written)
Example #5
    def write_to_file(self, codelisting):
        self.assertEqual(
            type(codelisting), CodeListing,
            "passed a non-Codelisting to write_to_file:\n%s" % (codelisting,)
        )
        print('writing to file', codelisting.filename)
        write_to_file(codelisting, self.tempdir)
Example #6
    def test_multiple_files(self):
        listing = CodeListing(filename='foo.py, bar.py', contents='abc\ndef')
        write_to_file(listing, self.tempdir)
        with open(os.path.join(self.tempdir, 'foo.py')) as f:
            self.assertEqual(f.read(), listing.contents + '\n')
        with open(os.path.join(self.tempdir, 'bar.py')) as f:
            self.assertEqual(f.read(), listing.contents + '\n')
        self.assertTrue(listing.was_written)
Example #7
    def test_multiple_files(self):
        listing = CodeListing(filename='foo.py, bar.py', contents='abc\ndef')
        write_to_file(listing, self.tempdir)
        with open(os.path.join(self.tempdir, 'foo.py')) as f:
            self.assertEqual(f.read(), listing.contents + '\n')
        with open(os.path.join(self.tempdir, 'bar.py')) as f:
            self.assertEqual(f.read(), listing.contents + '\n')
        self.assertTrue(listing.was_written)
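The two tests above pin down only a small part of the behavior: write_to_file appends a trailing newline, handles a comma-separated filename, and marks the listing as written. A minimal sketch that would satisfy just those cases (not the project's actual implementation, which presumably does more):

import os

def write_to_file(codelisting, cwd):
    # Sketch only: covers the simple and multiple-file cases exercised above.
    for filename in codelisting.filename.split(', '):
        with open(os.path.join(cwd, filename), 'w') as f:
            f.write(codelisting.contents + '\n')
    codelisting.was_written = True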
Example #8
    def put_tweets(self):
        """Create a retweet df of the tweets already collected and write it to the corresponding retweet csv file."""
        screen_name = self.screen_name
        self.get_user_retweets()
        self.retweet_df["date"] = pd.to_datetime(self.retweet_df['created_at']).dt.date
        self.retweet_df = self.retweet_df[self.retweet_df["date"] >= self.__START_DATE]
        self.retweet_df = self.retweet_df.drop("date", axis=1)
        write_to_file(self.file_path, self.retweet_df, self.screen_name)
        print("--- done for {} ---".format(screen_name))
Example #9
    def assert_write_to_file_gives(self, old_contents, new_contents,
                                   expected_contents):
        listing = CodeListing(filename='foo.py', contents=new_contents)
        with open(os.path.join(self.tempdir, 'foo.py'), 'w') as f:
            f.write(old_contents)

        write_to_file(listing, self.tempdir)

        with open(os.path.join(self.tempdir, listing.filename)) as f:
            actual = f.read()
            self.assertMultiLineEqual(actual, expected_contents)
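A hypothetical call to this helper might look like the following (the contents are illustrative, not taken from the test suite, and assume the simple full-replacement behavior shown in test_simple_case):

    def test_existing_file_is_replaced(self):
        # Hypothetical test: old contents are overwritten by the new listing.
        self.assert_write_to_file_gives(
            old_contents='some old text\n',
            new_contents='abc\ndef',
            expected_contents='abc\ndef\n',
        )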
Example #10
    def write_to_file(self, codelisting):
        self.assertEqual(
            type(codelisting), CodeListing,
            "passed a non-Codelisting to write_to_file:\n%s" % (codelisting,))
        print('writing to file', codelisting.filename)
        write_to_file(codelisting, os.path.join(self.tempdir, 'superlists'))
        filenames = codelisting.filename.split(', ')
        for filename in filenames:
            with open(os.path.join(self.tempdir, 'superlists', filename)) as f:
                print('wrote:')
                print(f.read())
Example #11
    def write_to_file(self, codelisting):
        self.assertEqual(
            type(codelisting), CodeListing,
            "passed a non-Codelisting to write_to_file:\n%s" % (codelisting,)
        )
        print('writing to file', codelisting.filename)
        write_to_file(codelisting, os.path.join(self.tempdir, 'superlists'))
        filenames = codelisting.filename.split(', ')
        for filename in filenames:
            with open(os.path.join(self.tempdir, 'superlists', filename)) as f:
                print('wrote:')
                print(f.read())
Example #12
    def assert_write_to_file_gives(
        self, old_contents, new_contents, expected_contents
    ):
        listing = CodeListing(filename='foo.py', contents=new_contents)
        with open(os.path.join(self.tempdir, 'foo.py'), 'w') as f:
            f.write(old_contents)

        write_to_file(listing, self.tempdir)

        with open(os.path.join(self.tempdir, listing.filename)) as f:
            actual = f.read()
            self.assertMultiLineEqual(actual, expected_contents)
Example #13
def main():
    from read_file import read_file
    from create_drug_dict import create_drug_dict
    from create_drug_list import create_drug_list
    from write_to_file import write_to_file
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    input_data = read_file(input_file)  # reads the input file
    drugDict = create_drug_dict(input_data)  # creates a dictionary from the input data
    output_data = create_drug_list(drugDict)  # creates a list of tuples from the dictionary
    write_to_file(output_data, output_file)  # writes the output file to disk
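For context, a minimal sketch of what the write_to_file step at the end of this pipeline might look like, given that output_data is a list of tuples (a hypothetical implementation, not the project's own module):

def write_to_file(output_data, output_file):
    # Sketch only: one comma-separated line per tuple in output_data.
    with open(output_file, 'w') as f:
        for row in output_data:
            f.write(','.join(str(field) for field in row) + '\n')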
Example #14
    def get_user_retweets(self):
        screen_name = self.screen_name
        index = 1
        pbar = tqdm.tqdm(total=len(self.tweet_ids))
        for _, row in self.tweet_ids.iterrows():
            tweet_id = row['id']
            retweets = self.get_retweets(tweet_id)
            self.retweet_df = self.retweet_df.append(retweets)
            if index % 74 == 0:
                print("\t> writing tweets")
                write_to_file(self.file_path, self.retweet_df, self.screen_name)
            index += 1
            pbar.update(1)
        pbar.close()
        self.retweet_df.drop(
            self.retweet_df.loc[self.retweet_df['original_author'] == screen_name].index,
            inplace=True,
        )
Example #15
def find_list_judges(url):
    """get list judges: inser url, return list"""
    session = requests.Session()
    req = session.get(url)
    if req.status_code == 200:
        soup = bs4(req.content, 'html.parser')
        judge_cards = soup.find(
            'li',
            attrs={
                "class":
                "b-menu-item js-menu-item b-menu-item--active js-menu-item--active b-menu-item--is_submenu last"
            })
        judge_cards = judge_cards.find(
            "ul", attrs={"class": 'b-menu js-menu menu accordion'})
        write_to_file.write_to_file(judge_cards)
        return judge_cards
    else:
        return "Not found"
    return "list"
Example #16
MP_IDs = []
for formula in Mater2Do:
  Eng_ID = MP().get_data(formula, "vasp", "e_above_hull")
  for i in Eng_ID:
    # select materials with zero formation energy (residing at convex hull)
    if i['e_above_hull'] == 0:
      MP_IDs.append(i['material_id'])

# Get data from Materials Project database
# Write OCEAN input file
for i in MP_IDs:
  compound = MPEntry(i, ctr_atom)
  os.chdir(here)
  new_folder = compound.formula + '_' + compound.ID
  new_file = compound.formula + '.in'
  os.mkdir(new_folder)
  os.chdir(here + '/' + new_folder)
  # OCEAN.head stores the parameters common to every spectrum calculation
  shutil.copy(ocean_tmplate_loc + '/' + "OCEAN.head", new_file)
  fout = open(new_file, "a")
  write_to_file(compound, fout, ctr_atom)
  fout.write("# Spectral broadening in eV\n")
  fout.write("cnbse.broaden   " + str(K_broad[ctr_atom]) + '\n')
  fout.write("#LDA+U for QE\nldau{\nlda_plus_u=.true. ")
  # write Hubbard U parameter, if not transition metal, then U = 0
  ntypat = compound.structure.ntypesp
  for j in range(ntypat):
    fout.write(",\nHubbard_U(" + str(j + 1) + ")=" + str(U_eff.get(compound.elements[j], 0)))
  fout.write('\n}\n')
  fout.close()
Example #17
    bits = inp()

  p = generateLargePrime(int(bits / 2))
  q = generateLargePrime(int(bits / 2))

  n = int(p * q)  #key length

  O = (p - 1) * (q - 1)  #Euler's totient function

  e = 2**16 + 1  #Default value -- a well known prime that works well most of the time

  if gcd(e, O) != 1:  #must be coprime
      e = generateLargePrime(17)

  raw_message = input("Please enter a message to be encrypted: ")
  m = convert_to_hex(raw_message)  #encoded hex message to be used for RSA

  #d * e = 1 (mod O) => linear diophantine: e(d) + O(y) = 1 -- trying to find d
  #Implement Extended Euclidean Algorithm
  d = EEA(O, e, 1, 0, 0, 1)

  #prevent d with negative value
  if d < 0:
      d += (1 + abs(d) // O) * O

  c = cipher.encrypt(int(m, 16), e, n)
  write_to_file(raw_message, m, c, n, e, d)
  print("Message successfully encrypted and details stored in RSA.txt!")
except:
  print("Something bad happened. This either means you did something naughty or Parsa's program has bug(s). This error message is useless for debugging, but at least it's not ugly. If you know what went wrong, please submit a pull request! Have a nice day!")
Example #18
        ## load in image
        img = cv2.imread(join(path, file))

        ## preprocess image
        preprocessed_lines = preprocess_image(img)

        ## get root filename for writing the transcribed lines
        outfile = file.split('.')[0]

        ## classify lines
        for line in preprocessed_lines:
            sentence = ''
            ## neural network call here
            sw = SlidingWindow()
            sw.WRITE_WINDOWS = False  # If True, the input images of the cnn will be written to a file
            sw.load_image(line)
            transcribed_lines = sw.get_letters()

            ## apply postprocessing
            postp = Bayesian_processor()
            final_letter = postp.apply_postprocessing(transcribed_lines)
            sentence = sentence + str(final_letter)

            ## write transcribed sentence to file
            write_to_file(sentence, path, outfile)

        print("Succesfully transcribed \"%s\" to \"%s\"." % (file, outfile))

    print("Finished transcribing.")
Example #19
    '--outEco',
    type=str,
    default="ECO:0000000",
    help='evidence code (ECO) for the output file')
if __name__ == "__main__":
    args = parser.parse_args()
    input_file_name = args.inputFile
    seperator = args.seperator

    geneColumn = args.geneColumn
    ecoColumn = args.ecoColumn
    annotationColumn = args.annotationColumn
    gene_info_ecoID_Dict = file_reader(input_file_name, seperator, geneColumn, annotationColumn, ecoColumn)
    eco_term_dict = read_ecoObo()
    eco_parents_dict = form_all_eco_parents_dict(eco_term_dict)

    all_gene_annotation_ecoIDs_propagated = dict()
    for gene_id in gene_info_ecoID_Dict:
        if gene_id not in all_gene_annotation_ecoIDs_propagated:
            all_gene_annotation_ecoIDs_propagated[gene_id] = dict()
        for annotation in gene_info_ecoID_Dict[gene_id]:
            eco_list = set()
            for eco_id in gene_info_ecoID_Dict[gene_id][annotation]:
                eco_list.add(eco_id)
                for parent_ecoID in eco_parents_dict[eco_id]:
                    eco_list.add(parent_ecoID)
            all_gene_annotation_ecoIDs_propagated[gene_id][annotation] = eco_list
    output_eco = args.outEco
    write_to_file(input_file_name, all_gene_annotation_ecoIDs_propagated, output_eco)
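The propagation loop above unions each annotation's ECO IDs with all of their ancestors from eco_parents_dict. A toy illustration of that step (the IDs and parent links here are made up for illustration, not real ECO relationships):

# Toy illustration of the parent-propagation step (hypothetical IDs and parents).
eco_parents_dict = {
    'ECO:0000501': ['ECO:0000000'],
    'ECO:0000269': ['ECO:0000006', 'ECO:0000000'],
}
annotated = ['ECO:0000501', 'ECO:0000269']

eco_list = set()
for eco_id in annotated:
    eco_list.add(eco_id)
    for parent_ecoID in eco_parents_dict[eco_id]:
        eco_list.add(parent_ecoID)

print(sorted(eco_list))
# ['ECO:0000000', 'ECO:0000006', 'ECO:0000269', 'ECO:0000501']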