def get_wd(self):
		with open(self.filename) as csvfile:
			d_file = csv_DictReader(csvfile)
			for row in d_file:
				self.count = str(row['count'])
				self.status = str(row['status'])
				print("Received From : ",self.filename,self.count)
Example #2
def send_by_ftp(dbug_send_ftp, ftp_cred, send_filename, save_as_filename,
                ftp_remote_dir, ftp_timeout):
    here = "send_by_ftp"
    result = ["FTP attempt for :" + send_filename]

    # Use Following two lines for debug to check parameters received by send_by_ftp
    #print("send_by_ftp: dbug_send_ftp: ",dbug_send_ftp," ftp_cred: ", ftp_cred, \
    #	" send_filename: ",send_filename, "save_as_filename: ",save_as_filename, "ftp_remote_dir: ",ftp_remote_dir)

    try:
        with open(ftp_cred, 'r') as csvfile:
            cred_file = csv_DictReader(csvfile)
            ind = 0
            for row in cred_file:
                if ind == 0:
                    ftp_user = row['user']
                    pr(dbug_send_ftp, here, "ftpuser : "******"ftp password : "******"/" + ftp_remote_dir
                    pr(dbug_send_ftp, here, "ftp directory : ", ftp_directory)
                    ftp_site = str(row['site'])
                    pr(dbug_send_ftp, here, "ftp site : ", ftp_site)
                else:
                    result.append("Error more than one line in FTP creds file")
                    return (result)
                ind += 1
        ftp = FTP()
        debug_level = 1
        if dbug_send_ftp:
            ftp.set_debuglevel(debug_level)
            print("--------FTP Debug on------------ftp_timeout : ",
                  ftp_timeout, "degug_level : ", debug_level)
        pr(dbug_send_ftp, here, "Will try to connect to : ", ftp_site)
        pr(dbug_send_ftp, here, "WTimeout is set to : ", ftp_timeout)
        ftp.connect(ftp_site, 21, timeout=ftp_timeout)
        pr(dbug_send_ftp, here, "logging in here is ftp welcome message : ",
           ftp.getwelcome())
        ftp.login(user=ftp_user, passwd=ftp_password)
        pr(dbug_send_ftp, here, "logged in to : ", ftp_site)
        ftp.cwd(ftp_directory)
        pr(dbug_send_ftp, here, "directory changed to : ", ftp_directory)

        sendfile = open(send_filename, 'rb')
        result.append("Will try to send : " + send_filename + " : as : " +
                      save_as_filename + " to : " + ftp_site + "/" +
                      ftp_directory)
        ftp.storbinary('STOR ' + save_as_filename, sendfile)
        sendfile.close()

        ftp.quit()
        pr(dbug_send_ftp, here, "ftp quitedfrom : ", ftp_site)
        pr(dbug_send_ftp, here, "Done FTP", save_as_filename)
        return (result)
    except Exception:
        pr(dbug_send_ftp, here, "Failed FTP", save_as_filename)
        result.append("Error Trying To Send " + send_filename + " file by FTP")
    return (result)
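# A minimal usage sketch of send_by_ftp (the file names, remote directory and
# timeout below are hypothetical; the credentials CSV is assumed to hold the
# 'user', 'password' and 'site' columns read above, and pr is the module's
# debug-print helper):
result = send_by_ftp(dbug_send_ftp=False,
                     ftp_cred="ftp_creds.csv",
                     send_filename="data.csv",
                     save_as_filename="data_remote.csv",
                     ftp_remote_dir="uploads",
                     ftp_timeout=30)
for line in result:
    print(line)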
Example #3
def read_csv_as_dicts(filename,
                      newline="",
                      delimiter=",",
                      quotechar='"',
                      encoding="utf-8"):
    """Read in CSV file into a list of :class:`dict`.

    This offers an easy import functionality of your data from CSV files.
    See more at
    `csv <https://docs.python.org/3/library/csv.html>`_.

    CSV file structure:
    1) The header row contains the column names.
    2) A row contains one dataset.
    3) A column contains one specific attribute.

    Recommendation: Name the column name the way you want the attribute to be
    named later in your Dataverse object. See the
    `pyDataverse templates <https://github.com/AUSSDA/pyDataverse_templates>`_
    for this. The created :class:`dict` can later be used for the `set()`
    function to create Dataverse objects.

    Parameters
    ----------
    filename : str
        Filename with full path.
    newline : str
        Newline character.
    delimiter : str
        Cell delimiter of CSV file. Defaults to ','.
    quotechar : str
        Quote-character of CSV file. Defaults to '"'.
    encoding : str
        Character encoding of file. Defaults to 'utf-8'.

    Returns
    -------
    list
        List with one :class:`dict` per row. The keys of each :class:`dict` are
        named after the column names.

    """
    assert isinstance(filename, str)
    assert isinstance(newline, str)
    assert isinstance(delimiter, str)
    assert isinstance(quotechar, str)
    assert isinstance(encoding, str)

    with open(filename, "r", newline=newline, encoding=encoding) as csvfile:
        reader = csv_DictReader(csvfile,
                                delimiter=delimiter,
                                quotechar=quotechar)
        data = []
        for row in reader:
            data.append(dict(row))
    assert isinstance(data, list)
    return data
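# A minimal usage sketch, assuming a hypothetical file "datasets.csv" whose
# header row names the columns (here: title, author, year):
if __name__ == "__main__":
    with open("datasets.csv", "w", newline="", encoding="utf-8") as f:
        f.write('title,author,year\n')
        f.write('My Study,"Doe, J.",2020\n')
    records = read_csv_as_dicts("datasets.csv")
    print(records)
    # -> [{'title': 'My Study', 'author': 'Doe, J.', 'year': '2020'}]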
Example #4
 def _m_rr_reactions(self, rules_rall_path):
     rr_reactions = {}
     try:
         #with open(rules_rall_path, 'r') as f:
         #    reader = csv.reader(f, delimiter = '\t')
         #    next(reader)
         #    rule = {}
         #    for row in reader:
         for row in csv_DictReader(open(rules_rall_path), delimiter='\t'):
             #NOTE: as of now all the rules are generated using MNX,
             #but other databases may be used; we handle this case
             #WARNING: a rule can have multiple products, so we need to separate them
             products = {}
             for i in row['Product_IDs'].split('.'):
                 mnxm = self._checkMNXMdeprecated(i)
                 if mnxm not in products:
                     products[mnxm] = 1
                 else:
                     products[mnxm] += 1
             try:
                 #WARNING: one reaction rule can have multiple reactions associated with it
                 #(to change once subpaths can be set from the multiple reaction numbers);
                 #we assume each rule has multiple unique reactions - see the sketch after this method
                 if row['# Rule_ID'] not in rr_reactions:
                     rr_reactions[row['# Rule_ID']] = {}
                 if row['Reaction_ID'] in rr_reactions[row['# Rule_ID']]:
                     self.logger.warning('There is already reaction ' +
                                         str(row['Reaction_ID']) +
                                         ' in reaction rule ' +
                                         str(row['# Rule_ID']))
                 rr_reactions[row['# Rule_ID']][row['Reaction_ID']] = {
                     'rule_id': row['# Rule_ID'],
                     'rule_score': float(row['Score_normalized']),
                     'reac_id':
                     self._checkMNXRdeprecated(row['Reaction_ID']),
                     'subs_id':
                     self._checkMNXMdeprecated(row['Substrate_ID']),
                     'rel_direction': int(row['Rule_relative_direction']),
                     'left': {
                         self._checkMNXMdeprecated(row['Substrate_ID']): 1
                     },
                     'right': products
                 }
             except ValueError:
                 self.logger.error('Problem converting rel_direction: ' +
                                   str(row['Rule_relative_direction']))
                 self.logger.error('Problem converting rule_score: ' +
                                   str(row['Score_normalized']))
         return rr_reactions
     except FileNotFoundError as e:
         self.logger.error('Could not read the rules_rall file (' +
                           str(rules_rall_path) + ')')
         return {}
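# For orientation, the nested structure _m_rr_reactions builds, keyed first by
# '# Rule_ID' and then by 'Reaction_ID' (all identifiers and scores below are
# hypothetical placeholders, not values from a real rules file):
#
#     {
#         'RR-02-abc123-16-F': {
#             'MNXR100123': {
#                 'rule_id': 'RR-02-abc123-16-F',
#                 'rule_score': 0.72,
#                 'reac_id': 'MNXR100123',
#                 'subs_id': 'MNXM10',
#                 'rel_direction': 1,
#                 'left': {'MNXM10': 1},
#                 'right': {'MNXM20': 1, 'MNXM1': 2},
#             }
#         }
#     }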
Example #5
 def read_file(self):
     #	Set sensor data lists with initial values:
     #	read them in from the file if it exists; if not, set up
     #	defaults for just one sensor.
     #	Later, any sensors that are connected will be added.
     #global my_sensors
     #global smartplug_info
     #global config
     here = "mysensors.read_file"
     #pr(here, "dictionary of my_sensors : ", self.__dict__ )
     with open(self.__s_filename, 'r') as csvfile:
         d_file = csv_DictReader(csvfile)
         self.width = 0
         ind = 0
         for row in d_file:
             self.number[ind] = row['number']
             self.code[ind] = row['code']
             self.connected[ind] = False
             self.reading[ind] = -108
             self.last_logged[ind] = -108
             self.code_seen[ind] = False
             self.code_seen_but_disconnected[ind] = False
             self.location[ind] = row['location']
             self.stype[ind] = row['stype']
             self.comment[ind] = row['comment']
             self.delay[ind] = 0
             self.error_number[ind] = 2
             self.last_logged_error_number[ind] = 2
             self.status_text[ind] = "?"
             self.width += 1
             ind += 1
             if ind > self.max_width:
                 print("Two many items in file for max width of : ",
                       self.max_width)
                 break
     return (True)
Example #6
 def _m_full_reactions(self, rxn_recipes_path):
     #### rescue values for stoichiometric coefficients returned as character strings
     DEFAULT_STOICHIO_RESCUE = {
         "4n": 4,
         "3n": 3,
         "2n": 2,
         'n': 1,
         '(n)': 1,
         '(N)': 1,
         '(2n)': 2,
         '(x)': 1,
         'N': 1,
         'm': 1,
         'q': 1,
         '0.01': 1,
         '0.1': 1,
         '0.5': 1,
         '1.5': 1,
         '0.02': 1,
         '0.2': 1,
         '(n-1)': 0,
         '(n-2)': -1
     }
     reaction = {}
     try:
         for row in csv_DictReader(open(rxn_recipes_path), delimiter='\t'):
             tmp = {}  # make sure that if there's an error it's not added
             #parse the reaction equation
             if not len(row['Equation'].split('=')) == 2:
                 self.logger.warning(
                     'There should never be more or less than a left and right of an equation'
                 )
                 self.logger.warning(row['Equation'])
                 continue
             ######### LEFT ######
             #### MNX id
             tmp['left'] = {}
             # if row['#Reaction_ID']=="MNXR141948":
             #     print(row)
             #     exit()
             for spe in re_findall(
                     r'(\(n-1\)|\d+|4n|3n|2n|n|\(n\)|\(N\)|\(2n\)|\(x\)|N|m|q|\(n\-2\)|\d+\.\d+) ([\w\d]+)@\w+',
                     row['Equation'].split('=')[0]):
                 #1) try to rescue if its one of the values
                 try:
                     tmp['left'][self._checkMNXMdeprecated(
                         spe[1])] = DEFAULT_STOICHIO_RESCUE[spe[0]]
                 except KeyError:
                     #2) try to convert to int if its not
                     try:
                         tmp['left'][self._checkMNXMdeprecated(
                             spe[1])] = int(spe[0])
                     except ValueError:
                         self.logger.warning('Cannot convert ' +
                                             str(spe[0]))
                         continue
             ####### RIGHT #####
             ####  MNX id
             tmp['right'] = {}
             for spe in re_findall(
                     r'(\(n-1\)|\d+|4n|3n|2n|n|\(n\)|\(N\)|\(2n\)|\(x\)|N|m|q|\(n\-2\)|\d+\.\d+) ([\w\d]+)@\w+',
                     row['Equation'].split('=')[1]):
                 #1) try to rescue if its one of the values
                 try:
                     tmp['right'][self._checkMNXMdeprecated(
                         spe[1])] = DEFAULT_STOICHIO_RESCUE[spe[0]]
                 except KeyError:
                     #2) try to convert to int if its not
                     try:
                         tmp['right'][self._checkMNXMdeprecated(
                             spe[1])] = int(spe[0])
                     except ValueError:
                         self.logger.warning('Cannot convert ' +
                                             str(spe[0]))
                         continue
             ####### DIRECTION ######
             try:
                 tmp['direction'] = int(row['Direction'])
             except ValueError:
                 self.logger.error('Cannot convert ' +
                                   str(row['Direction']) + ' to int')
                 continue
             ### add the others
             tmp['main_left'] = row['Main_left'].split(',')
             tmp['main_right'] = row['Main_right'].split(',')
             reaction[self._checkMNXRdeprecated(row['#Reaction_ID'])] = tmp
         return reaction
     except FileNotFoundError:
         self.logger.error('Cannot find file: ' + str(rxn_recipes_path))
         return False
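# A standalone sketch of the equation parsing used above: the coefficient /
# species regex applied to a hypothetical MetaNetX-style equation string
# (the equation below is made up; only its format matters):
from re import findall as re_findall

equation = '1 MNXM10@MNXC3 + 2 MNXM1@MNXC3 = 1 MNXM20@MNXC3'
left_side = equation.split('=')[0]
print(re_findall(
    r'(\(n-1\)|\d+|4n|3n|2n|n|\(n\)|\(N\)|\(2n\)|\(x\)|N|m|q|\(n\-2\)|\d+\.\d+) ([\w\d]+)@\w+',
    left_side))
# -> [('1', 'MNXM10'), ('2', 'MNXM1')]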
Example #7
 def _m_mnxm_strc(self, rr_compounds_path, chem_prop_path):
     mnxm_strc = {}
     for row in csv_DictReader(open(rr_compounds_path), delimiter='\t'):
         tmp = {
             'formula': None,
             'smiles': None,
             'inchi': row['inchi'],
             'inchikey': None,
             'mnxm': self._checkMNXMdeprecated(row['cid']),
             'name': None
         }
         try:
             resConv = self._convert_depiction(idepic=tmp['inchi'],
                                               itype='inchi',
                                               otype={'smiles', 'inchikey'})
             for i in resConv:
                 tmp[i] = resConv[i]
         except self.DepictionError as e:
             self.logger.warning(
                 'Could not convert some of the structures: ' + str(tmp))
             self.logger.warning(e)
         mnxm_strc[tmp['mnxm']] = tmp
     with open(chem_prop_path) as f:
         c = csv_reader(f, delimiter='\t')
         for row in c:
             if not row[0][0] == '#':
                 mnxm = self._checkMNXMdeprecated(row[0])
                 tmp = {
                     'formula': row[2],
                     'smiles': row[6],
                     'inchi': row[5],
                     'inchikey': row[8],
                     'mnxm': mnxm,
                     'name': row[1]
                 }
                 for i in tmp:
                     if tmp[i] == '' or tmp[i] == 'NA':
                         tmp[i] = None
                 if mnxm in mnxm_strc:
                     mnxm_strc[mnxm]['formula'] = row[2]
                     mnxm_strc[mnxm]['name'] = row[1]
                     if not mnxm_strc[mnxm]['smiles'] and tmp['smiles']:
                         mnxm_strc[mnxm]['smiles'] = tmp['smiles']
                     if not mnxm_strc[mnxm]['inchikey'] and tmp['inchikey']:
                         mnxm_strc[mnxm]['inchikey'] = tmp['inchikey']
                 else:
                     #check to see if the inchikey is valid or not
                     otype = set({})
                     if not tmp['inchikey']:
                         otype.add('inchikey')
                     if not tmp['smiles']:
                         otype.add('smiles')
                     if not tmp['inchi']:
                         otype.add('inchi')
                     itype = ''
                     if tmp['inchi']:
                         itype = 'inchi'
                     elif tmp['smiles']:
                         itype = 'smiles'
                     else:
                         self.logger.warning(
                             'No valid entry for the convert_depiction function'
                         )
                         continue
                     try:
                         resConv = self._convert_depiction(
                             idepic=tmp[itype], itype=itype, otype=otype)
                         for i in resConv:
                             tmp[i] = resConv[i]
                     except self.DepictionError as e:
                         self.logger.warning(
                             'Could not convert some of the structures: ' +
                             str(tmp))
                         self.logger.warning(e)
                     mnxm_strc[tmp['mnxm']] = tmp
     return mnxm_strc
Example #8
import cx_Oracle
from csv import DictReader as csv_DictReader

print('Please close the Oracle connection, otherwise this program will hang!')
input('Press Enter to start populating the tables: ')

# connect to the database
username = '******'
password = '******'
databaseName = 'localhost/xe'

connection = cx_Oracle.connect(username, password, databaseName)
cursor = connection.cursor()

# open the CSV file
csv_file = open('ted_main.csv', errors='ignore')
csv_reader = csv_DictReader(csv_file)
line_num = 1

# some attributes defined as primary keys may repeat;
# inserting rows with such attributes into a table would raise an error,
# so keep lists that will hold the unique values of these attributes
event_unique = []
person_name_unique = []

# if the tables already contain data, delete it
tables = [
    'Speechperson', 'SpeechRating', 'Person', 'Rating', 'Video', 'TEDTalk',
    'TEDEvent'
]
for table in tables:
    cursor.execute("DELETE FROM " + table)