Example #1
 def __init__(self, host, dbname, password, fileName, display=False):
     super(MyCustomerManager, self).__init__(host, dbname, password)
     
     c = CsvParser(fileName)
     for row, count in c.rows():
         raw_data = {
             'name': row['name'],
             'street': row['street'],
             'zip': row['zip'],
             'city': row['city'],
             'phone': row['phone'],
             'mobile': row['mobile'],
             'fax': row['fax'],
             'email': row['email'],
             'website': row['website'],
             'customer': self.booleanFromString(row['customer']),
             'is_company': self.booleanFromString(row['is_company']),
         }
         many2one_data = {
             'title': row['title'],
             'country': row['country'],
         }
         ID = self.insert(raw_data, many2one_data, row['ref'])
         
         if display:
             print(str(count) + ' --- ID: ' + str(ID))
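Examples #1, #3, #6 and #10 all iterate with for row, count in c.rows(), so they assume a parser whose rows() generator yields each record as a dict keyed by the CSV header together with a running row count. A minimal sketch of such a parser, assuming it wraps csv.DictReader and that the delimiter defaults to a comma (Example #10 passes delimiter=';'); the real class may differ:

import csv

class CsvParser:
    # Sketch of the interface assumed by the import examples in this listing.
    def __init__(self, fileName, delimiter=','):
        self.fileName = fileName
        self.delimiter = delimiter

    def rows(self):
        # Yield (row_dict, count) pairs; count starts at 1 for the first data row.
        with open(self.fileName, newline='') as f:
            reader = csv.DictReader(f, delimiter=self.delimiter)
            for count, row in enumerate(reader, start=1):
                yield row, count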
Example #2
def _get_vehicle_and_passes_from_csv(
        path_to_csv: str) -> typing.Tuple[Vehicle, list]:
    """Get data to be analyzed, will be parsed into a Vehicle object as well as a list of passes
    
    Args:
        path_to_csv (str)
    
    Returns:
        typing.Tuple[Vehicle, list]
    """

    cp = CsvParser()
    licence_plate, passes = cp.parse_csv(path_to_csv)
    return _get_vehicle_from_licence_plate(licence_plate), passes
Example #3
 def run(self, fileName):
     c = CsvParser(fileName)
     for row, count in c.rows():
         data = {}
         for key in self.fieldsNames:
             value = self.fieldsNames[key]
             if isinstance(value, dict):
                 data[key] = value['records'][row[value['fieldName']]]
             else:
                 data[key] = row[value]
         ref = row['ref']
         ID = self.insertOrUpdate(ref, 'res.partner', data, self.existing_partners_records)
         
         # print progress only when the module is run directly
         if __name__ == '__main__':
             print(str(count) + ' --- ID: ' + str(ID))
Example #4
def test_csv_parser_with_blank_file():
    
    global test_file    

    # empty file
    with open(test_file, "w") as f:
        f.write("")

    y = CsvParser(test_file)
    y.load_file()
    y.parse_file()
    y.process_file()
    assert y.get_value() == 0
Example #5
def test_csv_parser_with_no_child_nodes():
    
    global test_file    

    # header row only, no data rows
    with open(test_file, "w") as f:
        f.write("""name,active,value""")

    y = CsvParser(test_file)
    y.load_file()
    y.parse_file()
    y.process_file()
    assert y.get_value() == 0

    # delete the test file as it is no longer required
    os.remove(test_file)
Example #6
 def __init__(self, host, dbname, password, fileName, display=False):
     super(MyStockManager, self).__init__(host, dbname, password)
     data = self.all_records('product.product', ['default_code', 'name_template'])
     product_records = self.list_to_dict(data, 'default_code', 'name_template')
     
     c = CsvParser(fileName)
     for row, count in c.rows():
         kwargs = {
             'product_ref': row['ref'],
             'product_qty': row['qty'],
             'product_uom': row['uom'],
             'source': row['source'],
             'destination': row['destination'],
             'state': 'done',
             'name': '[' + row['ref'] + '] ' + product_records[row['ref']],
         }
         ID = self.update_stock(**kwargs)
         
         if display:
             print(str(count) + ' --- ID: ' + str(ID))
Example #7
class TestCsvParser(TestCase):
    def setUp(self):
        self.csv_parser = CsvParser('1000 Sales Records.csv')

    def test_save_as(self):
        try:
            self.csv_parser.save_as("new_test_file", '\t')
            self.assertTrue(os.path.exists("new_test_file"))
        finally:
            os.remove("new_test_file")

    def test_sell_over(self):
        bf_greater = [
            'Brunei', 'Democratic Republic of the Congo', 'Germany',
            'Guatemala', 'Guinea', 'Haiti', 'Iran', 'Japan', 'Kiribati',
            'Lesotho', 'Luxembourg', 'Mali', 'Moldova ', 'Niger', 'Oman',
            'Samoa '
        ]
        self.assertEqual(self.csv_parser.sell_over("Baby Food", 8000),
                         bf_greater)
        cosmetics = [
            'Belgium', 'Burundi', 'India', 'Iran', 'Maldives', 'Moldova ',
            'Norway', 'Saint Lucia', 'Sweden', 'Turkey'
        ]
        self.assertEqual(self.csv_parser.sell_over('Cosmetics', 9000),
                         cosmetics)

    def test_get_country_profit(self):
        self.assertEqual(self.csv_parser.get_country_profit("Nepal"),
                         1022269.6299999999)
        self.assertEqual(self.csv_parser.get_country_profit("Armenia"),
                         1827634.7)
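Example #7 runs against the public 1000 Sales Records sample file. Judging from the assertions, sell_over(item_type, threshold) returns the sorted, de-duplicated countries whose orders of that item type exceed the threshold, and get_country_profit(country) sums the Total Profit column for a country. A sketch under those assumptions, using the column names from the sample dataset (Country, Item Type, Units Sold, Total Profit) and assuming the threshold is compared against Units Sold; the tested class may well differ:

import csv

class CsvParser:
    # Sketch only; the column names come from the public sample file.
    def __init__(self, path):
        with open(path, newline='') as f:
            self._rows = list(csv.DictReader(f))

    def sell_over(self, item_type, threshold):
        # Countries with at least one order of item_type above the threshold.
        countries = {row['Country'] for row in self._rows
                     if row['Item Type'] == item_type
                     and float(row['Units Sold']) > threshold}
        return sorted(countries)

    def get_country_profit(self, country):
        # Sum of Total Profit over every order placed in the given country.
        return sum(float(row['Total Profit']) for row in self._rows
                   if row['Country'] == country)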
Example #8
def test_csv_parser_with_non_csv_values():
    
    global test_file    

    # rows are not comma-separated
    with open(test_file, "w") as f:
        f.write("""name,active,value
John true 
Mark true 
Paul false 100
Ben true 150
""")

    y = CsvParser(test_file)
    y.load_file()
    y.parse_file()
    y.process_file()
    assert y.get_value() == 0
Example #9
def test_csv_parser_with_missing_values():
    
    global test_file    

    # value missing
    with open(test_file, "w") as f:
        f.write("""name,active,value
John,true,
Mark,true,
Paul,false,100
Ben,true,150
""")

    y = CsvParser(test_file)
    y.load_file()
    y.parse_file()
    y.process_file()
    assert y.get_value() == 150
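Examples #4, #5, #8 and #9 exercise the same staged parser API: load the file, parse it against a name,active,value header, then sum the value column for rows whose active flag is true, treating missing or malformed values as zero. A minimal sketch that satisfies those four assertions; the split into load_file/parse_file/process_file and the exact error handling are inferred, not taken from the tested implementation:

import csv

class CsvParser:
    # Sketch inferred from the tests above; not the tested implementation.
    def __init__(self, path):
        self.path = path
        self._text = ''
        self._rows = []
        self._value = 0

    def load_file(self):
        with open(self.path, newline='') as f:
            self._text = f.read()

    def parse_file(self):
        # DictReader yields nothing for an empty or header-only file.
        self._rows = list(csv.DictReader(self._text.splitlines()))

    def process_file(self):
        total = 0
        for row in self._rows:
            if row.get('active') != 'true':
                continue
            try:
                total += int(row.get('value') or 0)
            except (TypeError, ValueError):
                continue  # missing or malformed value counts as zero
        self._value = total

    def get_value(self):
        return self._value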
Example #10
 def __init__(self, host, dbname, password, fileName, display=False):
     super(ProductManager, self).__init__(host, dbname, password)
     existing_prod_tmpl_records = self.prepare_ir_model_data('product.template')
     existing_prod_prod_records = self.prepare_ir_model_data('product.product')
     category_records = self.prepare_many2one('product.category')
     fields_tmpl = ['name', 'description', 'weight_net', 'standard_price', 'list_price', 'type']
     taxes_id = self.search('account.tax', [('description', '=', '20')])
     supplier_taxes_id = self.search('account.tax', [('description', '=', 'ACH-20')])
     
     c = CsvParser(fileName, delimiter=';')
     for row, count in c.rows():
         # product_template
         data_tmpl = {field: row[field] for field in fields_tmpl}
         data_tmpl['sale_ok'] = True
         data_tmpl['purchase_ok'] = True
         data_tmpl['supply_method'] = 'buy'
         data_tmpl['procure_method'] = 'make_to_stock'
         data_tmpl['categ_id'] = category_records[row['category']]
         
         ref = row['ref']
         product_tmpl_id = self.insertOrUpdate(
                 ref + '_product_template', 'product.template', data_tmpl, existing_prod_tmpl_records)
         
         # product_product
         data_product = {
             'default_code': ref,
             'name_template': row['name'],
             'active': True,
             'product_tmpl_id': product_tmpl_id,
             'taxes_id': [(6, 0, taxes_id)],
             'supplier_taxes_id': [(6, 0, supplier_taxes_id)],
         }
         
         product_product_id = self.insertOrUpdate(
                 ref, 'product.product', data_product, existing_prod_prod_records)
         
         if display:
             print(str(count) + ' --- ID: ' + str(product_product_id))
Example #11
class TestSchemaInferencer(unittest.TestCase):
    def setUp(self):
        self._csv_parser = CsvParser('testdata/csv_parser_test.csv', 'col3')

    def testGetRawData(self):
        self.assertEqual(['col' + str(i + 1) for i in range(6)],
                         self._csv_parser._raw_header)

        self.assertEqual([[1.0, 'y', '', 946713600.0, '1234-56-79', 2.3],
                          [2.0, 'y', 1.0, 981100800.0, '1234-56-79', 2.3],
                          [2.0, 'z', 0.0, 981100800.0, '1234-56-79', 2.3]],
                         self._csv_parser._raw_data)

        self.assertEqual([[2.0, 'y', 981100800.0, '1234-56-79', 2.3],
                          [2.0, 'z', 981100800.0, '1234-56-79', 2.3]],
                         self._csv_parser._raw_train_data)

        self.assertEqual([
            [1.0, 'y', 946713600.0, '1234-56-79', 2.3],
        ], self._csv_parser._raw_test_data)

        self.assertEqual([1.0, 0.0], self._csv_parser._raw_target_data)

    def testGetShuffledHeader(self):
        self.assertEqual(
            ['col1', 'col4', 'col6', 'col2_0', 'col2_1', 'col5_0'],
            self._csv_parser._shuffled_header)

    def testGetData(self):
        data = self._csv_parser.GetData()

        np.testing.assert_array_equal(
            np.array([[2.0, 9.811008e+08, 2.3, 1.0, 0.0, 1.0],
                      [2.0, 9.811008e+08, 2.3, 0.0, 1.0, 1.0]]), data['X'])

        np.testing.assert_array_equal(
            np.array([[1.0, 9.467136e+08, 2.3, 1.0, 0.0, 1.0]]),
            data['X_test'])

        np.testing.assert_array_equal(np.array([1., 0.]), data['y'])

        self.assertEqual(
            ['col1', 'col4', 'col6', 'col2_0', 'col2_1', 'col5_0'],
            data['X_schema'])

        self.assertEqual('col3', data['y_schema'])
Example #12
def run(website_urls,
        outputfolder='',
        export_tabs=False,
        export_reports=False,
        export_bulk_exports=False):

    # Automatically create the correct --headless Screaming Frog commands;
    sf = ScreamingFrogAnalyser(website_urls=website_urls,
                               outputfolder=outputfolder,
                               export_tabs=export_tabs,
                               export_reports=export_reports,
                               export_bulk_exports=export_bulk_exports)

    # Start running the web crawls
    sf.run_crawls()

    parser = CsvParser(outputfolder=outputfolder,
                       file_paths=sf._sf_folders,
                       website_urls=sf._website_urls)

    # Return the objects for running tests;
    return sf
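Example #12 wires the crawler and the parser together and returns the ScreamingFrogAnalyser instance so tests can inspect it afterwards. A minimal invocation might look like this (the URL and output folder are illustrative):

sf = run(website_urls=['https://example.com'],
         outputfolder='/tmp/sf_output',
         export_tabs=True)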
Example #13
from csv_parser import CsvParser

__author__ = 'hellfish90'

if __name__ == '__main__':

    filename = "test_files/test_location.csv"
    parser = CsvParser(filename)
    header = parser.get_data_types()

    data_set = parser.get_set_by_data_and_location(1, 1)

    for item in header:
        print(item, end=' ')

    print()

    for data in data_set[0]:
        print(data['coordinates'], data['data'])

    print("Missed Rows: ", len(data_set[1]))

    parser.close_file()
Example #14
 def __init__(self):
     self._parser = {'.csv': CsvParser()}
     self.processor_factory = ProcessorFactory()
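Example #14 keeps a registry of parsers keyed by file extension next to a ProcessorFactory. A plausible companion method that picks the parser for a given file could look like the sketch below; the enclosing class name, the method name and the error handling are illustrative, not part of the original code:

import os

class FileProcessor:
    # Illustrative container mirroring the registry from Example #14.
    def __init__(self):
        self._parser = {'.csv': CsvParser()}
        self.processor_factory = ProcessorFactory()

    def parser_for(self, path):
        # Select the registered parser by file extension.
        ext = os.path.splitext(path)[1].lower()
        try:
            return self._parser[ext]
        except KeyError:
            raise ValueError('no parser registered for %r files' % ext)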
Example #15
from csv_parser import CsvParser

__author__ = 'hellfish90'

if __name__ == '__main__':

    filename = "test_files/test_coordinates.csv"
    parser = CsvParser(filename)
    header = parser.get_data_types()

    data_set = parser.get_set_by_data_and_coordinates(5, 1, 1)

    for item in header:
        print(item, end=' ')

    print()

    for data in data_set[0]:
        print(data['coordinates'], data['data'])

    print("Missed Rows: ", len(data_set[1]))

    parser.close_file()
Example #16
def test_parser_multiple_files_csvs():
    parser = CsvParser(outputfolder=outputfolder,
                       file_paths=csv_multiple_file_paths,
                       website_urls=website_urls)
    # Multiple tests here:
    check_data_frame(parser)
Example #17
def sf_run(website_urls,
           outputfolder='',
           export_tabs=False,
           export_reports=False,
           export_bulk_exports=False,
           push_data_to_biquery=False,
           create_bigquery_table=False,
           bigquery_table_mapping=BIGQUERY_TABLE_ID_MAPPINGS):

    if OUTPUTFOLDER == '':
        raise ValidationError(
            'Your OUTPUTFOLDER cannot be empty',
            'Please update your outputfolder to a valid value.')

    # Automatically create the correct --headless Screaming Frog commands;
    sf = ScreamingFrogAnalyser(website_urls=website_urls,
                               outputfolder=outputfolder,
                               export_tabs=export_tabs,
                               export_reports=export_reports,
                               export_bulk_exports=export_bulk_exports)

    # 1. Start running + saving the web crawls
    sf.run_crawls()

    parser = CsvParser(outputfolder=outputfolder,
                       file_paths=sf._sf_folders,
                       website_urls=sf._website_urls)

    # 2.1 Data checking: Making sure that there is data & at least one of the dataframes contains rows:
    if not any(dataframe_checker(parser)):
        print(
            '''Finished crawling and saved the output to your desired folder/folders. It's impossible to save to BigQuery because you have no .csv data.
        Re-run the script with export_tabs, export_reports, or export_bulk_exports if you would like to upload to BigQuery!
        Exiting the program.
        ''')
        # exit() <-- Disabling this whilst running tests.
        return sf

    # 2.2 Data checking - For valid credentials (Google Cloud Project ID + Service Account Key):
    if push_data_to_biquery:
        config_setup_check(
            [GOOGLE_CLOUD_PROJECT_ID, SERVICE_ACCOUNT_KEY_LOCATION])
        # Google Cloud Credentials + BQ Client Initialisation
        credentials = service_account.Credentials.from_service_account_file(
            SERVICE_ACCOUNT_KEY_LOCATION)
        client = bigquery.Client(credentials=credentials,
                                 project=GOOGLE_CLOUD_PROJECT_ID)

    # 2.3 Data checking - Compile a list of dataframes that have both rows and columns:
    available_data = dataframe_row_checker(parser)

    # 3.1 Storing The Queryable Data:
    if create_bigquery_table:
        # Automatically generate the BigQuery tables with timestamped names + push the relevant data:
        print(
            "Some function here that will automatically generate Xn BigQuery tables."
        )
        pass
    else:
        # Automatically use the BigQuery Table Mapping
        print(
            "Some function here that will map the name of the BigQuery table_id against the csv_name."
        )
        if not config._bigquery_inputs_validated:
            raise ValidationError(
                "You need to use a custom dictionary to map your concatenated .csv data against BigQuery table ids.",
                '''
            Please update the setup.yaml file with the relevant bigquery_tab_id mappings.'''
            )

        # Match the dictionary mapping against the available_data dictionary and only contain the Bigquery table_id's where there is data.
        # Error checking that the length of the dictionary keys are the same length as the available_data dict keys.
        pass

    # Return the objects for running tests;
    return sf
Example #18
def test_parser_multiple_files_no_csvs():
    parser = CsvParser(outputfolder=outputfolder,
                       file_paths=seo_spider_multiple_file_paths,
                       website_urls=website_urls)
    assert len(parser._csv_data_dict.keys()) == 0
Example #19
 def setUp(self):
     self.csv_parser = CsvParser('1000 Sales Records.csv')
Example #20
 def setUp(self):
     self._csv_parser = CsvParser('testdata/csv_parser_test.csv', 'col3')
Example #21
        # prefer method enumerate(x) over range(len(x))
        for product_index, product_name in enumerate(parser.product_names):
            cumulative_sales_per_product[product_index] += sales_per_week_per_product[product_index]

        number_of_records += 1

    sales_report.total_sales_per_product = dict(zip(parser.product_names, cumulative_sales_per_product))
    return number_of_records


if __name__ == '__main__':

    # print("Test")

    # sales_text contains entire csv file
    filename = './data/sales.csv'
    with open(filename) as f:
        sales_text = f.read()

    # print(sales_text)
    parser = CsvParser(sales_text)

    sales_report = generate_sales_report(parser)

    print(sales_report.total_sales_per_week_report())
    print(sales_report.week_with_highest_sales_report())

    print(sales_report.total_sales_per_product_report())
    # print(sales_report.total_sales_per_product_report_narrow_format())
    print(sales_report.average_weekly_sales_report())
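The fragment at the top of Example #21 starts in the middle of generate_sales_report, so the enclosing loop is not shown, and its trailing return number_of_records does not match the __main__ block, which treats the return value as the report object. A hedged reconstruction of what the full function might look like, following the __main__ usage; parser.weekly_sales(), SalesReport and add_week() are assumed names, not taken from the original:

def generate_sales_report(parser):
    # Assumed shape: parser.product_names lists the product names and
    # parser.weekly_sales() yields one list of per-product sales per week.
    sales_report = SalesReport()
    cumulative_sales_per_product = [0] * len(parser.product_names)
    number_of_records = 0

    for sales_per_week_per_product in parser.weekly_sales():
        sales_report.add_week(sales_per_week_per_product)

        for product_index, product_name in enumerate(parser.product_names):
            cumulative_sales_per_product[product_index] += sales_per_week_per_product[product_index]

        number_of_records += 1

    sales_report.total_sales_per_product = dict(zip(parser.product_names, cumulative_sales_per_product))
    return sales_report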
Example #22
File: run.py Project: vspark/cobbler-csv
def run():
    # Parse Command Line Options

    usage = "usage: %prog [options]"
    Parser = OptionParser(usage=usage)
    Parser.add_option("-f",
                      "--file",
                      dest="csv_file",
                      metavar="STRING",
                      help="CSV File Location")

    Parser.add_option("-c",
                      "--config",
                      dest="config_file",
                      default="/etc/cobbler-csv.conf",
                      metavar="STRING",
                      help="Config file (default: /etc/cobbler-csv.conf)")
    (options, args) = Parser.parse_args()

    if len(sys.argv) == 1:
        Parser.print_help()
        sys.exit(1)

    config = Config(configFile=options.config_file)
    csv = CsvParser(options.csv_file, config)

    for system in csv:
        hostname = config.get_mapping("hostname").format(**system)
        print "Creating new system %s...\n" % hostname

        cobbler_system = CobblerSystem(options.config_file, hostname=hostname)

        interface = {
            'interface': 'eth0',
            'macaddress': config.get_mapping("macaddress").format(**system),
            'ipaddress': config.get_mapping("ipaddress").format(**system),
            'subnet': config.get_mapping("subnet").format(**system),
            'gateway': config.get_mapping("gateway").format(**system),
            'static': config.get_mapping("static").format(**system)
        }

        cobbler_system.set_interface(**interface)

        ns = ", ".join(
            config.get_mapping("name_servers").format(**system).split(" "))
        search = config.get_mapping("name_servers_search").format(**system)

        kernel_opts = {
            'hostname': hostname,
            'ipaddress': interface['ipaddress'],
            'netmask': interface['subnet'],
            'gateway': interface['gateway'],
            'nameserver': ns,
            'search': search
        }

        cobbler_system.set_kernel_opts(**kernel_opts)

        attributes = [k[4:] for k in dir(cobbler_system) if k[0:4] == "set_"]

        for attribute in attributes:
            try:
                value = config.get_mapping(attribute).format(**system)
                getattr(cobbler_system, "set_" + attribute)(value)
                print "Setting %s:\n%s\n" % (attribute, value)
            except:
                continue  # no biggie, not a required param

        cobbler_system.set_ks_meta(**dict([(k.lower().replace(" ", "_"), v)
                                           for k, v in system.items()]))

        cobbler_system.save()
        print "System saved!"

        if config.update_custom_info:
            try:
                post_trigger(options.config_file, hostname=hostname)
                print "System custom info in Satellite updated!"
            except:
                pass

        print "-----------------------------------------------------------"

    if config.cobbler_sync:
        """
        Sync cobbler
        """
        print "Syncing cobbler..."
        cobbler_system.sync()
        print "Done."
        print "-----------------------------------------------------------"