def get_query_records_dict(self, db_table, soql_query):
        """Execute bulk Salesforce soql queries and return results as generator of dictionaries.

        :param db_table: Database table name
        :param soql_query: Soql queries
        :return: If success, List of result record dictionaries; Else empty list
        """
        self.bulk = SalesforceBulk(sessionId=self.session_id,
                                   host=self.instance)
        job = self.bulk.create_query_job(db_table, contentType="JSON")
        batch = self.bulk.query(job, soql_query)
        self.bulk.close_job(job)
        while not self.bulk.is_batch_done(batch):
            print("Waiting for batch query to complete")
            sleep(10)

        dict_records = []
        rec_count = 0
        print("Iterating through batch result set")
        for result in self.bulk.get_all_results_for_query_batch(batch):
            result = json.load(IteratorBytesIO(result))
            for row in result:
                rec_count += 1
                dict_records.append(row)
            print("Current fetched record count: ", rec_count)

        return dict_records
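For very large result sets, the same flow can be written as a true generator so rows are not accumulated in memory; a minimal sketch, assuming an already-constructed SalesforceBulk session (the iter_query_records name is hypothetical):

import json
from time import sleep

from salesforce_bulk import SalesforceBulk
from salesforce_bulk.util import IteratorBytesIO

def iter_query_records(bulk, db_table, soql_query):
    # Same job/batch flow as above, but yields rows lazily.
    job = bulk.create_query_job(db_table, contentType="JSON")
    batch = bulk.query(job, soql_query)
    bulk.close_job(job)
    while not bulk.is_batch_done(batch):
        sleep(10)  # poll until the batch finishes
    for result in bulk.get_all_results_for_query_batch(batch):
        # each result is a file-like stream of JSON-encoded records
        for row in json.load(IteratorBytesIO(result)):
            yield row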
Example #2
def login():
    global bulk
    logging.info('logging in...')
    # domain passed to SalesforceBulk should be 'test' or 'login' or 'something.my'
    bulk = SalesforceBulk(username=os.environ['ORG_USERNAME'], password=os.environ['ORG_PASSWORD'],
                          security_token=os.environ['ORG_SECURITY_TOKEN'], domain=os.environ['ORG_DOMAIN'])
    logging.info('login successful!')
Example #3
 def setUp(self):
     request_patcher = mock.patch('simple_salesforce.api.requests')
     self.mockrequest = request_patcher.start()
     self.addCleanup(request_patcher.stop)
     self.sessionId = '12345'
     self.host = 'https://example.com'
     self.bulk = SalesforceBulk(self.sessionId, self.host)
Example #4
def sfBulkUpdate(namespace, sObject):

    myObject = sObject
    if len(namespace) > 0:
        myObject = namespace.upper() + '__' + sObject

    stageCSV = stageCSVDir + myObject + '_stg.csv'
    print(stageCSV)
    #print (sObject)

    sfBulk = SalesforceBulk(username=username_loc, password=password_loc, security_token=security_token_loc)
    job = sfBulk.create_insert_job(myObject, contentType='CSV', concurrency='Parallel')


    with open(stageCSV) as csvfile:
        reader = csv.DictReader(csvfile)
        #print (reader.fieldnames)
        rows = []

        for row in reader:
            print("row****", dict(row))
            rows.append(dict(row))

        csv_iter = CsvDictsAdapter(iter(rows))
        print("rows****", rows)
        batch = sfBulk.post_batch(job, csv_iter)
        sfBulk.wait_for_batch(job, batch)
        sfBulk.close_job(job)
        print("Done. Data Uploaded.")
Example #5
    def test_raw_query(self):
        bulk = SalesforceBulk(self.sessionId, self.endpoint)
        self.bulk = bulk

        job_id = bulk.create_query_job("Contact")
        self.jobs.append(job_id)
        self.assertIsNotNone(re.match(r"\w+", job_id))

        batch_id = bulk.query(job_id, "Select Id,Name,Email from Contact Limit 1000")
        self.assertIsNotNone(re.match(r"\w+", batch_id))

        while not bulk.is_batch_done(job_id, batch_id):
            print "Job not done yet..."
            print bulk.batch_status(job_id, batch_id)
            time.sleep(2)

        self.results = ""
        def save_results(tfile, **kwargs):
            print "in save results"
            self.results = tfile.read()

        flag = bulk.get_batch_results(job_id, batch_id, callback=save_results)
        self.assertTrue(flag)
        self.assertTrue(len(self.results) > 0)
        self.assertIn('"', self.results)
Example #6
    def test_query(self):
        bulk = SalesforceBulk(self.sessionId, self.endpoint)
        self.bulk = bulk

        job_id = bulk.create_query_job("Contact", contentType=self.contentType)
        self.jobs.append(job_id)
        self.assertIsNotNone(re.match(r"\w+", job_id))

        batch_id = bulk.query(job_id, "Select Id,Name,Email from Contact Limit 1000")
        self.assertIsNotNone(re.match(r"\w+", batch_id))

        while not bulk.is_batch_done(batch_id):
            print("Job not done yet...")
            print(bulk.batch_status(batch_id))
            time.sleep(2)

        all_results = []
        results = bulk.get_all_results_for_query_batch(batch_id)
        for result in results:
            all_results.extend(self.parse_results(result))

        self.assertTrue(len(all_results) > 0)
        self.assertEqual(
            sorted(all_results[0].keys()),
            ['Email', 'Id', 'Name']
        )
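The parse_results helper is not shown in this test; for JSON content a plausible version (an assumption, since the real helper may also handle CSV and XML) is:

    def parse_results(self, result):
        import json
        # result is a file-like stream of bytes holding a JSON array
        # of record dictionaries
        return json.loads(result.read().decode('utf-8'))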
Example #7
    def test_upload(self):
        bulk = SalesforceBulk(self.sessionId, self.endpoint)
        self.bulk = bulk

        job_id = bulk.create_insert_job("Contact", contentType=self.contentType)
        self.jobs.append(job_id)
        self.assertIsNotNone(re.match(r"\w+", job_id))

        batch_ids = []
        data = [
            {
                'FirstName': 'BulkTestFirst%s' % i,
                'LastName': 'BulkLastName',
                'Phone': '555-555-5555',
            } for i in range(50)
        ]
        for i in range(2):
            content = self.generate_content(data)
            batch_id = bulk.post_batch(job_id, content)
            self.assertIsNotNone(re.match(r"\w+", batch_id))
            batch_ids.append(batch_id)

        bulk.close_job(job_id)

        for batch_id in batch_ids:
            bulk.wait_for_batch(job_id, batch_id, timeout=120)

        for batch_id in batch_ids:
            results = bulk.get_batch_results(batch_id)

            print(results)
            self.assertTrue(len(results) > 0)
            self.assertTrue(isinstance(results, list))
            self.assertTrue(isinstance(results[0], UploadResult))
            self.assertEqual(len(results), 50)
Example #8
    def test_csv_query(self):
        bulk = SalesforceBulk(self.sessionId, self.endpoint)
        self.bulk = bulk

        job_id = bulk.create_query_job("Account")
        self.jobs.append(job_id)
        self.assertIsNotNone(re.match(r"\w+", job_id))

        batch_id = bulk.query(job_id, "Select Id,Name,Description from Account Limit 10000")
        self.assertIsNotNone(re.match(r"\w+", batch_id))
        bulk.wait_for_batch(job_id, batch_id, timeout=120)

        self.results = None
        def save_results1(rows, **kwargs):
            self.results = rows

        flag = bulk.get_batch_results(job_id, batch_id, callback=save_results1, parse_csv=True)
        self.assertTrue(flag)
        results = self.results
        self.assertTrue(len(results) > 0)
        self.assertTrue(isinstance(results,list))
        self.assertEqual(results[0], ['Id','Name','Description'])
        self.assertTrue(len(results) > 3)

        self.results = None
        self.callback_count = 0
        def save_results2(rows, **kwargs):
            self.results = rows
            print(rows)
            self.callback_count += 1

        batch = len(results) // 3
        self.callback_count = 0
        flag = bulk.get_batch_results(job_id, batch_id, callback=save_results2, parse_csv=True, batch_size=batch)
        self.assertTrue(self.callback_count >= 3)
Example #9
 def __init__(self, connector_param):
     self.connector_param = connector_param
     self.instance_url = 'https://' + connector_param.url_prefix + 'salesforce.com'
     self.token_url = 'https://' + connector_param.url_prefix + 'salesforce.com/services/oauth2/token'
     self.access_token = None
     self.access_token = self.get_token()
     self.bulk = SalesforceBulk(sessionId=self.access_token, host=urlparse(self.instance_url).hostname)
Example #10
def bulkUpdate(sObject):

    sfBulk = SalesforceBulk(username=username,
                            password=password,
                            security_token=security_token)
    job = sfBulk.create_insert_job(sObject,
                                   contentType='CSV',
                                   concurrency='Parallel')

    dir = "c:/kenandy/python/stageCSV/"
    stageCSV = dir + sObject + '.csv'
    print(stageCSV)

    with open(stageCSV) as csvfile:
        reader = csv.DictReader(csvfile)
        #print (reader.fieldnames)
        rows = []

        for row in reader:
            print("row****", dict(row))
            #print(row['Id'], row['Name'])
            rows.append(dict(row))
            #print("rows****", rows)

        csv_iter = CsvDictsAdapter(iter(rows))
        #print("csv_iter**** ", csv_iter)
        print("rows****", rows)
        batch = sfBulk.post_batch(job, csv_iter)
        sfBulk.wait_for_batch(job, batch)
        sfBulk.close_job(job)
        print("Done. Data Uploaded.")
Example #11
    def __init__(self, config_path):
        """
        Bootstrap a fetcher class
        :param config_path: Path to the configuration file to use for this instance
        """
        # Get settings
        with open(config_path, 'r') as f:
            self.settings = yaml.safe_load(f)

        # Configure the logger
        log_level = (logging.WARN, logging.DEBUG)[self.settings['debug']]
        LOG_FORMAT = logging.Formatter(
            '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
        logger = logging.getLogger("salesforce-fetcher")
        logger.setLevel(log_level)

        ch = logging.StreamHandler()
        ch.setFormatter(LOG_FORMAT)
        logger.addHandler(ch)

        logger.debug("Logging is set to DEBUG level")
        # let's not output the password
        #logger.debug("Settings: %s" % self.settings)

        self.logger = logger
        self.salesforce = Salesforce(**self.settings['salesforce']['auth'])
        self.salesforce_bulk = SalesforceBulk(**self.settings['salesforce']['auth'],
                                              API_version='46.0')

        # Make sure output dir is created
        output_directory = self.settings['output_dir']
        if not os.path.exists(output_directory):
            os.makedirs(output_directory)
Example #12
def _init_bulk(sf, org_config):
    from salesforce_bulk import SalesforceBulk

    return SalesforceBulk(
        host=org_config.instance_url.replace("https://", "").rstrip("/"),
        sessionId=org_config.access_token,
        API_version=sf.sf_version,
    )
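For context, this sessionId/host form pairs naturally with a session obtained elsewhere, for example via simple_salesforce (a sketch; the credentials are placeholders):

from simple_salesforce import Salesforce
from salesforce_bulk import SalesforceBulk

sf = Salesforce(username='******', password='******', security_token='******')
# sf_instance is the bare hostname, which is what SalesforceBulk expects
bulk = SalesforceBulk(sessionId=sf.session_id, host=sf.sf_instance)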
Example #13
 def _init_bulk(self):
     version = self.api_version or self.project_config.project__package__api_version
     if not version:
         raise ConfigError("Cannot find Salesforce version")
     return SalesforceBulk(
         host=self.org_config.instance_url.replace("https://", "").rstrip("/"),
         sessionId=self.org_config.access_token,
         API_version=version,
     )
Example #14
    def test_upload_with_mapping_file(self):
        if self.contentType != 'CSV':
            print('Mapping file can only be used with CSV content')
            return
        bulk = SalesforceBulk(self.sessionId, self.endpoint)
        self.bulk = bulk

        job_id = bulk.create_insert_job("Contact",
                                        contentType=self.contentType)
        self.jobs.append(job_id)
        self.assertIsNotNone(re.match(r"\w+", job_id))

        batch_ids = []
        data = [{
            'Not FirstName': 'BulkTestFirst%s' % i,
            'Arbitrary Field': 'BulkLastName',
            'Phone': '555-555-5555',
        } for i in range(50)]

        mapping_data = [{
            "Salesforce Field": "FirstName",
            "Csv Header": "NotFirstName",
            "Value": "",
            "Hint": ""
        }, {
            "Salesforce Field": "Phone",
            "Csv Header": "Phone",
            "Value": "",
            "Hint": ""
        }, {
            "Salesforce Field": "LastName",
            "Csv Header": "Arbitrary Field",
            "Value": "",
            "Hint": ""
        }]
        mapping_data = self.generate_content(mapping_data)

        bulk.post_mapping_file(job_id, mapping_data)
        for i in range(2):
            content = self.generate_content(data)
            batch_id = bulk.post_batch(job_id, content)
            self.assertIsNotNone(re.match(r"\w+", batch_id))
            batch_ids.append(batch_id)

        bulk.close_job(job_id)

        for batch_id in batch_ids:
            bulk.wait_for_batch(job_id, batch_id, timeout=120)

        for batch_id in batch_ids:
            results = bulk.get_batch_results(batch_id)

            print(results)
            self.assertTrue(len(results) > 0)
            self.assertTrue(isinstance(results, list))
            self.assertTrue(isinstance(results[0], UploadResult))
            self.assertEqual(len(results), 50)
Example #15
    def test_query_pk_chunk(self):
        bulk = SalesforceBulk(self.sessionId, self.endpoint)
        self.bulk = bulk

        job_id = bulk.create_query_job("Contact",
                                       contentType=self.contentType,
                                       pk_chunking=True)
        self.jobs.append(job_id)
        self.assertIsNotNone(re.match(r"\w+", job_id))

        query = "Select Id,Name,Email from Contact"
        batch_id = bulk.query(job_id, query)
        self.assertIsNotNone(re.match(r"\w+", batch_id))

        try:
            i = 0
            while not bulk.is_batch_done(batch_id):
                print("Job not done yet...")
                print(bulk.batch_status(batch_id))
                time.sleep(2)
                i += 1
                if i == 20:
                    raise Exception
        except BulkBatchFailed as e:
            if e.state != bulk_states.NOT_PROCESSED:
                raise

        batches = bulk.get_batch_list(job_id)
        print(batches)
        batch_ids = [
            x['id'] for x in batches if x['state'] != bulk_states.NOT_PROCESSED
        ]
        requests = [bulk.get_query_batch_request(x, job_id) for x in batch_ids]
        print(requests)
        for request in requests:
            self.assertTrue(request.startswith(query))

        all_results = []

        i = 0
        while not all(bulk.is_batch_done(j, job_id) for j in batch_ids):
            print("Job not done yet...")
            print(bulk.batch_status(batch_id, job_id))
            time.sleep(2)
            i += 1
            if i == 20:
                raise Exception

        for batch_id in batch_ids:
            results = bulk.get_all_results_for_query_batch(batch_id, job_id)
            for result in results:
                all_results.extend(self.parse_results(result))

            self.assertTrue(len(all_results) > 0)
            self.assertEqual(sorted(all_results[0].keys()),
                             ['Email', 'Id', 'Name'])
Example #16
 def __init__(self):
     if BulkHelper.__instance is not None:
         raise Exception("BulkHelper class is a singleton!")
     else:
         BulkHelper.__instance = self
         self.__bulk = SalesforceBulk(username=Config.USERNAME,
                                      password=Config.PASSWORD,
                                      security_token=Config.SECURITY_TOKEN,
                                      sandbox=Config.IS_SANDBOX,
                                      API_version=Config.API_VERSION)
Example #17
def action(operation):
    """Performs the Insertion, Deletion, or Update in the Salesforce org"""
    global object_name
    object_name = select(entity, 0)
    impacted_records = []
    for index in range(len(df)):
        record = {}
        for col in df.columns:
            record[col] = df[col][index]
        impacted_records.append(record)

    try:
        MsgBox = messagebox.askquestion("Operation", (
            'You are about to {action} {length} records within the {obj} object'
            ' within your Salesforce org. Are you sure you want to proceed?'
        ).format(action=operation.lower(),
                 length=str(len(impacted_records)),
                 obj=object_name), icon='warning')
        if (MsgBox == 'yes'):
            bulk = SalesforceBulk(
                username=USERNAME, password=PASSWORD, security_token=TOKEN
            )
            if (operation == "Delete"):
                job = bulk.create_delete_job(object_name, contentType='CSV')
            elif (operation == "Insert"):
                job = bulk.create_insert_job(object_name, contentType='CSV')
            else:
                job = bulk.create_update_job(object_name, contentType='CSV')
            csv_iter = CsvDictsAdapter(iter(impacted_records))
            batch = bulk.post_batch(job, csv_iter)
            bulk.wait_for_batch(job, batch)
            bulk.close_job(job)

            result_df = pd.DataFrame(impacted_records)
            results = bulk.get_batch_results(bulk.get_batch_list(job)[0]['id'])
            result_df['ID'] = ""
            result_df['SUCCESS'] = ""
            result_df['ERROR'] = ""
            for index in range(len(result_df)):
                # UploadResult is a namedtuple, so read its fields directly
                # instead of parsing its string representation
                result_df.loc[index, 'ID'] = results[index].id
                result_df.loc[index, 'SUCCESS'] = results[index].success
                result_df.loc[index, 'ERROR'] = results[index].error
            input_file = (folder_path
                          + "/results"
                          + bulk.get_batch_list(job)[0]['id']
                          + ".xlsx")
            result_df.to_excel(input_file)

            messagebox.showinfo("Info", (
                'Job Details:\n\nNumber of Records Processed: {recordsProcessed}\n'
                'Number of Records Failed: {recordsFailed}').format(
                recordsProcessed=bulk.job_status(job)['numberRecordsProcessed'],
                recordsFailed=bulk.job_status(job)['numberRecordsFailed']))
    except Exception as e:
        messagebox.showerror("Error", e)
Example #18
    def setUp(self):
        login = salesforce_oauth_request.login(
            username=USERNAME,
            password=PASSWORD,
            token=SECURITY_TOKEN,
            client_id=CONSUMER_KEY,
            client_secret=CONSUMER_SECRET,
            cache_session=False,
            sandbox=True,
        )

        self.bulk = SalesforceBulk(login['access_token'], login['endpoint'])
        self.jobs = []
Example #19
    def test_csv_upload(self):
        bulk = SalesforceBulk(self.sessionId, self.endpoint,
                              API_version=SALESFORCE_API_VERSION)
        self.bulk = bulk

        job_id = bulk.create_insert_job("Contact")
        self.jobs.append(job_id)
        self.assertIsNotNone(re.match(r"\w+", job_id))

        batch_ids = []
        content = open("example.csv").read()
        for i in range(5):
            batch_id = bulk.post_batch(job_id, content)
            self.assertIsNotNone(re.match(r"\w+", batch_id))
            batch_ids.append(batch_id)

        for batch_id in batch_ids:
            bulk.wait_for_batch(job_id, batch_id, timeout=120)

        self.results = None

        def save_results1(rows, failed, remaining):
            self.results = rows

        for batch_id in batch_ids:
            flag = bulk.get_upload_results(job_id,
                                           batch_id,
                                           callback=save_results1)
            self.assertTrue(flag)
            results = self.results
            self.assertTrue(len(results) > 0)
            self.assertTrue(isinstance(results, list))
            self.assertEqual(results[0],
                             UploadResult('Id', 'Success', 'Created', 'Error'))
            self.assertEqual(len(results), 3)

        self.results = None
        self.callback_count = 0

        def save_results2(rows, failed, remaining):
            self.results = rows
            self.callback_count += 1

        batch = len(results) // 3
        self.callback_count = 0
        flag = bulk.get_upload_results(job_id,
                                       batch_id,
                                       callback=save_results2,
                                       batch_size=batch)
        self.assertTrue(self.callback_count >= 3)
Example #20
    def request(self, data=()):
        # use csv iterator
        csv_iter = CsvDictsAdapter(iter(data))

        bulk = SalesforceBulk(username=self.username,
                              password=self.password,
                              organizationId=self.organizationId)

        job = bulk.create_insert_job('SamanageCMDB__AgentPost__c',
                                     contentType='CSV')
        batch = bulk.post_batch(job, csv_iter)
        bulk.wait_for_batch(job, batch)
        bulk.close_job(job)

        while not bulk.is_batch_done(batch):
            sleep(10)
Example #21
 def __init__(self, username, password, security_token, sandbox=True):
     """
     :param username:
     :type username: str
     :param password:
     :type password: str
     :param security_token:
     :type security_token: str
     :param sandbox: Whether to connect to a Sandbox (True) or Production (False) instance. Default value is True (Sandbox).
     :type sandbox: bool
     """
     # Logging setup
     self.log = logging.getLogger(__name__)
     self.log.info('Signing into Salesforce.')
     try:
         self.bulk = SalesforceBulk(username=username,
                                    password=password,
                                    security_token=security_token,
                                    sandbox=sandbox)
         self.log.info(
             f'Successfully connected to Salesforce as "{username}".')
     except Exception as auth_err:
         self.log.exception(f'Failed to connect to Salesforce: {auth_err}')
         raise
Example #22
    "USERNAME": os.getenv("SALESFORCE_USERNAME"),
    "PASSWORD": os.getenv("SALESFORCE_PASSWORD"),
    "HOST": os.getenv("SALESFORCE_HOST"),
    "TOKEN": os.getenv("SALESFORCE_TOKEN"),
    "CLIENT_ID": os.getenv("SALESFORCE_CLIENT_ID"),
    "CLIENT_SECRET": os.getenv("SALESFORCE_CLIENT_SECRET"),
}

USER = SALESFORCE["USERNAME"]
PASS = SALESFORCE["PASSWORD"]
TOKEN = SALESFORCE["TOKEN"]
HOST = SALESFORCE["HOST"]

sf = Salesforce(username=USER, password=PASS, security_token=TOKEN)

bulk = SalesforceBulk(sessionId=sf.session_id, host=HOST)

job = bulk.create_query_job("Contact", contentType="CSV")

batch = bulk.query(job, query)
while not bulk.is_batch_done(job, batch):
    sleep(3)
bulk.close_job(job)

rows = bulk.get_batch_result_iter(job, batch, parse_csv=True)
bulk_email = list(rows)
email_list = []
emails_sf = [x[COMBINED_EMAIL_FIELD] for x in bulk_email]
print ("The following email addresses appear in Stripe but not Salesforce: \n")
for field in emails_sf:
    for email in field.split(","):
Example #23
 def _init_bulk(self):
     return SalesforceBulk(
         host=self.org_config.instance_url.replace("https://",
                                                   "").rstrip("/"),
         sessionId=self.org_config.access_token,
     )
Example #24
def sfBulk_Login(username, password, security_token):

    sfBulk = SalesforceBulk(username=username,
                            password=password,
                            security_token=security_token)
    return sfBulk
Example #25
import csv
from salesforce_bulk import SalesforceBulk
from salesforce_bulk import CsvDictsAdapter


username = '******'
password = '******'
security_token = '******'


bulk = SalesforceBulk(username=username, password=password, security_token=security_token)
job = bulk.create_insert_job("account", contentType='CSV', concurrency='Parallel')


rootDir = "c:/python/kenandy/stageCSV/"
objectName = "Account"
stageCSV = rootDir + objectName + '.csv'
print (stageCSV)
with open(stageCSV) as csvfile:

    reader = csv.DictReader(csvfile)
    account = [dict(Name="Account%d" % idx) for idx in range(5)]
    #disbursals = []
    #for row in reader:
    #    disbursals.append(row)
    #print (disbursals)
    print (account)
    csv_iter = CsvDictsAdapter(iter(account))
    
    #csv_iter = CsvDictsAdapter(iter(disbursals))
    batch = bulk.post_batch(job, csv_iter)
Example #26
	with open(file_name, 'w+', encoding='utf-8') as csv_file:
		write_header = csv.writer(csv_file)
		write_header.writerow(config.csv_header)

	# Create the time_log file that will be used for the daily delta date comparison
	time_log = open("run_time.txt", "a")
	time_log.write(run_time + "\n")

	# Create the log file and write the time the program is run
	log = open("log.txt", "a")
	log.write("\n" + "|---------------------------------------|" + "\n") 
	log.write("PROGRAM STARTED: "),log.write(datetime.now().ctime())
	log.write("\n" + "|---------------------------------------|" + "\n")

	# Set the Salesforce username, password, and token
	sf = SalesforceBulk(username=config.salesforce["username"],
	                    password=config.salesforce["password"],
	                    sandbox=True, security_token=config.salesforce["token"])

	# Set the sftp hostkeys (if any)
	try:
		cnopts = pysftp.CnOpts()
		cnopts.hostkeys = None
	except Exception:
		pass

	# Set the sftp host, username, and password (optional paramter: port="22")
	sftp = pysftp.Connection(host=config.sftp["host"], username=config.sftp["username"],
	                         password=config.sftp["password"], cnopts=cnopts)

	# Build a dynamic User list, format the string, and create a variable that can be used in the SOQL filter
Example #27
import requests
import sfConstants
import os
##lvluat##005f4000000IfrqAAC##[email protected]       ##New UAT
##lvldev##005c0000003RZfAAAW##[email protected]   ##New DEV
##########00539000005GkosAAC##[email protected]   ##Old DEV
##########005q0000003OJwUAAW##[email protected]   ##New UAT
##########005f4000000JdJ4AAK##[email protected]  ##New Production 
##########005f4000000Jd1UAAS##[email protected]  ##New Production 

logPath = '/var/www/ADHome/project/static/sf/'
oldSFURl = 'https://levelsolar.my.salesforce.com/'
newSFURl = 'https://levelsolar2nd.lightning.force.com/'
# bulkOld = SalesforceBulk(username='******', password='******', security_token='******', sandbox=True)
bulkOld = SalesforceBulk(username='******', password='******', security_token='******')
# bulkNew = SalesforceBulk(username='******', password='******', security_token='******', sandbox=True)
# bulkNew = SalesforceBulk(username='******', password='******', security_token='******', sandbox=True)
bulkNew = SalesforceBulk(username='******', password='******', security_token='******')
# bulkNew = SalesforceBulk(username='******', password='******', security_token='******', sandbox=True)

def secondToTime(second):
    return str(datetime.fromtimestamp(second/1000.0)).replace(' ', 'T')

def check1212(bulk=bulkOld, bulkDev=bulkNew):
    timeStamp = time.strftime("%d/%m/%Y")
    job = bulk.create_query_job("Opportunity", contentType='JSON')
    batch = bulk.query(job, "select Id, Name, CreatedDate, leadId__c from Opportunity where CreatedDate = today")
    bulk.close_job(job)
    while not bulk.is_batch_done(batch):
        time.sleep(10)
Example #28
def sf_Bulk_Login(username, password, security_token):

    sfBulk = SalesforceBulk(username=username, password=password, security_token=security_token)
    print('logged in successfully')
    return sfBulk
Example #29
# ID Extraction from Salesforce and saving to local
import json
from salesforce_bulk import SalesforceBulk
from salesforce_bulk.util import IteratorBytesIO
from time import sleep
from salesforce_bulk import CsvDictsAdapter
import pandas as pd
import unicodecsv
import config as cfg
#Authentication

bulk = SalesforceBulk(username=cfg.USERNAME,
                      password=cfg.PASSWORD,
                      security_token=cfg.SECURITY_KEY,
                      sandbox=True)

#Source CSV File path for Account
input_file = "/home/baadmin/NCT_ETL/input_files/pg_extract_prd/InstallmentId_sf.csv"

#Target SFDC Object name
target_obj = "cpm__Installment__c"

# Mapping of Input csv Fields to SalesForce Fields

sf_fields = ['Contact_Key__c', 'cpm__Contact__c', 'Installment_Key__c', 'Id']

# Extract the data from salesforce and save it to csv

job = bulk.create_query_job(target_obj, contentType='CSV')
sql = "SELECT " + ",".join(sf_fields) + " FROM " + target_obj
batch = bulk.query(job, sql)
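The snippet stops after posting the query batch; a possible continuation that waits for the batch and writes the extract to disk (a sketch; the output filename is hypothetical):

bulk.close_job(job)
while not bulk.is_batch_done(batch):
    sleep(10)
for result in bulk.get_all_results_for_query_batch(batch):
    # each result is a byte stream of CSV rows, including the header
    rows = list(unicodecsv.DictReader(result, encoding='utf-8'))
    pd.DataFrame(rows).to_csv("installment_extract.csv", index=False)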
Example #30
 def _init_bulk(self):
     return SalesforceBulk(
         host=self.org_config.instance_url.replace('https://', ''),
         sessionId=self.org_config.access_token,
     )