Example #1
def prettyQuery(query):
    """prints JSON of SOQL query. For convenience console use."""
    sf = Salesforce(username=config.username,
                    password=config.password,
                    security_token=config.security_token)
    raw = sf.query_all(query)
    pretty = jsonizer(raw)
    print(pretty)
    return pretty
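
The `config` module and `jsonizer` helper are external to this snippet. A minimal sketch of what they might provide (names and values here are assumptions, not part of the original):

import json

# config.py -- hypothetical credentials module imported by the example above:
# username = 'user@example.com'
# password = 'secret'
# security_token = 'token'

def jsonizer(raw):
    """Serialize the OrderedDict returned by query_all as indented JSON."""
    return json.dumps(raw, indent=4, default=str)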
Example #2
def main(argv):
        
    from simple_salesforce import Salesforce
    from simple_salesforce.util import date_to_iso8601 as iso
    from datetime import datetime, timedelta
    from secret import *
    import pytz    
    
    print('running...')
    
    inputfile = r'C:\Users\mivinson\workspace\SFDC Data Connection\sfdcDataConnection\accountSelect.sql'
#    parameter = ''
    startDate = datetime(2014, 11, 12, 0, 0, 0, tzinfo=pytz.UTC)
    endDate = datetime(2014, 11, 12, 0, 0, 0, tzinfo=pytz.UTC)

    print(startDate)
    print(endDate)
    
    delta = endDate - startDate
#    desc = ''
#    try:
#        opts, args = getopt.getopt(argv,"hi:p:s:e:d:",["ifile=","param=","sDate=","eDate=","desc="])
#    except getopt.GetoptError:
#        print '-i <inputfile> -p whereParameter -s startDate -e endDate -d describe(Object)'
#        sys.exit(2)
#    for opt, arg in opts:
#        if opt == '-h':
#            print '-i <inputfile>\n-p where Parameter'
#            sys.exit()
#        elif opt in ("-i", "--ifile"):
#            inputfile = arg
#        elif opt in ("-s", "--sDate"):
#            startDate = arg
#        elif opt in ("-e", "--eDate"):
#            endDate = arg
#        elif opt in ("-p","--param"):
#            parameter = arg
#        elif opt in ("-d","--desc"):
#            desc = arg
##      elif opt in ("-o", "--ofile"):
##         outputfile = arg
#    print 'Input file is ', inputfile
#   print 'Output file is "', outputfile    
    with open(inputfile, 'r') as f:
        select = f.read()


    sf = Salesforce(username=USER, password=PASSWORD, security_token=HKEY)
#    
    for i in range(delta.days + 1):
        iSelect = select.replace(':Start',(startDate + timedelta(days=i)).strftime('%Y-%m-%dT%H:%M:%SZ'))
        iSelect = iSelect.replace( ':End',(startDate + timedelta(days=i+1) + timedelta(microseconds=-1)).strftime('%Y-%m-%dT%H:%M:%SZ'))
        print(iSelect)
        req = sf.query_all(iSelect)
        print(req)
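
The accountSelect.sql template read above is not included; given the `:Start`/`:End` substitutions, it presumably looks something like this hypothetical SOQL (object and field names are guesses):

SELECT Id, Name, CreatedDate
FROM Account
WHERE CreatedDate >= :Start AND CreatedDate <= :End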
Example #3
def sync_country_data(self, request, queryset):
    # Stamp every selected settings row once; the original re-ran this
    # update on the whole queryset inside the per-row loop.
    queryset.update(sync_country_date=timezone.now())
    for setting in queryset:
        if not setting.sf_active:
            continue

        # Production orgs log in directly; anything else is treated as a sandbox
        sandbox = setting.sf_type != 'Production'

        cursor = connection.cursor()
        sf = Salesforce(username=setting.sf_username,
                        password=setting.sf_password,
                        security_token=setting.sf_token,
                        sandbox=sandbox)

        countries = sf.query_all(
            "SELECT Id, Name, Programme_Launch__c FROM Country__c "
            "WHERE Programme_Launch__c != ''")

        for record in countries["records"]:
            data_objects = {
                'name': str(record["Name"]),
                'sfid': str(record["Id"]),
                'launch': str(record["Programme_Launch__c"]),
                'slug': str(record["Name"]).replace(" ", "-").replace("/", "-").replace(".", "-").lower(),
            }
            try:
                cursor.execute(
                    "INSERT INTO data_country (name, sfid, launch, slug) "
                    "VALUES (%(name)s, %(sfid)s, %(launch)s, %(slug)s)",
                    data_objects)
            except Exception:
                # Row already exists: fall back to an update keyed on the Salesforce Id
                cursor.execute(
                    "UPDATE data_country "
                    "SET name=%(name)s, launch=%(launch)s, slug=%(slug)s "
                    "WHERE sfid=%(sfid)s",
                    data_objects)

        self.message_user(request, "Your Countries have been synchronised")
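
Since this is a Django admin action, the raw-SQL try/except upsert could also be expressed through the ORM. A minimal sketch, assuming a hypothetical `Country` model backing the data_country table:

from data.models import Country  # hypothetical model; not part of the original

for record in countries["records"]:
    Country.objects.update_or_create(
        sfid=str(record["Id"]),
        defaults={
            'name': str(record["Name"]),
            'launch': str(record["Programme_Launch__c"]),
            'slug': str(record["Name"]).replace(" ", "-").replace("/", "-").replace(".", "-").lower(),
        },
    )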
Example #4
class SalesforceService:
    error_msg = None
    topLevel = None 
    sf = None

    def __init__(self, username, password, token, topLevel):
        self.topLevel = topLevel

        try:
            self.sf = Salesforce(username=username, password=password, security_token=token)
        except Exception:
            self.error_msg = "Invalid Salesforce credentials"
        
        if not self.sf:
            self.error_msg = "Invalid Salesforce credentials"
        else:
            ConfigManager().cache_sf_creds(username, password, token)
            scholars = self.query_sf_scholars()
            if not scholars:
                self.error_msg = "No scholars returned from query"
            else:
                threading.Thread(target=self.async_process_data, args=(scholars,)).start()

    def query_sf_scholars(self):
        """ Queries Salesforce for scholars using query provided in config file. 
            Formats scholars as tuple expected by scholarscraper """
        cfg_mgr = ConfigManager()
        query = cfg_mgr.sf_select_scholars_query()
        sf_obj_id_name = cfg_mgr.get_sf_account_id_name()
        scholars = []
        try:
            query_result = self.sf.query_all(query)
            records = query_result['records']
            if not records:
                msg = "No scholars returned from Salesforce query:\n{}".format(query)
                ErrorLogService().log_error(msg, "High")
            else:
                for scholar in records:
                    scholar_info = (scholar["Google_Scholar_ID__c"], 
                                    {sf_obj_id_name: scholar["Id"]})
                    scholars.append(scholar_info)
        except Exception:
            ErrorLogService().log_error("Error querying Salesforce", 
                                        "High", traceback.format_exc())
        return scholars

    def async_process_data(self, scholars):
        """ Thread function to call scholar scraper """
        results = scholarscraper.scraperEntry(scholars)
        self.topLevel.processing_complete(results, self.error_msg)
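
A hedged usage sketch: `ConfigManager`, `ErrorLogService`, `scholarscraper`, and the `topLevel` widget are external to this snippet, so the names below are illustrative assumptions:

# topLevel must expose processing_complete(results, error_msg), which the
# worker thread invokes once scraping finishes; app_window is hypothetical.
service = SalesforceService('user@example.com', 'password', 'token', app_window)
if service.error_msg:
    print(service.error_msg)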
Example #5
    def user_id(self):
        if not self.config.get('user_id'):
            sf = Salesforce(
                instance=self.instance_url.replace('https://', ''),
                session_id=self.access_token,
                version='38.0',
            )
            result = sf.query_all(
                "SELECT Id FROM User WHERE UserName='******'".format(
                    self.username
                )
            )
            self.config['user_id'] = result['records'][0]['Id']
        return self.config['user_id']
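
`query_all` transparently follows `nextRecordsUrl` until every page is fetched. When the result set is too large to hold at once, simple_salesforce also supports manual paging; a minimal sketch:

result = sf.query("SELECT Id FROM User")
records = list(result['records'])
while not result['done']:
    # nextRecordsUrl is a full URL, hence identifier_is_url=True
    result = sf.query_more(result['nextRecordsUrl'], identifier_is_url=True)
    records.extend(result['records'])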
Example #6
    def get_cases(self, environment='staging',
                  type='01250000000Hnex',
                  status='In Progress',
                  sub_status='In Development',
                  technician=''):
        """ Get helpdesk tickets for the respective que 100 at a time.
        :return: OrderedDict that contains totalsize, done, and records. Records in turn is also given as an
                OrderedDict with the actualrecords data.
        """
        user_name, pw, token = auth.sfdc_login(environment)
        sf = Salesforce(username=user_name, password=pw, security_token=token)
        result = sf.query_all(self.build_query())
        print(result)

        return result
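
`self.build_query()` is not shown here; judging by the method's parameters, it presumably assembles a SOQL string along these lines (a sketch only -- the field API names, especially `Sub_Status__c`, are assumptions):

    def build_query(self):
        return ("SELECT Id, Subject, Status FROM Case "
                "WHERE RecordTypeId = '01250000000Hnex' "
                "AND Status = 'In Progress' "
                "AND Sub_Status__c = 'In Development' LIMIT 100")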
Example #7
    def _salesforce_import(self, session_id, instance, user, user_company):
        #print session_id, instance, user, user_company
        #SESSION_ID = "00Dj0000001neXP!AQUAQIbUn9RsdTZH6MbFA7qaPtDovNU75.fOC6geI_KnEhJKyUzk2_yFx2TXgkth7zgFuJThY6qZQwH7Pq4UtlcW.Cq0aHt1"
        print(instance)
        print(instance.replace("https://", ""))
        sf = Salesforce(instance=instance.replace("https://", ""),
                        session_id=session_id)
        result = sf.query_all("SELECT Id, Name, Email FROM Contact")
        # Flatten the query result into a DataFrame (df, rather than rebinding sf)
        df = pd.DataFrame(pd.DataFrame(result).records.tolist())
        df = df[["Name", "Email"]]
        df.columns = ["name", "email"]
        df = df.dropna()
        df["domain"] = [i.split("@")[-1] if i else "" for i in df.email]
        df["source"] = "salesforce"
        df["db_type"] = "crm"
        df["user"] = [Parse()._pointer("_User", user) for i in df.index]
        df["user_company"] = [Parse()._pointer("UserCompany", user_company)
                              for i in df.index]
        Parse()._batch_df_create("UserContact", df)
        Prospecter()._batch_df_create("UserContact", df)
        print(Prospecter().update("_User/" + user,
                {"salesforce_integration": arrow.utcnow().timestamp,
                 "salesforce_token": session_id}).json())
Example #8
    def getTableData(self, sObject):
        sf = Salesforce(instance_url=self.instance_url, session_id=self.session_id)
        session_id = sf.session_id
        instance = sf.sf_instance
        sObjectName = SFType(sObject, session_id, instance)
        # Describe once and reuse the field list; the original re-ran
        # describe() for every record returned.
        fields = sObjectName.describe()['fields']
        query = "Select " + ",".join(x['name'] for x in fields)
        query = query + " from " + sObjectName.name
        #print query
        res = sf.query_all(query)
        records = res['records']
        ls = []
        adapter = mongoadapter.adapter()
        collection = adapter.createColletion(sObject)
        for x in records:
            data = {y['name']: x[y['name']] for y in fields}
            ls.append(adapter.insert_posts(collection, data))
        return ls
Example #9
def getLeadsList(sf, sfCred, audience):

    try:
        # response = sf.query_all("SELECT Email FROM Lead WHERE Email <> ''")
        response = sf.query_all(audience['sql_query']) # Use query stored in db
    except Exception as e:
        if getattr(e, 'status', None) == 401:
            SALESFORCE_REFRESH_URL = "https://login.salesforce.com/services/oauth2/token"
            SALESFORCE_CONSUMER_KEY = sfCred['consumer_key']   #"3MVG98_Psg5cppyYH7Cios03svOf9hpZtPg.n0yTXRIKlnjy43.MNRgdLDbmBc3T5wK2IoYOaPLNlqBzNouzE"
            SALESFORCE_CONSUMER_SECRET = sfCred['consumer_secret']   #"2132402812325087889"
            salesforce_refresh_token = sfCred['refresh_token']

            # HTTP request
            url = SALESFORCE_REFRESH_URL
            headers = {
                'Content-Type': 'application/json',
                'Authorization': 'Bearer ' + sfCred['access_token'],
                'X-PrettyPrint': '1'
            }
            params = {'grant_type': 'refresh_token',
                      'client_id': SALESFORCE_CONSUMER_KEY,
                      'client_secret': SALESFORCE_CONSUMER_SECRET,
                      'refresh_token': salesforce_refresh_token }
            # The OAuth2 token endpoint expects POST, not GET
            result = requests.post(url, headers=headers, params=params)

            newSfCred = json.loads(result.content)
            sfCred['instance_url'] = newSfCred['instance_url']
            sfCred['access_token'] = newSfCred['access_token']
            sfCred['signature'] = newSfCred['signature']
            sfCred['id'] = newSfCred['id']
            sfCred['issued_at'] = newSfCred['issued_at']
            db.pushSalseforceCredentials(sfCred)

            # try with new token
            sf = Salesforce(instance_url=sfCred['instance_url'], session_id=sfCred['access_token'])
            # response = sf.query_all("SELECT Email FROM Lead WHERE Email <> ''")
            response = sf.query_all(audience['sql_query']) # Use query stored in db
Example #10
def index(request):
    """
    When this view is loaded in a canvas app, Salesforce sends a POST request to it containing the
        currently logged in user's data.
    This post data contains a base64 string of the user's data, along with a signature. We can validate
        the signature by comparing it to our own generated expected signature from our Salesforce
        application's secret key.
    The result of parsing the signed request is that we obtain an instance_url and oauth_token we
        can use to query the Salesforce API. Feel free to dive into the parse_signed_request() function
        to see the nitty-gritty details.
    """
    data = parse_signed_request(request.POST['signed_request'], settings.SALESFORCE_CONSUMER_SECRET)
    if data:
        #User authenticated, let's do queries!

        #Create a simple salesforce instance, using our newly obtained instance_url and oauth_token for authentication.
        sf = Salesforce(instance_url=data['client']['instanceUrl'], session_id=data['client']['oauthToken'])

        #execute a query
        stats = {}
        for opp in sf.query_all("SELECT LeadSource FROM Opportunity WHERE IsWon = True")['records']:
            if opp['LeadSource'] not in stats:
                stats[opp['LeadSource']] = 0
            stats[opp['LeadSource']] += 1

        results = []
        for lead_source,total in stats.items():
            results.append({"lead_source": lead_source, "total": total})
        results = sorted(results, key=lambda k: k['total']*-1) #sort results by total

    else:
        #invalid signed request; leave results empty so the template still renders.
        results = []

    return render(request, 'index.html', {
        "results": results
    })
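
The docstring above points at `parse_signed_request()`, which is not included here. A Salesforce canvas signed request is a base64-encoded HMAC-SHA256 signature and a base64-encoded JSON payload joined by a period, verified with the app's consumer secret; a minimal sketch of that check (the original helper may differ in detail):

import base64, hashlib, hmac, json

def parse_signed_request(signed_request, consumer_secret):
    encoded_signature, payload = signed_request.split('.', 1)
    signature = base64.b64decode(encoded_signature)
    expected = hmac.new(consumer_secret.encode(), payload.encode(),
                        hashlib.sha256).digest()
    if not hmac.compare_digest(signature, expected):
        return None  # signature mismatch: reject the request
    return json.loads(base64.b64decode(payload))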
Example #11
    def getTableData(self, *args):
        sf = Salesforce(username=self.username, password=self.password, security_token=self.security_token)
        session_id = sf.session_id
        instance = sf.sf_instance
        #session_id, instance = SalesforceLogin(self.username, self.password, self.security_token, True)
        ls = []
        for sObject in args:
            sObjectName = SFType(sObject, session_id, instance)
            # Describe once per object and reuse the field list
            fields = sObjectName.describe()['fields']
            # Build the query inside the loop; the original appended to one
            # shared string, corrupting every query after the first object.
            query = "Select " + ",".join(x['name'] for x in fields)
            query = query + " from " + sObjectName.name
            print(query)
            res = sf.query_all(query)
            records = res['records']
            adapter = mongoadapter.adapter()
            collection = adapter.createColletion(sObject)
            for x in records:
                # Fresh dict per record; the original reused a single dict
                # across iterations and reset ls for every object.
                data = {y['name']: x[y['name']] for y in fields}
                print(data)
                ls.append(adapter.insert_posts(collection, data))
        return ls
Example #12
class SalesForceHandler(object):

    def __init__(self,
                 instance=Config.SalesForce.INSTANCE,
                 username=Config.SalesForce.USERNAME,
                 password=Config.SalesForce.PASSWORD,
                 token=Config.SalesForce.TOKEN):

        self.__sf = Salesforce(instance=instance,
                               username=username,
                               password=password,
                               security_token=token)

    def get_user_id(self, name):
        query = "SELECT Id FROM User WHERE Name LIKE '" + name + "'"
        result = self.__sf.query_all(query)

        if 'records' in result:
            # Reuse the result we already have instead of querying twice
            result = result['records']

        if len(result) == 1:
            return result[0]['Id']
        return None

    def get_shared_users(self, phone):
        shared_users = Config.SalesForce.SHARED

        print(shared_users, type(phone))
        for i in shared_users:
            if phone in shared_users[i]:
                return i
        return None

    def get_number_contacts(self, phone):
        term = get_number_term(phone)
        query = "SELECT AccountId FROM Contact WHERE Phone LIKE '" + term +\
                "' OR MobilePhone LIKE '" + term + "'"

        results = self.__sf.query_all(query)
        if 'records' in results:
            results = results['records']

        return len(results)

    def get_number_accounts(self, phone):

        term = get_number_term(phone)
        query = "SELECT Id FROM Account WHERE Phone LIKE '" + term + "'"

        results = self.__sf.query_all(query)
        if 'records' in results:
            results = results['records']

        return len(results)

    def create_task(self, info):
        print "Creating task"
        task = self.__sf.Task.create({
            'Type': 'Called',
            'WhatId': info['AccountId'],
            'OwnerID': info['UserId'],
            'Subject': 'Call',
            'WhoId': info['ContactId'],
            'Description': 'A call has been logged automagically.',
            'Status': 'Completed',
            'Priority': 'Normal',
            'Summary__c': info['Summary'],
            'ActivityDate': time.strftime('%Y-%m-%d')
        })

        name = self.__sf.User.get(info['UserId'])['FirstName']
        to = self.__sf.User.get(info['UserId'])['Email']
        task_id = self.__sf.Task.get(task['id'])

        account_name = self.__sf.Account.get(task_id['WhatId'])['Name']

        send_email(to, name, task['id'], account_name,
                   Config.SalesForce.INSTANCE)

    def get_account_id_from_account(self, phone):
        term = get_number_term(phone)

        query_account = "SELECT Id FROM Account WHERE Phone LIKE '"\
                        + term + "'"

        accounts = self.__sf.query_all(query_account)
        if 'records' in accounts:
            accounts = accounts['records']

        if len(accounts) == 1:
            return accounts[0]['Id']
        else:
            return None

    def get_account_id_from_contact(self, phone):
        term = get_number_term(phone)

        query_contact = "SELECT AccountId FROM Contact WHERE Phone LIKE '"\
                        + term + "'"

        accounts = self.__sf.query_all(query_contact)
        if 'records' in accounts:
            accounts = accounts['records']

        if (len(accounts) == 1):
            return accounts[0]['AccountId']
        elif (len(accounts) > 1):
            account_id = accounts[0]['AccountId']
            for account in accounts:
                if account['AccountId'] != account_id:
                    return None
            return account_id
        else:
            return None

    def get_account_id_from_mobile(self, phone):
        term = get_number_term(phone)

        query_mobile = "SELECT AccountId FROM Contact WHERE MobilePhone LIKE '"\
                       + term + "'"

        accounts = self.__sf.query_all(query_mobile)
        if 'records' in accounts:
            accounts = accounts['records']

        if (len(accounts) == 1):
            return accounts[0]['AccountId']
        elif (len(accounts) > 1):
            account_id = accounts[0]['AccountId']
            for account in accounts:
                if account['AccountId'] != account_id:
                    return None
            return account_id
        else:
            return None

    def get_account_id(self, phone):

        account_id = self.get_account_id_from_account(phone)
        if (account_id):
            return account_id
        account_id = self.get_account_id_from_contact(phone)
        if (account_id):
            return account_id
        account_id = self.get_account_id_from_mobile(phone)
        if (account_id):
            return account_id

    def get_contact_id(self, phone):

        term = get_number_term(phone)

        query = "SELECT Id FROM Contact WHERE Phone LIKE '" + term +\
                "' OR MobilePhone LIKE '" + term + "'"

        results = self.__sf.query_all(query)
        if 'records' in results:
            results = results['records']

        if (len(results) == 1):
            return results[0]['Id']
        return None
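
Every SOQL string in this class is assembled by concatenation, so a name or phone term containing a single quote would break the query (or allow SOQL injection). A hedged sketch of the usual escape step, not part of the original class:

def soql_quote(value):
    # Escape backslashes first, then single quotes, before embedding in SOQL
    return value.replace('\\', '\\\\').replace("'", "\\'")

query = "SELECT Id FROM User WHERE Name LIKE '%s'" % soql_quote(name)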
Example #13
class SalesForceAPI:
    def __init__(self, log):
        self.log = log
        #self.sf = Salesforce(username='******', password='******',
        #security_token='XFBTDkhKpVxNXBZ4na81RHxS', sandbox=True)
        self.sf = Salesforce(username='******',
                             password='******',
                             security_token='bbNLLmm82AkIjL93evgIztuQ',
                             sandbox=True)

    def getDacRecords(self):
        #'Id','Name','Access_Code__c','Allocation_Status__c','Document_ID__c','For_Print__c','Is_Non_Private_Dwelling__c','Response__c'
        dacQuery = self.sf.query("\
            SELECT id, Name, access_code__c, allocation_status__c, document_id__c, for_print__c,\
                is_non_private_dwelling__c, response__c \
            FROM DAC_store__c \
            WHERE allocation_status__c = 'Allocated' \
            AND is_non_private_dwelling__c = False" \
            )

        return (dacQuery['records'])

    def getResponse(self, responseId):
        respQuery = self.sf.query_all("\
        SELECT id, name, access_code__c, address__c, address_id__c, collection_code__c, \
            collection_instance__c, collection_mode__c, document_number__c, \
            number_of_dwelling_forms_received__c, number_of_individual_forms_received__c, \
            number_of_occupants__c, Number_of_Individual_Responses_Expected__c,\
            Number_of_Outstanding_Individual_Forms__c, Number_of_Paper_Dwelling_Forms__c, \
            Number_of_Paper_Individual_Forms__c, Mark_In__c \
        FROM Response__c \
        WHERE id = '" + responseId + "'")

        return (respQuery['records'])

    def getResponseFromAccessCode(self, accessCode):

        respQuery = self.sf.query_all("\
        SELECT id, name, access_code__c, address__c, address_id__c, collection_code__c, \
            collection_instance__c, collection_mode__c, document_number__c, \
            number_of_dwelling_forms_received__c, number_of_individual_forms_received__c, \
            number_of_occupants__c, Number_of_Individual_Responses_Expected__c,\
            Number_of_Outstanding_Individual_Forms__c, Number_of_Paper_Dwelling_Forms__c, \
            Number_of_Paper_Individual_Forms__c, Mark_In__c \
        FROM Response__c \
        WHERE access_code__c = '" + accessCode + "'")

        return (respQuery['records'])

    def checkValidResponse(self, respRecords, dacAccessCode):

        noRespRecords = len(respRecords)

        if (noRespRecords != 1):
            self.log.writeToLog(
                "Did not find exactly one response store record - moving onto next record"
            )
            return False

        noDwellForms = respRecords[0]['Number_of_Dwelling_Forms_Received__c']
        noIndForms = respRecords[0]['Number_of_Individual_Forms_Received__c']
        noOccs = respRecords[0]['Number_of_Occupants__c']
        noIndExpect = respRecords[0][
            'Number_of_Individual_Responses_Expected__c']
        noOutstandInd = respRecords[0][
            'Number_of_Outstanding_Individual_Forms__c']
        noPaperDwell = respRecords[0]['Number_of_Paper_Dwelling_Forms__c']
        noPaperInd = respRecords[0]['Number_of_Paper_Individual_Forms__c']
        markIn = respRecords[0]['Mark_In__c']
        accessCode = respRecords[0]['Access_Code__c']
        collectionInstance = respRecords[0]['Collection_Instance__c']

        self.log.writeToLog("Response Store, noDwellForms: " +
                            str(noDwellForms) + ", noIndForms: " +
                            str(noIndForms) + ", noOccs: " + str(noOccs) +
                            ", noIndExpect: " + str(noIndExpect) +
                            ", noOutstandInd: " + str(noOutstandInd) +
                            ", noPaperDwell: " + str(noPaperDwell) +
                            ", noPaperInd: " + str(noPaperInd) + ", markIn: " +
                            str(markIn) + ", accessCode: " + str(accessCode) +
                            ", collectionInstance: " + str(collectionInstance))

        # Check it has not been used
        if (accessCode == dacAccessCode
                and collectionInstance == "a03P0000004VCiAIAW"
                and markIn == False
                and (noDwellForms is None or noDwellForms == 0)
                and (noIndForms is None or noIndForms == 0)
                and (noOccs is None or noOccs == 0)
                and (noIndExpect is None or noIndExpect == 0)
                and (noOutstandInd is None or noOutstandInd == 0)
                and (noPaperDwell is None or noPaperDwell == 0)
                and (noPaperInd is None or noPaperInd == 0)):

            return True
        else:
            return False

    def convertInt(self, value):
        try:
            return int(value)
        except (TypeError, ValueError):
            return 0

    def performSFChecks(self, respRecords):

        self.log.writeToLog("Checking Sales Force Reponse")

        totalForms = len(respRecords)
        if (totalForms != 1):
            self.log.writeToLog(
                "Error - Expected one SF response record for access code, have: "
                + str(totalForms) + "\n" + str(respRecords))
        else:
            self.log.writeToLog(
                "Salesforce - Response record present for access code")

        noDwellForms = self.convertInt(
            respRecords[0]['Number_of_Dwelling_Forms_Received__c'])
        if (noDwellForms != 1):
            self.log.writeToLog(
                "Error - SF: Do not have 1 dwelling form received as expected, have: "
                + str(noDwellForms) + "\n" + str(respRecords))
        else:
            self.log.writeToLog(
                "Salesforce - One Dwelling form received correctly")

        noIndForms = self.convertInt(
            respRecords[0]['Number_of_Individual_Forms_Received__c'])
        if (noIndForms != 1):
            self.log.writeToLog(
                "Error - SF: Do not have 1 individual form received as expected, have: "
                + str(noIndForms) + "\n" + str(respRecords))
        else:
            self.log.writeToLog(
                "Salesforce - One Individual form received correctly")

        noOccs = self.convertInt(respRecords[0]['Number_of_Occupants__c'])
        if (noOccs != 5):
            self.log.writeToLog(
                "Error - SF: Do not have 5 occupants as expected, have: " +
                str(noOccs) + "\n" + str(respRecords))
        else:
            self.log.writeToLog(
                "Salesforce - Correctly expects five occupants")

        noOutstandInd = self.convertInt(
            respRecords[0]['Number_of_Outstanding_Individual_Forms__c'])
        if (noOutstandInd != 4):
            self.log.writeToLog(
                "Error - SF: Do not have 4 individual responses expected, have: "
                + str(noOutstandInd) + "\n" + str(respRecords))
        else:
            self.log.writeToLog(
                "Salesforce - Correctly expects four more individual responses"
            )

        noPaperDwell = self.convertInt(
            respRecords[0]['Number_of_Paper_Dwelling_Forms__c'])
        if (noPaperDwell != 0):
            self.log.writeToLog(
                "Error - SF: Unexpectedly have paper dwelling form: " +
                str(noPaperDwell) + "\n" + str(respRecords))
        else:
            self.log.writeToLog(
                "Salesforce - Correctly has no paper dwelling forms received")

        noPaperInd = self.convertInt(
            respRecords[0]['Number_of_Paper_Individual_Forms__c'])
        if (noPaperInd != 0):
            self.log.writeToLog(
                "Error - SF: Unexpectedly have paper individual form: " +
                str(noPaperInd) + "\n" + str(respRecords))
        else:
            self.log.writeToLog(
                "Salesforce - Correctly has no paper individual forms received"
            )

        markIn = respRecords[0]['Mark_In__c']
        if not markIn:
            self.log.writeToLog("Error - SF: Not marked in, status is: " +
                                str(markIn) + "\n" + str(respRecords))
        else:
            self.log.writeToLog("Salesforce - Response correctly marked in")
Example #14
#!/home/bthomps5/outmon/bin/python

from config import *
import requests, json
from simple_salesforce import Salesforce

sf = Salesforce(username=USERNAME,
                password=PASSWORD,
                security_token=SECURITY_TOKEN,
                sandbox=SANDBOX,
                organizationId=ORGANIZATION_ID)
data = sf.query_all(
    "select count() from integration_out__c where status__c in ('',null)")

if data['totalSize'] > ROW_THRESHOLD:
    requests.post(REST_ENDPOINT,
                  data=json.dumps(
                      {'text': ALERT_TEXT.format(data['totalSize'])}))
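
The starred import pulls everything from a local config module; a minimal sketch of the names this monitor expects (all values below are placeholder assumptions):

# config.py -- hypothetical; every name referenced by the script above
USERNAME = 'integration@example.com'
PASSWORD = 'secret'
SECURITY_TOKEN = 'token'
SANDBOX = False
ORGANIZATION_ID = '00D000000000000'
ROW_THRESHOLD = 100
REST_ENDPOINT = 'https://hooks.example.com/alert'
ALERT_TEXT = '{} outbound integration rows are stuck without a status'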
Example #15
class SalesforcePushApi(object):
    """ API Wrapper for the Salesforce Push API """

    def __init__(self, username, password, serverurl, lazy=None, default_where=None):
        sandbox = False
        if serverurl.find('test.salesforce.com') != -1:
            sandbox = True
    
        self.sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='36.0')

        if not lazy:
            lazy = []
        self.lazy = lazy

        if not default_where:
            default_where = {}
        self.default_where = default_where

    def return_query_records(self, query):
        #print 'Running Query: %s' % query
        res = self.sf.query_all(query)
        if res['totalSize'] > 0:
            return res['records']
        else:
            return []

    def format_where_clause(self, where, obj=None):
        if obj and obj in self.default_where:
            default_where = self.default_where[obj]
            if where:
                where = '(%s) AND (%s)' % (default_where, where)
            else:
                where = '(%s)' % default_where
        if where:
            where = ' WHERE %s' % where
        else:
            where = ''
        return where

    def add_query_limit(self, query, limit):
        if not limit:
            return query

        return '%s LIMIT %s' % (query, limit)

    @memoize
    def get_packages(self, where=None, limit=None):
        where = self.format_where_clause(where)
        query = "SELECT id, name, namespaceprefix FROM MetadataPackage%s" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_package_objs(self, where=None, limit=None):
        package_objs = []
        for package in self.get_packages(where, limit):
            package_objs.append(
                MetadataPackage(
                    push_api = self,
                    sf_id=package['Id'], 
                    name=package['Name'], 
                    namespace=package['NamespacePrefix']
                )
            )
        return package_objs

    @memoize
    def get_packages_by_id(self, where=None, limit=None):
        packages = {}
        for package in self.get_package_objs(where, limit):
            packages[package.sf_id] = package
        return packages

    @memoize
    def get_package_versions(self, where=None, limit=None):
        where = self.format_where_clause(where)
        query = "SELECT Id, Name, MetadataPackageId, ReleaseState, MajorVersion, MinorVersion, PatchVersion, BuildNumber FROM MetadataPackageVersion%s ORDER BY MajorVersion DESC, MinorVersion DESC, PatchVersion, BuildNumber DESC" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_where_last_version(self, major=None, minor=None, beta=None):
        if beta:
            where = "ReleaseState = 'Beta'"
        else:
            where = "ReleaseState = 'Released'"
        if major:
            where += " AND MajorVersion=%s" % int(major)
        if minor:
            where += " AND MinorVersion=%s" % int(minor)
        return where
        
    @memoize
    def get_package_version_objs(self, where=None, limit=None):
        package_version_objs = []
        packages = self.get_packages_by_id()
        for package_version in self.get_package_versions(where, limit):
            package_version_objs.append(
                MetadataPackageVersion(
                    push_api = self,
                    name = package_version['Name'],
                    package = packages[package_version['MetadataPackageId']],
                    state = package_version['ReleaseState'],
                    major = package_version['MajorVersion'],
                    minor = package_version['MinorVersion'],
                    patch = package_version['PatchVersion'],
                    build = package_version['BuildNumber'],
                    sf_id = package_version['Id'],
                )
            )
        return package_version_objs

    @memoize
    def get_package_versions_by_id(self, where=None, limit=None):
        package_versions = {}
        for package_version in self.get_package_version_objs(where, limit):
            package_versions[package_version.sf_id] = package_version
        return package_versions

    @memoize
    def get_subscribers(self, where=None, limit=None):
        where = self.format_where_clause(where, obj='PackageSubscriber')
        query = "SELECT Id, MetadataPackageVersionId, InstalledStatus, OrgName, OrgKey, OrgStatus, OrgType from PackageSubscriber%s" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_subscriber_objs(self, where=None, limit=None):
        subscriber_objs = []
        package_versions = self.get_package_versions_by_id()
        for subscriber in self.get_subscribers(where, limit):
            subscriber_objs.append(
                PackageSubscriber(
                    push_api = self,
                    version = package_versions[subscriber['MetadataPackageVersionId']],
                    status = subscriber['InstalledStatus'],
                    org_name = subscriber['OrgName'],
                    org_key = subscriber['OrgKey'],
                    org_status = subscriber['OrgStatus'],
                    org_type = subscriber['OrgType'],
                    sf_id = subscriber['Id'],
                )
            )
        return subscriber_objs

    @memoize
    def get_subscribers_by_org_key(self, where=None, limit=None):
        subscribers = {}
        for subscriber in self.get_subscriber_objs(where, limit):
            subscribers[subscriber.org_key] = subscriber 
        return subscribers

    @memoize
    def get_push_requests(self, where=None, limit=None):
        where = self.format_where_clause(where, obj='PackagePushRequest')
        query = "SELECT Id, PackageVersionId, ScheduledStartTime, Status FROM PackagePushRequest%s ORDER BY ScheduledStartTime DESC" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_push_request_objs(self, where=None, limit=None):
        push_request_objs = []
        package_versions = self.get_package_versions_by_id()
        for push_request in self.get_push_requests(where, limit):
            push_request_objs.append(
                PackagePushRequest(
                    push_api = self,
                    version = package_versions[push_request['PackageVersionId']],
                    start_time=push_request['ScheduledStartTime'],
                    status = push_request['Status'],
                    sf_id = push_request['Id'],
                )
            )
        return push_request_objs

    @memoize
    def get_push_requests_by_id(self, where=None, limit=None):
        push_requests = {}
        for push_request in self.get_push_request_objs(where, limit):
            push_requests[push_request.sf_id] = push_request
        return push_requests

    @memoize
    def get_push_jobs(self, where=None, limit=None):
        where = self.format_where_clause(where)
        query = "SELECT Id, PackagePushRequestId, SubscriberOrganizationKey, Status FROM PackagePushJob%s" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_push_job_objs(self, where=None, limit=None):
        push_job_objs = []
        lazy = 'subscribers' in self.lazy
        if not lazy:
            subscriberorgs = self.get_subscribers_by_org_key()
        push_requests = self.get_push_requests_by_id()
        for push_job in self.get_push_jobs(where, limit):
            if lazy:
                orgs = self.get_subscriber_objs("OrgKey = '%s'" % push_job['SubscriberOrganizationKey'])
                if not orgs:
                    org = None
                else:
                    org = orgs[0]
            else:
                if push_job['SubscriberOrganizationKey'] not in subscriberorgs:
                    #print 'Skipping job for unmatched subscriber org %s' % push_job['SubscriberOrganizationKey']
                    continue
                else:
                    org = subscriberorgs[push_job['SubscriberOrganizationKey']]
            push_job_objs.append(
                PackagePushJob(
                    push_api = self,
                    request = push_requests[push_job['PackagePushRequestId']],
                    org = org,
                    status = push_job['Status'],
                    sf_id = push_job['Id'],
                )
            )
        return push_job_objs

    @memoize
    def get_push_jobs_by_id(self, where=None, limit=None):
        push_jobs = {}
        for push_job in self.get_push_job_objs(where, limit):
            push_jobs[push_job.sf_id] = push_job
        return push_jobs

    @memoize
    def get_push_errors(self, where=None, limit=None):
        where = self.format_where_clause(where)
        query = "SELECT Id, PackagePushJobId, ErrorSeverity, ErrorType, ErrorTitle, ErrorMessage, ErrorDetails FROM PackagePushError%s" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_push_error_objs(self, where=None, limit=None):
        push_error_objs = []
        lazy = 'jobs' in self.lazy
        if not lazy:
            push_jobs = self.get_push_jobs_by_id()
        for push_error in self.get_push_errors(where, limit):
            if lazy:
                jobs = self.get_push_job_objs(where="Id = '%s'" % push_error['PackagePushJobId'])
                job = jobs[0] if jobs else None
            else:
                # The original never assigned job on this branch, raising a
                # NameError whenever jobs were loaded eagerly.
                job = push_jobs.get(push_error['PackagePushJobId'])

            push_error_objs.append(
                PackagePushError(
                    push_api = self,
                    job = job,
                    severity = push_error['ErrorSeverity'],
                    error_type = push_error['ErrorType'],
                    title = push_error['ErrorTitle'],
                    message = push_error['ErrorMessage'],
                    details = push_error['ErrorDetails'],
                    sf_id = push_error['Id'],
                )
            )
        return push_error_objs

    @memoize
    def get_push_errors_by_id(self, where=None, limit=None):
        push_errors = {}
        for push_error in self.get_push_error_objs(where, limit):
            push_errors[push_error.sf_id] = push_error
        return push_errors

    def create_push_request(self, version, orgs, start=None):
        if not start:
            # By default, delay the push start by 15 minutes to allow manual review and intervention
            start = datetime.datetime.now() + datetime.timedelta(0, 15*60)

        # Create the request
        res = self.sf.PackagePushRequest.create({
            'PackageVersionId': version.sf_id,
            'ScheduledStartTime': start.isoformat(),
        })
        request_id = res['id']

        # Schedule the orgs
        batch_size = 200
        batch_offset = 0

        for batch in batch_list(orgs, batch_size):

            batch_data = {'records': []}
            i = 0

            for org in batch:
                batch_data['records'].append({
                    'attributes': {'type': 'PackagePushJob', 'referenceId': 'org%s' % i},
                    'PackagePushRequestId': request_id,
                    'SubscriberOrganizationKey': org,
                })
                i += 1
            
            try:
                res = self.sf._call_salesforce(
                    'POST', 
                    self.sf.base_url + 'composite/tree/PackagePushJob', 
                    data=json.dumps(batch_data),
                )
            except SalesforceMalformedRequest as e:
                for result in e.content['results']:
                    for error in result['errors']:
                        if error['statusCode'] == 'INVALID_OPERATION':
                            print('Skipping org, error message = {}'.format(error['message']))
                        else:
                            raise e

        return request_id

    def run_push_request(self, request_id):
        # Set the request to Pending status
        return self.sf.PackagePushRequest.update(request_id, {'Status': 'Pending'})
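
`@memoize` is used throughout both of these wrappers but defined elsewhere; a minimal sketch of a compatible decorator (the project's actual helper may differ):

import functools

def memoize(func):
    cache = {}
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Key on positional and keyword arguments so repeated calls such as
        # get_packages(where, limit) hit the Salesforce API only once
        key = (args, tuple(sorted(kwargs.items())))
        if key not in cache:
            cache[key] = func(*args, **kwargs)
        return cache[key]
    return wrapper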
Example #16
class SalesforcePushApi(object):
    """ API Wrapper for the Salesforce Push API """

    def __init__(self, username, password, serverurl, lazy=None, default_where=None):
        sandbox = False
        if serverurl.find('test.salesforce.com') != -1:
            sandbox = True
    
        self.sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='35.0')

        # Change base_url to use the tooling api
        self.sf.base_url = self.sf.base_url + 'tooling/'

        if not lazy:
            lazy = []
        self.lazy = lazy

        if not default_where:
            default_where = {}
        self.default_where = default_where

    def return_query_records(self, query):
        #print 'Running Query: %s' % query
        res = self.sf.query_all(query)
        if res['totalSize'] > 0:
            return res['records']
        else:
            return []

    def format_where_clause(self, where, obj=None):
        if obj and obj in self.default_where:
            default_where = self.default_where[obj]
            if where:
                where = '(%s) AND (%s)' % (default_where, where)
            else:
                where = '(%s)' % default_where
        if where:
            where = ' WHERE %s' % where
        else:
            where = ''
        return where

    def add_query_limit(self, query, limit):
        if not limit:
            return query

        return '%s LIMIT %s' % (query, limit)

    def get_tooling_object(self, object_name):
        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        obj = getattr(self.sf, object_name)
        obj.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                     .format(instance=self.sf.sf_instance,
                             object_name=object_name,
                             sf_version=self.sf.sf_version))
        return obj

    @memoize
    def get_packages(self, where=None, limit=None):
        where = self.format_where_clause(where)
        query = "SELECT id, name, namespaceprefix FROM MetadataPackage%s" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_package_objs(self, where=None, limit=None):
        package_objs = []
        for package in self.get_packages(where, limit):
            package_objs.append(
                MetadataPackage(
                    push_api = self,
                    sf_id=package['Id'], 
                    name=package['Name'], 
                    namespace=package['NamespacePrefix']
                )
            )
        return package_objs

    @memoize
    def get_packages_by_id(self, where=None, limit=None):
        packages = {}
        for package in self.get_package_objs(where, limit):
            packages[package.sf_id] = package
        return packages

    @memoize
    def get_package_versions(self, where=None, limit=None):
        where = self.format_where_clause(where)
        query = "SELECT Id, Name, MetadataPackageId, ReleaseState, MajorVersion, MinorVersion, PatchVersion, BuildNumber FROM MetadataPackageVersion%s ORDER BY MajorVersion DESC, MinorVersion DESC, PatchVersion, BuildNumber DESC" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_where_last_version(self, major=None, minor=None, beta=None):
        if beta:
            where = "ReleaseState = 'Beta'"
        else:
            where = "ReleaseState = 'Released'"
        if major:
            where += " AND MajorVersion=%s" % int(major)
        if minor:
            where += " AND MinorVersion=%s" % int(minor)
        return where
        
    @memoize
    def get_package_version_objs(self, where=None, limit=None):
        package_version_objs = []
        packages = self.get_packages_by_id()
        for package_version in self.get_package_versions(where, limit):
            package_version_objs.append(
                MetadataPackageVersion(
                    push_api = self,
                    name = package_version['Name'],
                    package = packages[package_version['MetadataPackageId']],
                    state = package_version['ReleaseState'],
                    major = package_version['MajorVersion'],
                    minor = package_version['MinorVersion'],
                    patch = package_version['PatchVersion'],
                    build = package_version['BuildNumber'],
                    sf_id = package_version['Id'],
                )
            )
        return package_version_objs

    @memoize
    def get_package_versions_by_id(self, where=None, limit=None):
        package_versions = {}
        for package_version in self.get_package_version_objs(where, limit):
            package_versions[package_version.sf_id] = package_version
        return package_versions

    @memoize
    def get_subscribers(self, where=None, limit=None):
        where = self.format_where_clause(where, obj='PackageSubscriber')
        query = "SELECT Id, MetadataPackageVersionId, InstalledStatus, OrgName, OrgKey, OrgStatus, OrgType from PackageSubscriber%s" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_subscriber_objs(self, where=None, limit=None):
        subscriber_objs = []
        package_versions = self.get_package_versions_by_id()
        for subscriber in self.get_subscribers(where, limit):
            subscriber_objs.append(
                PackageSubscriber(
                    push_api = self,
                    version = package_versions[subscriber['MetadataPackageVersionId']],
                    status = subscriber['InstalledStatus'],
                    org_name = subscriber['OrgName'],
                    org_key = subscriber['OrgKey'],
                    org_status = subscriber['OrgStatus'],
                    org_type = subscriber['OrgType'],
                    sf_id = subscriber['Id'],
                )
            )
        return subscriber_objs

    @memoize
    def get_subscribers_by_org_key(self, where=None, limit=None):
        subscribers = {}
        for subscriber in self.get_subscriber_objs(where, limit):
            subscribers[subscriber.org_key] = subscriber 
        return subscribers

    @memoize
    def get_push_requests(self, where=None, limit=None):
        where = self.format_where_clause(where, obj='PackagePushRequest')
        query = "SELECT Id, PackageVersionId, ScheduledStartTime, Status FROM PackagePushRequest%s ORDER BY ScheduledStartTime DESC" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_push_request_objs(self, where=None, limit=None):
        push_request_objs = []
        package_versions = self.get_package_versions_by_id()
        for push_request in self.get_push_requests(where, limit):
            push_request_objs.append(
                PackagePushRequest(
                    push_api = self,
                    version = package_versions[push_request['PackageVersionId']],
                    start_time=push_request['ScheduledStartTime'],
                    status = push_request['Status'],
                    sf_id = push_request['Id'],
                )
            )
        return push_request_objs

    @memoize
    def get_push_requests_by_id(self, where=None, limit=None):
        push_requests = {}
        for push_request in self.get_push_request_objs(where, limit):
            push_requests[push_request.sf_id] = push_request
        return push_requests

    @memoize
    def get_push_jobs(self, where=None, limit=None):
        where = self.format_where_clause(where)
        query = "SELECT Id, PackagePushRequestId, SubscriberOrganizationKey, Status FROM PackagePushJob%s" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_push_job_objs(self, where=None, limit=None):
        push_job_objs = []
        lazy = 'subscribers' in self.lazy
        if not lazy:
            subscriberorgs = self.get_subscribers_by_org_key()
        push_requests = self.get_push_requests_by_id()
        for push_job in self.get_push_jobs(where, limit):
            if lazy:
                orgs = self.get_subscriber_objs("OrgKey = '%s'" % push_job['SubscriberOrganizationKey'])
                if not orgs:
                    org = None
                else:
                    org = orgs[0]
            else:
                if push_job['SubscriberOrganizationKey'] not in subscriberorgs:
                    #print 'Skipping job for unmatched subscriber org %s' % push_job['SubscriberOrganizationKey']
                    continue
                else:
                    org = subscriberorgs[push_job['SubscriberOrganizationKey']]
            push_job_objs.append(
                PackagePushJob(
                    push_api = self,
                    request = push_requests[push_job['PackagePushRequestId']],
                    org = org,
                    status = push_job['Status'],
                    sf_id = push_job['Id'],
                )
            )
        return push_job_objs

    @memoize
    def get_push_jobs_by_id(self, where=None, limit=None):
        push_jobs = {}
        for push_job in self.get_push_job_objs(where, limit):
            push_jobs[push_job.sf_id] = push_job
        return push_jobs

    @memoize
    def get_push_errors(self, where=None, limit=None):
        where = self.format_where_clause(where)
        query = "SELECT Id, PackagePushJobId, ErrorSeverity, ErrorType, ErrorTitle, ErrorMessage, ErrorDetails FROM PackagePushError%s" % where
        query = self.add_query_limit(query, limit)
        return self.return_query_records(query)

    @memoize
    def get_push_error_objs(self, where=None, limit=None):
        push_error_objs = []
        lazy = 'jobs' in self.lazy
        if not lazy:
            push_jobs = self.get_push_jobs_by_id()
        for push_error in self.get_push_errors(where, limit):
            if lazy:
                jobs = self.get_push_job_objs(where="Id = '%s'" % push_error['PackagePushJobId'])
                job = jobs[0] if jobs else None
            else:
                # The original never assigned job on this branch, raising a
                # NameError whenever jobs were loaded eagerly.
                job = push_jobs.get(push_error['PackagePushJobId'])

            push_error_objs.append(
                PackagePushError(
                    push_api = self,
                    job = job,
                    severity = push_error['ErrorSeverity'],
                    error_type = push_error['ErrorType'],
                    title = push_error['ErrorTitle'],
                    message = push_error['ErrorMessage'],
                    details = push_error['ErrorDetails'],
                    sf_id = push_error['Id'],
                )
            )
        return push_error_objs

    @memoize
    def get_push_errors_by_id(self, where=None, limit=None):
        push_errors = {}
        for push_error in self.get_push_error_objs(where, limit):
            push_errors[push_error.sf_id] = push_error
        return push_errors

    def create_push_request(self, version, orgs, start=None):
        if not start:
            # By default, delay the push start by 15 minutes to allow manual review and intervention
            start = datetime.datetime.now() + datetime.timedelta(0, 15*60)

        # Get the modified simple-salesforce Tooling API endpoint objects
        ToolingPackagePushRequest = self.get_tooling_object('PackagePushRequest')
        ToolingPackagePushJob = self.get_tooling_object('PackagePushJob')

        # Create the request
        res = ToolingPackagePushRequest.create({
            'PackageVersionId': version.sf_id,
            'ScheduledStartTime': start.isoformat(),
        })
        request_id = res['id']

        # Schedule the orgs
        for org in orgs:
            try:
                res = ToolingPackagePushJob.create({
                    'PackagePushRequestId': request_id,
                    'SubscriberOrganizationKey': org,
                })
            except SalesforceMalformedRequest as e:
                error = e.content[0]
                if error['errorCode'] == 'INVALID_OPERATION':
                    print('Skipping org %s, error message = %s' % (org, error['message']))
                else:
                    raise e

        return request_id
Example #17
def run_tests():
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    debug = os.environ.get('DEBUG_TESTS',False) == 'true'
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    
    if namespace:
        namespace = "'%s'" % namespace
    else:
        namespace = 'null'
    
    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True
    
    sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='32.0')
    
    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'
    
    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        where_name.append("Name LIKE '%s'" % pattern)
   
    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = %s and (%s)" % (namespace, ' OR '.join(where_name))

    print "Running Query: %s" % query
    sys.stdout.flush()

    res = sf.query_all("SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = %s and (%s)" % (namespace, ' OR '.join(where_name)))

    print "Found %s classes" % res['totalSize']
    sys.stdout.flush()

    if not res['totalSize']:
        return {'Pass': 0, 'Failed': 0, 'CompileFail': 0, 'Skip': 0}
    
    classes_by_id = {}
    classes_by_name = {}
    traces_by_class_id = {}
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        expiration = datetime.datetime.now() + datetime.timedelta(0,3600)
        for class_id in classes_by_id.keys():
            TraceFlag = sf.TraceFlag
            TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                         .format(instance=sf.sf_instance,
                                 object_name='TraceFlag',
                                 sf_version=sf.sf_version))
            res = TraceFlag.create({
                'ApexCode': 'DEBUG',
                'ApexProfiling': 'DEBUG',
                'Callout': 'DEBUG',
                'Database': 'DEBUG',
                'ExpirationDate': expiration.isoformat(),
                #'ScopeId': class_id,
                'System': 'DEBUG',
                'TracedEntityId': class_id,
                'Validation': 'DEBUG',
                'Visualforce': 'DEBUG',
                'Workflow': 'DEBUG',
            })
            traces_by_class_id[class_id] = res['id']
    
    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())})
    
    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '%s'" % job_id)
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1
    
        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break
        
        print 'Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)
    
    # Get the test results by method
    res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId FROM ApexTestResult WHERE AsyncApexJobId = '%s'" % job_id)
    
    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        if debug:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
    
    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('%s')" % "','".join([str(id) for id in classes_by_log_id.keys()])
        res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in %s" % log_ids)
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log
            # Fetch the debug log file
            body_url = '%ssobjects/ApexLog/%s/Body' % (sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)
            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file
            with open(log_file, 'w') as f:
                f.write(resp.content)

            # Parse stats from the log file
            with open(log_file, 'r') as f:
                method_stats = parse_log(f)
            
            # Add method stats to results_by_class_name
            for method, stats in method_stats.items():
                results_by_class_name[class_name][method]['stats'] = stats

        # Expire the trace flags
        for trace_id in traces_by_class_id.values():
            TraceFlag.update(trace_id, {'ExpirationDate': datetime.datetime.now().isoformat()})

    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        if debug:
            duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: %s (%ss)' % (class_name, duration)
        else:
            print 'Class: %s' % class_name
        sys.stdout.flush()
        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]
    
            # Output result for method
            print '   %(Outcome)s: %(MethodName)s' % result

            if debug:
                print '     DEBUG LOG INFO:'
                stat_keys = result['stats'].keys()
                stat_keys.sort()
                for stat in stat_keys:
                    try:
                        value = result['stats'][stat]
                        output = '       %s / %s' % (value['used'], value['allowed'])
                        print output.ljust(26) + stat
                    except (TypeError, KeyError):
                        # Stat value is not a used/allowed pair; print it raw
                        output = '       %s' % result['stats'][stat]
                        print output.ljust(26) + stat
    
            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail','CompileFail']:
                print '   Message: %(Message)s' % result
                print '   StackTrace: %(StackTrace)s' % result
            sys.stdout.flush()
    
    print '-------------------------------------------------------------------------------'
    print 'Passed: %(Pass)s  Fail: %(Fail)s  Compile Fail: %(CompileFail)s  Skipped: %(Skip)s' % counts
    print '-------------------------------------------------------------------------------'
    sys.stdout.flush()

    return counts
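
A possible CI driver for run_tests, assuming the SF_USERNAME, SF_PASSWORD and SF_SERVERURL environment variables are already exported; the module layout and exit-code policy here are illustrative, not part of the original script:

# Hypothetical driver; assumes run_tests() is importable in this module.
import os
import sys

if __name__ == '__main__':
    os.environ.setdefault('APEX_TEST_NAME_MATCH', '%_TEST')
    os.environ.setdefault('POLL_INTERVAL', '10')
    counts = run_tests()
    # Fail the build if any test failed or failed to compile
    sys.exit(0 if counts['Fail'] == 0 and counts['CompileFail'] == 0 else 1)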
Example No. 18
class sf_Manager:
    def __init__(self):
        # Create a free SalesForce account: https://developer.salesforce.com/signup
        self.login()

    def login(self):
        # Create a free SalesForce account: https://developer.salesforce.com/signup
        self.sf = Salesforce(
            username=os.getenv("USERNAME"),
            password=os.getenv("PASSWORD"),
            security_token=os.getenv("TOKEN"),
        )
        return 0

    def dict_to_df(self, query_result, date=True):
        items = {
            val: dict(query_result["records"][val])
            for val in range(query_result["totalSize"])
        }
        df = pd.DataFrame.from_dict(items, orient="index").drop(["attributes"],
                                                                axis=1)

        if date:  # date indicates whether the df contains a CreatedDate column
            # Salesforce returns full ISO timestamps; parse them, then keep
            # only the date portion as a string
            df["CreatedDate"] = pd.to_datetime(df["CreatedDate"])
            df["CreatedDate"] = df["CreatedDate"].dt.strftime("%Y-%m-%d")
        return df

    def get_leads(self):
        try:
            desc = self.sf.Lead.describe()
        except SalesforceExpiredSession as e:
            self.login()
            desc = self.sf.Lead.describe()

        field_names = [field["name"] for field in desc["fields"]]
        soql = "SELECT {} FROM Lead".format(",".join(field_names))
        query_result = self.sf.query_all(soql)
        leads = self.dict_to_df(query_result)
        return leads

    def get_opportunities(self):
        query_text = "SELECT CreatedDate, Name, StageName, ExpectedRevenue, Amount, LeadSource, IsWon, IsClosed, Type, Probability FROM Opportunity"
        try:
            query_result = self.sf.query(query_text)
        except SalesforceExpiredSession as e:
            self.login()
            query_result = self.sf.query(query_text)
        opportunities = self.dict_to_df(query_result)
        return opportunities

    def get_cases(self):
        query_text = "SELECT CreatedDate, Type, Reason, Status, Origin, Subject, Priority, IsClosed, OwnerId, IsDeleted, AccountId FROM Case"
        try:
            query_result = self.sf.query(query_text)
        except SalesforceExpiredSession as e:
            self.login()
            query_result = self.sf.query(query_text)

        cases = self.dict_to_df(query_result)
        return cases

    def get_contacts(self):
        query_text = "SELECT Id, Salutation, FirstName, LastName FROM Contact"
        try:
            query_result = self.sf.query(query_text)
        except SalesforceExpiredSession as e:
            self.login()
            query_result = self.sf.query(query_text)

        contacts = self.dict_to_df(query_result, False)
        return contacts

    def get_users(self):
        query_text = "SELECT Id,FirstName, LastName FROM User"
        try:
            query_result = self.sf.query(query_text)
        except SalesforceExpiredSession as e:
            self.login()
            query_result = self.sf.query(query_text)

        users = self.dict_to_df(query_result, False)
        return users

    def get_accounts(self):
        query_text = "SELECT Id, Name FROM Account"
        try:
            query_result = self.sf.query(query_text)
        except SalesforceExpiredSession as e:
            self.login()
            query_result = self.sf.query(query_text)

        accounts = self.dict_to_df(query_result, False)
        return accounts

    def add_lead(self, query):
        try:
            self.sf.Lead.create(query)
        except SalesforceExpiredSession as e:
            self.login()
            self.sf.Lead.create(query)
        return 0

    def add_opportunity(self, query):
        try:
            self.sf.Opportunity.create(query)
        except SalesforceExpiredSession as e:
            self.login()
            self.sf.Opportunity.create(query)
        return 0

    def add_case(self, query):
        try:
            self.sf.Case.create(query)
        except SalesforceExpiredSession as e:
            self.login()
            self.sf.Case.create(query)
        return 0
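
A short usage sketch for sf_Manager, assuming the USERNAME, PASSWORD and TOKEN environment variables are set and pandas is installed; all field values below are placeholders:

# Illustrative usage of the class above.
manager = sf_Manager()

leads = manager.get_leads()            # every Lead field, as a DataFrame
opportunities = manager.get_opportunities()
print(leads.shape, opportunities.shape)

# LastName and Company are the minimum required fields for a Lead
manager.add_lead({"LastName": "Doe", "Company": "Acme"})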
def sync_school_data(self, request, queryset):
	for setting in queryset:
		rows_updated = queryset.update(
	        sync_school_date = timezone.now(),
	    )
		if setting.sf_active == True:
			sfusername = setting.sf_username
			sfpassword = setting.sf_password
			sftoken = setting.sf_token
			sftype = setting.sf_type
			
			if sftype == 'Production':
				sftypesetup = False 
			else:
				sftypesetup = True

			cursor = connection.cursor()

			query = ("SELECT id, country_id, name FROM data_district")
			cursor.execute(query)

			for district in cursor:
				
				salesforce_login = {'username':sfusername, 'password':sfpassword, 'security_token':sftoken, 'sandbox':sftypesetup}
				sf = Salesforce(**salesforce_login)
				
				district = str(district).replace("(","").replace("u'","").replace("')","").replace(")","")
				a,b,c = district.split(",")
				districtname = c[1:]
				countryid = b[1:].replace("L","")
				districtid = a.replace("L","")

				schools = sf.query_all("SELECT Id, Name, Geo_Point__Latitude__s, Geo_Point__Longitude__s, School_Type__c, Date_Camfed_began_support__c, Active_Partner_School__c FROM School__c where DistrictName__c='%s'" % districtname)
				for x in schools["records"]:
					schoolname = str(x["Name"])
					sfrecordid = str(x["Id"])
					slug = str(x["Id"])
					schooltype = str(x["School_Type__c"])
					time = str(x["Date_Camfed_began_support__c"])
					if time == 'None':
						datecamfed = datetime.strptime('1950-01-01' , '%Y-%m-%d')
						print('none value: ' + time)
					else:
						datecamfed = datetime.strptime(time , '%Y-%m-%d')
						print('value: ' + time)
					
					active = bool(x["Active_Partner_School__c"])
					geolatitude = str(x["Geo_Point__Latitude__s"])
					geolongitude = str(x["Geo_Point__Longitude__s"])

					try:
						add_objects = ("INSERT INTO data_school "
					              "(name,sfid,country_id,district_id,slug,schooltype,date_support_began,active,geolatitude,geolongitude) "
					              "VALUES (%(name)s,%(sfid)s,%(country_id)s,%(district_id)s,%(slug)s,%(schooltype)s,%(date_support_began)s,%(active)s,%(geolatitude)s,%(geolongitude)s)")
						data_objects = {
						    'name' : schoolname,
				    		'sfid' : sfrecordid,
					    	'country_id' : countryid,
					    	'district_id' : districtid,
					    	'slug' : slug,
					    	'schooltype' : schooltype,
					    	'date_support_began' : datecamfed,
					    	'active' : active,
					    	'geolatitude' : geolatitude,
							'geolongitude' : geolongitude,
						}
						cursor.execute(add_objects,data_objects)							

					except:
						# INSERT failed (typically a duplicate sfid): fall back to UPDATE
						add_objects = ("UPDATE data_school "
					              "SET name=%(name)s,sfid=%(sfid)s,country_id=%(country_id)s,district_id=%(district_id)s,slug=%(slug)s,schooltype=%(schooltype)s,date_support_began=%(date_support_began)s,active=%(active)s,geolatitude=%(geolatitude)s,geolongitude=%(geolongitude)s "
					              "WHERE sfid=%(sfid)s")
						data_objects = {
						    'name' : schoolname,
				    		'sfid' : sfrecordid,
					    	'country_id' : countryid,
					    	'district_id' : districtid,
					    	'slug' : slug,
					    	'schooltype' : schooltype,
					    	'date_support_began' : datecamfed,
					    	'active' : active,
					    	'geolatitude' : geolatitude,
							'geolongitude' : geolongitude,
						}
						cursor.execute(add_objects,data_objects)
						

	self.message_user(request, "Your Schools have been synchronised")
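
The INSERT-then-UPDATE-on-exception pattern above works, but the bare except also swallows unrelated database errors. If the backing database is MySQL and data_school.sfid carries a UNIQUE index (an assumption, not shown here), a single upsert is tighter; a minimal sketch:

# Sketch only: requires MySQL and a UNIQUE index on data_school.sfid.
add_objects = ("INSERT INTO data_school (name, sfid, slug) "
               "VALUES (%(name)s, %(sfid)s, %(slug)s) "
               "ON DUPLICATE KEY UPDATE name=%(name)s, slug=%(slug)s")
cursor.execute(add_objects, {'name': schoolname, 'sfid': sfrecordid, 'slug': slug})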
Example No. 20
class SFClient:
    def __init__(self, username: str, password: str, sec_token: str):
        self.client = Salesforce(username=username,
                                 password=password,
                                 security_token=sec_token)
        self.batch_size_limit = 2000

    @staticmethod
    def from_config():
        return SFClient(config.Salesforce.username, config.Salesforce.password,
                        config.Salesforce.security_token)

    def execute_query(self, soql: str):
        return self.client.query_all(soql)

    def get_accounts(self, limit: int = 0):
        soql = "SELECT AccountId, Transcripts_Type__c, count(id) FROM Contact " \
               "WHERE Status_of_User__c IN ('Active', 'Trial') GROUP BY AccountId, Transcripts_Type__c"

        print('Downloading Contact Agg from Salesforce...', end='')
        response = self.client.query_all(soql)
        print('Done!')
        contact_agg = response['records']

        soql = "SELECT Id, Name, Transcript_Provider__c FROM Account " \
               "WHERE Customer_Status__c IN ('Client', 'Broker Client', 'Paid Trial')"

        if limit > 0:
            soql += f" LIMIT {limit}"

        print('Downloading Accounts from Salesforce...', end='')
        response = self.client.query_all(soql)
        print('Done!')
        accounts = response['records']

        accts = []
        for a in accounts:
            ac = {
                "id": a['Id'],
                "name": a['Name'],
                "ts_prov": a['Transcript_Provider__c'],
                "ts_types": []
            }

            # c_group = itertools.takewhile(lambda c: c['AccountId'] == ac['id'], contact_agg)
            c_group = [c for c in contact_agg if c['AccountId'] == ac['id']]
            for c_agg in c_group:
                if c_agg['Transcripts_Type__c'] not in ac['ts_types']:
                    ac['ts_types'].append(c_agg['Transcripts_Type__c'])

            accts.append(ac)

        return accts

    def get_contacts(self, limit: int = 0):
        #soql = "SELECT Id, CreatedDate, PW_Last_Changed__c, (SELECT CreatedDate FROM Histories WHERE Field = 'Password__c' " \
        #"ORDER BY CreatedDate DESC LIMIT 1) FROM Contact WHERE Password__c != null " \
        # "AND PW_Last_Changed__c = null AND Status_of_User__c IN ('Active', 'Trial')"

        soql = "SELECT Id, AccountId, Email FROM Contact WHERE Email != null"

        if limit > 0:
            soql += f" LIMIT {limit}"

        print('Downloading Contacts from Salesforce...', end='')
        response = self.client.query_all(soql)
        print('done!')

        return [u for u in response['records']]

    def get_all_pb_processes(self):
        query = 'Select Id,ActiveVersion.VersionNumber,LatestVersion.VersionNumber,DeveloperName From FlowDefinition'
        response = self.query_tooling_api(query)

        return {pb['Id']: pb for pb in response['records']}

    def query_tooling_api(self, query):
        cleaned_query = urllib.parse.quote_plus(query)
        data = self.client.restful(path=f'tooling/query/?q={cleaned_query}')
        return data

    def toggle_pb_process(self, process_id, version_num=None):
        pb = {'Metadata': {'activeVersionNumber': version_num}}

        pb_str = jsonpickle.encode(pb, unpicklable=False)
        response = None

        try:
            # The response coming from Salesforce is apparently malformed and fails to parse properly
            response = self.client.restful(
                path=f'tooling/sobjects/FlowDefinition/{process_id}/',
                method='PATCH',
                data=pb_str)
        except Exception as ex:
            if 'Expecting value' not in str(ex):
                print(ex)
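
A possible way to drive the Process Builder helpers on SFClient, assuming config.Salesforce carries the credentials that from_config expects; the version numbers below are placeholders:

# Illustrative usage; process IDs and versions are placeholders.
client = SFClient.from_config()

for pb_id, pb in client.get_all_pb_processes().items():
    active = pb['ActiveVersion']['VersionNumber'] if pb.get('ActiveVersion') else None
    print(pb['DeveloperName'], 'active version:', active)

# Clearing the active version deactivates a process; passing one reactivates it.
# client.toggle_pb_process(pb_id)                  # deactivate
# client.toggle_pb_process(pb_id, version_num=3)   # reactivate version 3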
class SalesforceOutputService():
    error_msg = None
    topLevel = None 
    sf = None
    def __init__(self, username, password, token, results, topLevel):
        self.topLevel = topLevel
        
        try:
            self.sf = Salesforce(username=username, password=password, security_token=token)
        except Exception:
            self.error_msg = "Invalid Salesforce credentials"
            return
        
        if not self.sf:
            self.error_msg = "Invalid Salesforce credentials"
        else:
            try:
                ConfigManager().cache_sf_creds(username, password, token)
                threading.Thread(target=self.async_update_sf, args=(results,)).start()
            except Exception:
                ErrorLogService().log_error("An error occurred, see log file for more information",
                                            "High", traceback.format_exc())

    def async_update_sf(self, results):
        """ Thread function to update salesforce with results """
        self.update_sf_helper(results)
        self.topLevel.processing_complete("")

    def update_sf_helper(self, results):
        """ Updates Account and PRJ__c in salesforce """
        cfg_mgr = ConfigManager()
        sf_obj_id_name = cfg_mgr.get_sf_account_id_name()
        sf_article_id_name = cfg_mgr.get_sf_article_id_name()
        sf_person_obj_lbl = cfg_mgr.get_sf_person_obj_lbl()
        sf_article_obj_lbl = cfg_mgr.get_sf_article_obj_lbl()
        sf_person_obj = getattr(self.sf, sf_person_obj_lbl)

        sf_articles = self.sf.query_all(cfg_mgr.sf_select_PRJs_query())
        articles_to_update = {}
        for result in results:
            try:
                if result.external_data is not None and sf_obj_id_name in result.external_data:
                    sf_id = (result.external_data)[sf_obj_id_name]
                    stats = result.stats.todict()
                    if stats is not None:  
                        sf_person_obj.update(sf_id, stats)
                    articles_dict = self.map_sf_id_to_article(sf_article_id_name, result.articles, sf_articles)
                    articles_to_update.update(articles_dict)                        
                else:
                    msg = "{}: Error processing object: {}.\nIs the Salesforce Id missing from the input CSV?".format(
                                sf_person_obj_lbl, pprint.pformat(result))
                    ErrorLogService().log_error(msg, "Medium")
                    self.topLevel.parent.add_error(msg)
            except Exception:
                ErrorLogService().log_error("Error updating Scholar: {}".format(
                            pprint.pformat(result)), "High", traceback.format_exc())
        try:
            self.update_articles(articles_to_update, sf_article_id_name, sf_article_obj_lbl)
        except Exception:
            ErrorLogService().log_error("Error updating articles", 
                                        "High", traceback.format_exc())

    def map_sf_id_to_article(self, sf_article_id_name, scholar_articles, sf_articles):
        """ Maps the salesforce Id for an article onto Google Scholar articles """
        articles_dict = {}
        for sf_article in sf_articles['records']:
            for scholar_article in scholar_articles:
                if sf_article[sf_article_id_name].split(':')[1] == scholar_article.articleid.split(':')[1]:
                    articles_dict[sf_article['Id']] = scholar_article
        return articles_dict


    def update_articles(self, articles_dict, article_id_name, sf_article_obj_lbl):
        """ Updates PRJ__c objects (articles) in Salesforce """
        if articles_dict is None or not articles_dict:
            return

        sf_article_obj = getattr(self.sf, sf_article_obj_lbl)
        for article_id in articles_dict:
            article_to_update = (articles_dict[article_id]).to_PRJ_dict()
            sf_article_obj.update(article_id, article_to_update)
class LeadScoring:
    def __init__(self, username, password, organizationId):
        logging.debug("Initiating Salesforce object...")
        self.sf = Salesforce(username=username,
                             password=password,
                             organizationId=organizationId)
        logging.debug("Initiating Salesforce object... Done")
        self.data = None
        self.ObjectCreationDateTimestamp = datetime.now()

    def QueryData(self,
                  Query,
                  Query_opp=Query_opp,
                  stage="initial",
                  sitename=sitename,
                  daynumber=daynumber,
                  exlude_new_days=exlude_new_days):

        assert stage == "initial" or stage == "loop"
        logging.info("Running queries...")
        logging.debug("Running Lead query...")
        self.data = self.sf.query_all(Query)
        logging.debug("Running Lead query... Done")

        if stage == "initial":
            logging.debug("Running Opportunity query...")
            self.data_opp = self.sf.query_all(Query_opp)
            logging.debug("Running Opportunity query... Done")

        logging.info("Running queries... Done")

        if len(self.data["records"]) == 0:
            logging.info("Query empty, exiting function...")
            return

        assert len(self.data["records"]) > 0
        self.data = [[item for item in self.data["records"][0].keys()][1:]] + [
            filtered_row[1:]
            for filtered_row in [[row for row in item.values()]
                                 for item in self.data["records"]]
        ]

        if stage == 'initial':
            assert len(self.data_opp["records"]) > 0
            self.data_opp = [[
                item for item in self.data_opp["records"][0].keys()
            ][1:]] + [
                filtered_row[1:]
                for filtered_row in [[row for row in item.values()]
                                     for item in self.data_opp["records"]]
            ]

        #numpy.isnan(number)

        #Try to convert numeric-looking strings to floats
        def process_data(data_to_process):
            logging.info("Processing SF Query Data...")
            for Y_key, y_item in enumerate(data_to_process):
                for x_key, x_item in enumerate(y_item):

                    #Process None types
                    if x_item is None:
                        data_to_process[Y_key][x_key] = "NO_DATA"

                    #Process NaN floats
                    elif type(x_item) == float:
                        if math.isnan(x_item):
                            data_to_process[Y_key][x_key] = "NO_DATA"

                    #Process booleans: convert to 1.0/0.0
                    elif type(x_item) == bool:
                        if x_item:
                            data_to_process[Y_key][x_key] = 1.0
                        else:
                            data_to_process[Y_key][x_key] = 0.0

                    #Process integers
                    elif type(x_item) == int:
                        data_to_process[Y_key][x_key] = float(
                            data_to_process[Y_key][x_key])

                    #Process empty strings
                    elif x_item.strip() == "":
                        data_to_process[Y_key][x_key] = "NO_DATA"

                    elif x_item.isdigit():
                        data_to_process[Y_key][x_key] = float(
                            data_to_process[Y_key][x_key])
            logging.info("Processing SF Query Data... Done")
            return data_to_process

        self.data = process_data(self.data)
        self.data = pd.DataFrame(self.data[1:], columns=self.data[0])
        #self.data.to_csv("original_testing_v2.csv")

        if stage == 'initial':
            #Make opportunity object and later join to main leads
            self.leads = pd.DataFrame(
                self.data[(self.data["IsConverted"] == 0)
                          & (self.data["Lead_score_timestamp__c"] !=
                             self.data["Lead_score_timestamp_2__c"])])
            self.data_opp = process_data(self.data_opp)
            self.data_opp = pd.DataFrame(self.data_opp[1:],
                                         columns=self.data_opp[0])

            #Replace opportunity ID with outcome
            if "ConvertedOpportunityId" in list(self.data.columns.values):
                self.data = self.data.join(self.data_opp.set_index('Id'),
                                           on='ConvertedOpportunityId')
                #self.data['outcome'] = np.where(self.data['IsWon'] == True, 1, 0)
                self.data['outcome'] = np.where(self.data["IsConverted"] == 1,
                                                1, 0)
                self.data = self.data[((self.data["IsConverted"] == 0) & (
                    (self.data["Status"] == "Unqualified") |
                    (self.data["Status"] == "Not converted"))) |
                                      (self.data["IsConverted"] == 1)]
            else:
                raise ValueError(
                    "please add field into Leads table: ConvertedOpportunityId"
                )
        elif stage == 'loop':
            self.leads = pd.DataFrame(
                self.data[(self.data["IsConverted"] == 0)
                          & (self.data["Lead_score_timestamp__c"] !=
                             self.data["Lead_score_timestamp_2__c"])])
            delattr(self, 'data')
        else:
            raise ValueError("Can not recognise operation stage")
            #Testing save
            #self.leads = pd.DataFrame(self.data)
            #self.leads.to_csv("Leads_original_3.csv")

            #Additional filtering layer to get only with known outcomes
            #self.data = self.data[(self.data["IsWon"] == True) | (self.data["isLost__c"] == True)]
            #self.data = self.data[((self.data["IsConverted"] == 0) & ((self.data["Status"] == "Unqualified") | (self.data["Status"] == "Not converted"))) | (self.data["IsConverted"] == 1)]
            #self.data.to_csv("original.csv")
            #status

    def Reframe_data_optional(self, stage="initial"):
        logging.info("Reframming data fields...")
        assert stage == "initial" or stage == "loop" or stage == 'All_leads'

        #This step deals with fields that have too many distinct labels (e.g. city)
        #Only the most frequent values are kept; everything else is relabeled as NO_DATA

        #'IP_city__c'

        def field_reframe(Reframe_field, Data_object, field, sample_size):
            logging.debug("Reframing field {} ...".format(str(field)))
            field_index = list(
                Data_object.groupby([field])['outcome'].sum().sort_values(
                    ascending=False).head(sample_size).index.values)
            #field_values = list(Data_object.groupby([field])['outcome'].sum().sort_values(ascending=False).head(sample_size).values)

            for key, index in enumerate(field_index):
                Reframe_field[index] = index

            ItemsToReframe = {
                x: "NO_DATA"
                for x in list(Data_object[field].values)
                if x not in list(Reframe_field.keys())
            }
            logging.debug("Items to reframe: {}".format(
                str(len(ItemsToReframe))))
            if len(ItemsToReframe) > 0:
                Data_object[field] = Data_object[field].replace(ItemsToReframe)
            else:
                logging.debug(
                    "Skipping reframing: nothing to reframe, the requested top items already cover every unique value"
                )

            logging.debug("Reframing field {} ...".format(str(field)) + "Done")
            return Reframe_field, Data_object

        def utm_term_reframe_separate(Reframe_field, Data_object, field):
            logging.debug("Reframing field {} ...".format(str(field)))
            field_index = list(Data_object["utm_term__c"].values)
            #list(Data_object[(Data_object['utm_term__c'].str.contains("tour")) | (Data_object['utm_term__c'].str.contains("cruise")) | (Data_object['utm_term__c'].str.contains("vacation")) | (Data_object['utm_term__c'].str.contains("trip")) | (Data_object['utm_term__c'].str.contains("travel"))]["utm_term__c"].values)

            for key, item in enumerate(field_index):
                if "tour" in item:
                    Reframe_field[item] = "tour"
                elif "cruise" in item:
                    Reframe_field[item] = "cruise"
                elif "vacation" in item:
                    Reframe_field[item] = "vacation"
                elif "trip" in item:
                    Reframe_field[item] = "trip"
                elif "travel" in item:
                    Reframe_field[item] = "travel"
                else:
                    Reframe_field[item] = "NO_DATA"

            Data_object[field] = Data_object[field].replace(Reframe_field)
            logging.debug("Reframing field {} ...".format(str(field)) + "Done")
            return Reframe_field, Data_object

        if stage == "initial":
            self.Reframe_field = dict()
            self.Reframe_field['IP_city__c'] = dict()
            self.Reframe_field['IP_city__c'], self.data = field_reframe(
                self.Reframe_field['IP_city__c'], self.data, 'IP_city__c', 100)

            self.Reframe_field['Country'] = dict()
            self.Reframe_field['Country'], self.data = field_reframe(
                self.Reframe_field['Country'], self.data, 'Country', 100)

            self.Reframe_field['utm_term__c'] = dict()
            self.Reframe_field[
                'utm_term__c'], self.data = utm_term_reframe_separate(
                    self.Reframe_field['utm_term__c'], self.data,
                    'utm_term__c')

            #city_index = list(self.data.groupby(['IP_city__c'])['outcome'].sum().sort_values(ascending=False).head(100).index.values)
            #city_values = list(self.data.groupby(['IP_city__c'])['outcome'].sum().sort_values(ascending=False).head(100).values)
            #Reframe_IP_city__c = dict()
            #for key, index in enumerate(city_index):
            #    Reframe_IP_city__c[index] = city_values[key]

            #ItemsToReframe = {x:"NO_DATA" for x in list(self.data["IP_city__c"].values) if x not in list(Reframe_IP_city__c.keys())}
            #self.data['IP_city__c'] = self.data['IP_city__c'].replace(ItemsToReframe)

        if stage == "All_leads" or stage == 'loop':
            if self.leads.shape[0] == 0:
                logging.info("No leads to score, exiting function")
                return

            ItemsToReframe = {
                x: "NO_DATA"
                for x in list(self.leads['IP_city__c'].values)
                if x not in list(self.Reframe_field['IP_city__c'].keys())
            }
            self.leads['IP_city__c'] = self.leads['IP_city__c'].replace(
                ItemsToReframe)

            ItemsToReframe = {
                x: "NO_DATA"
                for x in list(self.leads['Country'].values)
                if x not in list(self.Reframe_field['Country'].keys())
            }
            self.leads['Country'] = self.leads['Country'].replace(
                ItemsToReframe)

            self.Reframe_field['utm_term__c'] = dict()
            self.Reframe_field[
                'utm_term__c'], self.leads = utm_term_reframe_separate(
                    self.Reframe_field['utm_term__c'], self.leads,
                    'utm_term__c')

            #ItemsToReframe = {x:"NO_DATA" for x in list(self.leads['utm_term__c'].values) if x not in list(self.Reframe_field['utm_term__c'].keys())}
            #self.leads['utm_term__c'] = self.leads['utm_term__c'].replace(ItemsToReframe)

            #utm_term__c
            #city_index = list(self.leads.groupby(['IP_city__c'])['outcome'].sum().sort_values(ascending=False).head(100).index.values)
            #city_values = list(self.leads.groupby(['IP_city__c'])['outcome'].sum().sort_values(ascending=False).head(100).values)
            #Reframe_IP_city__c = dict()
            #for key, index in enumerate(city_index):
            #    Reframe_IP_city__c[index] = city_values[key]

            #ItemsToReframe = {x:"NO_DATA" for x in list(self.leads["IP_city__c"].values) if x not in list(Reframe_IP_city__c.keys())}
            #self.leads['IP_city__c'] = self.leads['IP_city__c'].replace(ItemsToReframe)

        logging.info("Reframming data fields... Done")

    def mapfields(self,
                  column_list_to_map,
                  numeric_column_list_to_use,
                  labelfields,
                  stage="initial"):

        assert stage == "initial" or stage == "loop" or stage == 'All_leads'

        logging.info("Mapping fields for {} stage".format(stage))

        if stage == "initial":
            self.data_map = dict()

            for col in column_list_to_map:
                self.data_map[col] = {}

            #Map Non-Numeric Data
            #First convert to pandas dataframe whole dataset

            columns_to_keep = column_list_to_map + numeric_column_list_to_use + labelfields

            #self.data = pd.DataFrame(self.data[1:], columns = self.data[0])
            self.data = self.data.drop([
                x for x in list(self.data.columns.values)
                if x not in columns_to_keep
            ],
                                       axis=1)

            #Start mapping: build the full code map per column, then apply it
            #once (replacing inside the inner loop can chain-remap values that
            #were already encoded)
            for key in self.data_map.keys():
                for counter, uniqueitem in enumerate(self.data[key].unique()):
                    self.data_map[key][uniqueitem] = counter
                self.data[key] = self.data[key].replace(self.data_map[key])

        elif stage == "All_leads" or stage == 'loop':
            if self.leads.shape[0] == 0:
                logging.info("No leads to score, exiting function")
                return

            columns_to_keep = ["Id"] + [
                "Lead_score_timestamp__c"
            ] + column_list_to_map + numeric_column_list_to_use

            self.leads = self.leads.drop([
                x for x in list(self.leads.columns.values)
                if x not in columns_to_keep
            ],
                                         axis=1)

            for key in self.data_map.keys():
                logging.debug(key)
                self.ReplaceWithoutMapping = {}
                self.ReplaceWithoutMapping = {
                    x: self.data_map[key]["NO_DATA"]
                    for x in list(self.leads[key].values)
                    if x not in list(self.data_map[key].keys())
                }
                self.leads[key] = self.leads[key].replace(self.data_map[key])
                if len(self.ReplaceWithoutMapping.keys()) > 0:
                    self.leads[key] = self.leads[key].replace(
                        self.ReplaceWithoutMapping)

        logging.info("Mapping fields for {} stage".format(stage) + "...Done")

    def outlier_specification(self, MapToNumbers, NoMapping):

        #To keep predictions tied to the training data, data to be scored must adhere to the training data's outlier bounds
        #This only needs to run once, before initial training; do not rerun once the model has been built

        logging.info("Running outlier specification...")

        # Store the spec under a separate attribute name: assigning to
        # self.outlier_specification would shadow this method on the instance
        if not hasattr(self, 'outlier_spec'):
            self.outlier_spec = None

        self.outlier_spec = {
            "Device__c": {
                "min": self.data["Device__c"].min(),
                "max": self.data["Device__c"].max()
            },
            #"Destination__c":{"min": self.data["Destination__c"].min(), "max": self.data["Destination__c"].max()},
            "Country": {
                "min": self.data["Country"].min(),
                "max": self.data["Country"].max()
            },
            "IP_city__c": {
                "min": self.data["IP_city__c"].min(),
                "max": self.data["IP_city__c"].max()
            },
            "Created_weekday__c": {
                "min": self.data["Created_weekday__c"].min(),
                "max": self.data["Created_weekday__c"].max()
            },
            "Last_session_source__c": {
                "min": self.data["Last_session_source__c"].min(),
                "max": self.data["Last_session_source__c"].max()
            },
            "LeadSource": {
                "min": self.data["LeadSource"].min(),
                "max": self.data["LeadSource"].max()
            },
            "HasOptedOutOfEmail": {
                "min": self.data["HasOptedOutOfEmail"].min(),
                "max": self.data["HasOptedOutOfEmail"].max()
            },
            "Have_you_been_to_this_destination_before__c": {
                "min":
                self.data["Have_you_been_to_this_destination_before__c"].min(),
                "max":
                self.data["Have_you_been_to_this_destination_before__c"].max()
            },
            "Sign_up_source__c": {
                "min": self.data["Sign_up_source__c"].min(),
                "max": self.data["Sign_up_source__c"].max()
            },
            "utm_source__c": {
                "min": self.data["utm_source__c"].min(),
                "max": self.data["utm_source__c"].max()
            },
            "utm_term__c": {
                "min": self.data["utm_term__c"].min(),
                "max": self.data["utm_term__c"].max()
            },
            #"RecordTypeId":{"min": self.data["RecordTypeId"].min(), "max": self.data["RecordTypeId"].max()},
        }

        for x in NoMapping:
            self.outlier_spec[x] = {}
            logging.debug(x)

            #Below we replace NO_DATA placeholders with zeros
            #This is appropriate for the selected fields, since 0 generally reads as the less positive value for unfilled data

            self.data[x] = self.data[x].replace("NO_DATA", 0.0)
            self.leads[x] = self.leads[x].replace("NO_DATA", 0.0)

            self.outlier_specification[x]["min"] = self.data[x].min()
            self.outlier_specification[x]["max"] = self.data[x].mean() + (
                2 * self.data[x].std())

        logging.info("Running outlier specification... Done")

    def runmodel(self, stage="initial"):

        assert stage == "initial" or stage == "All_leads" or stage == "loop"

        logging.info("Running model for {} stage...".format(stage))

        def standardize_field(datainput, colname, maxvalue, minvalue):

            assert datainput[colname].min() >= 0

            #Remove outliers
            datainput.loc[datainput[colname] > maxvalue, colname] = maxvalue
            datainput.loc[datainput[colname] < minvalue, colname] = minvalue

            #Standardize field
            datainput[colname] = datainput[colname] / maxvalue

            return datainput

        if stage == "initial":
            for col in [
                    x for x in list(self.data.columns.values) if x != 'outcome'
            ]:
                self.data = standardize_field(
                    self.data, col, self.outlier_spec[col]["max"],
                    self.outlier_spec[col]["min"])

            #split data into train and test
            X_train, X_test, y_train, y_test = train_test_split(
                self.data[self.data.columns.difference(['outcome'])].values,
                self.data['outcome'].values,
                test_size=0.33,
                random_state=42)

            #Build Model
            self.model = keras.Sequential([
                #keras.layers.Flatten(input_shape=(28, 28)),
                keras.layers.Dense(128, activation=tf.nn.relu),
                keras.layers.Dense(2, activation=tf.nn.softmax)
            ])

            self.model.compile(optimizer=tf.train.AdamOptimizer(),
                               loss='sparse_categorical_crossentropy',
                               metrics=['accuracy'])

            self.model.fit(X_train, y_train, epochs=5)

            #Test model
            test_loss, test_acc = self.model.evaluate(X_test, y_test)

            #Save predictions to a file of whole data set
            #prediction = self.model.predict(np.array(self.data[self.data.columns.difference(['outcome'])]))

            #prediction = self.model.predict(np.array(self.leads[self.leads.columns.difference(['Id'])]))

            #pd.DataFrame(prediction).to_csv("Leads_predicted.csv")
            logging.info('Test accuracy: ' + str(test_acc))
            print('Test accuracy:', test_acc)

            logging.debug("Deleting data used for learning.")
            delattr(self, 'data')
            return

            #Return units for training and testing
            #return X_train, X_test, y_train, y_test

        elif stage == "All_leads" or stage == "loop":
            if self.leads.shape[0] == 0:
                logging.info("No leads to score, exiting function")
                return
            for col in [
                    x for x in list(self.leads.columns.values) if x != 'Id'
                    and x != "id" and x != 'Lead_score_timestamp__c'
            ]:
                logging.debug(col)
                self.leads = standardize_field(
                    self.leads, col, self.outlier_spec[col]["max"],
                    self.outlier_spec[col]["min"])

            prediction = self.model.predict(
                np.array(self.leads[self.leads.columns.difference(
                    ['Id', 'Lead_score_timestamp__c'])]))
            #pd.DataFrame(prediction).to_csv("Leads_prediction_12.csv")

            #Prepare bulk update batches
            #rec_per_batch = 250
            #number_of_batches = math.ceil(self.leads.shape[0] / rec_per_batch)

            #open previously written leads

            with open(script_path + "InProgressWrittenLeads/" +
                      "written_leads.csv",
                      "r",
                      encoding="UTF-8") as openfile:
                rd = csv.reader(openfile)
                writtenleads = list(rd)

                if len(writtenleads) > 1:
                    logging.info(
                        "Found previously incomplete lead update, will continue..."
                    )
                    writtenleads = [x[0] for x in writtenleads]
                else:
                    logging.info("Starting new lead update from sratch...")

            for row in range(0, self.leads.shape[0]):
                logging.info(
                    "Progress: " +
                    str(round(float(row / self.leads.shape[0]), 2) * 100) +
                    "%")
                if np.argmax(prediction[row]) == 0:
                    value_to_push = min(x for x in prediction[row])
                elif np.argmax(prediction[row]) == 1:
                    value_to_push = max(x for x in prediction[row])

                #TODO: skip records that are already up to date
                if self.leads['Id'].iloc[row] not in writtenleads:
                    try:
                        logging.debug(
                            self.sf.Lead.update(
                                self.leads['Id'].iloc[row], {
                                    'Lead_score_change__c':
                                    str(round(value_to_push, 2))
                                }))
                        logging.debug(
                            self.sf.Lead.update(
                                self.leads['Id'].iloc[row], {
                                    'Lead_score_timestamp_2__c':
                                    self.leads['Lead_score_timestamp__c'].
                                    iloc[row]
                                }))
                        #Append wrote leads to a file for future reference
                        with open(script_path + "InProgressWrittenLeads/" +
                                  "written_leads.csv",
                                  "a",
                                  encoding="UTF-8",
                                  newline='') as openfile:
                            wr = csv.writer(openfile)
                            wr.writerows([[self.leads['Id'].iloc[row]]])
                        logging.debug("Updated lead: " +
                                      str(self.leads['Id'].iloc[row]) +
                                      " with score: " +
                                      str(round(value_to_push, 2)))
                        sleep(3)
                    except:
                        logging.debug("Failed updating lead: " +
                                      str(self.leads['Id'].iloc[row]) +
                                      " with score: " +
                                      str(round(value_to_push, 2)))
                        sleep(3)
                else:
                    logging.debug("Skipping already updated: " +
                                  str(self.leads['Id'].iloc[row]) +
                                  " with score: " +
                                  str(round(value_to_push, 2)))

            #At this point an update has been attempted for every lead, so
            #written_leads.csv is emptied; the next run will rescore from
            #scratch rather than resume
            logging.info(
                "Scoring leads operation completed, emptying in progress file."
            )
            with open(script_path + "InProgressWrittenLeads/" +
                      "written_leads.csv",
                      "w",
                      encoding="UTF-8",
                      newline='') as openfile:
                wr = csv.writer(openfile)
                wr.writerows([[""]])
            delattr(self, 'leads')

        logging.info("Running model for {} stage...".format(stage) + "Done")
Example No. 23
import openpyxl
from collections import OrderedDict
from simple_salesforce import Salesforce

sf = Salesforce(username='******',
                password='******',
                security_token='******')

metrics = sf.query_all(
    "SELECT Date__c, Ambassador__c, Office__c, AmbShifts__c, Shift_Length__c, Doors__c, Appointments__c FROM Metrics__c WHERE Ambassador__c != null AND Office__c != null AND (Date__c = LAST_N_WEEKS:4 OR Date__c = THIS_WEEK)"
)["records"]
Example No. 24
class Root(object):

   def __init__(self):
      self.login()

   def login(self):
      self._sf = Salesforce(username=USERNAME, password=PASSWORD, security_token=SECURITY_TOKEN, sandbox=True)

   @cherrypy.expose
   def index(self):
      return "hello"

   @cherrypy.expose
   def get_all_teachers(self, **kwargs):
      soql = "select id,name,e_mail__c from Teacher__c"
      try:
         results = self._sf.query_all(soql)
         return json.dumps({'ok': True, 'results': results})
      except Exception as ex:
         return json.dumps({'ok': False, 'error': ex.message})

   @cherrypy.expose
   def get_all_students(self, **kwargs):
      soql = 'select id,name,placement_address__c from Student__c'
      try:
         results = self._sf.query_all(soql)
         return json.dumps({'ok': True, 'results': results})
      except Exception as ex:
         return json.dumps({'ok': False, 'error': ex.message})

   @cherrypy.expose
   def get_teacher(self, teacher_id=None, **kwargs):
      if not teacher_id:
         return json.dumps({'ok': False, 'error': 'teacher_id not given'})
      try:
         teacher = self._sf.Teacher__c.get(teacher_id)
         results = {}
         for k in ['attributes', 'Name', 'E_mail__c']:
            results[k] = teacher[k]
         return json.dumps({'ok': True, 'results': results})
      except Exception as ex:
         return json.dumps({'ok': False, 'error': ex.message})

   @cherrypy.expose
   def get_student(self, student_id=None, **kwargs):
      if not student_id:
         return json.dumps({'ok': False, 'error': 'student_id not given'})
      try:
         student = self._sf.Student__c.get(student_id)
         results = {}
         for k in ['attributes', 'Name', 'Placement_Address__c']:
            results[k] = student[k]
         return json.dumps({'ok': True, 'results': results})
      except Exception as ex:
         return json.dumps({'ok': False, 'error': ex.message})

   @cherrypy.expose
   def save_lesson_log(self, submit=None, **kwargs):
      body = cherrypy.request.body.read()
      if not body:
         return json.dumps({'ok': False, 'error': 'no message body'})
      params = json.loads(body)
      if 'teacherName' not in params:
         return json.dumps({'ok': False, 'error': 'teacherName not given'})
      if 'studentName' not in params:
         return json.dumps({'ok': False, 'error': 'studentName not given'})

      if not submit:
         result = self.save_to_mongo(params)
         return json.dumps(result)

      # get teacher id
      soql = "select id from teacher__c where name='%s'" % (params['teacherName'])
      try:
         teacher = self._sf.query(soql)
      except Exception as ex:
         return json.dumps({'ok': False, 'error': ex.message})

      if len(teacher['records']) <= 0:
         return json.dumps({'ok': False, 'error': 'teacherName not found'})
      teacher_id = teacher['records'][0]['Id']

      # get student id
      soql = "select id from student__c where name='%s'" % (params['studentName'])
      try:
         student = self._sf.query(soql)
      except Exception as ex:
         return json.dumps({'ok': False, 'error': ex.message})

      if len(student['records']) <= 0:
         return json.dumps({'ok': False, 'error': 'studentName not found'})
      student_id = student['records'][0]['Id']

      # get lesson id
      soql = "select id from lesson__c where teacher__c='%s' and student__c='%s'" % (teacher_id, student_id)
      try:
         lesson = self._sf.query(soql)
      except Exception as ex:
         return json.dumps({'ok': False, 'error': ex.message})

      data = {'name': str(datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
              'Teacher_s_Name__c': teacher_id,
              'student__c': student_id,
              'life_skills__c': int(float(params.get('studentProgressMultipleSkills', '0'))),
              'music_education__c': int(float(params.get('studentMusicProgressRank', '0'))),
              'length_of_lesson__c': 0,
              'lesson_notes__c': json.dumps(params, indent=0)
             }

      if len(lesson['records']) > 0:
         data['lesson__c'] = lesson['records'][0]['Id']

      result = self._sf.Lesson_Logs__c.create(data)
      return json.dumps({'ok': True})

   @cherrypy.expose
   def get_lesson_logs(self, **kwargs):
      lessons = mongo_db.lessons.find()
      lessons_array = []
      for lesson in lessons:
         lessons_array.append({'id': str(lesson['_id']), 'params': lesson['params']})
      return json.dumps({'ok': True, 'results': lessons_array})

   @cherrypy.expose
   def get_lesson_log(self):
      body = cherrypy.request.body.read()
      if 'lesson_id' not in body:
         return json.dumps({'ok': False, 'error': 'lesson_id not given'})
      lesson_id = json.loads(body)['lesson_id']
      try:
         lesson = mongo_db.lessons.find_one({'_id': ObjectId(lesson_id)})
         return json.dumps({'ok': True, 'results': lesson['params']})
      except Exception as ex:
         return json.dumps({'ok': False, 'error': ex.message})

   def save_to_mongo(self, params):
      if params.get('lesson_id'):
         # update existing record
         id = params.get('lesson_id')
         mongo_db.lessons.update({'_id': ObjectId(id)}, {"$set": {'params': params}})
         return {'ok': True}
      else:
         # create new record
         id = mongo_db.lessons.insert({'params': params})
         return {'ok': True, 'id': str(id)}
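
The snippet never shows how Root gets served; a minimal cherrypy launcher under standard conventions (host and port are arbitrary choices):

# Minimal launcher for the handler class above.
if __name__ == '__main__':
   cherrypy.config.update({'server.socket_host': '0.0.0.0',
                           'server.socket_port': 8080})
   cherrypy.quickstart(Root())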
Example No. 25
class SalesForceHandler(object):
    def __init__(self,
                 instance=Config.SalesForce.INSTANCE,
                 username=Config.SalesForce.USERNAME,
                 password=Config.SalesForce.PASSWORD,
                 token=Config.SalesForce.TOKEN):

        self.__sf = Salesforce(instance=instance,
                               username=username,
                               password=password,
                               security_token=token)

    def get_user_id(self, name):
        query = "SELECT Id FROM User WHERE Name LIKE '" + name + "'"
        result = self.__sf.query_all(query)

        if 'records' in result:
            result = result["records"]

        if (len(result) == 1):
            return result[0]['Id']
        return None

    def get_shared_users(self, phone):
        shared_users = Config.SalesForce.SHARED

        print shared_users, type(phone)
        for i in shared_users:
            if phone in shared_users[i]:
                return i
        return None

    def get_number_contacts(self, phone):
        term = get_number_term(phone)
        query = "SELECT AccountId FROM Contact WHERE Phone LIKE '" + term +\
                "' OR MobilePhone LIKE '" + term + "'"

        results = self.__sf.query_all(query)
        if 'records' in results:
            results = results["records"]

        return len(results)

    def get_number_accounts(self, phone):

        term = get_number_term(phone)
        query = "SELECT Id FROM Account WHERE Phone LIKE '" + term + "'"

        results = self.__sf.query_all(query)
        if 'records' in results:
            results = results["records"]

        return len(results)

    def create_task(self, info):
        print "Creating task"
        task = self.__sf.Task.create({
            'Type': 'Called',
            'WhatId': info['AccountId'],
            'OwnerId': info['UserId'],
            'Subject': 'Call',
            'Status': 'Completed',
            'WhoId': info['ContactId'],
            'Description': 'A call has been logged automagically.',
            'Priority': 'Normal',
            'Summary__c': info['Summary'],
            'ActivityDate': time.strftime('%Y-%m-%d')
        })

        user = self.__sf.User.get(info['UserId'])
        name = user['FirstName']
        to = user['Email']
        task_record = self.__sf.Task.get(task['id'])

        account_name = self.__sf.Account.get(task_record['WhatId'])['Name']

        send_email(to, name, task['id'], account_name,
                   Config.SalesForce.INSTANCE)

    def get_account_id_from_account(self, phone):
        term = get_number_term(phone)

        query_account = "SELECT Id FROM Account WHERE Phone LIKE '"\
                        + term + "'"

        accounts = self.__sf.query_all(query_account)
        if 'records' in accounts:
            accounts = accounts["records"]

        if (len(accounts) == 1):
            return accounts[0]['Id']
        else:
            return None

    def get_account_id_from_contact(self, phone):
        term = get_number_term(phone)

        query_contact = "SELECT AccountId FROM Contact WHERE Phone LIKE '"\
                        + term + "'"

        accounts = self.__sf.query_all(query_contact)
        if 'records' in accounts:
            accounts = accounts["records"]

        if (len(accounts) == 1):
            return accounts[0]['AccountId']
        elif (len(accounts) > 1):
            account_id = accounts[0]['AccountId']
            for account in accounts:
                if account['AccountId'] != account_id:
                    return None
            return account_id
        else:
            return None

    def get_account_id_from_mobile(self, phone):
        term = get_number_term(phone)

        query_mobile = "SELECT AccountId FROM Contact WHERE MobilePhone LIKE '"\
                       + term + "'"

        accounts = self.__sf.query_all(query_mobile)
        if 'records' in accounts:
            accounts = accounts["records"]

        if (len(accounts) == 1):
            return accounts[0]['AccountId']
        elif (len(accounts) > 1):
            account_id = accounts[0]['AccountId']
            for account in accounts:
                if account['AccountId'] != account_id:
                    return None
            return account_id
        else:
            return None

    def get_account_id(self, phone):

        account_id = self.get_account_id_from_account(phone)
        if account_id:
            return account_id
        account_id = self.get_account_id_from_contact(phone)
        if account_id:
            return account_id
        account_id = self.get_account_id_from_mobile(phone)
        if account_id:
            return account_id
        return None

    def get_contact_id(self, phone):

        term = get_number_term(phone)

        query = "SELECT Id FROM Contact WHERE Phone LIKE '" + term +\
                "' OR MobilePhone LIKE '" + term + "'"

        results = self.__sf.query_all(query).get('records', [])

        if (len(results) == 1):
            return results[0]['Id']
        return None
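
# Hedged sketch, not part of the original class: the lookups above splice the
# phone term straight into SOQL. A small helper along these lines (name
# hypothetical) could escape the quotes first:
def escape_soql_literal(value):
    # Backslashes must be doubled before single quotes are escaped.
    return value.replace('\\', '\\\\').replace("'", "\\'")

# e.g. term = escape_soql_literal(get_number_term(phone))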
Ejemplo n.º 26
0
class TimecardEntry(object):

    def __init__(self, cfg="~/.pse.json"):

        self.cfg_file = os.path.expanduser(cfg)
        with open(self.cfg_file) as f:
            self.cfg = json.load(f)

        credential_store = self.cfg.get('credential_store', 'default')
        if credential_store == 'default':
            password = base64.b64decode(self.cfg["password"]).decode()
            security_token = self.cfg["token"]
        elif credential_store == 'keyring':
            password = keyring.get_password("salesforce_cli", f"{self.cfg['username']}_password")
            security_token = keyring.get_password("salesforce_cli", f"{self.cfg['username']}_token")
        else:
            raise ValueError(f"unknown credential_store: {credential_store}")

        self.sf = Salesforce(username=self.cfg["username"],
                             password=password,
                             security_token=security_token,
                             sandbox=self.cfg.get("sandbox", None),
                             client_id="FF"
                             )

        self.contact_id = self.get_contact_id(self.cfg["username"])
        self.assignments = self.get_assignments_active()
        self.global_project = self.get_global_project()
        self.ASSIGNMENTS_MAPPING = []

        today = date.today()
        day = today.strftime("%d-%m-%Y")
        self.get_week(day)

    def get_week(self, day):
        dt = datetime.strptime(day, "%d-%m-%Y")
        self.start = dt - timedelta(days=dt.weekday())
        self.end = self.start + timedelta(days=6)

    def safe_sql(self, sql):
        logger.debug(sql)
        try:
            return self.sf.query_all(sql)
        except Exception:
            logger.error("error on query:{}".format(sql))
            logger.error(sys.exc_info()[1])
            sys.exit(1)

    def list_timecard(self, details, start, end):
        self.assignments = self.get_assignments_all()
        fields = ["Id","Name",  "pse__Project__c", "pse__Assignment__c",
                  "pse__Monday_Hours__c",
                  "pse__Tuesday_Hours__c",
                  "pse__Wednesday_Hours__c",
                  "pse__Thursday_Hours__c",
                  "pse__Friday_Hours__c",
                  "pse__Status__c"]
        if details:
            base = ["OwnerId", "PROJECT_ID__c", "pse__Approved__c", "pse__Start_Date__c", "pse__End_Date__c"]
            fields = base + fields + ["CreatedById", "CreatedDate",  "IsDeleted", "LastModifiedById", "LastModifiedDate",
                               "LastReferencedDate", "LastViewedDate",
                               "pse__Audit_Notes__c", "pse__Billable__c",  "pse__Resource__c",
                               "pse__Location_Mon__c", "pse__Location_Tue__c", "pse__Location_Wed__c",
                               "pse__Location_Thu__c", "pse__Location_Fri__c",
                               "pse__Saturday_Hours__c", "pse__Saturday_Notes__c", "pse__Location_Sat__c",
                               "pse__Sunday_Hours__c", "pse__Sunday_Notes__c", "pse__Location_Sun__c",
                               "pse__Timecard_Notes__c", "pse__Submitted__c",
                               "pse__Monday_Notes__c", "pse__Tuesday_Notes__c",
                               "pse__Wednesday_Notes__c", "pse__Thursday_Notes__c",
                                "pse__Friday_Notes__c"]

        SQL = '''
            select
            {}
            from pse__Timecard_Header__c
            where
            pse__Start_Date__c = {} and pse__End_Date__c = {} and
            pse__Resource__c = '{}' '''.format(
            ", ".join(fields),
            start,
            end,
            self.contact_id,
        )
        results = self.safe_sql(SQL)
        rs = []
        if len(results["records"]) > 0:

            for r in results["records"]:
                r.pop("attributes", None)
                # adding Project name
                if r.get("pse__Assignment__c", "") in self.assignments.keys():
                    r["pse__Project_Name__c"] = self.assignments[r["pse__Assignment__c"]]["assignment_name"]
                if r.get("pse__Project__c", "") in self.global_project.keys():
                    r["pse__Project_Name__c"] = self.global_project[r["pse__Project__c"]]["project_name"]
                rs.append(r)
            return rs
        else:
            logger.warning("No time card")
            return []

    def get_contact_id(self, email):
        name_part = email.split("@")[0]
        r = self.safe_sql(
            "select Id, Name, Email from Contact where pse__Is_Resource__c = true and Email LIKE '{}@%'".format(
                name_part))
        return r["records"][0]["Id"]

    def get_timecard_id(self, timecard_name):
        r = self.safe_sql(
            "select Id from pse__Timecard_Header__c where Name = '{}'".format(
                timecard_name))
        return r["records"][0]["Id"]

    def get_assignments_all(self, contact_id=None):
        if not contact_id:
            contact_id = self.contact_id

        SQL = '''select Id, Name, pse__Project__c, pse__Project__r.Name, pse__Project__r.pse__Is_Billable__c from pse__Assignment__c
        where pse__Resource__c = '{}' and
        Open_up_Assignment_for_Time_entry__c = false and
        pse__Closed_for_Time_Entry__c = false
        '''.format(
            contact_id)

        return self.get_assignments(SQL)

    def get_assignments_active(self, contact_id=None):
        if not contact_id:
            contact_id = self.contact_id

        SQL = '''select Id, Name, pse__Project__c, pse__Project__r.Name, pse__Project__r.pse__Is_Billable__c from pse__Assignment__c
        where pse__Resource__c = '{}' and
        Open_up_Assignment_for_Time_entry__c = false and
        pse__Closed_for_Time_Entry__c = false and
        pse__Exclude_from_Planners__c = false and
        pse__End_Date__c > {}
        '''.format(
            contact_id,
            date.today().strftime("%Y-%m-%d")
            )

        return self.get_assignments(SQL)


    def get_assignments(self, SQL):

        results = self.safe_sql(SQL)
        assignments = {}
        for r in results["records"]:
            assignments[r["Id"]] = {"assignment_id": r["Id"],
                                    "assignment_name": r["Name"],
                                    "project_id": r["pse__Project__c"],
                                    "project_name": r["pse__Project__r"]["Name"],
                                    "billable": r["pse__Project__r"]["pse__Is_Billable__c"]}
        return assignments

    def get_global_project(self):

        SQL = '''select Id, Name, pse__Is_Billable__c
        from pse__Proj__c
        where pse__Allow_Timecards_Without_Assignment__c = true and pse__Is_Active__c = true
        '''
        results = self.safe_sql(SQL)
        rs = {}
        for r in results["records"]:
            rs[r["Id"]] = {
                "project_id": r["Id"],
                "project_name": r["Name"],
                "billable": r["pse__Is_Billable__c"]
            }
        return rs

    def delete_time_entry(self, id):
        try:
            self.sf.pse__Timecard_Header__c.delete(id)
        except Exception:
            logger.error("failed on deletion id:{}".format(id))
            logger.error(sys.exc_info()[1])
            sys.exit(1)

    def add_time_entry(self, assignment_id, day_n, hours, notes):

        self.assignment_id = assignment_id
        new_timecard = {
            "pse__Start_Date__c": self.start.strftime("%Y-%m-%d"),
            "pse__End_Date__c": self.end.strftime("%Y-%m-%d"),
            "pse__Resource__c": self.contact_id,
            "pse__Status__c": "Saved",
        }

        if self.assignment_id in self.assignments.keys():
            new_timecard["pse__Assignment__c"] = self.assignment_id
            new_timecard["pse__Project__c"] = self.assignments[self.assignment_id]["project_id"]
            new_timecard["pse__Billable__c"] = self.assignments[self.assignment_id]["billable"]
            SQL = '''select Id from pse__Timecard_Header__c
                where
                pse__Start_Date__c = {} and pse__End_Date__c = {} and
                pse__Resource__c = '{}' and
                pse__Assignment__c = '{}'  and
                pse__Project__c = '{}' and
                pse__Status__c not in ('Submitted', 'Approved')
                '''.format(
                self.start.strftime("%Y-%m-%d"),
                self.end.strftime("%Y-%m-%d"),
                self.contact_id,
                self.assignment_id,
                self.assignments[self.assignment_id]["project_id"],
            )
        else:
            # most probably a project without an assignment
            new_timecard["pse__Project__c"] = self.assignment_id
            new_timecard["pse__Billable__c"] = self.global_project[self.assignment_id]["billable"]
            SQL = '''select Id from pse__Timecard_Header__c
                where
                pse__Start_Date__c = {} and pse__End_Date__c = {} and
                pse__Resource__c = '{}' and
                pse__Project__c = '{}' and
                pse__Status__c not in ('Submitted', 'Approved')
                '''.format(
                self.start.strftime("%Y-%m-%d"),
                self.end.strftime("%Y-%m-%d"),
                self.contact_id,
                self.assignment_id,
            )

        new_timecard["pse__" + day_n + "_Hours__c"] = hours
        new_timecard["pse__" + day_n + "_Notes__c"] = notes

        results = self.safe_sql(SQL)
        logger.debug(json.dumps(new_timecard, indent=4))
        if len(results["records"]) > 0:
            logger.debug("required update")
            try:
                self.sf.pse__Timecard_Header__c.update(
                    results["records"][0]["Id"], new_timecard)
            except Exception:
                logger.error("failed on update")
                logger.error(sys.exc_info()[1])
                sys.exit(1)

        else:
            try:
                self.sf.pse__Timecard_Header__c.create(new_timecard)
            except Exception:
                logger.error("failed on creation")
                logger.error(sys.exc_info()[1])
                sys.exit(1)

    def submit_time_entry(self, id):
        data = {
            "pse__Submitted__c": True,
            "pse__Status__c": "Submitted",
        }
        try:
            self.sf.pse__Timecard_Header__c.update(id, data)
        except Exception:
            logger.error("failed on update")
            logger.error(sys.exc_info()[1])
            sys.exit(1)
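
# Hedged usage sketch -- the assignment Id and timecard name below are
# placeholders, and the ~/.pse.json config consumed by __init__ must exist:
if __name__ == '__main__':
    tc = TimecardEntry()
    tc.get_week('17-06-2024')  # point self.start/self.end at that week
    tc.add_time_entry('a0B000000000001AAA', 'Monday', 8.0, 'project work')
    tc.submit_time_entry(tc.get_timecard_id('TC-000123'))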
Ejemplo n.º 27
0
class SFPerson(object):
    def __init__(self):
        self.login()

    def login(self):
        # environment variables are strings, so compare explicitly rather than
        # relying on truthiness (the string 'False' would otherwise be truthy)
        sandbox = os.environ.get('SF_SANDBOX', 'false').lower() == 'true'

        self.connection = Salesforce(
            username=os.environ['SF_USERNAME'],
            password=os.environ['SF_PASSWORD'],
            security_token=os.environ['SF_TOKEN'],
            sandbox=sandbox
        )

    def registrations_sync(self, rows):
        """
        Push the registrations from the current date to CRM.
        """
        if not rows:
            return

        # Push the records to CRM
        for r in rows:
            sf_record = {
                'Contact__c': r['contactid'],
                'Event__c': r['externalid'],
                'Event_Date__c': r['event_date'].strftime('%Y-%m-%d'),
                'Status__c': r['status'],
            }
            
            if sf_record['Contact__c']:
                self.connection.Registration__c.upsert('ExternalId__c/%d' % r['registrationid'], sf_record)

    def person(self, from_date):
        """
        Get the updated person records from the CRM system.
        """
        # Query the CRM system
        if not from_date:
            from_string = '1980-01-01T00:00:00Z'
        else:
            from_string = from_date.strftime('%Y-%m-%dT%H:%M:%SZ')

        soql = """
            SELECT Id, FirstName, LastName, Child_Tag_Number__c, Account.Family_Tag__c, ExternalId__c, School_Year__c,
                Contact_Type__c, Kids_Group__c, Kids_Team__c, Birthdate, Medical_Info__c, Medical_Notes__c, Active__c,
                RecordType.Name, Gender__c, Marital_Status__c, MailingStreet, MailingCity, MailingPostalCode,
                MailingCountry, Phone, MobilePhone, Email, IsBaptised__c, Salvation__c, Partner__c, isKeyLeader__c
            FROM Contact
            WHERE LastModifiedDate >= %s
        """ % from_string

        result = self.connection.query_all(soql)
        records = []
        for r in result.get('records', []):
            address_lines = (r['MailingStreet'] or '').split('\n')

            rec = {
                'name': ('%s %s' % (r['FirstName'], r['LastName'])).strip(),
                'family_tag': int(r['Account']['Family_Tag__c']) if r['Account']['Family_Tag__c'] else None,
                'tagnumber': r['Child_Tag_Number__c'],
                'type': r['Contact_Type__c'],
                'kids_group': r['Kids_Group__c'],
                'kids_team': r['Kids_Team__c'],
                'school_year': int(r['School_Year__c']) if r['School_Year__c'] else None,
                'dob': r['Birthdate'],
                'medical_info': r['Medical_Info__c'],
                'medical_notes': r['Medical_Notes__c'],
                'territory': r['RecordType']['Name'],
                'firstname': r['FirstName'],
                'gender': r['Gender__c'],
                'marital_status': r['Marital_Status__c'],
                'lifegroup': None,
                'address1': address_lines[0],
                'address2': address_lines[1] if len(address_lines) > 1 else None,
                'city': r['MailingCity'],
                'postcode': r['MailingPostalCode'],
                'country': r['MailingCountry'],
                'home_phone': r['Phone'],
                'mobile_phone': r['MobilePhone'],
                'email': r['Email'],
                'baptised': r['IsBaptised__c'],
                'salvation': r['Salvation__c'],
                'partner': r['Partner__c'],
                'key_leader': r['isKeyLeader__c'],
                'personid': r['ExternalId__c'],
                'externalid': r['Id'],
            }
            if not r['Active__c']:
                rec['territory'] = 'Inactive'
            records.append(rec)

        return records

    def family(self, from_date):
        """
        Gets the family records from the CRM system that have changed since the from date.
        """
        # Query the CRM system
        if not from_date:
            from_string = '1980-01-01T00:00:00Z'
        else:
            from_string = from_date.strftime('%Y-%m-%dT%H:%M:%SZ')

        soql = """
            SELECT Id, Name, Salutation__c, Family_Tag__c, ExternalId__c, Active__c
            FROM Account
            WHERE LastModifiedDate >= %s
        """ % from_string

        result = self.connection.query_all(soql)
        records = []
        for r in result.get('records', []):
            rec = {
                'name': ('%s %s' % (r['Salutation__c'] or '', r['Name'])).strip(),
                'family_tag': int(r['Family_Tag__c']) if r['Family_Tag__c'] else None,
                'tagnumber': r['Family_Tag__c'],
                'territory': 'Kidswork' if r['Family_Tag__c'] else 'Congregation',
                'familyid': r['ExternalId__c'],
                'externalid': r['Id'],
            }
            if not r['Active__c']:
                rec['territory'] = 'Inactive'
            records.append(rec)

        return records

    def team_serving_options(self, from_date):
        """
        Query the CRM system and get all current team-serving groups
        """
        # Query the CRM system
        if not from_date:
            from_string = '1980-01-01T00:00:00Z'
        else:
            from_string = from_date.strftime('%Y-%m-%dT%H:%M:%SZ')

        soql = """
            SELECT Id, Name, ExternalId__c, IsActive__c
            FROM Team__c
            WHERE LastModifiedDate >= %s and IsActive__c = true
        """ % from_string

        result = self.connection.query_all(soql)
        records = []
        for r in result.get('records', []):
            rec = {
                'name': r['Name'],
                'groupsid': r['ExternalId__c'],
                'code': r['Id'],
            }
            records.append(rec)

        return records

    def person_membership(self, contact_id, group_code, membershipid, add_action=True):
        """
        Add or remove a 'team-serving' membership for a person.
        """
        sf_record = {
            'Contact__c': contact_id,
            'Team__c': group_code,
        }
        if add_action:
            sf_record['ExternalId__c'] = membershipid

        # Get the ID of the membership record
        soql = "select Id from ContactTeamLink__c where Contact__c='%s' and Team__c='%s'" % (contact_id, group_code)
        result = self.connection.query(soql)
        if len(result.get('records', [])) == 0:
            sf_link_id = None
        else:
            sf_link_id = result.get('records')[0]['Id']

        if add_action and sf_link_id:
            # Update the record
            del sf_record['Contact__c']
            del sf_record['Team__c']
            self.connection.ContactTeamLink__c.update(sf_link_id, sf_record)
        elif add_action and not sf_link_id:
            # Create a membership link
            self.connection.ContactTeamLink__c.create(sf_record)
        elif not add_action and sf_link_id:
            # Delete the record
            self.connection.ContactTeamLink__c.delete(sf_link_id)

        return {'response': 'Success', 'membership_id': sf_link_id}

    def person_update(self, personid, externalid, field, field_value):
        """
        Update the boolean flags on the person.
        """
        sf_record = {
            'ExternalId__c': personid,
        }
        if field == 'partner':
            sf_record['Partner__c'] = field_value
        elif field == 'key_leader':
            sf_record['isKeyLeader__c'] = field_value
        else:
            return {'response': 'Failed', 'message': 'Invalid field provided for the update'}

        self.connection.Contact.update(externalid, sf_record)
        return {'response': 'Success'}

    def events(self, event_id=None):
        """
        Get the events from Salesforce.
        """
        soql = "select Id, Name, Type__c from Event__c where Active__c = true"
        if event_id:
            soql += " and Id='%s'" % event_id
        result = self.connection.query_all(soql)
        records = []

        for r in result.get('records', []):
            rec = dict(r)
            records.append(rec)

        return records

    def event_attendees(self, event_id, event_date):
        """
        Get all the attendees for an event.
        """
        soql = """
            select Id, Event_Date__c, Status__c, Event__r.Name, Contact__r.Id, Contact__r.Firstname, Contact__r.Lastname, Contact__r.Contact_Type__c, Contact__r.Journey__c, Contact__r.ExternalId__c 
            from Registration__c
            where Event__r.Id = '%s' 
            and Event_Date__c = %s
        """ % (event_id, event_date)
        result = self.connection.query_all(soql)
        records = []

        for r in result.get('records', []):
            rec = dict(r)
            records.append(rec)

        return records

    def event_attendees_count(self, event_id, event_date):
        """
        Count all the attendees for an event.
        """
        soql = """
            select count(Id) 
            from Registration__c
            where Event__r.Id = '%s' 
            and Event_Date__c = %s
        """ % (event_id, event_date)
        result = self.connection.query_all(soql)
        records = []

        for r in result.get('records', []):
            rec = dict(r)
            records.append(rec)

        return records

    def find(self, name):
        """
        Search for a contact by name.
        """
        soql = """
            select Id, Name, Email, Phone, Gender__c, School_Year__c, Contact_Type__c, ExternalId__c, Journey__c
            from Contact
            where Name like '%%%s%%'
            order by LastName
        """ % name
        results = self.connection.query_all(soql)
        return results.get('records', [])

    def registration_remove(self, reg_id):
        """
        Delete the registration record, if it exists.
        """
        try:
            self.connection.Registration__c.delete(reg_id)
        except Exception:
            # the record may already be gone; treat that as success
            pass
        return {'response': 'Success'}

    def registration_add(self, event_id, event_date, event_status, people):
        """
        Add the registration record for the contact, if it doesn't exist.
        """
        contacts = ''
        for p in people:
            if len(contacts) == 0:
                contacts = "'%s'" % p
            else:
                contacts += ",'%s'" % p

        soql = """
            select Id, Contact__c  from Registration__c
            where Event__c = '%s'
            and Contact__c in (%s)
            and Event_Date__c = %s
        """ % (event_id, contacts, event_date)
        result = self.connection.query_all(soql)

        for r in result.get('records', []):
            # Update (and drop from the to-create list) people already registered
            if r['Contact__c'] in people:
                people.remove(r['Contact__c'])
                self.connection.Registration__c.update(
                    r['Id'],
                    {
                        'Event__c': event_id,
                        'Contact__c': r['Contact__c'],
                        'Event_Date__c': event_date,
                        'Status__c': event_status,
                    })

        # Create registrations for the people that do not have them
        for p in people:
            self.connection.Registration__c.create({
                    'Event__c': event_id,
                    'Contact__c': p,
                    'Event_Date__c': event_date,
                    'Status__c': event_status,
                })

    def registration_statuses(self):
        """
        Get the statuses of the registration.
        """
        values = []
        reg_meta = self.connection.Registration__c.describe()
        for f in reg_meta['fields']:
            if f['name'] == 'Status__c':
                for v in f['picklistValues']:
                    if v['active']:
                        values.append(v['value'])
                break

        return sorted(values)

    def person_by_id(self, sf_id):
        """
        Get the Salesforce person from their ID
        """
        soql = "select Id from Contact where Id = '%s'" % sf_id
        result = self.connection.query_all(soql)

        return result
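
# The describe()-based scan in registration_statuses() above generalizes to any
# picklist field; a hedged, free-standing helper (hypothetical name, not part
# of the original class):
def picklist_values(sobject, field_name):
    """Return the active picklist values for field_name on a simple_salesforce sobject."""
    for f in sobject.describe()['fields']:
        if f['name'] == field_name:
            return sorted(v['value'] for v in f['picklistValues'] if v['active'])
    return []

# e.g. picklist_values(SFPerson().connection.Registration__c, 'Status__c')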
Ejemplo n.º 28
0
columns = ['ABN__c', 'AccessRights__c', 'AccessScript__c', 'AccountSource', 'AnnualRevenue', 'Balance__c', 'BalanceActionsEmail__c',
    'BalanceChangeAction__c', 'BillingCity', 'BillingCountry', 'BillingInfo__c', 'BillingInfoNeeded__c', 'BillingLatitude',
    'BillingLongitude', 'BillingPlatformAccountId__c', 'BillingPlatformId__c', 'BillingPostalCode', 'BillingState', 'BillingStreet',
    'CaseSafeAccountID__c', 'CreatedById', 'CreatedDate', 'Culture__c', 'Currency__c', 'Description', 'DidAcceptCompanySizeLimit__c',
    'DidAcceptCoverageDoc__c', 'Domain__c', 'Fax', 'Geographical_Interest__c', 'Id', 'Industry', 'Is_Active__c', 'IsBlacklist__c',
    'IsDeleted', 'Jigsaw', 'JigsawCompanyId', 'LastActivity__c', 'LastActivityDate', 'LastActivityDetails__c', 'LastLoginFailure__c',
    'LastModifiedById', 'LastModifiedDate', 'LastReferencedDate', 'LastViewedDate', 'LicenceInfo__c', 'ManagedByNearmap__c',
    'MasterRecordId', 'Migrated__c', 'MirrorId__c', 'Name', 'NumberOfEmployees', 'OwnerId', 'ParentId', 'Password__c', 'PasswordClear__c',
    'Phone', 'Sector__c', 'SessionPolicy__c', 'ShippingCity', 'ShippingCountry', 'ShippingLatitude', 'ShippingLongitude', 'ShippingPostalCode',
    'ShippingState', 'ShippingStreet', 'SicDesc', 'Size_of_Business__c', 'Source_Addresses__c', 'SubscriptionNeedsSync__c', 'SystemModstamp',
    'TsAndCsInfo__c', 'Type', 'Type_of_Business__c', 'UsageLimitPolicy__c', 'Username__c', 'Vertical__c', 'WebData__c', 'Website',
    'ActivatedContract__c', 'ExpectedUsage__c', 'Hold_Correspondences__c', 'Mirror_Id__c', 'PlanCode__c', 'RecordTypeId', 'Region__c',
    'Estimated_Upsell_Amount__c', 'Marketing_Segment__c']

result = sf.query_all('''SELECT {} FROM Account
    WHERE CreatedDate = TODAY OR CreatedDate = YESTERDAY
    OR LastModifiedDate = TODAY OR LastModifiedDate = YESTERDAY
    '''.format(', '.join(columns)))

# Establish connection to SQL Server.  NOTE: this only works on Windows with an ODBC connection set up with DSN = 'SQL Server'

cnxn = pyodbc.connect('DSN=SQL Server') 
cursor = cnxn.cursor()

#cursor.execute("DROP TABLE Staging.STG.Account")
#cursor.execute("DROP TABLE Staging.STG.Lead")
#cursor.execute("DROP TABLE Staging.STG.Contact")
#cursor.execute("DROP TABLE Staging.STG.Opportunity")
#cursor.execute("DROP TABLE Staging.STG.Task")
#cursor.execute("DROP TABLE Staging.STG.Campaign")
#cursor.execute("DROP TABLE Staging.STG.AccountHistory")
#cursor.execute("DROP TABLE Staging.STG.CampaignMember")
Ejemplo n.º 29
0
csv_out = open(file_out, 'wb')

csv_handler = csv.writer(csv_out, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)

sf = Salesforce(password=SFPW, username=SFUSER, security_token=SF_TOKEN)

#connect to DB
#note that the client_flag allows the mysql module to access files on the computer
cnx = mysql.connector.connect(user=DBUSER, password=DBPW,
                              host=DBHOST,
                              database=DBNAME, client_flags=[ClientFlag.LOCAL_FILES])
cursor = cnx.cursor()

sosl = "SELECT %s FROM Opportunity" % ','.join(FIELD_DICT.values()) 

p = sf.query_all(sosl)
#for all records in the returned sql
index = 1
for record in p['records']:	
	row = []
	for a in ORDERED_FIELDS:
		code = FIELD_DICT[a]
		path = code.split(".")
		#print a
		element = record
		for p in path:
			#print element,p
			try:
				element = element[p]
			except:
				element = ""
Ejemplo n.º 30
0
def run_tests():
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    debug = os.environ.get('DEBUG_TESTS', False) in ['true', 'True']
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    json_output = os.environ.get('TEST_JSON_OUTPUT', None)
    junit_output = os.environ.get('TEST_JUNIT_OUTPUT', None)
    
    if namespace:
        namespace = "'{0}'".format(namespace,)
    else:
        namespace = 'null'
    
    sandbox = False
    if serverurl.find('test.salesforce.com') != -1:
        sandbox = True
    
    sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, sf_version='32.0')
    
    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'
    
    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        if pattern:
            where_name.append("Name LIKE '{0}'".format(pattern))

    # Add any excludes to the where clause
    where_exclude = []
    for pattern in test_name_exclude.split(','):
        if pattern:
            where_exclude.append("(NOT Name LIKE '{0}')".format(pattern,))
   
    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = {0}".format(namespace,)
    if where_name:
        query += " AND ({0})".format(' OR '.join(where_name),)
    if where_exclude:
        query += " AND {0}".format(' AND '.join(where_exclude),)

    print "Running Query: {0}".format(query,)
    sys.stdout.flush()

    res = sf.query_all(query)

    print "Found {0} classes".format(res['totalSize'],)
    sys.stdout.flush()

    if not res['totalSize']:
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}
    
    classes_by_id = {}
    classes_by_name = {}
    trace_id = None
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        print 'Setting up trace flag to capture debug logs'

        # Get the User's id to set a TraceFlag
        res_user = sf.query("Select Id from User where Username = '{0}'".format(username,))
        user_id = res_user['records'][0]['Id']
        
        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        TraceFlag = sf.TraceFlag
        TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                     .format(instance=sf.sf_instance,
                             object_name='TraceFlag',
                             sf_version=sf.sf_version))

        # First, delete any old trace flags still lying around
        tf_res = sf.query('Select Id from TraceFlag')
        if tf_res['totalSize']:
            for tf in tf_res['records']:
                TraceFlag.delete(tf['Id'])
    
        expiration = datetime.datetime.now() + datetime.timedelta(1)
        res = TraceFlag.create({
            'ApexCode': 'Info',
            'ApexProfiling': 'Debug',
            'Callout': 'Info',
            'Database': 'Info',
            'ExpirationDate': expiration.isoformat(),
            #'ScopeId': user_id,
            'System': 'Info',
            'TracedEntityId': user_id,
            'Validation': 'Info',
            'Visualforce': 'Info',
            'Workflow': 'Info',
        })
        trace_id = res['id']

        print 'Created TraceFlag for user'
    
    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())})
    
    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '{0}'".format(job_id,))
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1
    
        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break
        
        print 'Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)
    
    # Get the test results by method
    res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '{0}'".format(job_id,))
    
    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
    
    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('{0}')".format("','".join([str(id) for id in classes_by_log_id.keys()]),)
        res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in {0}".format(log_ids,))
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log
            # Fetch the debug log file
            body_url = '{0}sobjects/ApexLog/{1}/Body'.format(sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)
            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(class_name, f)
            
            # Add method stats to results_by_class_name
            for method, info in method_stats.items():
                results_by_class_name[class_name][method].update(info)

        # Delete the trace flag
        TraceFlag.delete(trace_id)

    # Build an OrderedDict of results
    test_results = []

    class_names = results_by_class_name.keys()
    class_names.sort()
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        duration = None
        if debug and class_id in logs_by_class_id:
            duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: {0} ({1}s)'.format(class_name, duration)
        else:
            print 'Class: {0}'.format(class_name,)
        sys.stdout.flush()

        method_names = results_by_class_name[class_name].keys()
        method_names.sort()
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]

            test_results.append({
                'Children': result.get('children', None),
                'ClassName': decode_to_unicode(class_name),
                'Method': decode_to_unicode(result['MethodName']),
                'Message': decode_to_unicode(result['Message']),
                'Outcome': decode_to_unicode(result['Outcome']),
                'StackTrace': decode_to_unicode(result['StackTrace']),
                'Stats': result.get('stats', None),
                'TestTimestamp': result.get('TestTimestamp', None),
            })
            
            # Output result for method
            if debug and json_output and result.get('stats') and 'duration' in result['stats']:
                # If debug is enabled and we're generating the json output, include duration with the test
                print u'   {0}: {1} ({2}s)'.format(
                    result['Outcome'], 
                    result['MethodName'], 
                    result['stats']['duration']
                )
            else:
                print u'   {Outcome}: {MethodName}'.format(**result)

            if debug and not json_output:
                print u'     DEBUG LOG INFO:'
                stats = result.get('stats',None)
                if not stats:
                    print u'       No stats found, likely because of debug log size limit'
                else:
                    stat_keys = stats.keys()
                    stat_keys.sort()
                    for stat in stat_keys:
                        try:
                            value = stats[stat]
                            output = u'       {0} / {1}'.format(value['used'], value['allowed'])
                            print output.ljust(26) + stat
                        except (TypeError, KeyError):
                            output = u'       {0}'.format(stats[stat],)
                            print output.ljust(26) + stat
    
            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail','CompileFail']:
                print u'   Message: {Message}'.format(**result)
                print u'   StackTrace: {StackTrace}'.format(**result)
            sys.stdout.flush()
    
    print u'-------------------------------------------------------------------------------'
    print u'Passed: %(Pass)s  Fail: %(Fail)s  Compile Fail: %(CompileFail)s  Skipped: %(Skip)s' % counts
    print u'-------------------------------------------------------------------------------'
    sys.stdout.flush()
    
    if counts['Fail'] or counts['CompileFail']:
        print u''
        print u'Failing Tests'
        print u'-------------'
        print u''
        sys.stdout.flush()

        counter = 0
        for result in test_results:
            if result['Outcome'] not in ['Fail','CompileFail']:
                continue
            counter += 1
            print u'{0}: {1}.{2} - {3}'.format(counter, result['ClassName'], result['Method'], result['Outcome'])
            print u'  Message: {0}'.format(result['Message'],)
            print u'  StackTrace: {0}'.format(result['StackTrace'],)
            sys.stdout.flush()

    if json_output:
        f = codecs.open(json_output, encoding='utf-8', mode='w')
        f.write(json.dumps(test_results))
        f.close()

    if junit_output:
        f = codecs.open(junit_output, encoding='utf-8', mode='w')
        f.write('<testsuite tests="{0}">\n'.format(len(test_results)))
        for result in test_results:
            testcase = '  <testcase classname="{0}" name="{1}"'.format(result['ClassName'], result['Method'])
            if 'Stats' in result and result['Stats'] and 'duration' in result['Stats']:
                testcase = '{0} time="{1}"'.format(testcase, result['Stats']['duration'])
            if result['Outcome'] in ['Fail','CompileFail']:
                testcase = '{0}>\n'.format(testcase,)
                testcase = '{0}    <failure type="{1}">{2}</failure>\n'.format(
                    testcase, 
                    cgi.escape(result['StackTrace']), 
                    cgi.escape(result['Message']),
                )
                testcase = '{0}  </testcase>\n'.format(testcase,)
            else:
                testcase = '{0} />\n'.format(testcase,)
            f.write(testcase)

        f.write('</testsuite>')
        f.close()
        

    return counts
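
# Hedged invocation sketch -- assumes the SF_* environment variables read at the
# top of run_tests() are set; exits non-zero when any test fails.
if __name__ == '__main__':
    counts = run_tests()
    if counts['Fail'] or counts['CompileFail']:
        sys.exit(1)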
Ejemplo n.º 31
0
def sync(date, alerts, verbose):
    status = 'ERROR'

    with warnings.catch_warnings(record=True) as w:
        try:
            if verbose:
                console_handler = logging.StreamHandler()
                console_handler.setLevel(logging.DEBUG)
                console_handler.setFormatter(formatter)
                logger.addHandler(console_handler)

            logger.info('Starting...')
            start = arrow.now()

            # Connect to Salesforce
            sf = Salesforce(username=SF_USER, \
                            password=SF_PASSWORD, \
                            security_token=SF_TOKEN)

            # Connect to database
            dest_db = datum.connect(DEST_DB_DSN)
            dest_tbl = dest_db[DEST_TABLE]
            tmp_tbl = dest_db[DEST_TEMP_TABLE]

            logger.info('Truncating temp table...')
            tmp_tbl.delete()

            sf_query = SF_QUERY

            # If a start date was passed in, handle it.
            if date:
                warnings.warn('Fetched records for {} only'.format(date))
                try:
                    date_comps = [int(x) for x in date.split('-')]
                    start_date = arrow.get(date_obj(*date_comps), 'US/Eastern')\
                                      .to('Etc/UTC')
                except ValueError:
                    raise HandledError('Date parameter is invalid')
                end_date = start_date.replace(days=1)

                sf_query += ' AND (LastModifiedDate >= {})'.format(start_date.isoformat())
                sf_query += ' AND (LastModifiedDate < {})'.format(end_date.isoformat())

            # Otherwise, grab the last updated date from the DB.
            else:
                logger.info('Getting last updated date...')
                start_date_str = dest_db.execute('select max({}) from {}'\
                                            .format(DEST_UPDATED_FIELD, DEST_TABLE))[0]
                start_date = arrow.get(start_date_str, 'US/Eastern').to('Etc/UTC')
                sf_query += ' AND (LastModifiedDate > {})'.format(start_date.isoformat())

            logger.info('Fetching new records from Salesforce...')
            try:
                sf_rows = sf.query_all(sf_query)['records']
            except SalesforceMalformedRequest:
                raise HandledError('Could not query Salesforce')

            logger.info('Processing rows...')
            rows = [process_row(sf_row, FIELD_MAP) for sf_row in sf_rows]

            logger.info('Writing to temp table...')
            tmp_tbl.write(rows)

            logger.info('Deleting updated records...')
            update_count = dest_db.execute(DEL_STMT)
            add_count = len(rows) - update_count

            logger.info('Appending new records...')
            dest_tbl.write(rows)

            # We should have added and updated at least 1 record
            if add_count == 0:
                warnings.warn('No records added')
            if update_count == 0:
                warnings.warn('No records updated')

            # TODO this check was causing an obscure httplib error
            # (essentially, timing out) so disabling it for now

            # Check count against Salesforce
            # sf_count = sf.query_all(SF_COUNT_QUERY)['totalSize']
            # db_count = dest_tbl.count()
            # if sf_count != db_count:
            #     warnings.warn('Salesforce has {} rows, database has {}'\
            #                             .format(sf_count, db_count))

            # If we got here, it was successful.
            status = 'SUCCESS'
            logger.info('Ran successfully. Added {}, updated {}.'\
                                    .format(add_count, update_count))

        except:
            logger.exception('Unhandled error')

        finally:
            if alerts:
                msg = '[311] {} - {}'.format(__file__, status)
                if status == 'SUCCESS':
                    msg += ' - {} added, {} updated'\
                                    .format(add_count, update_count)
                if len(w) > 0:
                    msg += ' - {}.'.format('; '.join([str(x.message) for x in w]))

                # Try to post to Slack
                try:
                    slack = Slacker(SLACK_TOKEN)
                    slack.chat.post_message(SLACK_CHANNEL, msg)
                except Exception as e:
                    logger.error(
                        'Could not post to Slack. '
                        'The message was:\n\n{}\n\n'
                        'The error was:\n\n{}'.format(msg, e)
                    )
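
# Hedged wiring sketch -- the CLI decorators for sync() are not shown above, so
# this calls it directly: no date filter, alerts off, verbose logging on.
if __name__ == '__main__':
    sync(date=None, alerts=False, verbose=True)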
Ejemplo n.º 32
0
cdoSync = elq.CreateSync(defObject=cdoDef)
logging.info("export sync started: " + cdoSync['uri'])

status = elq.CheckSyncStatus(syncObject=cdoSync)
logging.info("sync finished with status '%s'; retrieving data" % status)

data = elq.GetSyncedData(defObject=cdoDef)
logging.info("# of records:" + str(len(data)))

if len(data) > 0:

    dataOut = data

    nullCount = 0

    sfdcCampaigns = sf.query_all("SELECT Id, Solution_Code_Family__c, Solution_Code__c FROM Campaign")
    sfdcCampaignsId = dict( (d['Id'], d) for d in sfdcCampaigns['records'])
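    # e.g. sfdcCampaignsId['701...'] -> {'Id': '701...',
    #      'Solution_Code_Family__c': ..., 'Solution_Code__c': ...}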

    logging.info("Retreived SFDC Campaigns")

    for record in dataOut:
        try:
            ##thisCampaign = sf.Campaign.get(record['Contacts_LeadType_MostRecent_OfferID'])
            thisCampaign = sfdcCampaignsId[record['Contacts_LeadType_MostRecent_OfferID1']]
        except KeyError:
            record['Contacts_LeadType_S_Last_Error1'] = 'Error retrieving campaign details'
            record['Contacts_LeadType_S_Data_Status1'] = 'CAMPAIGN DETAIL ERROR'
        else:
            record['Contacts_LeadType_MostRecent_Offer_PrimarySol1'] = thisCampaign['Solution_Code_Family__c']
            record['Contacts_LeadType_MostRecent_Offer_ProductSer1'] = thisCampaign['Solution_Code__c']
            record['Contacts_LeadType_S_Data_Status1'] = 'CAMPAIGN DETAILS RETRIEVED'
Ejemplo n.º 34
0
    #this has the list of ordered dictionaries that describe the fields
    table_fields = this_table['fields']

    # list comprehension style
    field_names = [item['name'] for item in table_fields]
    all_tables[i] = field_names

    # non-list-comprehension style, for reference

    # field_names = []
    # for item in table_fields:
    #     field_names.append(item['name'])

for i in all_tables:
    this_fields = all_tables[i]
    sql = "SELECT " + ",".join(this_fields) + " FROM " + i
    print "Getting table %s from Salesforce DB." % i
    sf_result = sf.query_all(sql)
    sf_df = DataFrame.from_dict(sf_result['records'], orient='columns', dtype=float)
    # drop attributes column if it exists, if not then pass
    try:
        sf_df.drop('attributes', axis=1, inplace=True)
    except KeyError:
        pass
    # load table to database
    print "loading table %s into Silo." % i
    databaseconfig.create_sf_table(engine=engine, conn=conn, tablename= i, df=sf_df)
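    # For reference, a minimal sketch (assumption -- the real helper is
    # project-specific) of what databaseconfig.create_sf_table might do:
    # def create_sf_table(engine, conn, tablename, df):
    #     df.to_sql(tablename, engine, if_exists='replace', index=False)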

print "Done!"
# If the word "offline" is specified on the command line, use a cached query
if 'offline' in sys.argv:
    try:
        with open(CACHE_FILE, 'rb') as file:
            records = pickle.load(file)
    except IOError: 
        die("Can't work offline because no CACHE_FILE exists")
else:
    sf = Salesforce(
        username=SALESFORCE_USERNAME, 
        password=SALESFORCE_PASSWORD, 
        security_token=SALESFORCE_SECURITY_TOKEN
    )
    records = sf.query_all('''
        SELECT {} 
        FROM Beautiful_Solution__c
        WHERE on_website__c = true
    '''.format(','.join(salesforce2jekyll)))['records']
    with open(CACHE_FILE, 'wb') as file:
        pickle.dump(records, file)

# Clean and process records
# ////////////////////////////////////////////////////////////////////////////
# Convert record keys from salesforce to jekyll names, replace None w/'' & remove carriage returns
records = [{j: (r[s] or '').replace('\r\n','\n') for j,s in jekyll2salesforce.items()} for r in records]
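# Equivalent non-comprehension form, for reference:
# cleaned = []
# for r in records:
#     cleaned.append({j: (r[s] or '').replace('\r\n', '\n')
#                     for j, s in jekyll2salesforce.items()})
# records = cleaned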

# Salesforce currently limits multi-select picklist fields to 40 characters.
# I need full titles to complete 2-way relationships between modules, so this
# mapping is for expanding truncated titles to their full names.
TITLE_LIMIT = 40
full_titles = {record['title'][:TITLE_LIMIT]: record['title'] for record in records}
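# A hedged sketch (not in the original) of using full_titles to expand a
# truncated multi-select value; 'modules' is a hypothetical semicolon-
# delimited field name produced by the mapping above.
for record in records:
    if record.get('modules'):
        record['modules'] = ';'.join(
            full_titles.get(title[:TITLE_LIMIT], title)
            for title in record['modules'].split(';'))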
Ejemplo n.º 36
0
def run_tests():
    username = os.environ.get('SF_USERNAME')
    password = os.environ.get('SF_PASSWORD')
    serverurl = os.environ.get('SF_SERVERURL')
    test_name_match = os.environ.get('APEX_TEST_NAME_MATCH', '%_TEST')
    test_name_exclude = os.environ.get('APEX_TEST_NAME_EXCLUDE', '')
    namespace = os.environ.get('NAMESPACE', None)
    poll_interval = int(os.environ.get('POLL_INTERVAL', 10))
    debug = os.environ.get('DEBUG_TESTS', 'false') == 'true'
    debug_logdir = os.environ.get('DEBUG_LOGDIR')
    json_output = os.environ.get('TEST_JSON_OUTPUT', None)
    
    if namespace:
        namespace = "'%s'" % namespace
    else:
        namespace = 'null'
    
    sandbox = 'test.salesforce.com' in serverurl
    
    sf = Salesforce(username=username, password=password, security_token='', sandbox=sandbox, version='32.0')
    
    # Change base_url to use the tooling api
    sf.base_url = sf.base_url + 'tooling/'
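    # base_url now looks like, e.g.:
    #   https://na1.salesforce.com/services/data/v32.0/tooling/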
    
    # Split test_name_match by commas to allow multiple class name matching options
    where_name = []
    for pattern in test_name_match.split(','):
        if pattern:
            where_name.append("Name LIKE '%s'" % pattern)

    # Add any excludes to the where clause
    where_exclude = []
    for pattern in test_name_exclude.split(','):
        if pattern:
            where_exclude.append("(NOT Name LIKE '%s')" % pattern)
   
    # Get all test classes for namespace
    query = "SELECT Id, Name FROM ApexClass WHERE NamespacePrefix = %s" % namespace
    if where_name:
        query += " AND (%s)" % ' OR '.join(where_name)
    if where_exclude:
        query += " AND %s" % ' AND '.join(where_exclude)

    print "Running Query: %s" % query
    sys.stdout.flush()

    res = sf.query_all(query)

    print "Found %s classes" % res['totalSize']
    sys.stdout.flush()

    if not res['totalSize']:
        return {'Pass': 0, 'Fail': 0, 'CompileFail': 0, 'Skip': 0}
    
    classes_by_id = {}
    classes_by_name = {}
    traces_by_class_id = {}
    results_by_class_name = {}
    classes_by_log_id = {}
    logs_by_class_id = {}
    
    for cls in res['records']:
        classes_by_id[cls['Id']] = cls['Name']
        classes_by_name[cls['Name']] = cls['Id']
        results_by_class_name[cls['Name']] = {}

    # If debug is turned on, setup debug traces for all test classes
    if debug:
        # Set up a simple-salesforce sobject for TraceFlag using the tooling api
        TraceFlag = sf.TraceFlag
        TraceFlag.base_url = (u'https://{instance}/services/data/v{sf_version}/tooling/sobjects/{object_name}/'
                     .format(instance=sf.sf_instance,
                             object_name='TraceFlag',
                             sf_version=sf.sf_version))

        # First, delete any old trace flags still lying around
        tf_res = sf.query('Select Id from TraceFlag')
        if tf_res['totalSize']:
            for tf in tf_res['records']:
                TraceFlag.delete(tf['Id'])
    
        expiration = datetime.datetime.now() + datetime.timedelta(1)
        for class_id in classes_by_id.keys():
            res = TraceFlag.create({
                'ApexCode': 'Error',
                'ApexProfiling': 'Debug',
                'Callout': 'Error',
                'Database': 'Error',
                'ExpirationDate': expiration.isoformat(),
                #'ScopeId': class_id,
                'System': 'Error',
                'TracedEntityId': class_id,
                'Validation': 'Error',
                'Visualforce': 'Error',
                'Workflow': 'Error',
            })
            traces_by_class_id[class_id] = res['id']
    
    # Run all the tests
    print "Queuing tests for execution..."
    sys.stdout.flush()
    job_id = sf.restful('runTestsAsynchronous', params={'classids': ','.join(classes_by_id.keys())})
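    # job_id is the Id of the AsyncApexJob for this run; the
    # ApexTestQueueItem and ApexTestResult queries below key off it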
    
    # Loop waiting for the tests to complete
    while True:
        res = sf.query_all("SELECT Id, Status, ApexClassId FROM ApexTestQueueItem WHERE ParentJobId = '%s'" % job_id)
        counts = {
            'Queued': 0,
            'Processing': 0,
            'Aborted': 0,
            'Completed': 0,
            'Failed': 0,
            'Preparing': 0,
            'Holding': 0,
        }
        for item in res['records']:
            counts[item['Status']] += 1
    
        # If all tests have run, break from the loop
        if not counts['Queued'] and not counts['Processing']:
            print ''
            print '-------------------------------------------------------------------------------'
            print 'Test Results'
            print '-------------------------------------------------------------------------------'
            sys.stdout.flush()
            break
        
        print 'Completed: %(Completed)s  Processing: %(Processing)s  Queued: %(Queued)s' % counts
        sys.stdout.flush()
        sleep(poll_interval)
    
    # Get the test results by method
    res = sf.query_all("SELECT StackTrace,Message, ApexLogId, AsyncApexJobId,MethodName, Outcome, ApexClassId, TestTimestamp FROM ApexTestResult WHERE AsyncApexJobId = '%s'" % job_id)
    
    counts = {
        'Pass': 0,
        'Fail': 0,
        'CompileFail': 0,
        'Skip': 0,
    }
    for result in res['records']:
        class_name = classes_by_id[result['ApexClassId']]
        results_by_class_name[class_name][result['MethodName']] = result
        counts[result['Outcome']] += 1
        if debug and result['ApexLogId']:
            classes_by_log_id[result['ApexLogId']] = result['ApexClassId']
    
    # Fetch debug logs if debug is enabled
    if debug:
        log_ids = "('%s')" % "','".join([str(log_id) for log_id in classes_by_log_id.keys()])
        res = sf.query_all("SELECT Id, Application, DurationMilliseconds, Location, LogLength, LogUserId, Operation, Request, StartTime, Status from ApexLog where Id in %s" % log_ids)
        for log in res['records']:
            class_id = classes_by_log_id[log['Id']]
            class_name = classes_by_id[class_id]
            logs_by_class_id[class_id] = log
            # Fetch the debug log file
            body_url = '%ssobjects/ApexLog/%s/Body' % (sf.base_url, log['Id'])
            resp = sf.request.get(body_url, headers=sf.headers)
            log_file = class_name + '.log'
            if debug_logdir:
                log_file = debug_logdir + os.sep + log_file
            f = open(log_file, 'w')
            f.write(resp.content)
            f.close()

            # Parse stats from the log file
            f = open(log_file, 'r')
            method_stats = parse_log(class_name, f)
            
            # Add method stats to results_by_class_name
            for method, info in method_stats.items():
                results_by_class_name[class_name][method].update(info)

        # Delete the trace flags
        for trace_id in traces_by_class_id.values():
            TraceFlag.delete(trace_id)

    # Build an OrderedDict of results
    test_results = []

    class_names = sorted(results_by_class_name)
    for class_name in class_names:
        class_id = classes_by_name[class_name]
        duration = None
        if debug and class_id in logs_by_class_id:
            duration = int(logs_by_class_id[class_id]['DurationMilliseconds']) * .001
            print 'Class: %s (%ss)' % (class_name, duration)
        else:
            print 'Class: %s' % class_name
        sys.stdout.flush()

        method_names = sorted(results_by_class_name[class_name])
        for method_name in method_names:
            result = results_by_class_name[class_name][method_name]

            test_results.append({
                'Children': result.get('children', None),
                'ClassName': class_name,
                'Method': result['MethodName'],
                'Message': result['Message'],
                'Outcome': result['Outcome'],
                'StackTrace': result['StackTrace'],
                'Stats': result.get('stats', None),
                'TestTimestamp': result.get('TestTimestamp', None),
            })
    
            # Output result for method
            print '   %(Outcome)s: %(MethodName)s' % result

            if debug:
                print '     DEBUG LOG INFO:'
                stats = result.get('stats',None)
                if not stats:
                    print '       No stats found, likely because of debug log size limit'
                else:
                    for stat in sorted(stats):
                        try:
                            value = stats[stat]
                            output = '       %s / %s' % (value['used'], value['allowed'])
                            print output.ljust(26) + stat
                        except (TypeError, KeyError):
                            # stat value is a plain scalar, not a used/allowed dict
                            output = '       %s' % stats[stat]
                            print output.ljust(26) + stat
    
            # Print message and stack trace if failed
            if result['Outcome'] in ['Fail','CompileFail']:
                print '   Message: %(Message)s' % result
                print '   StackTrace: %(StackTrace)s' % result
            sys.stdout.flush()
    
    print '-------------------------------------------------------------------------------'
    print 'Passed: %(Pass)s  Fail: %(Fail)s  Compile Fail: %(CompileFail)s  Skipped: %(Skip)s' % counts
    print '-------------------------------------------------------------------------------'
    sys.stdout.flush()

    if json_output:
        f = open(json_output, 'w')
        f.write(json.dumps(test_results))
        f.close()

    return counts
Ejemplo n.º 37
0
class RFCReport:
    def __init__(self):
        print "Logging in to Salesforce API..."
        # initialize our SFDC connection
        self.sf = Salesforce(username=config.username,
                             password=config.password,
                             security_token=config.security_token)
        self.caseData = None
        self.reportData = dict()
        self.fulltable = ''
        self.outputDict = dict()
        self.sorted_list = list()
        print "Login successful."

    def getData(self, initString, query, checkFields, exitString):
        """ Generalized case data querying function.
        Returns nested dict/list structure corresponding to SOQL output.
        Query should be a SOQL query. See nestedGet for checkFields format."""
        print initString
        data = self.sf.query_all(query)
        output = json.loads(jsonizer(data))
        lengthset = set()
        for change in output["records"]:
            lengthset.add(nestedGet(checkFields, change))
        totalcase = len(lengthset)
        print "Got", totalcase, exitString
        return output
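    # Hedged usage sketch (the SOQL and field path are illustrative only):
    # report = RFCReport()
    # data = report.getData(
    #     "Getting case status changes...",
    #     "SELECT CreatedBy.Name, CreatedDate, Parent.CaseNumber, "
    #     "Parent.Cancel_Effective_Date__c, "
    #     "(SELECT NewValue FROM FeedTrackedChanges) FROM CaseFeed",
    #     ["Parent", "CaseNumber"],
    #     "status changes.")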

    def genReport(self, data):
        """deduplicate gathered case data"""
        dupecount = 0
        output = dict()
        for change in data["records"]:
            for line in change["FeedTrackedChanges"]["records"]:
                if line is not None:
                    if line["NewValue"] in (
                            "Ready For Close",
                            "Closed",
                            "Cancelled",
                            "Closed as Duplicate"):
                        caseid = nestedGet(["Parent", "CaseNumber"], change)
                        changedate = dateparser.parse(change["CreatedDate"])
                        # need to account for >1 escalation per case
                        if caseid in output:
                            # chronological order - latest gets it
                            if output[caseid]["Date"] > changedate:
                                dupecount += 1
                                continue
                        if nestedGet(["Parent", "Cancel_Effective_Date__c"],
                                     change) is not None:
                            teardown = True
                        else:
                            teardown = False
                        output[caseid] = frozendict(
                            Name=nestedGet(["CreatedBy", "Name"], change),
                            Case=caseid,
                            Status=line["NewValue"],
                            Teardown=teardown,
                            Date=changedate)
        print "Found and removed", dupecount, "cases handled more than once."
        print "Credit for duplicates given to latest resolver."
        return output

    def tabulateReport(self):
        """tabulates case data per team member"""
        print "Reticulating splines..."
        listedUsers = [TeamMember(y) for y in
                       set([x["Name"] for x in self.reportData.itervalues()])]
        print "Generating summaries..."
        for case in self.reportData.itervalues():
            name = case["Name"]
            nameobj = next(z for z in listedUsers if z.name == name)
            nameobj.caseCount.add(case)
            if case["Status"] == "Ready For Close":
                nameobj.closedCount.discard(case)
                nameobj.rfcCount.add(case)
            if case["Status"] in (
                    "Closed",
                    "Cancelled",
                    "Closed as Duplicate"):
                nameobj.closedCount.add(case)
                nameobj.rfcCount.discard(case)
            if case["Teardown"]:
                nameobj.tdCount.add(case)
                nameobj.rfcCount.discard(case)
                nameobj.closedCount.discard(case)
        self.sorted_list = sorted(listedUsers,
                                  key=lambda q: len(q.caseCount),
                                  reverse=True)

    @property
    def dataToJSON(self):
        cutoff = ''
        if config.closedOnly:
            cutoff = "resolved cases only"
        d = [x["Date"] for x in self.reportData.itervalues()]
        dates = ' - '.join(
            [x.strftime("%B %d, %Y") for x in (min(d), max(d))])
        drange = ' '.join(
            ["current" if x == "this" else x for x in
             (config.SFDCdaterange.lower().split('_'))])
        rowset = [['Total', ], ]
        groups = list()
        categories = list()
        for each in self.sorted_list:
            categories.append(each.name)
            subrow = [len(each.caseCount), ]
            for key, value in each.counts.iteritems():
                subrow.append(len(value))
                if key not in rowset[0]:
                    rowset[0].append(key)
                    groups.append(key)
            rowset.append(subrow)
        self.outputDict = dict(daterange=drange,
                               dates=dates,
                               cutoff=cutoff,
                               rows=rowset,
                               groups=groups,
                               categories=categories)
        return jsonizer(self.outputDict)
Ejemplo n.º 38
0
def get_query(self, query):
    self.refresh_token()
    sf = Salesforce(instance=self.client.instance_url,
                    session_id=self.client.access_token)
    return sf.query_all(query)
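# Hedged usage sketch (assumes the surrounding class wraps an OAuth client
# exposing instance_url and access_token, as referenced above):
# rows = connector.get_query("SELECT Id, Name FROM Account LIMIT 10")
# print rows['totalSize']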