def main(OBJECTID, lck, count, length, getArea=False):
    """
    OBJECTID           - the objectid of the feature from the wfs service
    lck                - multiprocess lock
    count              - how many features have been processed
    length             - the total number of features to be processed
    getArea            - boolean flag to indicate whether to capture the area of intersection 
    """
    conn = None                                                                                 # make sure conn exists for the finally block
    try:
        logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s',)
        logging.info(str(os.getpid()) + " OBJECTID " + str(OBJECTID) + " (" + str(count) + " out of " + str(length) + ")")   
        multiprocessing.current_process().cnt += 1   
        conn = dbconnect('species_especies_schema')                                             # connect to PostGIS 
       
        # intersect the species range features with the intersectingfeature features
        if getArea:                                                                             # populate the area using the intersection area between the wdpa and the species
            conn.cur.execute("SELECT * from especies.intersect_species_wdpa_area(%s,false)", (OBJECTID,))
        else:
            conn.cur.execute("SELECT * from especies.intersect_species_wdpa(%s,false)", (OBJECTID,))
        intersectingfeatures = conn.cur.fetchall()                                              # get all of the intersecting PAs for the species
        if len(intersectingfeatures) > 0:
            for intersectingfeature in intersectingfeatures:                                    # iterate through the intersectingfeatures
                if getArea:                                                                     # populate the output table, binding the values as query parameters
                    conn.cur.execute("SELECT especies.insert_species_wdpa_area(%s,%s,%s,%s)", (OBJECTID, intersectingfeature[1], intersectingfeature[2], intersectingfeature[3]))
                else:
                    conn.cur.execute("SELECT especies.insert_species_wdpa(%s,%s,%s)", (OBJECTID, intersectingfeature[1], intersectingfeature[2]))
        else:
            raise Exception("No intersecting features for OBJECTID %s" % OBJECTID)
                        
    except Exception as inst:
        logging.error(str(os.getpid()) + " " + str(inst))
    finally:
        if conn:
            conn.cur.close()
            del(conn)
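For context, a minimal driver sketch showing how a worker like this is typically dispatched, mirroring the start_intersect driver further down this page; the module name intersect_worker is a placeholder for wherever main() lives:

import multiprocessing

def start_process():
    multiprocessing.current_process().cnt = 0                           # per-process counter incremented by the worker

if __name__ == '__main__':
    import intersect_worker                                             # placeholder module containing main()
    idlist = [1, 2, 3]                                                  # OBJECTID values to process
    processPool = multiprocessing.Pool(4, start_process)                # a pool of worker processes
    manager = multiprocessing.Manager()
    lck = manager.Lock()                                                # shared lock passed to every worker
    for count, OBJECTID in enumerate(idlist, 1):
        processPool.apply_async(intersect_worker.main, [OBJECTID, lck, count, len(idlist)])
    processPool.close()
    processPool.join()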
Example #2
def api_id(passid = "nopass"):
    # Check if an ID was provided as part of the URL.
    # If ID is provided, assign it to a variable.
    # If no ID is provided, display an error in the browser.
    if passid == "nopass":
        if 'id' in request.args:
            id = int(request.args['id'])
        else:
            return "Error: No id field provided. Please specify an id."
    else:
        id = int(passid)

    # Loop through the data and match results that fit the requested ID.
    # IDs are unique, but other fields might return many results
    conn = dbc.dbconnect()
    curselect = conn.cursor(buffered=False)
    dbquery = "SELECT * FROM Riverside WHERE ID={0};".format(id)
    curselect.execute(dbquery)
    row_headers=[x[0] for x in curselect.description] #this will extract row headers
    results = curselect.fetchall()
    curselect.close()  
    conn.close()
    json_data=[]
    for result in results:
        json_data.append(dict(zip(row_headers,result)))
        
    return json.dumps(json_data)
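The zip(row_headers, result) idiom above is what turns DB-API row tuples into JSON objects; a self-contained illustration of just that step, with a hypothetical row:

import json

row_headers = ['ID', 'Name', 'IP']                      # as pulled from cursor.description
results = [(1, 'porch-lights', '10.0.0.5')]             # as returned by cursor.fetchall()
json_data = [dict(zip(row_headers, row)) for row in results]
print(json.dumps(json_data))                            # [{"ID": 1, "Name": "porch-lights", "IP": "10.0.0.5"}]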
Example #3
def report():
    conn = dbconnect('equipment_testing')
    if conn is None:
        return render_template('results.html', ans=0)

    if request.form.get('send') is None:
        return render_template('otchet.html')

    year = request.form['year']
    month = request.form['month']

    conditions = []
    params = []

    if year:
        conditions.append('ot_year=%s')
        params.append(year)
    if month:
        conditions.append('ot_month=%s')
        params.append(month)

    condition = 'WHERE ' + (' AND '.join(conditions) if conditions else '1')

    sqlreq = 'SELECT * FROM `calendar` ' + condition

    mycursor = conn.cursor()
    mycursor.execute(sqlreq, params)  # bind the values instead of concatenating them into the SQL
    result = mycursor.fetchall()

    reskeys = ['id', 'year', 'month', 'department', 'tests']

    result = list(map(lambda x: dict(zip(reskeys, x)), result))
    return render_template('otchet.html', otchet=1, table=result)
Example #4
def api_add():
    jsondata = request.get_json()
    if 'timeout' in jsondata:
        timeout = int(jsondata['timeout'])
    else:
        return "Need timeout value"
    controllerlist = []
    controllers = xled.discover.xdiscover(None, None, timeout)

    with suppress(xled.exceptions.DiscoverTimeout):
        for controller in controllers:
            controllerlist.append(controller)
            print(controllerlist)
    try:
        conn = dbc.dbconnect()
        for con in controllerlist:
            curinsert = conn.cursor(buffered=False)
            curselect = conn.cursor(buffered=False)

            curselect.execute(
                "SELECT StartChannel, StartUniverse, NumLEDS, ChannelsPerLED FROM Riverside ORDER BY id DESC LIMIT 1;"
            )
            sel_results = curselect.fetchone()
            curselect.close()
            control_interface = xled.ControlInterface(con.ip_address,
                                                      con.hw_address)
            device_info = control_interface.get_device_info()
            if not sel_results:
                curinsert.execute(
                    "INSERT INTO rxtnet.Riverside(Name, MacAddress, IP, NumLEDS, ChannelsPerLED, StartChannel, StartUniverse) VALUES (?, ?, ?, ?, ?, ?, ?)",
                    (con.id, con.hw_address, con.ip_address,
                     device_info["number_of_led"],
                     len(device_info["led_profile"]), '1', '0'))
                conn.commit()
                curinsert.close()
            else:
                startchannel = sel_results[0]
                startuniverse = sel_results[1]
                usedchannels = sel_results[2] * sel_results[3]
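                # The // 512 and % 512 arithmetic below assumes DMX/sACN-style
                # addressing, where each universe holds 512 channels: compute
                # how many channels the previous controller consumed, account
                # for channels "burned" at universe boundaries, and derive the
                # next free [universe, channel] pair for the new controller.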
                burnedchannels = ((usedchannels // 512) - 1) + (
                    (512 - startchannel + 1) % sel_results[3])
                nextaddr = [
                    startuniverse + (usedchannels // 512),
                    startchannel + (usedchannels % 512) + burnedchannels
                ]
                curinsert.execute(
                    "INSERT INTO rxtnet.Riverside(Name, MacAddress, IP, NumLEDS, ChannelsPerLED, StartChannel, StartUniverse) VALUES (?, ?, ?, ?, ?, ?, ?)",
                    (con.id, con.hw_address, con.ip_address,
                     device_info["number_of_led"],
                     len(device_info["led_profile"]), nextaddr[1],
                     nextaddr[0]))
                conn.commit()
                curinsert.close()
        conn.close()

    except mariadb.Error as e:
        print(f"Error: {e}")

    return api_all()
Example #5
def api_update_field():
    #example curl post
    #curl --header "Content-Type: application/json" --request POST --data '{"id":"46","fieldname":"GroupName","value":"test5"}' http://127.0.0.1:8083/api/v1/controllers/update
    jsondata = request.get_json()
    
    if 'id' in jsondata:
        id = int(jsondata['id'])
    else:
        return "Error: No id field provided. Please specify an id."
    
    if 'fieldname' in jsondata:
        fieldname = jsondata['fieldname']
    else:
        return "Error: No field provided. Please specify a field."
    
    if 'value' in jsondata:
        value = jsondata['value']
    else:
        return "Error: No value provided. Please specify a value."
    
    conn = dbc.dbconnect()
    dbquery = "UPDATE Riverside SET {0}=? WHERE ID=?".format(fieldname)  # the column name cannot be bound as a parameter; value and id are
    curselect = conn.cursor(buffered=False)
    curselect.execute(dbquery, (value, id))
    conn.commit()
    
    dbquery = "SELECT * FROM Riverside WHERE ID={0};".format(id)
    curselect.execute(dbquery)
    results = curselect.fetchall()   
    curselect.close()
    conn.close()
    
    return api_id(id)
Example #6
def api_controlleroff():
    # Check if an ID was provided in the JSON payload.
    # If an ID is provided, assign it to a variable.
    # If no ID is provided, display an error in the browser.
    jsondata = request.get_json()
        
    if 'id' in jsondata:
        id = int(jsondata['id'])
    else:
        return "Error: No id field provided. Please specify an id."

    conn = dbc.dbconnect()
    curselect = conn.cursor(buffered=False)
    
    dbquery = "SELECT MacAddress, IP FROM Riverside WHERE ID={0}".format(id)

    curselect.execute(dbquery)
    results = curselect.fetchone()
    curselect.close()
    conn.close()

    control_interface = xled.ControlInterface(results[1], results[0])
    control_interface.set_mode('off')

    return '', 200  # Flask views cannot return a bare int, so return an empty body with status 200
Example #7
def revalid(table):
  validated = {} # Store validated ids in here
  #add_trandsvalid(VALID_TABLE)
  add_trandsvalid(table)
  ts = {"t":table,"valid_certs":VALID_TABLE}
  q = fetch_revalidatable % ts
  print q
  db1,dbc1 = dbconnect()
  dbc1.execute(q)
  res = dbc1.fetchmany(1000)
  while res:
    print ".",
    # Sometimes this 
    for certpath, certid, extrapath, fprt, fetchtime in res:
      # Extrapath contains a cert[chain] that may make something in certpath
      # valid, so rerun validation that way...
      certs = dataToCerts(open(certpath, 'rb').read())
      certs = map(enc, certs)
      ecerts = dataToCerts(open(extrapath, 'rb').read())
      ecerts = map(enc, ecerts)
      q = "select min(id) from `%s` where path='%s'" % (table, certpath)
      dbc.execute(q)
      chain_start = dbc.fetchone()[0]
      pos_in_chain = certid - chain_start
      tcert = certs[pos_in_chain]
      others = certs[:pos_in_chain] + certs[pos_in_chain +1:] + ecerts
      ts["id"] = certid
      ts["fprt"] = fprt
      revalidate_one_cert(tcert, others, ts, pos_in_chain, validated, fetchtime)

    res = dbc1.fetchmany(1000)
def getData(sql, connection):
    conn = dbconnect(connection)
    cur = conn.cur
    cur.execute(sql)
    rows = cur.fetchall()
    cur.close()
    conn.close()
    return rows
def start_intersect(other_tablename, other_primarykey, getArea, processors):
    # Run the preprocessing functions
    conn = dbconnect('species_especies_schema')                         # connect to the database
    conn.cur.execute("select * from especies.redlist_intersect_preprocess('" + other_tablename + "','" + other_primarykey + "')") # create the empty intersection output table and log tables
    
    # iterate through the species features and do the processing
    conn.cur.execute('SELECT id FROM species_distribution.species order by 1;') # get the species ids to process
    ids = conn.cur.fetchall() 
    idlist = [id[0] for id in ids]
    count = 1 
    cpus = int(processors)
    processPool = multiprocessing.Pool(cpus, start_process)             # create a process pool to manage the jobs
    manager = multiprocessing.Manager()                                 # instantiate a manager to manage the shared memory objects 
    lck = manager.Lock()                                                # create a lock to block access
    
    #single feature test 
#    idlist = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] #debug only
#     idlist = [1, 2, 17975,102553] #debug only
    length = len(idlist)
    
    #iterate through the species range features
    print 'Processing %s species using %s processors' % (length,processors)
    for OBJECTID in idlist:                                             # iterate through the OBJECTID values
        processPool.apply_async(redlist_intersect_process.main, [OBJECTID, other_tablename, other_primarykey, lck, count, length, getArea]) #call the processes to do the actual intersection query
        count += 1
        
    processPool.close()                                                 # close the process pool
    processPool.join()                                                  # wait for all the workers to finish
    manager.shutdown()
    
    #do the postprocessing 
    conn.cur.execute("select * from especies.redlist_intersect_postprocess('" + other_tablename + "')") # generic post-processing to create the unique records of species/other table id
    conn.cur.close()
    del(conn)
def getScalar(sql):
    conn = dbconnect("durga_dopa")
    cur = conn.cur
    cur.execute(sql)
    row = cur.fetchone()
    cur.close()
    conn.close()
    return row
Example #12
def parseAll():
    global corruptionError  # assumed module-level flag; without this the assignments below would only set a local
    print('Fetching Entire File')
    with open(controller_path + fileName) as f:  # close the file once the lines are read
        DataString = f.readlines()
    ColumnTitles = DataString[6]
    ColumnTitles = ColumnTitles.strip().split(',')
    if ColumnTitles[0] != 'Error' and ColumnTitles[-1] != 'Dishwasher Real Energy (kWh)':  # changes in the file will break this line
        corruptionError = True
        return
    ColumnRawData = []
    for index in range(7, len(DataString)):
        ColumnRawData.append(DataString[index].strip().split(','))
    targetLength = len(ColumnTitles)
    for array in ColumnRawData:
        if len(array) != targetLength:
            corruptionError = True
            print("Corrupt DataFile, Fetching Again")
            return
    timeStamps = []
    for array in ColumnRawData:
        timeStamps.append(array[1])
    db = dbconnect()
    db.unCorruptPowerLogs()
    dbLatest = db.fetchLastPowerReading(
    )  #Find the last timestamp in the database
    dbLatest = datetime.strptime(str(dbLatest), "%Y-%m-%d %H:%M:%S")
    timeArray = []
    deviceArray = []
    wattsArray = []
    kwhArray = []
    for i in range(1, len(ColumnRawData)):
        newer = datetime.strptime(ColumnRawData[i][2], "%Y-%m-%d %H:%M:%S")
        older = datetime.strptime(ColumnRawData[i - 1][2], "%Y-%m-%d %H:%M:%S")
        delta = newer - older
        seconds = delta.seconds + (delta.days * 86400)
        kwhToWatts = (3600 * 1000) / seconds
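        # e.g. a 15-minute interval: seconds = 900, so kwhToWatts = 4000;
        # 0.05 kWh consumed in that interval averages 0.05 * 4000 = 200 watts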
        for j in range(3, 27):
            if j == 4:
                continue
            kwh = float(ColumnRawData[i][j]) - float(ColumnRawData[i - 1][j])
            watts = kwh * kwhToWatts
            if (watts < 0 or kwh < 0):
                watts = 0
                kwh = 0
            timeArray.append(newer)
            deviceArray.append(deviceIDs[j - 3])
            wattsArray.append(float(watts))
            kwhArray.append(float(kwh))
            print('kwh: ' + str(kwh) + ' seconds: ' + str(seconds) +
                  ' watts: ' + str(watts))

    for i in range(0, len(timeArray)):
        if (timeArray[i] > dbLatest):
            db.insertPowerReading(str(deviceArray[i]), str(timeArray[i]),
                                  str(wattsArray[i]), str(kwhArray[i]))
        else:
            print('failed case')
Example #13
    def __init__(self, tables, load_invalid):
        self.valid_only = not load_invalid

        from dbconnect import dbconnect
        self.db, self.dbc = dbconnect()
        self.tables = tables
        self.calc_max_widths()
        self.calc_total_rows()
Example #16
 def __init__(self, raw_der_cert=None, fingerprint=None, table_name=None, connect=None, existing_fields=None, skipfpcheck=False, create_table=False):
     if connect is None:
         connect = dbconnect.dbconnect()  # connect lazily; a call in the default argument would run once at definition time
     self.gdb, self.gdbc = connect
     if not table_name:
         self.table_name = TABLE_NAME
     else:
         self.table_name = table_name
     self.existing_fields = existing_fields if existing_fields is not None else []  # avoid sharing one mutable default list across instances
     if raw_der_cert:
         self.loadCert(raw_der_cert, fingerprint)
     self.skipfpcheck = skipfpcheck
     self.create_table = create_table
     self.domainre = re.compile(r'^[a-zA-Z\*\d-]{,63}(\.[a-zA-Z\*\d-]{,63})*$')
Example #17
def feedback():
    cur, conn = dbconnect()  # connect to database (in a separate function)
    if request.method == 'POST':  # submit button
        entered_comment = request.form['comment']  # collect values from a form with method="post"
        cur.execute("INSERT INTO feedbackherman (comment) VALUES (%s)",
                    (entered_comment,))  # add the comment to the database, binding it as a parameter
        conn.commit()
    cur.execute('SELECT comment FROM feedbackherman')  # read the comments back out of the database
    rows = cur.fetchall()
    return render_template('feedback.html', comments=rows)  # display everything
Example #18
def api_all():
    conn = dbc.dbconnect()  #connect to database, returns db connection object
    curselect = conn.cursor(buffered=False)
    curselect.execute("SELECT * FROM Riverside ORDER BY id ASC;")
    row_headers=[x[0] for x in curselect.description] #this will extract row headers
    controllersdict = curselect.fetchall()
    curselect.close()   
    conn.close() 
    json_data=[]
    for result in controllersdict:
        json_data.append(dict(zip(row_headers,result)))
    
    return json.dumps(json_data), 200
Example #19
def QueryDB(QUERY, DATA):
  #Receive a SQL Query returns a record set
  print (QUERY)
  import dbconnect
  conn = dbconnect.dbconnect()
  conn.autocommit(True)
  cur = conn.cursor()
  cur.execute(QUERY, DATA)
  rows = cur.fetchall()
  if rows:
    return rows
  else:
    return "Error"
Example #20
def deletecomment():
    if 'username' not in session:  # check if the user is logged in
        return redirect('/login')  # if not, go to login.html
    cur, conn = dbconnect()  # connect to the database
    if request.method == 'POST':  # delete button
        entered_id = request.form['id']  # take the id out of the form
        cur.execute("DELETE FROM feedbackherman WHERE id = %s",
                    (entered_id,))  # delete the comment from the table, binding the id as a parameter
        conn.commit()  # save
    cur.execute('SELECT comment, id FROM feedbackherman')  # get all comments
    rows = cur.fetchall()  # fetch all comments and save them in rows
    return render_template('deletecomment.html',
                           comments=rows)  # display the comments from the database
Example #21
 def connectDB(self):
     #database connection
     if not dbconnect.dbconnect():
         self.debug("\t[Error]-->[could not open the database]")
         return
     
     self.model = QtSql.QSqlRelationalTableModel()
     
     dbconnect.initModel(self.model, "text")
     self.ui.tableView_DB.setModel(self.model)
     self.ui.tableView_DB.setItemDelegate(QtSql.QSqlRelationalDelegate(self.ui.tableView_DB))
     self.ui.tableView_DB.setWindowTitle("tianwei")
     
     self.ui.btn_connectDB.setEnabled(False)
     self.ui.btn_search.setEnabled(True)
Example #22
def main():
    import pymongo, random, time
    con = pymongo.MongoClient()
    db = con['imdb']
    db2 = dbconnect()
    cur = db.top_100k.find().sort('votes',-1)
    for each in cur:
        res = getlocations(constructUrl(each['_id']))
        res['_id'] = each['_id']
        res['title'] = each['title']
        try:
            db2.movielocations.save(res)
        except pymongo.errors.DuplicateKeyError:
            print 'Already exists'
        time.sleep(random.randrange(0,15))
    return
Example #23
def api_listgroups():
    #where groupname IS NOT NULL
    conn = dbc.dbconnect()
    dbquery = "SELECT MacAddress, IP, ID, GroupName FROM Riverside WHERE GroupName IS NOT NULL"
    curselect = conn.cursor(buffered=False)
    curselect.execute(dbquery)
    results = curselect.fetchall()
    
    groups = {}
    
    for item in results:
        if item[3] in groups:
            groups[item[3]] += 1
        else:
            groups[item[3]] = 1
    curselect.close()
    conn.close()
    return jsonify(groups)
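The manual tally above is equivalent to collections.Counter; a compact alternative over the same row shape, with hypothetical rows:

from collections import Counter

rows = [('aa:bb:cc', '10.0.0.5', 1, 'porch'),           # hypothetical (MacAddress, IP, ID, GroupName) rows
        ('dd:ee:ff', '10.0.0.6', 2, 'porch')]
groups = Counter(row[3] for row in rows)
print(dict(groups))                                     # {'porch': 2}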
Example #24
def request5():
    conn = dbconnect('equipment_testing')
    if conn is None:
        return render_template('results.html', ans=0)

    sqlreq = 'SELECT `surname` ' \
             'FROM `employee` ' \
             'LEFT JOIN `protocol` ON `employee`.`emp_id`=`protocol`.`staff` ' \
             'WHERE `protocol`.`test_id` IS NULL'

    mycursor = conn.cursor()
    mycursor.execute(sqlreq)
    result = mycursor.fetchall()

    reskeys = ['employee']

    result = list(map(lambda x: dict(zip(reskeys, x)), result))
    return render_template('results.html', ans=5, table=result)
def intersect(id_no):
    conn = dbconnect('species_dev')
    cursor = conn.cursor()
    wfs = ogr.Open("WFS:http://mapservices.iucnredlist.org/ArcGIS/services/Andrew/OrangUtan/MapServer/WFSServer?request=GetCapabilities&service=WFS")
    layer = wfs.GetLayer(0) 
#    print layer.GetSpatialRef()    
    for feature in layer:
        geometry = feature.GetGeometryRef()             # get a copy of the geometry
        geometryWkt = geometry.ExportToWkt()            # create the geometry as wkt
        print geometryWkt
        cursor.execute("select wdpaid from wdpa where st_intersects(ST_GeometryFromText('" + geometryWkt + "',3857), geom)") # intersect the geometry with the protected areas layer
        PAs = cursor.fetchall()
        for PA in PAs:
            print str(int(PA[0]))
    cursor.close()                                                      
    conn.close()                                                        
    return
     
Example #26
def api_staticcolor():
    # Check if an ID was provided in the JSON payload.
    # If an ID is provided, assign it to a variable.
    # If no ID is provided, display an error in the browser.
    jsondata = request.get_json()
    
    if 'id' in jsondata:
        id = int(jsondata['id'])
    else:
        return "Error: No id field provided. Please specify an id."
    
    if 'red' in jsondata:
        red = int(jsondata['red'])
    else:
        return "Error: No red field provided. Please specify a red value."

    if 'green' in jsondata:
        green = int(jsondata['green'])
    else:
        return "Error: No green field provided. Please specify a green value."

    if 'blue' in jsondata:
        blue = int(jsondata['blue'])
    else:
        return "Error: No blue field provided. Please specify a blue value."

    

    conn = dbc.dbconnect()
    dbquery = "SELECT MacAddress, IP FROM Riverside WHERE ID={0}".format(id)
    curselect = conn.cursor(buffered=False)
    curselect.execute(dbquery)
    results = curselect.fetchone()
    curselect.close()
    conn.close()

    control_interface = xled.ControlInterface(results[1], results[0])
    hicontrol = xled.HighControlInterface(results[1])
    control_interface.set_mode('movie')
    hicontrol.set_static_color(red, green, blue)

    return '', 200  # Flask views cannot return a bare int, so return an empty body with status 200
Example #27
def request2():
    conn = dbconnect('equipment_testing')
    if conn is None:
        return render_template('results.html', ans=0)

    sqlreq = 'SELECT `employee`.`surname`, COUNT(`protocol`.`staff`) ' \
             'FROM `employee` ' \
             'JOIN `protocol` ' \
             'ON `employee`.`emp_id`=`protocol`.`staff` ' \
             'WHERE YEAR(`protocol`.`test_date`)=2017 AND MONTH(`protocol`.`test_date`)=3 ' \
             'GROUP BY `employee`.`surname`'

    mycursor = conn.cursor()
    mycursor.execute(sqlreq)
    result = mycursor.fetchall()

    reskeys = ['employee', 'count']

    result = list(map(lambda x: dict(zip(reskeys, x)), result))
    return render_template('results.html', ans=2, table=result)
Example #28
def proc():
    conn = dbconnect('equipment_testing')
    if conn is None:
        return render_template('results.html', ans=0)

    if request.form.get('send') is None:
        return render_template('proc.html')

    l_year = request.form['year']
    l_month = request.form['month']

    strnum = proccheck(conn, l_year, l_month)
    if strnum:
        return render_template('proc.html', proc=1)

    mycursor = conn.cursor()
    mycursor.callproc('getdepartment', (l_year, l_month))
    conn.commit()

    return render_template('proc.html', proc=0)
Example #29
def SaveRegistrationInDB(name, password, email, CODE):
  #The objective of this function is to store the user's registration data in the DB
  import hashlib #The password is stored in the DB as a sha-384 hash
  import dbconnect
  conn = dbconnect.dbconnect()
  conn.autocommit(True)
  cur = conn.cursor()
  name=str(name)
  #password=str(password)
  password=str(password) + SALT
  email=str(email)

  h = hashlib.new('sha384')
  h.update(password.encode('UTF-8'))
  myhash = h.hexdigest()

  cur.execute("INSERT into USERS (`FULLNAME`,`PASS`,`EMAIL`) values (%s,%s,%s)", (name, str(myhash),email))
  cur.execute("INSERT into UNVERIFIED_USER (`EMAIL`,`CODE`) values (%s,%s)", (email,str(CODE)))

  return
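A matching verification sketch for the scheme above (it assumes the same module-level SALT constant): hash the submitted password the same way and compare digests:

def VerifyPassword(submitted, stored_hash):
  #Hash the submitted password with the same salt and sha-384 scheme as SaveRegistrationInDB
  import hashlib
  h = hashlib.new('sha384')
  h.update((str(submitted) + SALT).encode('UTF-8'))
  return h.hexdigest() == stored_hash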
Example #30
def request3():
    conn = dbconnect('equipment_testing')
    if conn is None:
        return render_template('results.html', ans=0)

    sqlreq = 'SELECT `equipment`.* ' \
             'FROM `equipment` ' \
             'JOIN `protocol` ON `equipment`.`eq_id`=`protocol`.`equip` ' \
             'WHERE `protocol`.`test_date`=(' \
             'SELECT MIN(`test_date`) FROM `protocol`' \
             ') ' \
             'GROUP BY `protocol`.`test_date`'

    mycursor = conn.cursor()
    mycursor.execute(sqlreq)
    result = mycursor.fetchall()

    reskeys = ['id', 'name', 'producer', 'type']

    result = list(map(lambda x: dict(zip(reskeys, x)), result))
    return render_template('results.html', ans=3, table=result)
Example #31
def request1():
    conn = dbconnect('equipment_testing')
    if conn is None:
        return render_template('results.html', ans=0)

    sqlreq = 'SELECT `equipment`.`eq_id`, `equipment`.`eq_name`, `employee`.`surname`, `protocol`.`status`, ' \
             '`protocol`.`test_date` ' \
             'FROM `equipment` ' \
             'JOIN `protocol` ' \
             'ON `equipment`.`eq_id`=`protocol`.`equip` ' \
             'JOIN `employee` ' \
             'ON `protocol`.`staff`=`employee`.`emp_id` ' \
             'WHERE YEAR(`protocol`.`test_date`)=2017 AND MONTH(`protocol`.`test_date`)=3'

    mycursor = conn.cursor()
    mycursor.execute(sqlreq)
    result = mycursor.fetchall()

    reskeys = ['id', 'name', 'employee', 'status', 'date']

    result = list(map(lambda x: dict(zip(reskeys, x)), result))
    return render_template('results.html', ans=1, table=result)
Example #32
def searchtweets(query, getcount):
    results = api.search(q=query,rpp=1,count=getcount)

    cnx = dbconnect.dbconnect(dbconfig)
    cursor = dbconnect.dbcursor(cnx)

    for result in results:

        tweet = result.text.encode('utf-8')
        user = result.user.screen_name.encode('utf-8')
        timesent = result.created_at
        tweetid = result.id_str.encode('utf-8')

        insertQuery = ('INSERT IGNORE INTO SearchedForTweets '
             '(tweetid, username, tweetcontent, timesent, searchterm) '
             'VALUES (%s, %s, %s, %s, %s)')

        cursor.execute(insertQuery, (tweetid, user, tweet, timesent, query))
        cnx.commit()

        print user + " " + tweet
Example #33
def login():
    if request.method == 'POST':  # submit button
        entered_username = request.form['username']  # take the username out of the form
        entered_password = request.form['password']  # take the password out of the form
        cur, conn = dbconnect()  # connect to database (in a separate function)
        cur.execute('SELECT username, password FROM logininformation')  # read login information out of the database
        rows = cur.fetchall()  # rows is a list of (username, password) tuples
        if (entered_username, entered_password) in rows:  # if the entered info matches a database row, route to deletecomment
            session['username'] = entered_username  # save the username in the user's session cookie on successful login
            return redirect('/deletecomment')
    return render_template('login.html')  # on wrong information, or a plain GET, the user stays at login.html
Example #34
def main():
    # this is the main function for the program.
    global TWEETCONTENT, TWEETTIME, TWEETTYPE
    # print('%s' % (TWEETCONTENT))
    # print('%s' % (TWEETTIME))
    # print('%s' % (TWEETTYPE))
    tweetContent, tweetTime, tweetType = checkInput(TWEETCONTENT, TWEETTIME, TWEETTYPE)

    ## connect to the database and set a cursor point
    cnx = dbconnect.dbconnect(dbconfig)
    cursor = dbconnect.dbcursor(cnx)

    insertQuery = (
        "INSERT INTO ScheduledTweets "
        "(tweetcontent, timetosend, sent, done, tweettype) "
        "VALUES (%s, %s, '0', '0', %s)"
    )

    ## run the query and commit to the database if no error is reported

    cursor.execute(insertQuery, (tweetContent, tweetTime, tweetType))
    cnx.commit()  # commit the changes to the database
def main(OBJECTID, other_tablename, other_primarykey, lck, count, length, getArea):
    """
    OBJECTID           - the objectid of the feature from the wfs service
    other_tablename    - the name of the other table that you are intersecting the red list data with
    other_primarykey   - the name of the primary key in the other table
    lck                - multiprocess lock
    count              - how many features have been processed
    length             - the total number of features to be processed
    getArea            - boolean flag to indicate whether to capture the area of intersection 
    """
    conn = None                                                                                 # make sure conn exists for the except/finally blocks
    try:
        multiprocessing.current_process().cnt += 1   
        conn = dbconnect('species_especies_schema')                                             # connect to PostGIS
       
        # intersect the species range features with the intersectingfeature features
        conn.cur.execute("SELECT * from especies.redlist_intersect(%s,'%s','%s',%s)" % (OBJECTID, other_tablename, other_primarykey, getArea))
        intersectingfeatures = conn.cur.fetchall()                                              # get all of the intersecting features from the other table for the species
        other_tablename = other_tablename.replace('.', '_')                                      # when writing data, we have to replace any schema separator characters with underscores
        if len(intersectingfeatures) > 0:
            for intersectingfeature in intersectingfeatures:                                    # iterate through the intersectingfeatures
                if intersectingfeature[3] is None:
                    sql = "SELECT redlist_intersect_insert_result('%s',%s,%s,%s,%s)" % (other_tablename, OBJECTID, intersectingfeature[1], intersectingfeature[2], 'Null')
                else:
                    sql = "SELECT redlist_intersect_insert_result('%s',%s,%s,%s,%s)" % (other_tablename, OBJECTID, intersectingfeature[1], intersectingfeature[2], intersectingfeature[3])
                conn.cur.execute(sql)
            conn.cur.execute("insert into species_" + other_tablename + "_log (pid, species_id, ts) values (%s,%s,now())" % (os.getpid(), OBJECTID)) 

        else:
            conn.cur.execute("insert into species_" + other_tablename + "_log (pid, species_id, message, ts) values (%s,%s,'No intersecting features',now())" % (os.getpid(), OBJECTID)) 
                        
    except Exception as inst:
        if conn:
            conn.cur.execute("insert into species_" + other_tablename + "_log (pid, species_id, message, ts) values (%s,%s,'%s',now())" % (os.getpid(), OBJECTID, inst.args[0]))

    finally:
        if conn:
            conn.cur.close()
            del(conn)
Example #36
def fetchLatLong():
    db = dbconnect()
    for l in db.movielocations.aggregate([{"$unwind":"$location"}])["result"]:
        print l['_id'], l['title'], l['location'], GLocation.getlatlong(l['location'])
        raw_input()
Example #37
from dbconnect import dbconnect
import re
import json

db = dbconnect()


def cleaner(one):
    value = one.split(' ')[0]
    degree_sign = re.sub('[0-9.]', '', value)
    cleaned = value.split(degree_sign)[0]

    return cleaned


def dict_cleaner(total_data):
    gps_n = total_data['gps'].split(',')[0]
    clean_gps_n = cleaner(gps_n)

    gps_e = total_data['gps'].split(', ')[1]
    clean_gps_e = cleaner(gps_e)

    new_list_of_coordinates = [float(clean_gps_n), float(clean_gps_e)]

    final_dict = {}
    for key, value in total_data.items():
        if key != 'gps':
            final_dict[key] = value

    final_dict['gps'] = new_list_of_coordinates
    return final_dict
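For concreteness, a worked example of dict_cleaner with a hypothetical GPS string in the format the parsing above expects ('<degrees>° N, <degrees>° E'):

# dict_cleaner({'gps': '59.3326° N, 18.0649° E', 'site': 'Stockholm'})
# returns {'site': 'Stockholm', 'gps': [59.3326, 18.0649]}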
def main(OBJECTID, lck, featureclassname, idfieldname, count, length, getArea=False):
    """
    OBJECTID           - the objectid of the feature from the wfs service
    lck                - multiprocess lock
    featureclassname   - the name of the feature class in PostGIS to intersect with the species data
    idfieldname        - the unique field in the feature class that represents the primary key value
    count              - how many features have been processed
    length             - the total number of features to be processed
    getArea            - boolean flag to indicate whether to capture the area of intersection 
    """
    devconn = None
    liveconn = None                                                                             # make sure both exist for the finally block
    try:
        multiprocessing.current_process().cnt += 1   
        devconn = dbconnect('species_dev')                                                      # connect to Postgresql
        devconn.cur.execute("insert into species_wdpa_analysis_summary (objectid,wfs_requested) values (" + str(OBJECTID) + ",TRUE) RETURNING oid;")
        oid = devconn.cur.fetchone()[0]
        wfsfilter = "<ogc:Filter><ogc:PropertyIsEqualTo><ogc:PropertyName>OBJECTID</ogc:PropertyName><ogc:Literal>" + str(OBJECTID) + "</ogc:Literal></ogc:PropertyIsEqualTo></ogc:Filter>"   # create the filter for the species
        _wfs_ = WebFeatureService(WFS_URL + '?request=GetCapabilities', version='1.0.0')        # get the WFS Service Capabilities
        try:
            stream = _wfs_.getfeature(typename=['Andrew_SpeciesWFSLatLong:AllSpecies'], filter=wfsfilter, propertyname=[])    # get the species range as gml from the WFS service
        except:
            raise Exception("update species_wdpa_analysis_summary set wfs_failed=TRUE where oid=" + str(oid))
        elementTree = ET.parse(stream)                                                          # load the gml into memory
        featureMember = elementTree.find('{' + OPENGIS_NAMESPACE + '}featureMember')            # get the features
        if featureMember is None:                                                               # error check
            raise Exception("update species_wdpa_analysis_summary set no_features_returned=TRUE where oid=" + str(oid)) 
        speciesid = getAttribute(elementTree, 'SpeciesID')                                      # get the SpeciesID
        if speciesid is None:                                                                   # error check
            raise Exception("update species_wdpa_analysis_summary set no_speciesid=TRUE where oid=" + str(oid))
        else:
            devconn.cur.execute("update species_wdpa_analysis_summary set speciesid=" + str(speciesid) + " where objectid=" + str(OBJECTID))
        presence = getAttribute(elementTree, 'PRESENCE')                                        # get the PRESENCE
        if presence is None:                                                                    # error check
            raise Exception("update species_wdpa_analysis_summary set no_presence_value=TRUE where oid=" + str(oid))
        polygons = elementTree.getiterator('{' + OPENGIS_NAMESPACE + '}Polygon')                # get the polygons         
        if len(polygons) == 0:                                                                  # error check
            raise Exception("update species_wdpa_analysis_summary set no_wfs_polygons=TRUE where oid=" + str(oid))
        intersects = False
        liveconn = dbconnect('species_live')                                                    # connect to Postgresql to do the intersection analysis
        for i in polygons:                                                                      # iterate through all of the species range polygons
             
            # intersect the species range features with the intersectingfeature features
            if getArea:
                sql = "select " + idfieldname + ", st_area(st_transform(st_intersection(ST_GeomFromGML('" + ET.tostring(i) + "',4326), geom),97099)) from " + featureclassname + " where st_intersects(ST_GeomFromGML('" + ET.tostring(i) + "',4326), geom)"    
            else:
                sql = "select " + idfieldname + " from " + featureclassname + " where st_intersects(ST_GeomFromGML('" + ET.tostring(i) + "',4326), geom)" # select the features that overlap the species range, e.g. select wdpaid from wdpa where st_intersects
             
            liveconn.cur.execute(sql)                                                           # execute the query
            intersectingfeatures = liveconn.cur.fetchall()                                      # get all of the records
            for intersectingfeature in intersectingfeatures:                                    # iterate through the intersectingfeatures
                sql =   "insert into species_wdpa (speciesid, presence, wdpaid, objectid)  VALUES (" + str(speciesid) + "," + str(presence) + "," + str(int(intersectingfeature[0])) + "," + str(OBJECTID) + ") RETURNING oid"
                print sql
                if getArea:
                    devconn.cur.execute("insert into species_wdpa (speciesid, presence, wdpaid, objectid,area)  VALUES (" + str(speciesid) + "," + str(presence) + "," + str(int(intersectingfeature[0])) + "," + str(OBJECTID) + "," + str(intersectingfeature[1]) + ") RETURNING oid")
                else:      
                    devconn.cur.execute("insert into species_wdpa (speciesid, presence, wdpaid, objectid)  VALUES (" + str(speciesid) + "," + str(presence) + "," + str(int(intersectingfeature[0])) + "," + str(OBJECTID) + ") RETURNING oid")                                                                  
                intersects = True
        if not intersects:
            raise Exception("update species_wdpa_analysis_summary set intersecting_features=FALSE where oid=" + str(oid))            
                        
    except Exception as inst:
        if devconn:
            devconn.cur.execute(inst.args[0])
    finally:
        if devconn:
            devconn.cur.close()                                                                 # close the cursor
        if liveconn:
            liveconn.cur.close()                                                                # close the cursor
        del(devconn)                                                                            # close the connections
        del(liveconn)
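getAttribute is called throughout this worker but not shown; a minimal sketch of what it presumably does (return the text of the first element whose tag ends with the given name), reusing the same ElementTree import:

def getAttribute(elementTree, name):
    for element in elementTree.getiterator():                                               # walk every element in the GML document
        if element.tag.endswith(name):
            return element.text
    return None                                                                             # matches the 'is None' checks above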
#imports
import urllib, overlaySpecies, multiprocessing, os, datetime
from dbconnect import dbconnect

def start_process():
    print 'Processing', multiprocessing.current_process().name, os.getpid()
    multiprocessing.current_process().cnt = 0 

# get a list of the unique ids
conn = dbconnect('species_live') 
cursor = conn.cur
cursor.execute('SELECT id FROM species_distribution.species order by 1;') 
ids = cursor.fetchall() 
idlist = [id[0] for id in ids]
count = 1 
processes = 8                                                       # number of CPUs
processPool = multiprocessing.Pool(processes, start_process)        # create a process pool to manage the jobs
manager = multiprocessing.Manager()                                 # instantiate a manager to manage the shared memory objects 
lck = manager.Lock()                                                # create a lock to block access

#single feature test - orang utan
#idlist = [2565] # normally returns an error
#idlist = [13006] # this is a very small polygon - speciesid = 61554 
#idlist = [91097] # succesful as lat/long but not as mollweide  
#idlist = [2565, 2572, 2576, 15234, 15433, 15626, 18133, 18780, 27964, 28000] # unable to perform query feature  
idlist = [103255,1,130206] # orang utan, feature with no id_no and invalid OBJECTID  
#idlist = [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23] 
                             
length = len(idlist)

#iterate through the species range features
import multiprocessing, os, rasterize, datetime, logging, sys
from dbconnect import dbconnect
LOG_FILENAME = 'rasterize.log' 

def start_process():
    multiprocessing.current_process().cnt = 0

conn = dbconnect('species_appuser')
sql = "SELECT DISTINCT species_id FROM species_distribution.species"
# print sql
conn.cur.execute(sql)
res = conn.cur.fetchall()
conn.cur.close()                                                    # close the cursor to get the species
del(conn)                                                           # close the connection to the database
idlist = []                                                         # instantiate a list to hold the species ids
if res:                                                             # build the species id list
    idlist = [row[0] for row in res]
#idlist = [16,18,59,137,138,139,140,142,217] # and 219 for cheetah
idlist = [899] # sun bear - multiple presence ids, 899 arctic fox, 23148 big ranged swordfish - big extent, 17975 orang utan multiple features
idlist = [17975] # sun bear - multiple presence ids, 899 arctic fox, 23148 big ranged swordfish - big extent, 17975 orang utan multiple features
processes = min(len(idlist), multiprocessing.cpu_count() * 2)       # number of CPUs
print 'Using ' + str(processes) + ' processes'
processPool = multiprocessing.Pool(processes, start_process)        # create a process pool to manage the jobs
manager = multiprocessing.Manager()                                 # instantiate a manager to manage the shared memory objects 
lock = manager.Lock()                                               # create a lock to block access
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG,)
for id in idlist:    
    try:
        r = processPool.apply_async(rasterize.main, [id, lock, True])       # run the rasterization asynchronously in a process
    except:
        print sys.exc_info()
Example #42
 def __init__(self):
     self.gdb, self.gdbc = dbconnect.dbconnect()
#!/usr/bin/env python

# Make a list of the current top 1,000 certs to whitelist from the
# Decentralized SSL Observatory's client submissions, to live in
# https-everywhere/src/chrome/content/code/X509ChainWhitelist.js

import dbconnect
import sys

db, dbc = dbconnect.dbconnect()

import time
few_days_ago = time.gmtime(time.time() - 3600 * 24 * 3)
cutoff = time.strftime("%Y-%m-%d", few_days_ago)

# This currently runs on the decentralized observatory server, but will also
# run on the published datasets and read-only SQL servers too...

q = """
SELECT hex(reports.chain_fp), count, `Validity:Not After`, Subject 
FROM reports JOIN chains       ON reports.chain_fp = chains.chain_fp 
             JOIN new_parsed_certs ON reports.cert_fp  = new_parsed_certs.cert_fp
WHERE count > 1000 AND 
      `Validity:Not After` > "%s" 
GROUP BY reports.chain_fp 
ORDER BY count DESC 
LIMIT 1000
""" % (cutoff, )

dbc.execute(q)
results = dbc.fetchmany(1000)
import numpy
import Image
import arcpy
from scipy.sparse import *
from dbconnect import dbconnect
import time
t0=time.time()
CELLSIZE = 1222.9924525618553 #cell size at zoom 15
OFFSET = 20037508.3428        #offset for the web mercator projection
speciesID= arcpy.GetParameterAsText(0)
conn = dbconnect('damon_species')
cur = conn.cur
cur.execute("SELECT tx,ty,z FROM public.pilotspeciesdata WHERE speciesid in (" + speciesID + ")")
rows=cur.fetchall()
arcpy.AddMessage("Number records: " + str(len(rows)))
t1=time.time()
arcpy.AddMessage("Records returned: " + str(t1-t0) + "ms")
data3d=numpy.transpose(numpy.array(rows,dtype="int32"))
t2=time.time()
arcpy.AddMessage("Transposed: " + str(t2-t1) + "ms")
minx=min(data3d[0]) #get the minx value
maxx=max(data3d[0]) #get the maxx value
miny=min(data3d[1]) #get the miny value
maxy=max(data3d[1]) #get the maxy value
width=maxx-minx+1   #get the width of the resulting image
height=maxy-miny+1  #get the height of the resulting image
data3d[0].__isub__(minx) #change the x values to be zero based by subtracting the minx value
data3d[1].__imul__(-1)   #do the same with the y values using a different calculation
data3d[1].__iadd__(maxy)
pixels=coo_matrix((data3d[2],(data3d[1],data3d[0])), shape=(height,width)).todense() #convert the sparse array into a dense matrix, i.e. by adding in all of the zero values
t3=time.time()
Example #45
    args=filter(lambda a: a!="--roots", args)
    parse_roots=True
  if "--create" in args:
    args=filter(lambda a: a!="--create", args)
    create = True
  if "--readable" in args:
    args=filter(lambda a: a!="--readable", args)
    make_readable_table=True
  # The if we're doing --readable, --create refers to that table
  if create and not make_readable_table:
    create_table()
  return args


from dbconnect import dbconnect
gdb, gdbc = dbconnect()
def db_from_results(dirs):
  print "Targetting", dirs
  for d in dirs:
    for path, validities, x509_certchain, fprints in openssl_dump.dumpByDir(d):
      add_cert_to_db(path, validities, x509_certchain, fprints)

  make_indicies()

  print "Exiting correctly..."
 
def make_indicies(tablename):
  # Make some indicies
  to_index = ["valid"]
  # use hashes to index these:
  index_h = ["Subject","Issuer",\
#imports
import redlist_intersect
from dbconnect import dbconnect

conn = dbconnect('species_especies_schema') # don't need the host in the connection information as this code is running on the same machine as the db

# Run the wdpa preprocessing   
print 'Preprocessing..' 
conn.cur.execute('select * from especies.redlist_wdpa_intersect_preprocess()') # create the latest wdpa snapshot 

# Run red_list intersection
print 'Processing..' 
# redlist_intersect.start_intersect('wdpa', 'id', True, 8)
redlist_intersect.start_intersect('wdpa', 'id', False, 8)

# Run the wdpa post-processing
print 'Post processing..' 
conn.cur.execute('select * from especies.redlist_wdpa_intersect_postprocess()') # do the postprocessing to produce all of the derived tables

print 'Publishing analysis.. '
# conn.cur.execute('select * from especies.redlist_wdpa_intersect_makelive()') # delete the existing tables and make the new ones live

conn.cur.close()
del(conn)
Example #47
#!/usr/bin/env python

# This script cleans up after a bug in --readable that left some certs missing
# from the readable table :/

import dbconnect
import openssl_dump
import os

db,dbc = dbconnect.dbconnect()
db1,dbc1 = dbconnect.dbconnect()

q = """
select fingerprint,path from all_certs 
where fingerprint not in (
  select fingerprint from readable
)
"""
print q
dbc.execute(q)
batch = dbc.fetchmany(1000)
while batch:

  fds = []
  for fprt, path in batch:
    # Let the IO subsystem figure out an efficient way to suck all these certs
    # into RAM
    f = os.open(path, os.O_NONBLOCK)
    fds.append(f)
    os.read(f,2048)
def main(spatialObj, spatialObjId, lock, nrIt, datadir, outputformat="csv", extent=None):
    mp.current_process().cnt += 1
    print "PID %s: Job %s: Processing ID %s" % (os.getpid(), mp.current_process().cnt, spatialObjId)
    # get the sql to retrieve the data
    conn = dbconnect("durga_dopa")
    if spatialObj == "species":
        sql = getSpeciesSQL(spatialObjId)
        if outputformat == "csv":
            rs = conn.ExecuteSQL("SELECT * FROM speciestaxonomy WHERE id_no = '" + str(spatialObjId) + "'")
            kingdom = rs.GetFeature(0).GetFieldAsString(3).capitalize()
            phylum = rs.GetFeature(0).GetFieldAsString(4).capitalize()
            _class = rs.GetFeature(0).GetFieldAsString(5).capitalize()
            order = rs.GetFeature(0).GetFieldAsString(6).capitalize()
            family = rs.GetFeature(0).GetFieldAsString(7).capitalize()
            genus = rs.GetFeature(0).GetFieldAsString(8).split(" ")[0]
            species = rs.GetFeature(0).GetFieldAsString(8)
            status = rs.GetFeature(0).GetFieldAsString(9)
            conn.ReleaseResultSet(rs)
    elif spatialObj == "wdpa":
        sql = getWDPASQL(spatialObjId)
    elif spatialObj == "countries":
        sql = getCountrySQL(spatialObjId)
    lock.acquire()
    lyr = conn.execute(sql)
    if not lyr:
        lock.release()                                              # release the lock before bailing out, or every other worker deadlocks
        print "PID %s: No results from DB using '%s'" % (os.getpid(), sql)
        return
    lock.release()
    lsrs = lyr.GetSpatialRef().__str__()
    srs = osr.SpatialReference(lsrs)
    print "PID %s: Extent: %s" % (os.getpid(), lyr.GetExtent())
    src_extent, tiff_width, tiff_height = snapExtentToGrid(lyr.GetExtent(), extent)
    if tiff_width == 0:
        tiff_width = 1
    if tiff_height == 0:
        tiff_height = 1
    if tiff_width * tiff_height > 50000000:  # approx 7000px x 7000px ...
        extents = splitExtent(
            src_extent, tiff_width, tiff_height, nrIt
        )  # rasterise the data by using a set of smaller extents if it is large
    else:
        extents = [[src_extent, tiff_width, tiff_height]]
    for src_extent, tiff_width, tiff_height in extents:
        memraster = gdal.GetDriverByName("MEM").Create("", tiff_width, tiff_height, 1, gdal.GDT_Byte)
        memraster.SetGeoTransform((src_extent[0], res, 0, src_extent[3], 0, -res))
        memraster.SetProjection(lsrs)
        err = gdal.RasterizeLayer(memraster, [1], lyr, options=["ATTRIBUTE=Z_VALUE", "ALL_TOUCHED=TRUE"])
        if err != 0:
            print "got an error: %s" % err
        conn.ReleaseResultSet(lyr)
        memrasterarr = memraster.GetRasterBand(1).ReadAsArray()
        del (memraster)
        (data, values) = getData(memrasterarr)
        del (memrasterarr)
        if data != 0:
            data.append(values)
            del (values)
            if len(data) > 0:  # if there is data
                d = numpy.dtype("<i4")
                data3d = [data[1].astype(d), data[0].astype(d), data[2].astype(d)]
                del (data)
                # get the tile number for the x coordinate
                data3d[0].__iadd__(
                    int(math.ceil((src_extent[0] - x0) / res))
                )  # x0 is the origin shift and res is the cell size
                # get the tile number for the y coordinate
                data3d[1].__imul__(-1)  # y values decrease as you go south
                data3d[1].__iadd__(
                    int(math.floor((src_extent[3] - x0) / res)) - 1
                )  # x0 is the origin shift and res is the cell size
                if outputformat == "csv":
                    coords = numpy.transpose(data3d)  # convert to an array in coordinate form ([x0,y0,z0],[x1,y1,z1]..)
                    fileCopyFrom = cStringIO.StringIO()
                    fileCopyFrom.writelines(
                        [
                            ",".join(
                                [
                                    getQuadKey(coord[0], coord[1]),
                                    str(coord[0]),
                                    str(coord[1]),
                                    "1",
                                    str(spatialObjId),
                                    str(coord[2]),
                                    kingdom,
                                    phylum,
                                    _class,
                                    order,
                                    family,
                                    genus,
                                    species,
                                    status,
                                ]
                            )
                            + "\n"
                            for coord in coords
                        ]
                    )
                    fileCopyFrom.seek(0)
                    csvFile = open(datadir + "/" + str(spatialObjId) + ".csv", "a")
                    csvFile.write(fileCopyFrom.getvalue())
                    csvFile.close()
                    fileCopyFrom.close()
                else:
                    f = open(datadir + "/" + str(spatialObjId) + ".npygz", "ab")  # append, so earlier extents are not truncated
                    gzipFile = gzip.GzipFile(mode="ab", fileobj=f)  # "ab" is the mode keyword, not the filename
                    cPickle.dump(data3d, gzipFile, protocol=2)  # append the data for this extent as a new gzip member
                    gzipFile.close()
                    f.close()
                del data3d
            else:
                del data  # free memory

    conn.ReleaseResultSet(lyr)  # release the layer only after every extent has been rasterised
    print "PID %s: Task %s: Finished" % (os.getpid(), mp.current_process().cnt)
    return spatialObjId
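# --- a minimal read-back sketch (not part of the original example) ---
# the .npygz file is a gzip stream with one pickled [x, y, z] array triplet
# appended per rasterised extent, so it is read by calling cPickle.load
# repeatedly until the stream is exhausted; the file name is hypothetical
import gzip
import cPickle
f = open("12345.npygz", "rb")
gzipFile = gzip.GzipFile(mode="rb", fileobj=f)
extents = []
while True:
    try:
        extents.append(cPickle.load(gzipFile))  # one load per appended dump
    except EOFError:
        break
gzipFile.close()
f.close()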
Example #49
0
import datetime
from dbconnect import dbconnect

if __name__ == '__main__':
    conn = dbconnect('species_dev')
    cursor = conn.cur
    cursor2 = conn.conn.cursor()  # conn.cur returns the same cursor object, so open an independent second cursor
    cursor.execute("SELECT DISTINCT id_no::int FROM species WHERE id_no != ' '")
    rows = cursor.fetchall()
    count = 1
    f = open(r"E:\cottaan\My Documents\ArcGIS\wdpa_iucn.csv", 'a')
    sql = ("SELECT DISTINCT species.id_no, wdpaid FROM wdpa, species "
           "WHERE ST_Intersects(species.geom, wdpa.geom) AND species.id_no = %s")
    for row in rows:
        print str(datetime.datetime.now()) + "\t" + str(count) + "\t" + str(row[0])
        cursor2.execute(sql, (str(row[0]),))  # bind the id as a parameter rather than concatenating it into the SQL
        species = cursor2.fetchall()
        for s in species:
            f.write(str(s[0]).strip() + "," + str(int(s[1])).strip() + "\n")
        count += 1
    f.close()
    cursor2.close()    # close the cursor used for the intersection queries
    cursor.close()     # close the cursor used to get the species ids
    conn.conn.close()  # close the connection to the database
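# --- an aside (not part of the original example) ---
# the loop above issues one spatial query per species id; the same id_no/wdpaid
# pairs can usually be produced in a single set-based query, letting PostGIS do
# the iteration:
#
#   SELECT DISTINCT species.id_no, wdpaid
#   FROM species, wdpa
#   WHERE ST_Intersects(species.geom, wdpa.geom);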
Example #50
0
#!/usr/bin/env python
import MySQLdb
import getopt, pdb, re, sys


akid = "`X509v3 extensions:X509v3 Authority Key Identifier:keyid`"
skid = "`X509v3 extensions:X509v3 Subject Key Identifier`"
ca = "`X509v3 extensions:X509v3 Basic Constraints:CA`"

# db init

sys.path.append("..")
from dbconnect import dbconnect
readb,rdbc = dbconnect()

try:
  opts, args = getopt.getopt(sys.argv[1:], "",[])
  
  for o, a in opts:
    pass
    #if o == '--from':
    #  INPUT_TABLE = a
    #elif o == '--into':
    #  ALL_NAMES = a

except getopt.GetoptError, err:
  print err
  sys.exit(1)

def sel(q, params = None, MAX = 1000):
  rdbc.execute(q, params)
  return rdbc.fetchmany(MAX)
Example #51
0
# imports
import multiprocessing, species_wdpa_analysis_worker, os
from dbconnect import dbconnect


def start_process():
    print "Processing", multiprocessing.current_process().name, os.getpid()
    multiprocessing.current_process().cnt = 0


# clear the species_wdpa table and create the temporary latest wdpa snapshot
conn = dbconnect("species_especies_schema")
conn.cur.execute("select * from especies.preprocess_species_wdpa_analysis()")
conn.cur.close()

# get a list of the unique ids
conn = dbconnect("species_live")
conn.cur.execute("SELECT id FROM species_distribution.species order by 1;")
ids = conn.cur.fetchall()
idlist = [id[0] for id in ids]
count = 1
processes = 8  # number of CPUs
processPool = multiprocessing.Pool(processes, start_process)  # create a process pool to manage the jobs
manager = multiprocessing.Manager()  # instantiate a manager to manage the shared memory objects
lck = manager.Lock()  # create a lock to block access

# single feature test
# idlist = [113185] # species overlapping Kinabalu Park

length = len(idlist)
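# --- a hypothetical continuation (the example stops before the jobs are dispatched) ---
# each id is handed to the worker; this assumes species_wdpa_analysis_worker.main
# has the main(OBJECTID, lck, count, length, getArea) signature used elsewhere
# in this collection
for id in idlist:
    processPool.apply_async(species_wdpa_analysis_worker.main, args=(id, lck, count, length, False))
    count += 1
processPool.close()  # no more jobs will be submitted
processPool.join()   # block until every worker has finished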
Example #52
0
#!/usr/bin/env python
import MySQLdb
import getopt, pdb, re, sys
import time

# Make tables mapping subject common names and subject alternate names to
# certids

ALT_COLUMN_NAME = 'X509v3 extensions:X509v3 Subject Alternative Name'
FETCH_COUNT = 1000
HARMLESS=False
log = False

from dbconnect import dbconnect
readdb, rdbc = dbconnect()
writedb, wdbc = dbconnect()

assert rdbc != wdbc # :)
assert readdb != writedb # :)

INPUT_TABLE = "valid_certs"

SAN_TABLE_NAME = 'SANToCert'
SCN_TABLE_NAME = 'SCNToCert'
ALL_NAMES = 'names'

try:
  opts, args = getopt.getopt(sys.argv[1:], "",["from=","into=","san_into=", "scn_into="])
  
  for o, a in opts:
    if o == '--from':
Example #53
0
def runner(dbconfig, waitTime):

    ## create counter variables
    i = 0
    j = 0
    k = 0

    ## timestamp used for the db connection cache cooldown; start it 5 seconds
    ## in the past so the first loop iteration always opens a fresh connection
    cachetill = datetime.datetime.now() - datetime.timedelta(seconds=5)

    ## get an api object and assign it to a variable

    authenticated_api = twitterfunctions.authenticatetwitter(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_KEY, ACCESS_SECRET)

    print colors.tcolors.BOLD + colors.tcolors.HEADER + "Starting program..." + colors.tcolors.ENDC + colors.tcolors.ENDC
    print("Parameters used: loop sleep - %s seconds, connection cache offset - %s seconds, notify every - %s runs" % (colors.tcolors.GREEN + str(waitTime) + colors.tcolors.ENDC, colors.tcolors.GREEN + str(CACHEOFFSET) + colors.tcolors.ENDC, colors.tcolors.GREEN + str(NOTIFYONRNS) + colors.tcolors.ENDC))

    ## Infinite loop
    while True:

        ## increment loop counter
        i += 1
        j += 1
        if j == NOTIFYONRNS:
            print colors.tcolors.BOLD + "Iteration number: %d" % (i) + colors.tcolors.ENDC
            j = 0

        ## rather than connect to the database each and every time, we do
        ## some caching to prevent unnecessary connection opening and closing

        ## get time now
        timenow = datetime.datetime.now()

        if cachetill < timenow:

            print( colors.tcolors.BLUE + 'making new connection at %s' % (timenow) + colors.tcolors.ENDC)

            ## set next cachetime to time now + the cache offset specified as
            ## arg2 when starting the program
            cachetill = datetime.datetime.now() + datetime.timedelta(seconds=CACHEOFFSET)

            ## connect to the database and set a cursor point
            cnx = dbconnect.dbconnect(dbconfig)
            cursor = dbconnect.dbcursor(cnx)

        ## query for ScheduledTweets that are not sent yet
        selectQuery = ("SELECT id, tweetcontent, timetosend, sent, tweettype FROM ScheduledTweets WHERE sent = 0")

        ## execute the query and return the result to an array
        cursor.execute(selectQuery)
        selectResult = cursor.fetchall()


        ## loop through the results
        for (id, tweetcontent, timetosend, sent, tweettype) in selectResult:
            ## if not sent yet
            if sent == 0:

                #print("Send At: {}, {}".format(timetosend, tweetcontent, sent))

                ## get current system time
                timestamp = datetime.datetime.now()
                ## compare the current time against timetosend from the entry
                if timestamp > timetosend:
                    ## if current time is after scheduled time to send
                    print( colors.tcolors.BOLD + colors.tcolors.HEADER + 'will be sending now' + colors.tcolors.ENDC + colors.tcolors.ENDC)

                    ## set a query to mark this entry as sent
                    updateQuery = ("UPDATE ScheduledTweets "
                               "SET sent = '1', "
                               "timesent = NULL "
                               "WHERE id = %s")

                    ## send the tweet or retweet
                    if tweettype == 'retweet':
                        twitterfunctions.sendretweet(authenticated_api, tweetcontent)
                    elif tweettype == 'tweet':
                        twitterfunctions.sendtweet(authenticated_api, tweetcontent)

                    ## run the update query and commit to the database
                    cursor.execute(updateQuery, (id,))  # let the driver bind the id parameter
                    cnx.commit() #commit the changes to the database

                    ## create an INSERT query to copy the current tweet to a second
                    ## table for SentTweets; values are bound as parameters so quotes
                    ## in tweetcontent cannot break the statement
                    insertQuery = ('INSERT INTO SentTweets '
                         '(tweetcontent, timetosend, sent, oldid) '
                         'VALUES '
                         '(%s, %s, "1", %s)')

                    ## execute the query and commit to the database
                    cursor.execute(insertQuery, (tweetcontent, timetosend, id))
                    cnx.commit() #commit the changes to the database

            else:
                ## the SELECT filters on sent = 0, so a row with sent != 0 means
                ## something is wrong with the queried data - stop processing
                print('There was an error with the queried data.')
                break

        ## print a message between loops
        #print("waiting: %d seconds, already ran: %d times" % (waitTime, i))

        time.sleep(waitTime)
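# --- a hypothetical entry point (not shown in the original example) ---
# runner() is never invoked in the visible code; assuming dbconfig is whatever
# dbconnect.dbconnect() expects (a config file path, for instance), it might be
# started like this:
if __name__ == '__main__':
    runner('db.cnf', 60)  # hypothetical config name; poll every 60 seconds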
Example #54
0
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'

#Basic imports
from ctypes import *
import sys
import random
from dbconnect import dbconnect
#Phidget specific imports
from Phidgets.PhidgetException import *
from Phidgets.Events.Events import *
from Phidgets.Devices.InterfaceKit import *

connect = dbconnect()

#Create an interfacekit object
try:
    interfaceKit = InterfaceKit()
except RuntimeError as e:
    print("Runtime Exception: %s" % e.details)
    print("Exiting....")
    exit(1)

#Event Handler Callback Functions
def interfaceKitAttached(e):
    attached = e.device
    print("InterfaceKit %i Attached!" % (attached.getSerialNum()))

def interfaceKitDetached(e):
    detached = e.device
    print("InterfaceKit %i Detached!" % (detached.getSerialNum()))  # mirrors the attached handler; the original snippet is cut off here
Example #55
0
#!/usr/bin/env python
import MySQLdb
import getopt, pdb, re, sys

akid = "`X509v3 extensions:X509v3 Authority Key Identifier:keyid`"
skid = "`X509v3 extensions:X509v3 Subject Key Identifier`"
ca = "`X509v3 extensions:X509v3 Basic Constraints:CA`"

sys.path.append("..")
from dbconnect import dbconnect
readb, rdbc = dbconnect()

DEBUG = False

try:
    opts, args = getopt.getopt(sys.argv[1:], "", [])
    #for o, a in opts:
    #  if o == '-a':
    #    TABLE = ' all_certs '
    #  elif o == '-o':
    #    TOFIND = a
    #    only_interesting = True
except getopt.GetoptError, err:
    print err
    sys.exit(1)


def sel(q, params=None, MAX=10000):
    if DEBUG: print q, params
    rdbc.execute(q, params)
    return rdbc.fetchmany(MAX)
Example #56
0
#This uses psycopg2 to access the Postgresql database rather than the esri cursor on a file gdb table
import cPickle
import numpy
import arcpy
from dbconnect import dbconnect
speciesID=arcpy.GetParameterAsText(0)
conn = dbconnect('durga_dopa')
cur = conn.cur
cur.execute("SELECT tx,ty,z FROM public.pilotspeciesdata WHERE speciesid='" + speciesID + "'")
rows=cur.fetchall()
records = numpy.transpose(numpy.array(rows, dtype="int32"))
records = numpy.reshape(records, len(records[0]) * 3)  # flatten the 3 x n array (tx, ty and z rows) into a single 1-D array
f = open(r"E:\cottaan\My Documents\ArcGIS\ID" + speciesID + ".npy",'wb')
#f = open("/srv/www/htdocs/eSpecies/SpeciesRichness.npy",'wb')
cPickle.dump(records, f, protocol=2)
f.close()
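# --- a minimal read-back sketch (not part of the original example) ---
# the dump is a single pickled 1-D int32 array; undoing the flatten recovers
# the three coordinate rows; the path and id are hypothetical
import cPickle
import numpy
f = open(r"E:\cottaan\My Documents\ArcGIS\ID12345.npy", "rb")
records = cPickle.load(f)
f.close()
tx, ty, z = numpy.reshape(records, (3, len(records) // 3))  # undo the flatten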
Example #57
0
__author__ = 'Adam Stelmack'
__version__ = '2.1.8'
__date__ = 'May 17 2010'

#Basic imports
from ctypes import *
import sys
import random
from dbconnect import dbconnect
#Phidget specific imports
from Phidgets.PhidgetException import *
from Phidgets.Events.Events import *
from Phidgets.Devices.InterfaceKit import *

connect = dbconnect()
controllerID = 902
connect.disconnected(controllerID)

#Create an interfacekit object
try:
    interfaceKit = InterfaceKit()
except RuntimeError as e:
    print("Runtime Exception: %s" % e.details)
    print("Exiting....")
    exit(1)


#Event Handler Callback Functions
def interfaceKitAttached(e):
    connect.connected(controllerID)
Example #58
0
        args = filter(lambda a: a != "--roots", args)
        parse_roots = True
    if "--create" in args:
        args = filter(lambda a: a != "--create", args)
        create = True
    if "--readable" in args:
        args = filter(lambda a: a != "--readable", args)
        make_readable_table = True
    # The if we're doing --readable, --create refers to that table
    if create and not make_readable_table:
        create_table()
    return args


from dbconnect import dbconnect
gdb, gdbc = dbconnect()


def db_from_results(dirs):
    print "Targetting", dirs
    for d in dirs:
        for path, validities, x509_certchain, fprints in openssl_dump.dumpByDir(
                d):
            add_cert_to_db(path, validities, x509_certchain, fprints)

    make_indicies()

    print "Exiting correctly..."


def make_indicies(tablename):
Example #59
0
#!/usr/bin/env python

create_table = False

import os, time, random, os.path, sys, socket
from dbconnect import dbconnect
db,dbc = dbconnect()

myRand = random.Random(time.time())

from xml.parsers.expat import ParserCreate
parser = ParserCreate()

slash8s = []
prefix = False
status = False

def start_element(name, attrs):
    global prefix,status,new
    if name == "record":
      new = []
    elif name == "prefix":
      prefix = True
    elif name == "status":
      status = True

def end_element(name):
    global prefix,status,new
    if name == "prefix":
      prefix = False
    elif name == "status":