def msql(self, gid, sid, did=None):
    """Return the SQL that would be run for the requested modification."""
    data = {}
    for key, value in request.args.items():
        try:
            data[key] = json.loads(value)
        except ValueError:
            # Not JSON-encoded - keep the raw string.
            data[key] = value

    try:
        status, res = self.get_sql(gid, sid, data, did)
        if not status:
            return res

        # Collapse runs of blank lines, then trim the edges.
        res = re.sub('\n{2,}', '\n\n', res)
        sql = res.strip('\n').strip(' ')
        return make_json_response(
            data=sql,
            status=200
        )
    except Exception as e:
        current_app.logger.exception(e)
        return make_json_response(
            data=_("-- modified SQL"),
            status=200
        )
def statistics(self, gid, sid):
    """Fetch and return statistics for the given server as JSON."""
    from pgadmin.utils.driver import get_driver

    manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(sid)
    conn = manager.connection()

    if not conn.connected():
        # No live connection - nothing can be collected.
        return make_json_response(
            info=gettext(
                "Server has no active connection for generating statistics."
            )
        )

    # Pick the version-specific stats template.
    version_dir = '9.2_plus' if manager.version >= 90200 else '9.1_plus'
    query = render_template(
        "/".join(['servers/sql', version_dir, 'stats.sql']),
        conn=conn,
        _=gettext
    )
    status, res = conn.execute_dict(query)
    if not status:
        return internal_server_error(errormsg=res)
    return make_json_response(data=res)
def msql(self, gid, sid, did, scid, cfgid=None):
    """
    Return the modified SQL for an FTS Configuration change.

    :param gid: group id
    :param sid: server id
    :param did: database id
    :param scid: schema id
    :param cfgid: FTS Configuration id
    """
    data = {}
    for key, value in request.args.items():
        try:
            data[key] = json.loads(value)
        except ValueError:
            # Plain string value - keep as-is.
            data[key] = value

    # Build the SQL for the requested modification.
    sql = self.get_sql(gid, sid, did, scid, data, cfgid)

    has_sql = (isinstance(sql, str) and sql and
               sql.strip('\n') and sql.strip(' '))
    payload = sql if has_sql else "--modified SQL"
    return make_json_response(
        data=payload,
        status=200
    )
def msql(self, gid, sid, did, lid=None):
    """
    Return the modified SQL for the selected language node.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        lid: Language ID
    """
    data = {}
    for key, value in request.args.items():
        try:
            data[key] = json.loads(value)
        except ValueError:
            data[key] = value

    sql = self.get_sql(data, lid)
    if not (sql and sql.strip('\n') and sql.strip(' ')):
        # Nothing meaningful was generated.
        return make_json_response(
            data='-- Modified SQL --',
            status=200
        )
    return make_json_response(
        data=sql,
        status=200
    )
def update(self, gid, sid, did, scid, tid, rid):
    """Update the rule object with the posted properties."""
    if request.form:
        data = request.form
    else:
        data = json.loads(request.data.decode())

    sql = self.getSQL(gid, sid, data, tid, rid)
    try:
        if not (sql and sql.strip('\n') and sql.strip(' ')):
            # No effective change was requested.
            return make_json_response(
                success=1,
                info="Nothing to update",
                data={
                    'id': tid,
                    'scid': scid,
                    'did': did
                }
            )

        status, res = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("Rule updated"),
            data={
                'id': tid,
                'sid': sid,
                'gid': gid,
                'did': did
            }
        )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def msql(self, gid, sid, rg_id=None):
    """
    Return the modified SQL for the selected resource group node.

    Args:
        gid: Server Group ID
        sid: Server ID
        rg_id: Resource Group ID
    """
    data = dict()
    for key, value in request.args.items():
        try:
            data[key] = json.loads(value)
        except ValueError:
            data[key] = value

    sql = self.get_sql(data, rg_id)
    if not (sql and sql.strip('\n') and sql.strip(' ')):
        # Nothing meaningful was generated.
        return make_json_response(
            data='-- Modified SQL --',
            status=200
        )
    return make_json_response(
        data=sql,
        status=200
    )
def node(self, server_group_id, server_id, database_id, external_table_id):
    """
    Return the browser-node JSON for a single external table.

    Args:
        server_group_id: Server Group ID
        server_id: Server ID
        database_id: Database ID
        external_table_id: External Table ID
    """
    sql_statement = render_template(
        template_name_or_list=os.path.join(
            self.sql_template_path, 'node.sql'
        ),
        external_table_id=external_table_id
    )
    result = self.get_external_tables(database_id, sql_statement)

    # Error responses come back as non-list objects; pass them through.
    if type(result) is not list:
        return result

    if not result:
        return make_json_response(
            data=gettext('Could not find the external table.'),
            status=404
        )

    return make_json_response(
        data=result[0],
        status=200
    )
def save(trans_id):
    """
    Persist changed grid data back to the server.

    Args:
        trans_id: unique transaction id
    """
    if request.data:
        changed_data = json.loads(request.data.decode())
    else:
        changed_data = request.args or request.form

    # Check the transaction and connection status.
    status, error_msg, conn, trans_obj, session_obj = \
        check_transaction_status(trans_id)

    transaction_ok = (status and conn is not None and
                      trans_obj is not None and session_obj is not None)
    if not transaction_ok:
        return make_json_response(
            data={'status': False, 'result': error_msg,
                  'query_result': None})

    # Without a primary key we cannot address the rows to save.
    if len(session_obj['primary_keys']) <= 0 or len(changed_data) <= 0:
        return make_json_response(
            data={'status': False,
                  'result': gettext('No primary key found for this object, so unable to save records.')}
        )

    status, res, query_res = trans_obj.save(changed_data)
    return make_json_response(
        data={'status': status, 'result': res, 'query_result': query_res})
def delete(self, gid, sid, did, cid):
    """
    Drop the cast object.

    :param cid: cast id
    :param did: database id
    :param sid: server id
    :param gid: group id
    :return: JSON response
    """
    # The 'delete' command means a drop with CASCADE.
    cascade = self.cmd == 'delete'

    try:
        # Resolve the cast's source/target types from its id.
        sql = render_template("/".join([self.template_path, 'delete.sql']),
                              cid=cid)
        status, res = self.conn.execute_dict(sql)
        if not status:
            return internal_server_error(errormsg=res)

        if not res['rows']:
            return make_json_response(
                status=410,
                success=0,
                errormsg=gettext(
                    'Error: Object not found.'
                ),
                info=gettext(
                    'The specified cast object could not be found.\n'
                )
            )

        # Drop the cast.
        row = res['rows'][0]
        sql = render_template("/".join([self.template_path, 'delete.sql']),
                              castsource=row['castsource'],
                              casttarget=row['casttarget'],
                              cascade=cascade)
        status, res = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("Cast dropped"),
            data={
                'id': cid,
                'sid': sid,
                'gid': gid,
                'did': did
            }
        )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def get_object_name(trans_id):
    """
    Return the object name associated with a grid transaction.

    Args:
        trans_id: unique transaction id
    """
    status, error_msg, conn, trans_obj, session_obj = \
        check_transaction_status(trans_id)

    if error_msg == gettext('Transaction ID not found in the session.'):
        return make_json_response(success=0, errormsg=error_msg,
                                  info='DATAGRID_TRANSACTION_REQUIRED',
                                  status=404)

    transaction_ok = (status and conn is not None and
                      trans_obj is not None and session_obj is not None)
    if transaction_ok:
        res = trans_obj.object_name
    else:
        status = False
        res = error_msg

    return make_json_response(data={'status': status, 'result': res})
def move_objects_sql(self, gid, sid, tsid):
    """
    Return the SQL for the "Move Objects..." dialog.

    Args:
        gid: Server Group ID
        sid: Server ID
        tsid: Tablespace ID
    """
    required_args = ['old_tblspc', 'tblspc', 'obj_type']

    data = dict()
    for k, v in request.args.items():
        try:
            # BUG FIX: json.loads() no longer accepts an ``encoding``
            # keyword (removed in Python 3.9); request args are already
            # str, so none is needed.
            data[k] = json.loads(v)
        except ValueError as ve:
            current_app.logger.exception(ve)
            data[k] = v

    # All three arguments are needed to render a meaningful statement.
    for arg in required_args:
        if arg not in data:
            return make_json_response(
                data=gettext("-- definition incomplete"),
                status=200
            )

    sql = render_template("/".join(
        [self.template_path, 'move_objects.sql']),
        data=data, conn=self.conn
    )
    return make_json_response(
        data=sql.strip('\n'),
        status=200
    )
def delete(self, gid, sid, did, fid):
    """
    Drop the selected foreign data wrapper.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        fid: foreign data wrapper ID
    """
    # The 'delete' command means a drop with CASCADE.
    cascade = self.cmd == 'delete'

    try:
        # Resolve the wrapper's name from its id.
        sql = render_template("/".join([self.template_path, 'delete.sql']),
                              fid=fid, conn=self.conn)
        status, name = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=name)

        if name is None:
            return make_json_response(
                status=410,
                success=0,
                errormsg=gettext(
                    'Error: Object not found.'
                ),
                info=gettext(
                    'The specified foreign data'
                    ' wrapper could not be found.\n'
                )
            )

        # Drop the foreign data wrapper.
        sql = render_template("/".join([self.template_path, 'delete.sql']),
                              name=name, cascade=cascade, conn=self.conn)
        status, res = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("Foreign Data Wrapper dropped"),
            data={
                'id': fid,
                'did': did,
                'sid': sid,
                'gid': gid,
            }
        )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def remove_filter(trans_id):
    """
    Remove any filter applied to the grid transaction.

    Args:
        trans_id: unique transaction id
    """
    status, error_msg, conn, trans_obj, session_obj = \
        check_transaction_status(trans_id)

    if error_msg == gettext('Transaction ID not found in the session.'):
        return make_json_response(success=0, errormsg=error_msg,
                                  info='DATAGRID_TRANSACTION_REQUIRED',
                                  status=404)

    transaction_ok = (status and conn is not None and
                      trans_obj is not None and session_obj is not None)
    if transaction_ok:
        res = None
        trans_obj.remove_filter()
        # The transaction object changed, so re-pickle it into the
        # session and update the session variable.
        session_obj['command_obj'] = pickle.dumps(trans_obj, -1)
        update_session_grid_transaction(trans_id, session_obj)
    else:
        status = False
        res = error_msg

    return make_json_response(data={'status': status, 'result': res})
def delete(self, gid, sid):
    """
    Delete a server node in the settings database.

    Args:
        gid: Server group id (routing parameter, unused here).
        sid: Server id to remove.
    """
    servers = Server.query.filter_by(user_id=current_user.id, id=sid)

    # TODO:: A server, which is connected, can not be deleted
    # NOTE(review): a SQLAlchemy query object is never None, so this
    # guard is effectively dead - the loop below simply deletes zero
    # rows when the server does not exist. Kept for interface parity.
    if servers is None:
        return make_json_response(
            success=0,
            errormsg=gettext(
                'The specified server could not be found.\n'
                'Does the user have permission to access the '
                'server?'
            )
        )

    try:
        for s in servers:
            db.session.delete(s)
        db.session.commit()
    except Exception as e:
        current_app.logger.exception(e)
        # BUG FIX: Exception objects have no ``.message`` attribute on
        # Python 3 - use str(e).
        return make_json_response(
            success=0,
            errormsg=str(e))

    # BUG FIX: the success path previously returned
    # traceback.format_exc() as the info text, leaking a meaningless
    # "NoneType: None" traceback string to the client.
    return make_json_response(success=1)
def get(*args):
    """Fetch the columns the result set is currently sorted by."""
    status, error_msg, conn, trans_obj, session_obj = args

    if error_msg == gettext('Transaction ID not found in the session.'):
        return make_json_response(
            success=0,
            errormsg=error_msg,
            info='DATAGRID_TRANSACTION_REQUIRED',
            status=404
        )

    column_list = []
    transaction_ok = (status and conn is not None and
                      trans_obj is not None and session_obj is not None)
    if transaction_ok:
        msg = gettext('Success')
        columns, column_list = trans_obj.get_all_columns_with_order(conn)
        sql = trans_obj.get_filter()
    else:
        status = False
        msg = error_msg
        columns = None
        sql = None

    return make_json_response(
        data={
            'status': status,
            'msg': msg,
            'result': {
                'data_sorting': columns,
                'column_list': column_list,
                'sql': sql
            }
        }
    )
def update(self, gid):
    """Update the server-group properties (currently only the name)."""
    # There can be only one record at most.
    servergroup = ServerGroup.query.filter_by(
        user_id=current_user.id,
        id=gid).first()

    data = request.form if request.form else \
        json.loads(request.data.decode())

    if servergroup is None:
        return make_json_response(
            status=417,
            success=0,
            errormsg=gettext(
                'The specified server group could not be found.'
            )
        )

    try:
        if u'name' in data:
            servergroup.name = data[u'name']
        db.session.commit()
    except Exception as e:
        # BUG FIX: Exception objects have no ``.message`` attribute on
        # Python 3 - use str(e).
        return make_json_response(
            status=410,
            success=0,
            errormsg=str(e)
        )

    # NOTE(review): when the payload arrived as JSON, request.form is
    # empty, so this echoes an empty result - confirm the client does
    # not rely on it.
    return make_json_response(result=request.form)
def create(self):
    """Create a new server group owned by the current user."""
    data = request.form if request.form else json.loads(request.data)

    if data[u'name'] != '':
        try:
            sg = ServerGroup(
                user_id=current_user.id,
                name=data[u'name'])
            db.session.add(sg)
            db.session.commit()

            data[u'id'] = sg.id
            data[u'name'] = sg.name

            return jsonify(
                node=blueprint.generate_browser_node(
                    "%d" % (sg.id), None,
                    sg.name,
                    "icon-%s" % self.node_type,
                    True
                )
            )
        except Exception as e:
            # BUG FIX: removed a leftover Python 2 ``print 'except'``
            # statement (a SyntaxError on Python 3) and replaced the
            # nonexistent ``e.message`` attribute with str(e).
            return make_json_response(
                status=410,
                success=0,
                errormsg=str(e))
    else:
        return make_json_response(
            status=417,
            success=0,
            errormsg=gettext('No server group name was specified'))
def update(self, gid, sid, did, scid, doid):
    """
    Updates the Domain object.

    Args:
        gid: Server Group Id
        sid: Server Id
        did: Database Id
        scid: Schema Id
        doid: Domain Id
    """
    status, SQL = self.get_sql(gid, sid, self.request, scid, doid)
    if not status:
        return internal_server_error(errormsg=SQL)

    try:
        if SQL:
            status, res = self.conn.execute_scalar(SQL)
            if not status:
                return internal_server_error(errormsg=res)

            # The domain may have been moved to another schema -
            # re-fetch its schema id.
            SQL = render_template("/".join([self.template_path,
                                            'get_oid.sql']),
                                  doid=doid)
            status, res = self.conn.execute_2darray(SQL)
            if not status:
                return internal_server_error(errormsg=res)

            scid = res['rows'][0]['scid']

            return make_json_response(
                success=1,
                # CONSISTENCY FIX: user-visible messages are wrapped in
                # gettext() like the sibling handlers in this module.
                info=gettext("Domain updated"),
                data={
                    'id': doid,
                    'scid': scid,
                    'sid': sid,
                    'gid': gid,
                    'did': did
                }
            )
        else:
            return make_json_response(
                success=1,
                info=gettext("Nothing to update"),
                data={
                    'id': doid,
                    'scid': scid,
                    'sid': sid,
                    'gid': gid,
                    'did': did
                }
            )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def delete(self, gid, sid, did, scid):
    """
    Drop an existing schema.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
    """
    try:
        # Resolve the schema name from its oid.
        sql = render_template(
            "/".join([self.template_path, 'sql/get_name.sql']),
            _=gettext,
            scid=scid
        )
        status, name = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=name)

        if name is None:
            return make_json_response(
                status=410,
                success=0,
                errormsg=gettext(
                    'Error: Object not found.'
                ),
                info=gettext(
                    'The specified schema could not be found.\n'
                )
            )

        # The 'delete' command implies a CASCADE drop.
        sql = render_template(
            "/".join([self.template_path, 'sql/delete.sql']),
            _=gettext,
            name=name,
            conn=self.conn,
            cascade=self.cmd == 'delete'
        )
        status, res = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("Schema dropped"),
            data={
                'id': scid,
                'sid': sid,
                'gid': gid,
                'did': did
            }
        )
    except Exception as e:
        current_app.logger.exception(e)
        return internal_server_error(errormsg=str(e))
def node(self, gid, sid):
    """Return the browser-node JSON document for a single server."""
    server = Server.query.filter_by(user_id=current_user.id,
                                    servergroup_id=gid,
                                    id=sid).first()

    if server is None:
        return make_json_response(
            status=410,
            success=0,
            # BUG FIX: the message used a %-style placeholder with
            # str.format() (plus a doubled gettext() call), so the id
            # was never substituted. Interpolate the translated string
            # with % instead.
            errormsg=gettext(
                "Couldn't find the server with id# %s!"
            ) % sid
        )

    from pgadmin.utils.driver import get_driver
    manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(server.id)
    conn = manager.connection()
    connected = conn.connected()

    in_recovery = None
    wal_paused = None
    if connected:
        status, result = conn.execute_dict("""
            SELECT CASE WHEN usesuper
                   THEN pg_is_in_recovery()
                   ELSE FALSE
                   END as inrecovery,
                   CASE WHEN usesuper AND pg_is_in_recovery()
                   THEN pg_is_xlog_replay_paused()
                   ELSE FALSE
                   END as isreplaypaused
            FROM pg_user WHERE usename=current_user""")
        # BUG FIX: the query status was previously ignored; a failed
        # query would have raised a KeyError on 'rows' instead of
        # reporting the error.
        if not status:
            return internal_server_error(errormsg=result)

        in_recovery = result['rows'][0]['inrecovery']
        wal_paused = result['rows'][0]['isreplaypaused']

    return make_json_response(
        result=self.blueprint.generate_browser_node(
            "%d" % (server.id),
            gid,
            server.name,
            "icon-server-not-connected" if not connected
            else "icon-{0}".format(manager.server_type),
            True,
            self.node_type,
            connected=connected,
            server_type=manager.server_type if connected else 'pg',
            version=manager.version,
            db=manager.db,
            user=manager.user_info if connected else None,
            in_recovery=in_recovery,
            wal_pause=wal_paused
        )
    )
def delete(self, gid, sid, did, scid, doid):
    """
    Drops the Domain object.

    Args:
        gid: Server Group Id
        sid: Server Id
        did: Database Id
        scid: Schema Id
        doid: Domain Id
    """
    # The 'delete' command means a drop with CASCADE.
    cascade = self.cmd == 'delete'

    # Resolve the domain's name and base namespace from its oid.
    sql = render_template("/".join([self.template_path, 'delete.sql']),
                          scid=scid, doid=doid)
    status, res = self.conn.execute_2darray(sql)
    if not status:
        return internal_server_error(errormsg=res)

    if not res['rows']:
        return make_json_response(
            status=410,
            success=0,
            errormsg=gettext(
                'Error: Object not found.'
            ),
            info=gettext(
                'The specified domain could not be found.\n'
            )
        )

    row = res['rows'][0]
    sql = render_template("/".join([self.template_path, 'delete.sql']),
                          name=row['name'],
                          basensp=row['basensp'],
                          cascade=cascade)
    status, res = self.conn.execute_scalar(sql)
    if not status:
        return internal_server_error(errormsg=res)

    return make_json_response(
        success=1,
        info=gettext("Domain dropped"),
        data={
            'id': doid,
            'scid': scid,
            'sid': sid,
            'gid': gid,
            'did': did
        }
    )
def delete(self, gid, sid, did, scid, coid):
    """
    Drop an existing collation object.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
        coid: Collation ID
    """
    # The 'delete' command means a drop with CASCADE.
    cascade = self.cmd == 'delete'

    try:
        # Resolve the collation name from its oid.
        sql = render_template("/".join([self.template_path,
                                        'get_name.sql']),
                              scid=scid, coid=coid)
        status, name = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=name)

        if name is None:
            return make_json_response(
                success=0,
                errormsg=gettext(
                    'Error: Object not found.'
                ),
                info=gettext(
                    'The specified collation could not be found.\n'
                )
            )

        sql = render_template("/".join([self.template_path, 'delete.sql']),
                              name=name, cascade=cascade, conn=self.conn)
        status, res = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("Collation dropped"),
            data={
                'id': coid,
                'scid': scid,
                'did': did
            }
        )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def update(self, gid, sid, did, scid, doid, coid):
    """
    Updates the Domain Constraint object.

    Args:
        gid: Server Group Id
        sid: Server Id
        did: Database Id
        scid: Schema Id
        doid: Domain Id
        coid: Domain Constraint Id
    """
    data = self.request
    status, SQL = self.get_sql(gid, sid, data, scid, doid, coid)
    # BUG FIX: a failed get_sql() previously fell through to the
    # "Nothing to update" success response, silently masking the error.
    if not status:
        return internal_server_error(errormsg=SQL)

    try:
        if SQL:
            status, res = self.conn.execute_scalar(SQL)
            if not status:
                return internal_server_error(errormsg=res)

            # Pick the icon reflecting the constraint's validation state.
            if 'convalidated' in data and data['convalidated']:
                icon = 'icon-domain_constraints'
            elif 'convalidated' in data and not data['convalidated']:
                icon = 'icon-domain_constraints-bad'
            else:
                icon = ''

            return make_json_response(
                success=1,
                info="Domain Constraint updated",
                data={
                    'id': coid,
                    'doid': doid,
                    'scid': scid,
                    'sid': sid,
                    'gid': gid,
                    'did': did,
                    'icon': icon
                }
            )
        else:
            return make_json_response(
                success=1,
                info="Nothing to update",
                data={
                    'id': coid,
                    'doid': doid,
                    'scid': scid,
                    'sid': sid,
                    'gid': gid,
                    'did': did
                }
            )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def preferences(trans_id):
    """
    Get or set explain/auto-commit options from/to preferences.

    GET returns the current preference values; any other method stores
    the posted explain options into the blueprint preferences.

    Args:
        trans_id: unique transaction id
    """
    if request.method == 'GET':
        # Check the transaction and connection status.
        status, error_msg, conn, trans_obj, session_obj = \
            check_transaction_status(trans_id)

        if error_msg == gettext('Transaction ID not found in the session.'):
            return make_json_response(success=0, errormsg=error_msg,
                                      info='DATAGRID_TRANSACTION_REQUIRED',
                                      status=404)

        if status and conn is not None and \
                trans_obj is not None and session_obj is not None:
            # Push the current auto commit/rollback preferences into the
            # transaction object.
            trans_obj.set_auto_commit(blueprint.auto_commit.get())
            trans_obj.set_auto_rollback(blueprint.auto_rollback.get())

            # The transaction object changed, so re-pickle it into the
            # session and update the session variable.
            session_obj['command_obj'] = pickle.dumps(trans_obj, -1)
            update_session_grid_transaction(trans_id, session_obj)

        return make_json_response(
            data={
                'explain_verbose': blueprint.explain_verbose.get(),
                'explain_costs': blueprint.explain_costs.get(),
                'explain_buffers': blueprint.explain_buffers.get(),
                'explain_timing': blueprint.explain_timing.get(),
                'auto_commit': blueprint.auto_commit.get(),
                'auto_rollback': blueprint.auto_rollback.get()
            }
        )
    else:
        if request.data:
            # BUG FIX: json.loads() no longer accepts an ``encoding``
            # keyword (removed in Python 3.9).
            data = json.loads(request.data)
        else:
            data = request.args or request.form

        for k, v in data.items():
            # BUG FIX: bool('false') is True, so string-valued form/args
            # payloads always enabled every option. Interpret string
            # values explicitly; other types keep plain truthiness.
            if isinstance(v, str):
                v = v.lower() == 'true'
            else:
                v = bool(v)
            if k == 'explain_verbose':
                blueprint.explain_verbose.set(v)
            elif k == 'explain_costs':
                blueprint.explain_costs.set(v)
            elif k == 'explain_buffers':
                blueprint.explain_buffers.set(v)
            elif k == 'explain_timing':
                blueprint.explain_timing.set(v)

        return success_return()
def delete(self, gid, sid, did, scid, tid):
    """
    Drop the fts_template object.

    :param gid: group id
    :param sid: server id
    :param did: database id
    :param scid: schema id
    :param tid: fts template id
    """
    # The 'delete' command means a drop with CASCADE.
    cascade = self.cmd == 'delete'

    # Resolve the template's name and schema from its id.
    sql = render_template("/".join([self.template_path, 'delete.sql']),
                          tid=tid)
    status, res = self.conn.execute_dict(sql)
    if not status:
        return internal_server_error(errormsg=res)

    if not res['rows']:
        return make_json_response(
            success=0,
            errormsg=gettext(
                'Error: Object not found.'
            ),
            info=gettext(
                'The specified FTS template could not be found.\n'
            )
        )

    # Drop the FTS template.
    row = res['rows'][0]
    sql = render_template("/".join([self.template_path, 'delete.sql']),
                          name=row['name'],
                          schema=row['schema'],
                          cascade=cascade)
    status, res = self.conn.execute_scalar(sql)
    if not status:
        return internal_server_error(errormsg=res)

    return make_json_response(
        success=1,
        info=gettext("FTS Template dropped"),
        data={
            'id': tid,
            'sid': sid,
            'gid': gid,
            'did': did,
            'scid': scid
        }
    )
def nodes(self, gid, sid, did, scid, tid, ptid=None):
    """
    List partition table nodes under the given parent table.

    Args:
        gid: Server group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
        tid: Parent Table ID
        ptid: Partition Table ID

    Returns:
        JSON of available table nodes
    """
    sql = render_template(
        "/".join([self.partition_template_path, 'nodes.sql']),
        scid=scid, tid=tid
    )
    status, rset = self.conn.execute_2darray(sql)
    if not status:
        return internal_server_error(errormsg=rset)

    def _to_node(row):
        # NB: 'tigger_count' reproduces the (misspelled) keyword the
        # client side expects - do not "fix" the spelling here.
        return self.blueprint.generate_browser_node(
            row['oid'],
            tid,
            row['name'],
            icon=self.get_icon_css_class({}),
            tigger_count=row['triggercount'],
            has_enable_triggers=row['has_enable_triggers'],
            is_partitioned=row['is_partitioned'],
            parent_schema_id=scid,
            schema_id=row['schema_id'],
            schema_name=row['schema_name']
        )

    if ptid is not None:
        if len(rset['rows']) == 0:
            return gone(gettext(
                "The specified partitioned table could not be found."
            ))
        return make_json_response(
            data=_to_node(rset['rows'][0]),
            status=200
        )

    return make_json_response(
        data=[_to_node(row) for row in rset['rows']],
        status=200
    )
def delete(self, gid, sid, did, scid, doid, coid):
    """
    Drops the Domain Constraint object.

    Args:
        gid: Server Group Id
        sid: Server Id
        did: Database Id
        scid: Schema Id
        doid: Domain Id
        coid: Domain Constraint Id
    """
    try:
        # Fetch the constraint properties needed to build the drop SQL.
        sql = render_template("/".join([self.template_path,
                                        'properties.sql']),
                              doid=doid, coid=coid)
        status, res = self.conn.execute_dict(sql)
        if not status:
            return internal_server_error(errormsg=res)

        if not res['rows']:
            return make_json_response(
                success=0,
                errormsg=gettext(
                    'Error: Object not found.'
                ),
                info=gettext(
                    'The specified domain constraint could not be found.\n'
                )
            )

        sql = render_template("/".join([self.template_path, 'delete.sql']),
                              data=res['rows'][0])
        status, res = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("Domain Constraint dropped"),
            data={
                'id': doid,
                'scid': scid,
                'sid': sid,
                'gid': gid,
                'did': did
            }
        )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def nodes(self, gid, sid, did, scid, pkgid=None):
    """
    Generate browser nodes for the packages under the given schema.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
        pkgid: Package ID (when a single node is requested)

    Returns:
        JSON response with one node or the full node list
    """
    sql = render_template(
        "/".join([self.template_path, 'nodes.sql']),
        scid=scid,
        pkgid=pkgid
    )
    status, rset = self.conn.execute_dict(sql)
    if not status:
        return internal_server_error(errormsg=rset)

    if pkgid is not None:
        if len(rset['rows']) == 0:
            return gone(
                errormsg=_("Could not find the package.")
            )
        row = rset['rows'][0]
        return make_json_response(
            data=self.blueprint.generate_browser_node(
                row['oid'],
                scid,
                row['name'],
                icon="icon-%s" % self.node_type
            )
        )

    nodes = [
        self.blueprint.generate_browser_node(
            row['oid'],
            scid,
            row['name'],
            icon="icon-%s" % self.node_type
        )
        for row in rset['rows']
    ]
    return make_json_response(
        data=nodes,
        status=200
    )
def nodes(self, gid, sid, jid, jscid=None):
    """
    Generate browser nodes for the schedules of a pgAgent job.

    Args:
        gid: Server Group ID
        sid: Server ID
        jid: Job ID
        jscid: Job schedule ID (when a single node is requested)
    """
    sql = render_template(
        "/".join([self.template_path, 'nodes.sql']),
        jscid=jscid,
        jid=jid
    )
    status, result = self.conn.execute_2darray(sql)
    if not status:
        return internal_server_error(errormsg=result)

    def _to_node(row):
        return self.blueprint.generate_browser_node(
            row['jscid'],
            row['jscjobid'],
            row['jscname'],
            icon="icon-pga_schedule",
            enabled=row['jscenabled']
        )

    if jscid is not None:
        if len(result['rows']) == 0:
            return gone(
                errormsg=gettext("Could not find the specified job step.")
            )
        return make_json_response(
            data=_to_node(result['rows'][0])
        )

    return make_json_response(
        data=[_to_node(row) for row in result['rows']],
        status=200
    )
def nodes(self, gid, sid, did, scid, pkgid, edbfnid=None):
    """
    Generate browser nodes for the functions of an EDB package.

    Args:
        gid: Server Group Id
        sid: Server Id
        did: Database Id
        scid: Schema Id
        pkgid: Package Id
        edbfnid: Function Id (when a single node is requested)
    """
    sql = render_template(
        "/".join([self.sql_template_path, 'node.sql']),
        pkgid=pkgid,
        fnid=edbfnid
    )
    status, rset = self.conn.execute_2darray(sql)
    if not status:
        return internal_server_error(errormsg=rset)

    def _to_node(row):
        return self.blueprint.generate_browser_node(
            row['oid'],
            pkgid,
            row['name'],
            icon="icon-" + self.node_type,
            funcowner=row['funcowner']
        )

    if edbfnid is not None:
        if len(rset['rows']) == 0:
            return gone(
                errormsg=_("Could not find the function")
            )
        return make_json_response(
            data=_to_node(rset['rows'][0]),
            status=200
        )

    return make_json_response(
        data=[_to_node(row) for row in rset['rows']],
        status=200
    )
def create(self, gid, sid, did, fid, fsid):
    """
    Create a user mapping on the given foreign server.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        fid: Foreign data wrapper ID
        fsid: Foreign server ID
    """
    required_args = ['name']

    # BUG FIX: json.loads() no longer accepts an ``encoding`` keyword
    # (removed in Python 3.9); decode the request body explicitly.
    data = request.form if request.form else \
        json.loads(request.data.decode('utf-8'))
    for arg in required_args:
        if arg not in data:
            return make_json_response(
                status=410,
                success=0,
                errormsg=gettext(
                    "Could not find the required parameter ({}).").format(
                    arg))
    try:
        # Fetch the foreign server / FDW properties the template needs.
        sql = render_template("/".join(
            [self.template_path, self._PROPERTIES_SQL]),
            fserid=fsid, conn=self.conn)
        status, res1 = self.conn.execute_dict(sql)
        if not status:
            return internal_server_error(errormsg=res1)
        if len(res1['rows']) == 0:
            return gone(
                gettext("The specified user mappings could not be found."))

        fdw_data = res1['rows'][0]

        is_valid_options = False
        if 'umoptions' in data:
            is_valid_options, data['umoptions'] = validate_options(
                data['umoptions'], 'umoption', 'umvalue')

        sql = render_template("/".join(
            [self.template_path, self._CREATE_SQL]),
            data=data, fdwdata=fdw_data,
            is_valid_options=is_valid_options,
            conn=self.conn)
        status, res = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=res)

        # Re-read the newly created mapping to build the browser node.
        sql = render_template("/".join(
            [self.template_path, self._PROPERTIES_SQL]),
            fsid=fsid, data=data, conn=self.conn)
        status, r_set = self.conn.execute_dict(sql)
        if not status:
            return internal_server_error(errormsg=r_set)

        # NOTE(review): if the properties query returns no rows this
        # falls through and returns None - confirm that cannot happen.
        for row in r_set['rows']:
            return jsonify(node=self.blueprint.generate_browser_node(
                row['oid'], fsid, row['name'],
                icon='icon-user_mapping'))
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def wrap(self, **kwargs):
    """
    Normalize the incoming request payload for foreign-table views.

    Decodes JSON-encoded values, validates required arguments for
    create requests, resolves inherited table names from their oids,
    and stores the normalized dict on ``self.request`` before
    delegating to the wrapped view function.
    """
    data = {}
    if request.data:
        # BUG FIX: json.loads() no longer accepts an ``encoding``
        # keyword (removed in Python 3.9).
        req = json.loads(request.data)
    else:
        req = request.args or request.form

    if 'foid' not in kwargs:
        required_args = ['name', 'ftsrvname']
        for arg in required_args:
            if arg not in req or req[arg] == '':
                return make_json_response(
                    status=410,
                    success=0,
                    errormsg=gettext(
                        "Could not find the required parameter (%s)." % arg))

    try:
        if request.method == 'GET':
            list_params = [
                'constraints', 'columns', 'ftoptions',
                'seclabels', 'inherits', 'acl'
            ]
        else:
            list_params = ['inherits']

        for key in req:
            if key in list_params and req[key] != '' \
                    and req[key] is not None:
                # Convert the JSON-encoded string into a python list
                # as expected.
                data[key] = [] if \
                    isinstance(req[key], list) and len(req[key]) == 0 else \
                    json.loads(req[key])

                if key == 'inherits':
                    # Convert table ids from string to int and build a
                    # tuple for the 'IN' query.
                    inherits = tuple([int(x) for x in data[key]])
                    if len(inherits) == 1:
                        # A one-element python tuple renders with a
                        # trailing comma, so format it by hand.
                        inherits = "(" + str(inherits[0]) + ")"

                    if inherits:
                        # Fetch table names from their ids; the names
                        # are needed to generate the SQL.
                        SQL = render_template("/".join(
                            [self.template_path, 'get_tables.sql']),
                            attrelid=inherits)
                        status, res = self.conn.execute_dict(SQL)
                        if not status:
                            return internal_server_error(errormsg=res)

                        if 'inherits' in res['rows'][0]:
                            data[key] = res['rows'][0]['inherits']
                        else:
                            data[key] = []
            elif key == 'typnotnull':
                # BUG FIX: the original parenthesization evaluated
                # ``(req[key] == 'false' or req[key]) is False``, so the
                # string 'false' was mapped to '' instead of False.
                if req[key] == 'true' or req[key] is True:
                    data[key] = True
                elif req[key] == 'false' or req[key] is False:
                    data[key] = False
                else:
                    data[key] = ''
            else:
                data[key] = req[key]
    except Exception as e:
        return internal_server_error(errormsg=str(e))

    self.request = data
    return f(self, **kwargs)
def delete(self, gid, sid, did, fid, fsid, **kwargs):
    """
    This function will delete the selected user mapping node.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        fid: foreign data wrapper ID
        fsid: foreign server ID
        **kwargs:
            umid: user mapping id (single-object delete)
            only_sql: when True, return the generated SQL instead of
                executing it (used by the schema diff tool)
    """
    umid = kwargs.get('umid', None)
    only_sql = kwargs.get('only_sql', False)

    # get the value of cascade and data
    # data['ids'] holds the list of user mapping ids to drop.
    cascade, data = self.get_delete_data(self.cmd, umid, request)

    try:
        # NOTE(review): ``data`` is rebound to a row dict inside this
        # loop while its original ['ids'] list is being iterated. The
        # iterator keeps the original list alive, so iteration is
        # unaffected, but the statement order here is load-bearing -
        # confirm intent before restructuring.
        for umid in data['ids']:
            # Get name of foreign server from fsid
            sql = render_template("/".join(
                [self.template_path, self._DELETE_SQL]),
                fsid=fsid, conn=self.conn)
            status, name = self.conn.execute_scalar(sql)
            if not status:
                return internal_server_error(errormsg=name)

            if name is None:
                return make_json_response(
                    status=410,
                    success=0,
                    errormsg=gettext('Error: Object not found.'),
                    info=gettext('The specified foreign server '
                                 'could not be found.\n'))

            # Fetch the properties of this particular user mapping; on
            # failure the helper's error response is returned as-is.
            status, res = \
                self._fetch_specified_user_mapping_properties(umid)

            if not status:
                return res

            data = res['rows'][0]

            # drop user mapping
            sql = render_template("/".join(
                [self.template_path, self._DELETE_SQL]),
                data=data, name=name, cascade=cascade,
                conn=self.conn)

            # Used for schema diff tool
            if only_sql:
                return sql

            status, res = self.conn.execute_scalar(sql)
            if not status:
                return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("User Mapping dropped"))
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def start_query_download_tool(trans_id):
    """
    Stream the result of an arbitrary query as a CSV/TXT download.

    Args:
        trans_id: unique transaction id identifying the live connection
    """
    (status, error_msg, sync_conn, trans_obj,
     session_obj) = check_transaction_status(trans_id)

    if not status or sync_conn is None or trans_obj is None or \
            session_obj is None:
        return internal_server_error(
            errormsg=gettext("Transaction status check failed.")
        )

    # The query to run arrives via query-string or form values.
    data = request.values if request.values else None
    if data is None or (data and 'query' not in data):
        return make_json_response(
            status=410,
            success=0,
            errormsg=gettext(
                "Could not find the required parameter (query)."
            )
        )

    try:
        sql = data['query']

        # This returns generator of records.
        status, gen = sync_conn.execute_on_server_as_csv(
            sql, records=2000
        )

        if not status:
            return make_json_response(
                data={
                    'status': status, 'result': gen
                }
            )

        # Stream the rows; the generator is parameterized with the
        # user's CSV formatting preferences.
        r = Response(
            gen(
                quote=blueprint.csv_quoting.get(),
                quote_char=blueprint.csv_quote_char.get(),
                field_separator=blueprint.csv_field_separator.get(),
                replace_nulls_with=blueprint.replace_nulls_with.get()
            ),
            mimetype='text/csv'
            if blueprint.csv_field_separator.get() == ','
            else 'text/plain'
        )

        import time
        extn = 'csv' if blueprint.csv_field_separator.get() == ',' \
            else 'txt'
        # Fall back to a timestamp-based name when none was supplied.
        filename = data['filename'] if data.get('filename', '') != "" else \
            '{0}.{1}'.format(int(time.time()), extn)

        # We will try to encode report file name with latin-1
        # If it fails then we will fallback to default ascii file name
        # werkzeug only supports latin-1 encoding supported values
        try:
            tmp_file_name = filename
            tmp_file_name.encode('latin-1', 'strict')
        except UnicodeEncodeError:
            filename = "download.csv"

        r.headers[
            "Content-Disposition"
        ] = "attachment;filename={0}".format(filename)

        return r
    except (ConnectionLost, SSHTunnelConnectionLost):
        # Let connection-loss errors propagate to the global handler.
        raise
    except Exception as e:
        current_app.logger.error(e)
        # Prefer the OS-level error text when available.
        err_msg = "Error: {0}".format(
            e.strerror if hasattr(e, 'strerror') else str(e))
        return internal_server_error(errormsg=err_msg)
def initialize_datagrid(trans_id, cmd_type, obj_type, sgid, sid, did, obj_id):
    """
    This method is responsible for creating an asynchronous connection.
    After creating the connection it will instantiate and initialize
    the object as per the object type. It will also create a unique
    transaction id and store the information into session variable.

    Args:
        trans_id: Unique transaction id for this data-grid session
        cmd_type: Contains value for which menu item is clicked.
        obj_type: Contains type of selected object for which data grid to
            be render
        sgid: Server group Id
        sid: Server Id
        did: Database Id
        obj_id: Id of currently selected object
    """
    # Optional filter SQL may be posted as JSON body, or passed via
    # query-string / form arguments.
    if request.data:
        filter_sql = json.loads(request.data, encoding='utf-8')
    else:
        filter_sql = request.args or request.form

    # Create asynchronous connection using random connection id.
    conn_id = str(random.randint(1, 9999999))
    try:
        manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(sid)
        # default_conn is same connection which is created when user connect to
        # database from tree
        default_conn = manager.connection(did=did)
        conn = manager.connection(did=did, conn_id=conn_id,
                                  auto_reconnect=False,
                                  use_binary_placeholder=True,
                                  array_to_string=True)
    except (ConnectionLost, SSHTunnelConnectionLost) as e:
        # Propagate connection-loss so the caller can trigger a reconnect.
        raise
    except Exception as e:
        app.logger.error(e)
        return internal_server_error(errormsg=str(e))

    # Both the shared (tree) connection and the new per-grid connection
    # must be live before any command object is built.
    status, msg = default_conn.connect()
    if not status:
        app.logger.error(msg)
        return internal_server_error(errormsg=str(msg))

    status, msg = conn.connect()
    if not status:
        app.logger.error(msg)
        return internal_server_error(errormsg=str(msg))

    try:
        # if object type is partition then it is nothing but a table.
        if obj_type == 'partition':
            obj_type = 'table'

        # Get the object as per the object type
        command_obj = ObjectRegistry.get_object(obj_type,
                                                conn_id=conn_id,
                                                sgid=sgid, sid=sid,
                                                did=did, obj_id=obj_id,
                                                cmd_type=cmd_type,
                                                sql_filter=filter_sql)
    except Exception as e:
        app.logger.error(e)
        return internal_server_error(errormsg=str(e))

    if 'gridData' not in session:
        sql_grid_data = dict()
    else:
        sql_grid_data = session['gridData']

    # Use pickle to store the command object which will be used later by the
    # sql grid module.
    sql_grid_data[str(trans_id)] = {
        # -1 specify the highest protocol version available
        'command_obj': pickle.dumps(command_obj, -1)
    }

    # Store the grid dictionary into the session variable
    session['gridData'] = sql_grid_data

    return make_json_response(data={'conn_id': conn_id})
def poll(trans_id):
    """
    This method polls the result of the asynchronous query and returns
    the result.

    Args:
        trans_id: unique transaction id
    """
    result = None
    rows_affected = 0
    rows_fetched_from = 0
    rows_fetched_to = 0
    has_more_rows = False
    columns = dict()
    columns_info = None
    primary_keys = None
    types = {}
    client_primary_key = None
    has_oids = False
    oids = None
    additional_messages = None
    notifies = None

    # Check the transaction and connection status
    status, error_msg, conn, trans_obj, session_obj = \
        check_transaction_status(trans_id)

    if error_msg == ERROR_MSG_TRANS_ID_NOT_FOUND:
        return make_json_response(success=0, errormsg=error_msg,
                                  info='DATAGRID_TRANSACTION_REQUIRED',
                                  status=404)

    if status and conn is not None and session_obj is not None:
        status, result = conn.poll(
            formatted_exception_msg=True, no_result=True)
        if not status:
            messages = conn.messages()
            if messages and len(messages) > 0:
                additional_messages = ''.join(messages)
                result = '{0}\n{1}\n\n{2}'.format(
                    additional_messages,
                    gettext('******* Error *******'),
                    result
                )
            return internal_server_error(result)
        elif status == ASYNC_OK:
            status = 'Success'
            rows_affected = conn.rows_affected()

            # if transaction object is instance of QueryToolCommand
            # and transaction aborted for some reason then issue a
            # rollback to cleanup
            if isinstance(trans_obj, QueryToolCommand):
                trans_status = conn.transaction_status()
                if trans_status == TX_STATUS_INERROR and \
                        trans_obj.auto_rollback:
                    conn.execute_void("ROLLBACK;")

            st, result = conn.async_fetchmany_2darray(ON_DEMAND_RECORD_COUNT)

            # There may be additional messages even if result is present
            # eg: Function can provide result as well as RAISE messages
            messages = conn.messages()
            if messages:
                additional_messages = ''.join(messages)
            notifies = conn.get_notifies()

            if st:
                if 'primary_keys' in session_obj:
                    primary_keys = session_obj['primary_keys']

                # Fetch column information
                columns_info = conn.get_column_info()
                client_primary_key = generate_client_primary_key_name(
                    columns_info
                )
                session_obj['client_primary_key'] = client_primary_key

                # If trans_obj is a QueryToolCommand then check for updatable
                # resultsets and primary keys
                if isinstance(trans_obj, QueryToolCommand) and \
                        trans_obj.check_updatable_results_pkeys_oids():
                    pk_names, primary_keys = trans_obj.get_primary_keys()
                    session_obj['has_oids'] = trans_obj.has_oids()
                    # Update command_obj in session obj
                    session_obj['command_obj'] = pickle.dumps(
                        trans_obj, -1)
                    # If primary_keys exist, add them to the session_obj to
                    # allow for saving any changes to the data
                    if primary_keys is not None:
                        session_obj['primary_keys'] = primary_keys

                if 'has_oids' in session_obj:
                    has_oids = session_obj['has_oids']
                    if has_oids:
                        oids = {'oid': 'oid'}

                if columns_info is not None:
                    # Only QueryToolCommand or TableCommand can be editable
                    if hasattr(trans_obj, 'obj_id') and trans_obj.can_edit():
                        columns = trans_obj.get_columns_types(conn)
                    else:
                        for col in columns_info:
                            col_type = dict()
                            col_type['type_code'] = col['type_code']
                            col_type['type_name'] = None
                            col_type['internal_size'] = col['internal_size']
                            columns[col['name']] = col_type

                if columns:
                    st, types = fetch_pg_types(columns, trans_obj)

                    if not st:
                        return internal_server_error(types)

                    for col_name, col_info in columns.items():
                        for col_type in types:
                            if col_type['oid'] == col_info['type_code']:
                                typname = col_type['typname']
                                col_info['type_name'] = typname

                        # Using characters %, (, ) in the argument names is not
                        # supported in psycopg2
                        col_info['pgadmin_alias'] = \
                            re.sub("[%()]+", "|", col_name)
                    session_obj['columns_info'] = columns

                # status of async_fetchmany_2darray is True and result is none
                # means nothing to fetch
                if result and rows_affected > -1:
                    res_len = len(result)
                    if res_len == ON_DEMAND_RECORD_COUNT:
                        has_more_rows = True

                    if res_len > 0:
                        rows_fetched_from = trans_obj.get_fetched_row_cnt()
                        trans_obj.update_fetched_row_cnt(
                            rows_fetched_from + res_len)
                        # Reported range is 1-based for the client.
                        rows_fetched_from += 1
                        rows_fetched_to = trans_obj.get_fetched_row_cnt()
                        session_obj['command_obj'] = pickle.dumps(
                            trans_obj, -1)

            # As we changed the transaction object we need to
            # restore it and update the session variable.
            update_session_grid_transaction(trans_id, session_obj)

            # Procedure/Function output may comes in the form of Notices
            # from the database server, so we need to append those outputs
            # with the original result.
            if result is None:
                result = conn.status_message()
                if result is not None and additional_messages is not None:
                    result = additional_messages + result
                else:
                    result = result if result is not None \
                        else additional_messages
        elif status == ASYNC_EXECUTION_ABORTED:
            status = 'Cancel'
        else:
            status = 'Busy'
            messages = conn.messages()
            if messages and len(messages) > 0:
                result = ''.join(messages)
    else:
        status = 'NotConnected'
        result = error_msg

    # NOTE(review): when the transaction check failed above, `conn` could
    # be None here and this would raise — confirm check_transaction_status
    # always yields a connection when error_msg is not the 404 case.
    transaction_status = conn.transaction_status()

    return make_json_response(
        data={
            'status': status, 'result': result,
            'rows_affected': rows_affected,
            'rows_fetched_from': rows_fetched_from,
            'rows_fetched_to': rows_fetched_to,
            'additional_messages': additional_messages,
            'notifies': notifies,
            'has_more_rows': has_more_rows,
            'colinfo': columns_info,
            'primary_keys': primary_keys,
            'types': types,
            'client_primary_key': client_primary_key,
            'has_oids': has_oids,
            'oids': oids,
            'transaction_status': transaction_status,
        },
        encoding=conn.python_encoding
    )
def delete(self, gid, sid, did, scid, tid, trid=None, only_sql=False):
    """
    This function will drop the existing compound trigger object(s).

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
        tid: Table ID
        trid: Trigger ID (None when a list of ids is posted in the body)
        only_sql: Return the generated SQL without executing it
            (used by the schema diff tool)
    """
    if trid is None:
        data = request.form if request.form else json.loads(
            request.data, encoding='utf-8'
        )
    else:
        data = {'ids': [trid]}

    # Below will decide if it's simple drop or drop with cascade call
    if self.cmd == 'delete':
        # This is a cascade operation
        cascade = True
    else:
        cascade = False

    try:
        for trid in data['ids']:
            # We will first fetch the compound trigger name for
            # current request so that we create template for
            # dropping compound trigger
            SQL = render_template("/".join([self.template_path,
                                            'properties.sql']),
                                  tid=tid, trid=trid,
                                  datlastsysoid=self.datlastsysoid)

            status, res = self.conn.execute_dict(SQL)
            if not status:
                return internal_server_error(errormsg=res)

            if not res['rows']:
                return make_json_response(
                    success=0,
                    errormsg=gettext(
                        'Error: Object not found.'
                    ),
                    info=gettext(
                        'The specified compound trigger could not be '
                        'found.\n'
                    )
                )

            data = dict(res['rows'][0])

            SQL = render_template("/".join([self.template_path,
                                            'delete.sql']),
                                  data=data,
                                  conn=self.conn,
                                  cascade=cascade
                                  )
            if only_sql:
                return SQL

            status, res = self.conn.execute_scalar(SQL)
            if not status:
                return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("Compound Trigger is dropped")
        )

    except Exception as e:
        return internal_server_error(errormsg=str(e))
def initialize_query_tool(trans_id, sgid, sid, did=None):
    """
    This method is responsible for instantiating and initializing
    the query tool object. It will also create a unique
    transaction id and store the information into session variable.

    Args:
        trans_id: Unique transaction id for this query-tool session
        sgid: Server group Id
        sid: Server Id
        did: Database Id (defaults to the maintenance database when None)
    """
    connect = True
    # Read the data if present. Skipping read may cause connection
    # reset error if data is sent from the client
    if request.data:
        _ = request.data

    reqArgs = request.args
    # 'recreate=1' means the client is re-creating a panel for an existing
    # session and the connection must not be opened eagerly.
    if ('recreate' in reqArgs and
            reqArgs['recreate'] == '1'):
        connect = False

    # Create asynchronous connection using random connection id.
    conn_id = str(random.randint(1, 9999999))

    # Use Maintenance database OID
    manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(sid)
    if did is None:
        did = manager.did
    try:
        command_obj = ObjectRegistry.get_object('query_tool',
                                                conn_id=conn_id,
                                                sgid=sgid, sid=sid,
                                                did=did)
    except Exception as e:
        app.logger.error(e)
        return internal_server_error(errormsg=str(e))

    try:
        conn = manager.connection(did=did, conn_id=conn_id,
                                  auto_reconnect=False,
                                  use_binary_placeholder=True,
                                  array_to_string=True)
        if connect:
            status, msg = conn.connect()
            if not status:
                app.logger.error(msg)
                return internal_server_error(errormsg=str(msg))
    except (ConnectionLost, SSHTunnelConnectionLost) as e:
        app.logger.error(e)
        raise
    except Exception as e:
        app.logger.error(e)
        return internal_server_error(errormsg=str(e))

    if 'gridData' not in session:
        sql_grid_data = dict()
    else:
        sql_grid_data = session['gridData']

    # Set the value of auto commit and auto rollback specified in Preferences
    pref = Preferences.module('sqleditor')
    command_obj.set_auto_commit(pref.preference('auto_commit').get())
    command_obj.set_auto_rollback(pref.preference('auto_rollback').get())

    # Use pickle to store the command object which will be used
    # later by the sql grid module.
    sql_grid_data[str(trans_id)] = {
        # -1 specify the highest protocol version available
        'command_obj': pickle.dumps(command_obj, -1)
    }

    # Store the grid dictionary into the session variable
    session['gridData'] = sql_grid_data

    return make_json_response(data={
        'connId': str(conn_id),
        'serverVersion': manager.version,
    })
def create(self, gid, sid, did, scid): """ This function will creates new the FTS Dictionary object :param gid: group id :param sid: server id :param did: database id :param scid: schema id """ # Mandatory fields to create a new FTS Dictionary required_args = [ 'template', 'schema', 'name' ] data = request.form if request.form else json.loads( request.data, encoding='utf-8' ) for arg in required_args: if arg not in data: return make_json_response( status=410, success=0, errormsg=_( "Could not find the required parameter ({})." ).format(arg) ) # Fetch schema name from schema oid sql = render_template( "/".join([self.template_path, 'schema.sql']), data=data, conn=self.conn, ) status, schema = self.conn.execute_scalar(sql) if not status: return internal_server_error(errormsg=schema) # Replace schema oid with schema name before passing to create.sql # To generate proper sql query new_data = data.copy() new_data['schema'] = schema sql = render_template( "/".join([self.template_path, 'create.sql']), data=new_data, conn=self.conn, ) status, res = self.conn.execute_scalar(sql) if not status: return internal_server_error(errormsg=res) # We need dcid to add object in tree at browser, # Below sql will give the same sql = render_template( "/".join([self.template_path, 'properties.sql']), name=data['name'], scid=data['schema'] ) status, dcid = self.conn.execute_scalar(sql) if not status: return internal_server_error(errormsg=dcid) return jsonify( node=self.blueprint.generate_browser_node( dcid, data['schema'], data['name'], icon="icon-fts_dictionary" ) )
def create_maintenance_job(sid, did):
    """
    Args:
        sid: Server ID
        did: Database ID

        Creates a new job for maintenance vacuum operation

    Returns:
        None
    """
    if request.form:
        data = json.loads(request.form['data'], encoding='utf-8')
    else:
        data = json.loads(request.data, encoding='utf-8')

    index_name = get_index_name(data)

    # Fetch the server details like hostname, port, roles etc
    server = get_server(sid)

    if server is None:
        return make_json_response(
            success=0,
            errormsg=_("Could not find the given server")
        )

    # To fetch MetaData for the server
    driver = get_driver(PG_DEFAULT_DRIVER)
    manager = driver.connection_manager(server.id)
    conn = manager.connection()
    connected = conn.connected()

    if not connected:
        return make_json_response(
            success=0,
            errormsg=_("Please connect to the server first.")
        )

    utility = manager.utility('sql')
    ret_val = does_utility_exist(utility)
    if ret_val:
        return make_json_response(
            success=0,
            errormsg=ret_val
        )

    # Create the command for the vacuum operation
    query = render_template(
        'maintenance/sql/command.sql', conn=conn, data=data,
        index_name=index_name
    )

    # psql-utility arguments; host/port go through the SSH tunnel's local
    # bind address when one is in use.
    args = [
        '--host',
        manager.local_bind_host if manager.use_ssh_tunnel else server.host,
        '--port',
        str(manager.local_bind_port) if manager.use_ssh_tunnel
        else str(server.port),
        '--username', server.username, '--dbname',
        data['database'],
        '--command', query
    ]

    try:
        p = BatchProcess(desc=Message(server.id, data, query),
                         cmd=utility, args=args)
        manager.export_password_env(p.id)
        # Check for connection timeout and if it is greater than 0 then
        # set the environment variable PGCONNECT_TIMEOUT.
        if manager.connect_timeout > 0:
            env = dict()
            env['PGCONNECT_TIMEOUT'] = str(manager.connect_timeout)
            p.set_env_variables(server, env=env)
        else:
            p.set_env_variables(server)

        p.start()
        jid = p.id
    except Exception as e:
        current_app.logger.exception(e)
        return make_json_response(
            status=410,
            success=0,
            errormsg=str(e)
        )

    # Return response
    return make_json_response(
        data={'job_id': jid, 'status': True,
              'info': _('Maintenance job created.')}
    )
def delete(self, gid, sid, did, scid, dcid=None, only_sql=False):
    """
    This function will drop the FTS Dictionary object

    :param gid: group id
    :param sid: server id
    :param did: database id
    :param scid: schema id
    :param dcid: FTS Dictionary id
    :param only_sql: Return only sql if True
    """
    if dcid is None:
        data = request.form if request.form else json.loads(
            request.data, encoding='utf-8'
        )
    else:
        data = {'ids': [dcid]}

    # Below will decide if it's simple drop or drop with cascade call
    if self.cmd == 'delete':
        # This is a cascade operation
        cascade = True
    else:
        cascade = False

    try:
        for dcid in data['ids']:
            # Get name for FTS Dictionary from dcid
            # NOTE(review): delete.sql is rendered twice — once with dcid
            # to look the object up, once with name/schema to drop it;
            # presumably the template branches on its arguments.
            sql = render_template("/".join([self.template_path,
                                            'delete.sql']),
                                  dcid=dcid)
            status, res = self.conn.execute_dict(sql)
            if not status:
                return internal_server_error(errormsg=res)

            if not res['rows']:
                return make_json_response(
                    success=0,
                    errormsg=_(
                        'Error: Object not found.'
                    ),
                    info=_(
                        'The specified FTS dictionary '
                        'could not be found.\n'
                    )
                )

            # Drop FTS Dictionary
            result = res['rows'][0]
            sql = render_template("/".join([self.template_path,
                                            'delete.sql']),
                                  name=result['name'],
                                  schema=result['schema'],
                                  cascade=cascade
                                  )

            # Used for schema diff tool
            if only_sql:
                return sql

            status, res = self.conn.execute_scalar(sql)
            if not status:
                return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=_("FTS Dictionary dropped")
        )

    except Exception as e:
        current_app.logger.exception(e)
        return internal_server_error(errormsg=str(e))
def change_owner():
    """
    Transfer ownership of shared servers (and their server groups) from
    one user to another.

    Expects form/JSON data containing 'old_owner' and 'new_owner' user
    ids. When the new owner already has a server group with the same name
    as the old owner's group, the shared servers are moved into the
    existing group and the old group is deleted; otherwise the group is
    simply re-owned.

    Returns:
        JSON success response, or an internal_server_error response.
    """
    data = request.form if request.form else json.loads(
        request.data, encoding='utf-8'
    )

    try:
        new_user = User.query.get(data['new_owner'])
        # Robustness fix: previously an unknown 'new_owner' id raised an
        # AttributeError on new_user.username further below.
        if new_user is None:
            return internal_server_error(
                errormsg=_('Unable to update shared server owner'))

        old_user_servers = Server.query.filter_by(
            shared=True, user_id=data['old_owner']).all()
        server_group_ids = [server.servergroup_id
                            for server in old_user_servers]
        server_groups = ServerGroup.query.filter(
            ServerGroup.id.in_(server_group_ids)).all()

        new_owner_sg = ServerGroup.query.filter_by(
            user_id=data['new_owner']).all()
        old_owner_sg = ServerGroup.query.filter_by(
            user_id=data['old_owner']).all()

        # Map: new owner's group name -> id, old owner's group id -> name.
        sg_data = {sg.name: sg.id for sg in new_owner_sg}
        old_sg_data = {sg.id: sg.name for sg in old_owner_sg}
        deleted_sg = []

        # Change server user.
        for server in old_user_servers:
            sh_servers = SharedServer.query.filter_by(
                servergroup_id=server.servergroup_id).all()

            if old_sg_data[server.servergroup_id] in sg_data:
                # New owner already has a same-named group: re-point the
                # shared servers at it and delete the old group.
                for sh in sh_servers:
                    sh.servergroup_id = sg_data[
                        old_sg_data[server.servergroup_id]]
                    sh.server_owner = new_user.username
                # Update Server user and server group to prevent deleting
                # shared server associated with deleting user.
                Server.query.filter_by(
                    servergroup_id=server.servergroup_id,
                    shared=True,
                    user_id=data['old_owner']
                ).update(
                    {
                        'servergroup_id': sg_data[old_sg_data[
                            server.servergroup_id]],
                        'user_id': data['new_owner']
                    }
                )
                ServerGroup.query.filter_by(
                    id=server.servergroup_id).delete()
                deleted_sg.append(server.servergroup_id)
            else:
                server.user_id = data['new_owner']
                for sh in sh_servers:
                    sh.server_owner = new_user.username

        # Change server group user.
        for server_group in server_groups:
            if server_group.id not in deleted_sg:
                server_group.user_id = data['new_owner']

        db.session.commit()

        return make_json_response(
            success=1,
            info=_("Owner changed successfully."),
            data={}
        )
    except Exception as e:
        # Leave the session in a usable state after a failed transaction.
        db.session.rollback()
        # BUG FIX: the old code wrapped the *runtime* exception text in
        # gettext (_(str(e))), which can never match a translation catalog
        # entry, and concatenated the strings without any separator.
        msg = _('Unable to update shared server owner') + ': ' + str(e)
        return internal_server_error(errormsg=msg)
def create(self, gid, sid, did, scid, tid):
    """
    This function will create a new compound trigger object.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
        tid: Table ID

    Returns:
        JSON with the new browser-tree node on success, otherwise an
        error response.
    """
    data = request.form if request.form else json.loads(
        request.data, encoding='utf-8'
    )

    for k, v in data.items():
        try:
            # comments should be taken as is because if user enters a
            # json comment it is parsed by loads which should not happen
            if k in ('description',):
                data[k] = v
            else:
                data[k] = json.loads(v, encoding='utf-8')
        except (ValueError, TypeError, KeyError):
            data[k] = v

    required_args = {
        'name': 'Name'
    }

    for arg in required_args:
        if arg not in data:
            return make_json_response(
                status=410,
                success=0,
                errormsg=gettext(
                    "Could not find the required parameter ({})."
                ).format(required_args[arg])
            )

    # Adding parent into data dict, will be using it while creating sql
    data['schema'] = self.schema
    data['table'] = self.table
    try:
        SQL = render_template("/".join([self.template_path,
                                        self._CREATE_SQL]),
                              data=data, conn=self.conn)
        status, res = self.conn.execute_scalar(SQL)
        if not status:
            return internal_server_error(errormsg=res)

        # we need oid to add object in tree at browser
        SQL = render_template("/".join([self.template_path,
                                        self._OID_SQL]),
                              tid=tid, data=data)
        status, trid = self.conn.execute_scalar(SQL)
        if not status:
            # BUG FIX: on failure execute_scalar returns the error text in
            # 'trid'; the old code reported 'tid' (the table id) instead.
            return internal_server_error(errormsg=trid)

        return jsonify(
            node=self.blueprint.generate_browser_node(
                trid,
                tid,
                data['name'],
                icon="icon-compound_trigger"
            )
        )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def delete(self, gid, sid, did, scid, pkgid=None, only_sql=False):
    """
    This function will drop the object

    Args:
      gid: Server Group ID
      sid: Server ID
      did: Database ID
      scid: Schema ID
      pkgid: Package ID
      only_sql: Return SQL only if True

    Returns:

    """
    if pkgid is None:
        data = request.form if request.form else json.loads(
            request.data, encoding='utf-8')
    else:
        data = {'ids': [pkgid]}

    # Below will decide if it's simple drop or drop with cascade call
    if self.cmd == 'delete':
        # This is a cascade operation
        cascade = True
    else:
        cascade = False

    try:
        for pkgid in data['ids']:
            SQL = render_template("/".join(
                [self.template_path, 'properties.sql']),
                scid=scid,
                pkgid=pkgid)
            status, res = self.conn.execute_dict(SQL)

            if not status:
                return internal_server_error(errormsg=res)

            if not res['rows']:
                return make_json_response(
                    success=0,
                    errormsg=_('Error: Object not found.'),
                    info=_('The specified package could not be found.\n'))

            res['rows'][0]['schema'] = self.schema

            SQL = render_template("/".join(
                [self.template_path, 'delete.sql']),
                data=res['rows'][0],
                cascade=cascade)

            # Used for schema diff tool
            if only_sql:
                return SQL

            status, res = self.conn.execute_scalar(SQL)
            if not status:
                return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=_("Package dropped"))

    except Exception as e:
        return internal_server_error(errormsg=str(e))
def save(trans_id):
    """
    This method is used to save the data changes to the server

    Args:
        trans_id: unique transaction id
    """
    if request.data:
        changed_data = json.loads(request.data, encoding='utf-8')
    else:
        changed_data = request.args or request.form

    # Check the transaction and connection status
    status, error_msg, conn, trans_obj, session_obj = \
        check_transaction_status(trans_id)

    if error_msg == ERROR_MSG_TRANS_ID_NOT_FOUND:
        return make_json_response(success=0, errormsg=error_msg,
                                  info='DATAGRID_TRANSACTION_REQUIRED',
                                  status=404)

    if status and conn is not None and \
            trans_obj is not None and session_obj is not None:
        # If there is no primary key found then return from the function.
        # (Rows are only saveable when the result set has primary keys or
        # OIDs to identify them by.)
        if ('primary_keys' not in session_obj or
            len(session_obj['primary_keys']) <= 0 or
            len(changed_data) <= 0) and \
                'has_oids' not in session_obj:
            return make_json_response(
                data={
                    'status': False,
                    'result': gettext('No primary key found for this object, '
                                      'so unable to save records.')
                }
            )

        is_error, errmsg, conn = _check_and_connect(trans_obj)
        if is_error:
            return make_json_response(
                data={'status': status, 'result': "{}".format(errmsg)}
            )

        status, res, query_results, _rowid = trans_obj.save(
            changed_data,
            session_obj['columns_info'],
            session_obj['client_primary_key'],
            conn)
    else:
        status = False
        res = error_msg
        query_results = None
        _rowid = None

    transaction_status = conn.transaction_status()

    return make_json_response(
        data={
            'status': status,
            'result': res,
            'query_results': query_results,
            '_rowid': _rowid,
            'transaction_status': transaction_status
        },
        encoding=conn.python_encoding
    )
def create(self, gid, sid, did, scid):
    """
    Create a new type object in the given schema.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID

    Returns:
        JSON with the new browser-tree node on success, otherwise an
        error response.
    """
    data = request.form if request.form else json.loads(request.data,
                                                        encoding='utf-8')
    required_args = {'name': 'Name', 'typtype': 'Type'}

    # Make sure every mandatory parameter was supplied.
    for arg in required_args:
        if arg not in data:
            return make_json_response(
                status=410,
                success=0,
                errormsg=gettext(
                    "Could not find the required parameter (%s)." %
                    required_args[arg]))

    # Kind-specific validation based on the type category only.
    # BUG FIX: the old code ran these comparisons on *every* required
    # argument (data[arg]) inside the loop above, so a type whose *name*
    # happened to be 'c', 'r' or 'b' triggered the wrong check. It also
    # raised KeyError when 'composite' was absent for a composite type.
    typtype = data.get('typtype')

    # If type is composite then check if it has two members
    if typtype == 'c' and len(data.get('composite', [])) < 2:
        return make_json_response(
            status=410,
            success=0,
            errormsg=gettext(
                'Composite types require at least two members.'))

    # If type is range then check if subtype is defined or not
    if typtype == 'r' and data.get('typname') is None:
        return make_json_response(
            status=410,
            success=0,
            errormsg=gettext(
                'Subtype must be defined for range types.'))

    # If type is external then check if input/output
    # conversion function is defined
    if typtype == 'b' and (data.get('typinput') is None or
                           data.get('typoutput') is None):
        return make_json_response(
            status=410,
            success=0,
            errormsg=gettext(
                'External types require both input and output '
                'conversion functions.'))

    # To format privileges coming from client
    if 'typacl' in data and data['typacl'] is not None:
        data['typacl'] = parse_priv_to_db(data['typacl'], self.acl)

    data = self._convert_for_sql(data)

    try:
        if 'composite' in data and len(data['composite']) > 0:
            for each_type in data['composite']:
                each_type = self.convert_length_precision_to_string(
                    each_type)
                each_type['cltype'] = self._cltype_formatter(
                    each_type['type'])
                each_type['hasSqrBracket'] = self.hasSqrBracket

        SQL = render_template("/".join([self.template_path,
                                        'create.sql']),
                              data=data, conn=self.conn)
        status, res = self.conn.execute_dict(SQL)
        if not status:
            return internal_server_error(errormsg=res)

        if 'schema' in data:
            # we need scid to update in browser tree
            SQL = render_template("/".join(
                [self.template_path, 'get_scid.sql']),
                schema=data['schema'])
            status, scid = self.conn.execute_scalar(SQL)
            if not status:
                return internal_server_error(errormsg=scid)

        # we need oid to add object in tree at browser
        SQL = render_template("/".join([self.template_path,
                                        'get_oid.sql']),
                              scid=scid, data=data)
        status, tid = self.conn.execute_scalar(SQL)
        if not status:
            return internal_server_error(errormsg=tid)

        return jsonify(node=self.blueprint.generate_browser_node(
            tid, scid, data['name'], icon="icon-type"))
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def enable_disable_trigger(self, gid, sid, did, scid, tid, trid):
    """
    This function will enable OR disable the current compound trigger
    object.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
        tid: Table ID
        trid: Trigger ID
    """
    data = request.form if request.form else json.loads(
        request.data, encoding='utf-8'
    )

    # Requested enable/disable state. NOTE: this may arrive as the string
    # 'true'/'false' from the client and is forwarded to the template
    # as-is (the old comment claimed a boolean conversion that never
    # happened).
    is_enable_trigger = data['is_enable_trigger']

    try:
        SQL = render_template("/".join([self.template_path,
                                        'properties.sql']),
                              tid=tid, trid=trid,
                              datlastsysoid=self.datlastsysoid)

        status, res = self.conn.execute_dict(SQL)
        if not status:
            return internal_server_error(errormsg=res)

        if len(res['rows']) == 0:
            return gone(gettext(
                """Could not find the compound trigger in the table.""")
            )

        o_data = dict(res['rows'][0])

        # If enable is set to true means we need SQL to enable
        # current compound trigger which is disabled already so we need to
        # alter the 'is_enable_trigger' flag so that we can render
        # correct SQL for operation
        o_data['is_enable_trigger'] = is_enable_trigger

        # Adding parent into data dict, will be using it while creating sql
        o_data['schema'] = self.schema
        o_data['table'] = self.table

        SQL = render_template("/".join([self.template_path,
                                        'enable_disable_trigger.sql']),
                              data=o_data, conn=self.conn)
        status, res = self.conn.execute_scalar(SQL)
        if not status:
            return internal_server_error(errormsg=res)

        # Consistency fix: wrap the user-facing message in gettext() like
        # every other response message in these handlers.
        return make_json_response(
            success=1,
            info=gettext("Compound Trigger updated"),
            data={
                'id': trid,
                'tid': tid,
                'scid': scid
            }
        )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def delete(self, gid, sid, did, scid, tid, clid=None):
    """
    This function will drop the specified column(s) from the table.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
        tid: Table ID
        clid: Column ID (None when a list of ids is posted in the body)
    """
    if clid is None:
        data = request.form if request.form else json.loads(
            request.data, encoding='utf-8'
        )
    else:
        data = {'ids': [clid]}

    # We will first fetch the column name for current request
    # so that we create template for dropping column
    try:
        for clid in data['ids']:
            SQL = render_template(
                "/".join([self.template_path, 'properties.sql']),
                tid=tid, clid=clid,
                show_sys_objects=self.blueprint.show_system_objects
            )
            status, res = self.conn.execute_dict(SQL)
            if not status:
                return internal_server_error(errormsg=res)

            if not res['rows']:
                return make_json_response(
                    success=0,
                    errormsg=gettext(
                        'Error: Object not found.'
                    ),
                    info=gettext(
                        'The specified column could not be found.\n'
                    )
                )

            data = dict(res['rows'][0])
            # We will add table & schema as well
            data['schema'] = self.schema
            data['table'] = self.table

            SQL = render_template("/".join([self.template_path,
                                            'delete.sql']),
                                  data=data, conn=self.conn)
            status, res = self.conn.execute_scalar(SQL)
            if not status:
                return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("Column is dropped"),
            data={
                'id': clid,
                'tid': tid
            }
        )
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def start_view_data(trans_id):
    """
    This method is used to execute query using asynchronous connection.

    Args:
        trans_id: unique transaction id
    """
    limit = -1

    # Check the transaction and connection status
    status, error_msg, conn, trans_obj, session_obj = \
        check_transaction_status(trans_id)

    if error_msg == ERROR_MSG_TRANS_ID_NOT_FOUND:
        return make_json_response(success=0, errormsg=error_msg,
                                  info='DATAGRID_TRANSACTION_REQUIRED',
                                  status=404)

    # get the default connection as current connection which is attached to
    # trans id holds the cursor which has query result so we cannot use that
    # connection to execute another query otherwise we'll lose query result.

    try:
        manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(
            trans_obj.sid)
        default_conn = manager.connection(did=trans_obj.did)
    except (ConnectionLost, SSHTunnelConnectionLost) as e:
        raise
    except Exception as e:
        current_app.logger.error(e)
        return internal_server_error(errormsg=str(e))

    # Connect to the Server if not connected.
    if not default_conn.connected():
        status, msg = default_conn.connect()
        if not status:
            return make_json_response(
                data={'status': status, 'result': "{}".format(msg)}
            )

    if status and conn is not None and \
            trans_obj is not None and session_obj is not None:
        # set fetched row count to 0 as we are executing query again.
        trans_obj.update_fetched_row_cnt(0)

        # Fetch the sql and primary_keys from the object
        sql = trans_obj.get_sql(default_conn)
        pk_names, primary_keys = trans_obj.get_primary_keys(default_conn)

        session_obj['command_obj'] = pickle.dumps(trans_obj, -1)

        has_oids = False
        if trans_obj.object_type == 'table':
            # Fetch OIDs status
            has_oids = trans_obj.has_oids(default_conn)

        # Fetch the applied filter.
        filter_applied = trans_obj.is_filter_applied()

        # Fetch the limit for the SQL query
        limit = trans_obj.get_limit()

        can_edit = trans_obj.can_edit()
        can_filter = trans_obj.can_filter()

        # Store the primary keys to the session object
        session_obj['primary_keys'] = primary_keys

        # Store the OIDs status into session object
        session_obj['has_oids'] = has_oids

        update_session_grid_transaction(trans_id, session_obj)

        # Execute sql asynchronously
        status, result = conn.execute_async(sql)
    else:
        status = False
        result = error_msg
        filter_applied = False
        can_edit = False
        can_filter = False
        sql = None

    return make_json_response(
        data={
            'status': status, 'result': result,
            'filter_applied': filter_applied,
            'limit': limit, 'can_edit': can_edit,
            'can_filter': can_filter, 'sql': sql,
            'info_notifier_timeout': blueprint.info_notifier_timeout.get()
        }
    )
def detach(self, gid, sid, did, scid, tid, ptid):
    """
    Detach a partition from its partitioned (parent) table.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
        tid: Table ID (the partitioned/parent table)
        ptid: Partition Table ID
    """
    # Fetch parent schema name
    status, parent_schema = self.conn.execute_scalar(
        render_template("/".join([self.table_template_path,
                                  'get_schema.sql']),
                        conn=self.conn, scid=scid))
    if not status:
        return internal_server_error(errormsg=parent_schema)

    # Fetch parent (partitioned) table name
    status, partitioned_table_name = self.conn.execute_scalar(
        render_template("/".join([self.table_template_path,
                                  'get_table.sql']),
                        conn=self.conn, scid=scid, tid=tid))
    if not status:
        return internal_server_error(errormsg=partitioned_table_name)

    # Get schema oid of the partition
    status, pscid = self.conn.execute_scalar(
        render_template("/".join([self.table_template_path,
                                  'get_schema_oid.sql']), tid=ptid))
    if not status:
        # BUGFIX: report the query's error message (pscid); previously the
        # input schema id (scid) was returned as the error text.
        return internal_server_error(errormsg=pscid)

    # Fetch the partition's schema name
    status, partition_schema = self.conn.execute_scalar(
        render_template("/".join([self.table_template_path,
                                  'get_schema.sql']),
                        conn=self.conn, scid=pscid))
    if not status:
        return internal_server_error(errormsg=partition_schema)

    # Fetch the partition's table name
    status, partition_name = self.conn.execute_scalar(
        render_template("/".join([self.table_template_path,
                                  'get_table.sql']),
                        conn=self.conn, scid=pscid, tid=ptid))
    if not status:
        return internal_server_error(errormsg=partition_name)

    try:
        temp_data = dict()
        temp_data['parent_schema'] = parent_schema
        temp_data['partitioned_table_name'] = partitioned_table_name
        temp_data['schema'] = partition_schema
        temp_data['name'] = partition_name

        SQL = render_template("/".join([self.partition_template_path,
                                        'detach.sql']),
                              data=temp_data, conn=self.conn)

        status, res = self.conn.execute_scalar(SQL)
        if not status:
            return internal_server_error(errormsg=res)

        return make_json_response(
            success=1,
            info=gettext("Partition detached."),
            data={
                'id': ptid,
                'scid': scid
            })
    except Exception as e:
        return internal_server_error(errormsg=str(e))
def create(self, gid, sid, did, scid): """ Create the sequence. Args: gid: Server Group ID sid: Server ID did: Database ID scid: Schema ID Returns: """ required_args = [ u'name', u'schema', u'seqowner', ] data = request.form if request.form else json.loads(request.data, encoding='utf-8') for arg in required_args: if arg not in data: return make_json_response( status=400, success=0, errormsg=_("Could not find the required parameter (%s)." % arg)) # The SQL below will execute CREATE DDL only SQL = render_template("/".join([self.template_path, 'create.sql']), data=data, conn=self.conn) status, msg = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=msg) if 'relacl' in data: data['relacl'] = parse_priv_to_db(data['relacl'], 'DATABASE') # The SQL below will execute rest DMLs because we cannot execute CREATE with any other SQL = render_template("/".join([self.template_path, 'grant.sql']), data=data, conn=self.conn) SQL = SQL.strip('\n').strip(' ') if SQL and SQL != "": status, msg = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=msg) # We need oid of newly created sequence. SQL = render_template("/".join([self.template_path, 'get_oid.sql']), name=data['name'], schema=data['schema']) SQL = SQL.strip('\n').strip(' ') status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=rset) row = rset['rows'][0] return jsonify( node=self.blueprint.generate_browser_node(row['oid'], row['relnamespace'], data['name'], icon="icon-%s" % self.node_type))
def create(self, gid, sid, did, scid, tid): """ This function will creates new the schema object Args: gid: Server Group ID sid: Server ID did: Database ID scid: Schema ID tid: Table ID """ data = request.form if request.form else json.loads( request.data, encoding='utf-8' ) for k, v in data.items(): # comments should be taken as is because if user enters a # json comment it is parsed by loads which should not happen if k in ('description',): data[k] = v else: data[k] = json.loads(v, encoding='utf-8', cls=ColParamsJSONDecoder) required_args = { 'name': 'Name', 'cltype': 'Type' } for arg in required_args: if arg not in data: return make_json_response( status=410, success=0, errormsg=gettext( "Could not find the required parameter (%s)." % required_args[arg] ) ) # Parse privilege data coming from client according to database format if 'attacl' in data: data['attacl'] = parse_priv_to_db(data['attacl'], self.acl) # Adding parent into data dict, will be using it while creating sql data['schema'] = self.schema data['table'] = self.table # check type for '[]' in it data['cltype'] = self._cltype_formatter(data['cltype']) data['hasSqrBracket'] = self.hasSqrBracket data = self.convert_length_precision_to_string(data) SQL = render_template("/".join([self.template_path, 'create.sql']), data=data, conn=self.conn) status, res = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=res) # we need oid to to add object in tree at browser SQL = render_template( "/".join([self.template_path, 'get_position.sql']), tid=tid, data=data ) status, clid = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=tid) return jsonify( node=self.blueprint.generate_browser_node( clid, tid, data['name'], icon="icon-column" ) )
def delete(self, gid, sid, did, scid, pid): """ This function will drop the fts_parser object :param gid: group id :param sid: server id :param did: database id :param scid: schema id :param pid: fts tempate id """ # Below will decide if it's simple drop or drop with cascade call if self.cmd == 'delete': # This is a cascade operation cascade = True else: cascade = False try: # Get name for Parser from pid sql = render_template( "/".join([self.template_path, 'delete.sql']), pid=pid ) status, res = self.conn.execute_dict(sql) if not status: return internal_server_error(errormsg=res) if not res['rows']: return make_json_response( success=0, errormsg=_( 'Error: Object not found.' ), info=_( 'The specified FTS parser could not be found.\n' ) ) # Drop fts Parser result = res['rows'][0] sql = render_template( "/".join([self.template_path, 'delete.sql']), name=result['name'], schema=result['schema'], cascade=cascade ) status, res = self.conn.execute_scalar(sql) if not status: return internal_server_error(errormsg=res) return make_json_response( success=1, info=_("FTS Parser dropped"), data={ 'id': pid, 'sid': sid, 'gid': gid, 'did': did, 'scid': scid } ) except Exception as e: current_app.logger.exception(e) return internal_server_error(errormsg=str(e))
def get_types(self, gid, sid, did, scid, tid=None): """ This function will return list of types available as AJAX response. """ res = [] try: SQL = render_template("/".join( [self.template_path, 'get_types.sql'])) status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) for row in rset['rows']: # Attaching properties for precession # & length validation for current type precision = False length = False min_val = 0 max_val = 0 # Check against PGOID for specific type if row['elemoid']: if row['elemoid'] in (1560, 1561, 1562, 1563, 1042, 1043, 1014, 1015): typeval = 'L' elif row['elemoid'] in (1083, 1114, 1115, 1183, 1184, 1185, 1186, 1187, 1266, 1270): typeval = 'D' elif row['elemoid'] in (1231, 1700): typeval = 'P' else: typeval = ' ' # Logic to set precision & length/min/max values if typeval == 'P': precision = True if precision or typeval in ('L', 'D'): length = True min_val = 0 if typeval == 'D' else 1 if precision: max_val = 1000 elif min_val: # Max of integer value max_val = 2147483647 else: max_val = 10 res.append({ 'label': row['typname'], 'value': row['typname'], 'typval': typeval, 'precision': precision, 'length': length, 'min_val': min_val, 'max_val': max_val, 'is_collatable': row['is_collatable'] }) return make_json_response(data=res, status=200) except Exception as e: return internal_server_error(errormsg=str(e))
def create(self, gid, sid, did, scid, tid, cid=None):
    """
    Create a check constraint.

    Args:
        gid: Server Group ID
        sid: Server ID
        did: Database ID
        scid: Schema ID
        tid: Table ID
        cid: Check constraint ID

    Returns:
        JSON response with the browser node of the new constraint, or an
        error response.
    """
    required_args = ['consrc']

    data = request.form if request.form else json.loads(
        request.data, encoding='utf-8')

    for k, v in data.items():
        try:
            data[k] = json.loads(v, encoding='utf-8')
        except (ValueError, TypeError, KeyError):
            data[k] = v

    for arg in required_args:
        if arg not in data or data[arg] == '':
            return make_json_response(
                status=400,
                success=0,
                # BUGFIX: translate first, then interpolate; formatting
                # inside _() defeats the gettext catalog lookup.
                errormsg=_("Could not find the required parameter (%s).")
                % arg)

    data['schema'] = self.schema
    data['table'] = self.table

    try:
        if 'name' not in data or data['name'] == "":
            # Unnamed constraint: run inside a transaction so we can read
            # back the auto-generated name afterwards.
            SQL = "BEGIN;"
            status, res = self.conn.execute_scalar(SQL)
            if not status:
                self.end_transaction()
                return internal_server_error(errormsg=res)

        # The below SQL will execute CREATE DDL only
        SQL = render_template("/".join([self.template_path, 'create.sql']),
                              data=data)
        status, msg = self.conn.execute_scalar(SQL)
        if not status:
            self.end_transaction()
            return internal_server_error(errormsg=msg)

        if 'name' not in data or data['name'] == "":
            sql = render_template("/".join(
                [self.template_path, 'get_oid_with_transaction.sql'],
            ), tid=tid)

            status, res = self.conn.execute_dict(sql)
            if not status:
                self.end_transaction()
                return internal_server_error(errormsg=res)

            self.end_transaction()

            data['name'] = res['rows'][0]['name']
        else:
            sql = render_template("/".join(
                [self.template_path, 'get_oid.sql']),
                tid=tid, name=data['name'])
            status, res = self.conn.execute_dict(sql)
            if not status:
                self.end_transaction()
                return internal_server_error(errormsg=res)

        if "convalidated" in res['rows'][0] and \
                res['rows'][0]["convalidated"]:
            icon = "icon-check_constraint_bad"
            valid = False
        else:
            icon = "icon-check_constraint"
            valid = True

        return jsonify(node=self.blueprint.generate_browser_node(
            res['rows'][0]['oid'],
            tid,
            data['name'],
            icon=icon,
            valid=valid))
    except Exception as e:
        self.end_transaction()
        # BUGFIX: pass the exception message, not the exception object,
        # so the JSON response serializes cleanly.
        return make_json_response(status=400, success=0,
                                  errormsg=str(e))
def create(self, gid, sid, did, scid): """ This function will creates new the FTS Configuration object :param gid: group id :param sid: server id :param did: database id :param scid: schema id """ # Mandatory fields to create a new FTS Configuration required_args = ['schema', 'name'] data = request.form if request.form else json.loads(request.data, encoding='utf-8') for arg in required_args: if arg not in data: return make_json_response( status=410, success=0, errormsg=_("Could not find the required parameter ({})." ).format(arg)) # Either copy config or parser must be present in data if 'copy_config' not in data and 'prsname' not in data: return make_json_response( status=410, success=0, errormsg=_("Provide at least copy config or parser.")) # Fetch schema name from schema oid sql = render_template( "/".join([self.template_path, self._SCHEMA_SQL]), data=data, conn=self.conn, ) status, schema = self.conn.execute_scalar(sql) if not status: return internal_server_error(errormsg=schema) # Replace schema oid with schema name before passing to create.sql # To generate proper sql query new_data = data.copy() new_data['schema'] = schema sql = render_template( "/".join([self.template_path, self._CREATE_SQL]), data=new_data, conn=self.conn, ) status, res = self.conn.execute_scalar(sql) if not status: return internal_server_error(errormsg=res) # We need cfgid to add object in tree at browser, # Below sql will give the same sql = render_template("/".join( [self.template_path, self._PROPERTIES_SQL]), name=data['name'], scid=data['schema']) status, res = self.conn.execute_2darray(sql) if not status: return internal_server_error(errormsg=res) res = res['rows'][0] return jsonify(node=self.blueprint.generate_browser_node( res['oid'], data['schema'], data['name'], icon="icon-fts_configuration"))
def update(self, gid, sid, did):
    """Update the database."""
    data = self._get_data_from_request()
    # Update schema restriction in db object.
    DatabaseView._update_db_schema_res(data, did, sid)

    # Generic connection for offline updates
    conn = self.manager.connection(conn_id='db_offline_update')
    status, errmsg = conn.connect()
    if not status:
        current_app.logger.error(
            "Could not create database connection for offline updates\n"
            "Err: {0}".format(errmsg))
        return internal_server_error(errmsg)

    fetching_error, err_msg = self._fetch_db_details(data, did)
    if fetching_error:
        return internal_server_error(errormsg=err_msg)

    # Release any existing connection from connection manager
    # to perform offline operation
    self.manager.release(did=did)

    all_ids = {'gid': gid, 'sid': sid, 'did': did}
    is_error, errmsg = self._check_rename_db_or_change_table_space(
        data, conn, all_ids)
    if is_error:
        return internal_server_error(errmsg)

    # Make connection for database again
    connection_error, errmsg = self._reconnect_connect_db(data, did)
    if connection_error:
        return internal_server_error(errmsg)

    sql = self.get_online_sql(gid, sid, data, did)
    sql = sql.strip('\n').strip(' ')
    if sql and sql != "":
        status, msg = self.conn.execute_scalar(sql)
        if not status:
            return internal_server_error(errormsg=msg)

    # Release any existing connection from connection manager
    # used for offline updates
    self.manager.release(conn_id="db_offline_update")

    # Fetch the new data again after update for proper node generation
    status, rset = self.conn.execute_dict(
        render_template("/".join([self.template_path, self._NODES_SQL]),
                        did=did, conn=self.conn, last_system_oid=0))
    if not status:
        return internal_server_error(errormsg=rset)

    if len(rset['rows']) == 0:
        return gone(_("Could not find the database on the server."))

    res = rset['rows'][0]

    can_drop = True
    error, errmsg, is_can_drop = self._commit_db_changes(res, can_drop)
    if error:
        return make_json_response(success=0, errormsg=errmsg)

    can_drop = can_dis_conn = is_can_drop

    # BUGFIX: the jsonify(...) expression must be part of the return
    # statement; previously a bare `return` preceded it, so the view
    # returned None and the node response was dead code.
    return jsonify(node=self.blueprint.generate_browser_node(
        did,
        sid,
        res['name'],
        icon="pg-icon-{0}".format(self.node_type)
        if self._db['datallowconn'] and self.conn.connected()
        else "icon-database-not-connected",
        connected=self.conn.connected()
        if self._db['datallowconn'] else False,
        tablespace=res['spcname'],
        allowConn=res['datallowconn'],
        canCreate=res['cancreate'],
        canDisconn=can_dis_conn,
        canDrop=can_drop,
        inode=True if res['datallowconn'] else False))
def get_external_functions_list(self, gid, sid, did, scid, tid=None): """ This function will return list of external functions available as AJAX response. """ res = [{'label': '', 'value': '', 'cbtype': 'all'}] try: # The SQL generated below will populate Input/Output/Send/ # Receive/Analyze/TypModeIN/TypModOUT combo box SQL = render_template("/".join( [self.template_path, 'get_external_functions.sql']), extfunc=True) if SQL: status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) for row in rset['rows']: res.append({ 'label': row['func'], 'value': row['func'], 'cbtype': 'all' }) # The SQL generated below will populate TypModeIN combo box SQL = render_template("/".join( [self.template_path, 'get_external_functions.sql']), typemodin=True) if SQL: status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) for row in rset['rows']: res.append({ 'label': row['func'], 'value': row['func'], 'cbtype': 'typmodin' }) # The SQL generated below will populate TypModeIN combo box SQL = render_template("/".join( [self.template_path, 'get_external_functions.sql']), typemodout=True) if SQL: status, rset = self.conn.execute_2darray(SQL) if not status: return internal_server_error(errormsg=res) for row in rset['rows']: res.append({ 'label': row['func'], 'value': row['func'], 'cbtype': 'typmodout' }) return make_json_response(data=res, status=200) except Exception as e: return internal_server_error(errormsg=str(e))
def create(self, gid, sid): """Create the database.""" required_args = [u'name'] data = request.form if request.form else json.loads(request.data, encoding='utf-8') for arg in required_args: if arg not in data: return make_json_response( status=410, success=0, errormsg=_("Could not find the required parameter ({})." ).format(arg)) # The below SQL will execute CREATE DDL only SQL = render_template("/".join([self.template_path, self._CREATE_SQL]), data=data, conn=self.conn) status, msg = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=msg) if 'datacl' in data: data['datacl'] = parse_priv_to_db(data['datacl'], 'DATABASE') # The below SQL will execute rest DMLs because we cannot execute # CREATE with any other SQL = render_template("/".join([self.template_path, self._GRANT_SQL]), data=data, conn=self.conn) SQL = SQL.strip('\n').strip(' ') if SQL and SQL != "": status, msg = self.conn.execute_scalar(SQL) if not status: return internal_server_error(errormsg=msg) # We need oid of newly created database SQL = render_template("/".join( [self.template_path, self._PROPERTIES_SQL]), name=data['name'], conn=self.conn, last_system_oid=0) SQL = SQL.strip('\n').strip(' ') if SQL and SQL != "": status, res = self.conn.execute_dict(SQL) if not status: return internal_server_error(errormsg=res) response = res['rows'][0] # Add database entry into database table with schema_restrictions. database = Database(id=response['did'], server=sid, schema_res=','.join(data['schema_res'])) db.session.add(database) db.session.commit() return jsonify(node=self.blueprint.generate_browser_node( response['did'], sid, response['name'], icon="icon-database-not-connected", connected=False, tablespace=response['default_tablespace'], allowConn=True, canCreate=response['cancreate'], canDisconn=True, canDrop=True))
def create_backup_objects_job(sid):
    """
    Create a new background job for a backup task
    (Backup Database(s)/Schema(s)/Table(s)).

    Args:
        sid: Server ID

    Returns:
        JSON response with the new job id, or an error response.
    """
    data = json.loads(request.data, encoding='utf-8')
    backup_obj_type = data.get('type', 'objects')

    try:
        backup_file = filename_with_file_manager_path(
            data['file'], (data.get('format', '') != 'directory'))
    except Exception as e:
        return bad_request(errormsg=str(e))

    # Fetch the server details like hostname, port, roles etc
    server = get_server(sid)
    if server is None:
        return make_json_response(
            success=0,
            errormsg=_("Could not find the specified server."))

    # To fetch MetaData for the server
    from pgadmin.utils.driver import get_driver

    conn = get_driver(PG_DEFAULT_DRIVER).connection_manager(
        server.id).connection()
    manager = get_driver(PG_DEFAULT_DRIVER).connection_manager(server.id)
    conn = manager.connection()

    if not conn.connected():
        return make_json_response(
            success=0,
            errormsg=_("Please connect to the server first."))

    # Pick the proper utility for the requested backup kind.
    if backup_obj_type == 'objects':
        utility = manager.utility('backup')
    else:
        utility = manager.utility('backup_server')

    ret_val = does_utility_exist(utility)
    if ret_val:
        return make_json_response(
            success=0,
            errormsg=ret_val
        )

    arg_list = _get_args_params_values(
        data, conn, backup_obj_type, backup_file, server, manager)

    escaped_arg_list = [
        escape_dquotes_process_arg(arg) for arg in arg_list]

    try:
        bfile = data['file'].encode('utf-8') \
            if hasattr(data['file'], 'encode') else data['file']

        if backup_obj_type == 'objects':
            arg_list.append(data['database'])
            escaped_arg_list.append(data['database'])
            proc = BatchProcess(
                desc=BackupMessage(
                    BACKUP.OBJECT, server.id, bfile,
                    *arg_list,
                    database=data['database']
                ),
                cmd=utility, args=escaped_arg_list)
        else:
            proc = BatchProcess(
                desc=BackupMessage(
                    BACKUP.SERVER if backup_obj_type != 'globals'
                    else BACKUP.GLOBALS,
                    server.id, bfile,
                    *arg_list
                ),
                cmd=utility, args=escaped_arg_list)

        manager.export_password_env(proc.id)

        # If a connection timeout is configured (> 0), propagate it to the
        # utility through the PGCONNECT_TIMEOUT environment variable.
        if manager.connect_timeout > 0:
            env = dict()
            env['PGCONNECT_TIMEOUT'] = str(manager.connect_timeout)
            proc.set_env_variables(server, env=env)
        else:
            proc.set_env_variables(server)

        proc.start()
        jid = proc.id
    except Exception as e:
        current_app.logger.exception(e)
        return make_json_response(
            status=410,
            success=0,
            errormsg=str(e)
        )

    # Return response
    return make_json_response(
        data={'job_id': jid, 'Success': 1}
    )