def testcreation(self):
    ui = userinput.UserInput(dlxii.Device())
    ui.comm_port = Testutils.sdevice
    c = controller.Controller(ui)
    utili = utils.Utils(ui, c, testing=False, showhelp=False)
    self.assertFalse(utili.testing)
    utili = utils.Utils(ui, c, testing=True, showhelp=False)
    self.assertTrue(utili.testing)
    utili = utils.Utils(ui, c, testing=True, showhelp=True)

def teststr(self):
    ui = userinput.UserInput(dlxii.Device())
    ui.comm_port = Testutils.sdevice
    c = controller.Controller(ui)
    utili = utils.Utils(ui, c, testing=False, showhelp=False)
    self.assertEqual(
        'testing:False, cmds: -acr, -rmn, -ran, -rmd, -cacn, -q',
        str(utili))
    utili = utils.Utils(ui, c, testing=True, showhelp=False)
    self.assertEqual(
        'testing:True, cmds: -acr, -rmn, -ran, -rmd, -cacn, -q',
        str(utili))

    # if showhelp is True, str(utili) also includes the command help text

Example n. 3
def build_vocabulary(path, vocab_dict_path, nlp):
    """
    Builds the vocabulary from the entirety of the datasets
    """
    # Create Utils instance
    U = utils.Utils()
    data_raw = U.jsons_to_list(path)

    # Create a vocab wrapper and add some special tokens.
    vocab = Vocabulary(nlp)
    vocab.add_word('<pad>')
    vocab.add_word('<start>')
    vocab.add_word('<end>')
    vocab.add_word('<unk>')

    all_adjs = []
    for f, file in enumerate(data_raw):  # 0_arch 1_des 2_...
        print("Starting building vocab corresponding to file: \n", f)
        for i in range(len(file)):  # 900 samples
            sample_dict = file[i]
            sample_text = sample_dict['text']  # this is a list with strings
            adj_list = vocab.get_adj_from_all_sentences(sample_text)
            all_adjs.append(adj_list)
        print("....Finishing vocab file: \n", f)

    for sublist in all_adjs:
        for adj in sublist:
            vocab.add_word(adj)

    # Save vocab in dict
    with open(vocab_dict_path, 'wb') as f:
        pickle.dump(vocab, f)
    return vocab
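
A usage sketch for the function above; the spaCy model name and both paths are hypothetical:

import spacy

nlp = spacy.load("en_core_web_sm")  # hypothetical model choice
vocab = build_vocabulary("data/jsons/", "vocab.pkl", nlp)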
Example n. 4
    def __init__(self):

        # Initialize helper modules first; self.u is needed by the OS check below
        self.dl = downloader.Downloader()
        self.r = run.Run()
        self.u = utils.Utils()

        # Check the OS first
        if not str(sys.platform) == "darwin":
            self.u.head("Incompatible System")
            print(" ")
            print("This script can only be run from macOS/OS X.")
            print(" ")
            print("The current running system is \"{}\".".format(sys.platform))
            print(" ")
            self.grab("Press [enter] to quit...")
            print(" ")
            exit(1)
        self.web_drivers = None
        self.os_build_number = None
        self.os_number = None
        self.wd_loc = None
        self.sip_checked = False
        self.installed_version = "Not Installed!"
        self.get_manifest()
        self.get_system_info()
Example n. 5
    def __init__(self):
        """
        Constructor
        """

        self.load_mongo_client()
        self.utils = utils.Utils()
Example n. 6
 def __fillConstants(self, test_mode):
     self.data = utils.Utils(test_mode)
     self.num_color = len(self.data.color_list)
     self.color_unspecified_index = self.data.color_index_dict[
         self.data.COLOR_UNSPECIFIED]
     self.color_begin = 1
     self.color_end = self.num_color
Example n. 7
 def _setup_logging(self, path_log=None):
     app_dir = os.path.join(path_log or utils.Utils().get_app_dir(),
                            'guardiancl.log')
     console.log("Log file in '%s'" % app_dir)
     logging.config.dictConfig({
         "version": 1,
         "disable_existing_loggers": False,
         "formatters": {
             "file": {
                 "format":
                 "%(asctime)s :: %(levelname)s :: %(name)s - %(message)s"
             }
         },
         "handlers": {
             "file_handler": {
                 "class": "logging.handlers.RotatingFileHandler",
                 "level": "DEBUG",
                 "formatter": "file",
                 "filename": str(app_dir),
                 "maxBytes": 10485760,
                 "backupCount": 3,
                 "encoding": "utf8"
             }
         },
         "root": {
             "level": "DEBUG",
             "handlers": ["file_handler"]
         }
     })
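
Once dictConfig has run, any logger created afterwards inherits the rotating file handler through the root logger; a minimal sketch:

import logging

log = logging.getLogger(__name__)
log.debug("logging configured")  # ends up in guardiancl.log via the root handler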
Example n. 8
def predict(test, predict, vecs, words):
    n_rows = 916750  # embedding table dimensions passed to Utils
    n_cols = 300
    util = utils.Utils(vecs, n_rows, n_cols, words)

    df = pd.read_csv(test)
    data_x = []

    for index, row in df.iterrows():
        x = util.embed_sentense(row['text'])
        x = np.append(x, [int(row['sex']) - 1, float(row['age']) / 100])
        data_x.append(x)

    data_x = np.array(data_x)

    with open('model.json', 'r') as json_file:
        loaded_model_json = json_file.read()
    model = model_from_json(loaded_model_json)
    # load weights into new model
    model.load_weights("model.h5")
    print("Loaded model from disk")

    data_y = model.predict(data_x)
    data_y = np.argmax(data_y, axis=1)
    df['event'] = pd.Series(data_y)
    df.to_csv(predict, index=False)
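
A call sketch; the CSV names are hypothetical and load_embeddings stands in for however vecs and words were actually produced:

vecs, words = load_embeddings("embeddings.vec")  # hypothetical helper
predict("test.csv", "predictions.csv", vecs, words)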
Example n. 9
    def __init__(self, answers, APP, nodeps=False, update=False, target_path=None, dryrun=False, **kwargs):
        run_path = os.path.dirname(os.path.realpath(__file__))
        self.dryrun = dryrun
        recursive = not nodeps

        app = APP #FIXME

        self.params = Params(recursive, update, target_path)
        self.utils = utils.Utils(self.params)

        if os.path.exists(app):
            logger.info("App path is %s, will be populated to %s" % (app, target_path))
            app = self.utils.loadApp(app)
        else:
            logger.info("App name is %s, will be populated to %s" % (app, target_path))
            
        if not target_path:
            if self.params.app_path:
                self.params.target_path = self.params.app_path
            else: 
                self.params.target_path = os.getcwd()

        self.params.app = app

        self.answers_file = answers
Example n. 10
    def __init__(self, files_paths, images_paths, new_folder, nlp, vocab):
        """
        train_file_path = list with files
        test_file_path = list with files
        """

        # Create pipe
        self.nlp = nlp

        # Inherit vocabulary
        self.VOCAB = vocab

        # Call to utils
        self.Utils = utils.Utils()

        # paths to jsons
        self.files_paths = files_paths

        # path to image folders
        self.images_paths = images_paths

        # path to new image_path
        self.new_image_path = new_folder

        # Dataset
        self.train_data = None
        self.dev_data = None
        self.train_labels = None
        self.dev_labels = None

        # Run
        print("\nRunner....")
        self.runner()
Example n. 11
    def __init__(self, s_start, s_goal, step_len, goal_sample_rate,
                 waypoint_sample_rate, iter_max):
        self.s_start = Node(s_start)
        self.s_goal = Node(s_goal)
        self.step_len = step_len
        self.goal_sample_rate = goal_sample_rate
        self.waypoint_sample_rate = waypoint_sample_rate
        self.iter_max = iter_max
        self.vertex = [self.s_start]
        self.vertex_old = []
        self.vertex_new = []
        self.edges = []

        self.env = env.Env()
        self.plotting = plotting.Plotting(s_start, s_goal)
        self.utils = utils.Utils()
        self.fig, self.ax = plt.subplots()

        self.x_range = self.env.x_range
        self.y_range = self.env.y_range
        self.obs_circle = self.env.obs_circle
        self.obs_rectangle = self.env.obs_rectangle
        self.obs_boundary = self.env.obs_boundary
        self.obs_add = [0, 0, 0]  # [x,y,r]

        self.path = []
        self.waypoint = []
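
A construction sketch for the planner above; the class name DynamicRRT is an assumption and the numeric parameters are illustrative:

planner = DynamicRRT(s_start=(5, 5), s_goal=(45, 25), step_len=0.5,
                     goal_sample_rate=0.10, waypoint_sample_rate=0.65,
                     iter_max=5000)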
Example n. 12
 def __init__(self, **kwargs):
     # Initialize modules
     self.dl = downloader.Downloader()
     self.u = utils.Utils()
     self.r = run.Run()
     # See if we have a "json" key - which supersedes all others
     j = kwargs.get("json", None)
     if j:
         # We have a json file listed
         cwd = os.getcwd()
         os.chdir(os.path.dirname(os.path.realpath(__file__)))
         if os.path.exists(j):
             with open(j) as json_file:
                 kwargs = json.load(json_file)
         else:
             kwargs = {}
         os.chdir(cwd)
     # Set defaults
     self.update_url = kwargs.get("url", None)
     self.update_type = kwargs.get("update_type", "file")  # file or json
     self.update_key = kwargs.get(
         "update_key", 2)  # the json key, or line of the target file
     self.prompt = kwargs.get(
         "prompt", False)  # do we prompt the user for update notifications?
     self.prompt_key = kwargs.get(
         "prompt_key", None)  # the json key for an update description
     self.file = kwargs.get(
         "file", None)  # our current file for local version access
     self.update_mode = kwargs.get(
         "mode", "clone")  # clone/file; clone a repo, or just curl a file
     self.chmod = kwargs.get("chmod", [])  # list of files to chmod
     self.restart = kwargs.get("restart", True)  # restart on update?
     self.start_file = kwargs.get("restart_file",
                                  None)  # path to the file to restart
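
A construction sketch; the class name Updater is an assumption. A "json" kwarg makes the constructor read every other setting from that file; otherwise the kwargs are used directly:

up = Updater(json="update.json")  # settings loaded from update.json
up = Updater(url="https://example.com/repo", prompt=True)  # or passed inline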
Example n. 13
    def AddData(self, data, dateAndTime, ID, dataType, GetID, netType):
        cell_id = GetID(ID)
        #print cell_id
        ut = utils.Utils()
        date, time = ut.GetDateAndTimeNormal(
            dateAndTime)  # date and time returned here are in psycopg2 format
        dateTimeIdNT = ut.dateTimeIdNTToStr(
            [date, time, netType,
             cell_id])  # a (cell_id, date, time) tuple could replace dateTimeId
        # for a small performance gain; left for later.
        if dateTimeIdNT in self.dict:
            # If this ID/date/time key is already in the dict and its value holds
            # this dataType, add the incoming data to the stored value.
            oldData = self.dict[dateTimeIdNT][dataType]
            newData = data + oldData
            #print newData,
            #self.writer.writerow([data, oldData, newData])
            self.dict[dateTimeIdNT][dataType] = newData
            #print dataType
            if dataType != "erl":
                # If the updated type is updata or downdata, alldata must be updated too.
                alldata = data + self.dict[dateTimeIdNT]['alldata']
                self.dict[dateTimeIdNT]['alldata'] = alldata

        else:
            self.dict[dateTimeIdNT] = {}
            self.dict[dateTimeIdNT][dataType] = data
            for data_type in ['erl', 'updata', 'downdata', 'alldata']:
                if data_type != dataType:
                    self.dict[dateTimeIdNT][data_type] = 0
            if dataType != "erl":
                self.dict[dateTimeIdNT]['alldata'] = data
Example n. 14
 def testprocessLoop(self):
     ui = userinput.UserInput(dlxii.Device())
     ui.comm_port = Testutils.sdevice
     utili = utils.Utils(ui,
                         controller.Controller(ui),
                         testing=True,
                         showhelp=False)
     utili.process_loop()
Example n. 15
 def SaveToDB(self):
     ut = utils.Utils()
     for dateTimeId in self.dict.keys():
         date, time, nettype, ID = ut.StrToDateTimeIdNT(dateTimeId)
         self.db.Insert(ID, date, time, self.dict[dateTimeId]["erl"],
                        self.dict[dateTimeId]["updata"] / 8000,
                        self.dict[dateTimeId]["downdata"] / 8000,
                        self.dict[dateTimeId]["alldata"] / 8000, nettype)
     self.db.Commit()
Example n. 16
 def testresetCmdNames(self):
     ui = userinput.UserInput(dlxii.Device())
     ui.comm_port = Testutils.sdevice
     utili = utils.Utils(ui,
                         controller.Controller(ui),
                         testing=True,
                         showhelp=False)
     myserial.MySerial._dbidx = -1
     utili.reset_cmd_names()
Example n. 17
 def __init__(self, config):
     self._utils = utils.Utils()
     self._aws_interact = aws_interact.AwsInteract()
     self._transcribe_job_name = self._utils._randomize_job_name()
     self._s3_file_name = self._utils._randomize_job_name() + '.mp3'
     self._path_audio_input = config['path_audio_input']
     self._aws_region = config['aws_region']
     self._bucket_name = config['bucket']
     self._path_transcribe_result_output = '../results/transcribe_result.json'
     self._path_subtitle_file = config['path_srt_output']
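
The config dict consumed above would look like this; the keys mirror the lookups in __init__, the values are placeholders:

config = {
    'path_audio_input': 'audio/input.mp3',
    'aws_region': 'us-east-1',
    'bucket': 'my-transcribe-bucket',
    'path_srt_output': '../results/subtitles.srt',
}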
Example n. 18
    def timeline(self, info):

        # Selecting the data.
        id_doc = int(info["id_doc"].replace("checkbox_", ""))
        query = info["query"]
        query_doc = float(info["query_doc"])
        meses = int(info["meses"])
        salto = int(info["tam_intervalo"])
        classes = info["classes"]

        # Opening a connection to the database.
        conn = utils.Utils().conectar('../database/database.ini')

        # Return the document's date.
        sql = """SELECT data FROM documentos WHERE id_documento = %s"""
        cursor = conn.cursor()
        cursor.execute(sql, (id_doc, ))
        data_doc = str(cursor.fetchall()[0][0])

        # TODO: The first tests will be run retrieving only 60 days around a
        # fixed date, with a single document selected; this can be expanded later.
        tempo = meses * 30

        # Generating the query's representation vector.
        #vetor_query = modelo_texto.infer_vector(query.lower().split())
        modulo_base = Base()
        vetor_query = modulo_base.inferir_vetor(query, modelo_texto)

        # Formatting the document classes.
        f_classes = modulo_base.formatar_classes(classes)

        # Searching for documents in the past.
        passado = self.expansao(id_doc,
                                query_doc,
                                vetor_query,
                                data_doc,
                                f_classes,
                                cursor,
                                limite=tempo,
                                salto=salto)

        # Expanding toward documents in the future.
        futuro = self.expansao(id_doc,
                               query_doc,
                               vetor_query,
                               data_doc,
                               f_classes,
                               cursor,
                               limite=tempo,
                               salto=salto,
                               sentido=1)

        return self.formatar(passado, futuro)
Example n. 19
 def oldAddGSMErl(self, data, dateAndTime, ID):
     cell_id = self.Get2GID(ID)
     #print cell_id
     #dbm = dbManip.dbManipulate()
     ut = utils.Utils()
     date, time = ut.GetDateAndTimeForPostgresql(dateAndTime)
     if self.dbm.IsHaveID(cell_id):
         # If this ID already exists in the database, add the new data to the
         # stored value before writing it back.
         newData = data + float(self.dbm.SelectItemByID(cell_id, "erl"))
         self.dbm.Update(cell_id, "erl", newData)
     else:
         self.dbm.Insert(cell_id, date, time, data, 0, 0, 0, "2G")
Example n. 20
    def __init__(self, data_root: str) -> None:
        """
        Initialize data handler

        Args:
            data_root (str): Root path for data directory
        """

        self.data_root = data_root
        self.util_handle = utils.Utils()

        print(f"Your data root path : {self.data_root}")
        os.makedirs(self.data_root + "/processed/", exist_ok=True)
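
A usage sketch; the enclosing class name DataHandler is an assumption:

handler = DataHandler("./data")  # prints the root path and creates ./data/processed/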
Example n. 21
def inferEachUser(user):

    util = utils.Utils(user)
    coordinateA = util.getCoordinate(util.user)

    coordinates = []
    # limit the number of users retrieved, per Twitter rules
    for friend in util.user.friends():
        coordinates.append(util.getCoordinate(friend))
    coordinateB = center_geolocation(coordinates)

    # calculate the distance between the two coordinates
    print(utils.distance(coordinateA, coordinateB))
Example n. 22
    def __init__(self, s_start, s_goal, res=0.5):
        self.res = res
        self.s_start = self.pos2ind(s_start)
        self.s_goal = self.pos2ind(s_goal)
        self.Env = env.Env()  # class Env
        self.utils = utils.Utils()
        self.u_set = [(-1, 0), (-1, 1), (0, 1), (1, 1), (1, 0), (1, -1),
                      (0, -1), (-1, -1)]

        self.OPEN = my_queue.QueuePrior()  # priority queue / OPEN set
        self.CLOSED = []  # CLOSED set / VISITED order
        self.PARENT = dict()  # recorded parent
        self.g = dict()  # cost to come
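
pos2ind is called above but not shown; a plausible sketch, assuming it snaps a continuous position to grid indices at resolution self.res:

    def pos2ind(self, pos):
        # hypothetical: map an (x, y) position in world units to integer grid indices
        return (int(round(pos[0] / self.res)), int(round(pos[1] / self.res)))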
Example n. 23
 def __init__(self, **kwargs):
     self.dl = downloader.Downloader()
     self.r = run.Run()
     self.u = utils.Utils("SSDT Time")
     self.iasl_url_macOS = "https://bitbucket.org/RehabMan/acpica/downloads/iasl.zip"
     self.iasl_url_linux = "http://amdosx.kellynet.nl/iasl.zip"
     self.iasl_url_windows = "https://acpica.org/sites/acpica/files/iasl-win-20200528.zip"
     self.iasl = self.check_iasl()
     if not self.iasl:
         raise Exception("Could not locate or download iasl!")
     self.dsdt = None
     self.dsdt_raw = None
     self.dsdt_lines = None
Example n. 24
    def __fillConstants(self):
        self.data = utils.Utils()

        self.word_vec_dim = next(iter(self.data.word_vec_dict.values())).shape[0]
        self.max_question_len = max(
            [len(pair[0]) for pair in self.data.conv_pair])
        print('Max question length: {}'.format(self.max_question_len))
        self.max_answer_len = max(
            [len(pair[1]) for pair in self.data.conv_pair])
        print('Max answer length: {}'.format(self.max_answer_len))
        self.total_word_count = len(self.data.word_list)
        print('Total word count: {}'.format(self.total_word_count))
        print('Total training samples: {}'.format(len(self.data.conv_pair)))

Example n. 25
    def write(self):
        ut = utils.Utils()
        with xlrd.open_workbook(self.readfilename2) as workbook2:
            file2sheet = workbook2.sheet_by_index(0)
            file2rown = file2sheet.nrows
            file2coln = file2sheet.ncols

        with xlrd.open_workbook(self.readfilename1) as workbook1:
            file1sheet = workbook1.sheet_by_index(0)
            file1rown = file1sheet.nrows
            file1coln = file1sheet.ncols

            print "file1 rn=", file1rown, "cn=", file1coln
            print "file2 rn=", file2rown, "cn=", file2coln
            #file1coln=30
            #file2coln=30
            wb = Workbook()
            ws1 = wb.active
            if file2coln == 30:
                for r in range(file2rown):
                    for c in range(file2coln):
                        if r > 0 and c == 0:
                            date1 = ut.tuple2Sqlite3Timestring(xlrd.xldate_as_tuple(file2sheet.cell_value(r, c), 0)[:3])
                            ws1.cell(column=c + 1, row=r + 1, value=date1)
                        else:
                            ws1.cell(column=c+1, row=r+1, value=file2sheet.cell_value(r, c))
            elif file2coln == 31:
                for r in range(file2rown):
                    for c in range(file2coln-1):
                        if r > 0 and c > 26:
                            ws1.cell(column=c+1, row=r+1, value=file2sheet.cell_value(r, c+1))
                        elif r > 0 and c == 0:
                            date1 = ut.tuple2Sqlite3Timestring(xlrd.xldate_as_tuple(file2sheet.cell_value(r, c), 0)[:3])
                            print "date1=", date1
                            ws1.cell(column=c+1, row=r+1, value=date1)
                        else:
                            ws1.cell(column=c+1, row=r+1, value=file2sheet.cell_value(r, c))
            else:
                raise ("excel format error")

            for r in range(file1rown):
                if r != 0:
                    for c in range(file1coln):
                        # if c == 0:
                        #     date2 = tuple([int(x) for x in file1sheet.cell_value(r, c).split('-')])
                        #     print("date2=", date2)
                        #     ws1.cell(column=c + 1, row=r + file2rown, value=date2)
                        # else:
                        ws1.cell(column=c+1, row=r+file2rown, value=file1sheet.cell_value(r, c))

            wb.save(self.writefile)
Example n. 26
    def run_all(self, ipt, skiplist, cpsob):
        """
        Calls L{_run_one} for each item given as input, yielding
        the response, parsed annotations, document object and file name
        @param ipt: full path to input to run (or text-string to run)
        @param skiplist: filenames (one per line) to skip, if any
        """
        uts = ut.Utils(self.cfg)
        cat_ind = uts.load_entity_category_indicators()
        #input
        fn2txt = self.rd.read(ipt)
        try:
            dispipt = ipt[0:100]
        except IndexError:
            dispipt = ipt
        try:
            skips = [
                x.strip()
                for x in codecs.open(skiplist, "r", "utf8").readlines()
            ]
        except IOError:
            skips = []
        # run calls
        print "-- [{}] RUNNING COLLECTION: {}, {}".format(
            self.cl.name, dispipt, time.asctime(time.localtime()))
        dones = 0
        todo = self.cfg.limit
        for fn in sorted(fn2txt):
            if fn in skips:
                print "Skipping {}".format(repr(fn))
                continue
            # create doc objs
            dob = md.Document(fn, text=fn2txt[fn])
            dob.find_sentence_positions()
            # annots
            try:
                res, anns = self._run_one(fn, ut.Utils.norm_text(fn2txt[fn]),
                                          cpsob)
            except ValueError as msg:
                print("\n! Error with file: {}".format(fn))
                print("\n" + str(msg))
                res, anns = {}, {}
            uts.add_sentence_number_to_annots(anns, dob)
            for link in [an.enti.link for posi, an in anns.items()]:
                cpsob.normalize_entity_categories(link, cat_ind)
            dones += 1
            yield res, anns, dob, fn

            if dones == todo:
                break
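
A consumption sketch for the generator; the wrapper class and its construction are hypothetical:

runner = CollectionRunner(cfg)  # hypothetical class exposing run_all
for res, anns, dob, fn in runner.run_all("corpus/", "skiplist.txt", cpsob):
    print(fn, len(anns))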
Example n. 27
def convert(inpath, outpath):
    videos = db.queryNotConvertVideos()
    print(videos)
    for video in videos:
        m3u8file = m3u8_path(os.path.join(inpath, str(video['ttype'])),
                             video['title'], os.path.basename(video['url']))
        outfile = os.path.join(os.path.join(outpath, str(video['ttype'])),
                               video['title'], 'output.mp4')

        if utils.Utils().m3u8ToMP4(m3u8file, outfile):
            db.updateVideo(video['id'], 3)  # set video processed.
        else:
            print('convert file {file} failed!'.format(file=m3u8file))
Example n. 28
    def _setup_system(self):
        config_dir = utils.Utils().get_app_dir()
        config_path = os.path.join(config_dir, '.guardiancl.ini')
        if not os.path.exists(config_path):
            config = configparser2.ConfigParser()
            config.add_section('ROUTES')
            config.set('ROUTES', 'auth',
                       'http://guardiaocloud.com.br/service/v1/authenticate')
            config.set('ROUTES', 'devices',
                       'http://guardiaocloud.com.br/service/v1/devices')
            config.set('ROUTES', 'collect',
                       'http://guardiaocloud.com.br/collect')

            with open(config_path, 'w') as configfile:
                config.write(configfile)
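
A read-back sketch for the file written above, using the same configparser2 API:

config = configparser2.ConfigParser()
config.read(config_path)  # config_path as computed in _setup_system
print(config.get('ROUTES', 'auth'))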
Example n. 29
 def __init__(self, rootPath):
     self.__rootPath = rootPath
     self.__heights = [364, 444, 524, 604, 684, 764, 844]
     self.__widths = [4, 124, 244, 364, 484, 604]
     self.__mid_h = [399, 479, 559, 639, 719, 799, 879]
     self.__mid_w = [59, 179, 299, 419, 539, 659]
     self.__round_h = 70
     self.__round_w = 405
     self.__widthSize = 115
     self.__heightSize = 75
     self.__OCR = ocr.OCR()
     self.__state = np.zeros((7, 6), dtype=int)
     self.__prevState = np.zeros((7, 6), dtype=int)
     self.__gameRound = 0
     self.__util = utils.Utils(rootPath)
Example n. 30
    def ranking(self, texto, data, meses, n_docs, classes):

        modulo_base = Base()
        # Opening a connection to the database.
        conn = utils.Utils().conectar('../database/database.ini')

        # Creating the representation vector for the user-supplied text.
        # TODO: GO BACK TO THE MODEL BACKUP FOR THE FINAL VERSION.
        # Strategy 1
        vetor_pesquisa = modulo_base.inferir_vetor(texto, modelo_texto)
        # Estratégia 2
        #vetor_pesquisa = modelo_texto.infer_vector(texto.lower().split())
        # Estratégia 3
        #temporario = copy.deepcopy(modelo_texto)
        #vetor_pesquisa = temporario.infer_vector(texto.lower().split())

        # Retrieving the ids of the month's documents.
        # TODO: Documents are only being compared against a single table.
        cursor = conn.cursor()
        f_classes = modulo_base.formatar_classes(classes)
        sql = """SELECT id_documento FROM documentos WHERE data >= %s AND data <= %s AND classe IN ({0})""".format(
            f_classes)

        data_ini, data_fim = modulo_base.datas_raio(data, meses)
        cursor.execute(sql, (
            data_ini,
            data_fim,
        ))
        comparacoes = list()

        # Computing each vector's comparison score against the search vector.
        for id_doc in cursor.fetchall():
            v = modelo_texto['DOC_%s' % id_doc[0]]
            score = spatial.distance.cosine(v, vetor_pesquisa)
            comparacoes.append([str(id_doc[0]), 1 - score])
        comparacoes.sort(key=lambda x: x[1], reverse=True)

        # Retrieving the most similar documents.
        valores = ','.join([t[0] for t in comparacoes[:n_docs]])
        print("Valores: ", valores)
        sql = """SELECT * FROM documentos WHERE id_documento IN (%s)""" % valores
        cursor.execute(sql)

        # Building the json response.
        resposta = modulo_base.queryset_para_json(cursor)
        cursor.close()
        conn.close()
        return resposta
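
The score appended per document is cosine similarity, since scipy's cosine() returns a distance; an equivalent standalone sketch:

import numpy as np

def cosine_similarity(a, b):
    # equals 1 - scipy.spatial.distance.cosine(a, b) for nonzero vectors
    return float(np.dot(a, b) / (np.linalg.norm(a) * np.linalg.norm(b)))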