def list_files():
    """Endpoint to list files on the server."""
    files = []
    for filename in os.listdir(UPLOAD_DIRECTORY):
        path = os.path.join(UPLOAD_DIRECTORY, filename)
        ic(path, filename)
        if os.path.isfile(path):
            files.append(filename)
    return jsonify(files)
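For context, a minimal sketch of the Flask wiring this endpoint assumes; the app object, UPLOAD_DIRECTORY, and the route path are assumptions, not part of the original snippet:

import os

from flask import Flask, jsonify
from icecream import ic

app = Flask(__name__)
UPLOAD_DIRECTORY = './uploads'  # hypothetical; adjust to your setup

# Register list_files under a route; the original route path is not shown.
app.add_url_rule('/files', view_func=list_files)

if __name__ == '__main__':
    app.run(port=5000)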
Example #2
def main():
    try:
        fn = sys.argv[1]
    except IndexError:
        print('Usage: {} file_name'.format(sys.argv[0]))
        sys.exit(1)

    ic(fn)

    import requests

    url = "http://localhost:5000"
    # url = "http://ryoko3.local:5000"

    print("Files on Server:")
    response = requests.get(
        '{}/filesBarriersEngagingTelecomRelating'.format(url))
    ic(response.status_code)
    ic(response.json())

    # http://docs.python-requests.org/en/latest/user/quickstart/#post-a-multipart-encoded-file

    print('-' * 23)

    print("Uploading file: {}".format(fn))
    with open(fn, 'rb') as fin:
        files = {'file': fin}
        r = requests.post(
            '{}/uploadAlgeriaFreedomBraceletWorlds'.format(url), files=files)
        ic(r.status_code)
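A hedged sketch of the server-side endpoint this client posts to; the route name is an assumption (the original uses an obfuscated path), and werkzeug's secure_filename guards against path traversal:

import os

from flask import Flask, request
from werkzeug.utils import secure_filename

app = Flask(__name__)
UPLOAD_DIRECTORY = './uploads'  # hypothetical

@app.route('/upload', methods=['POST'])
def upload():
    # Pull the multipart file field the client sent under the key 'file'.
    f = request.files['file']
    os.makedirs(UPLOAD_DIRECTORY, exist_ok=True)
    f.save(os.path.join(UPLOAD_DIRECTORY, secure_filename(f.filename)))
    return 'OK', 200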
Example #3
def run():
    dict_data = {
        'edad': [10, 9, 13, 14, 12, 11, 12],
        'cm': [115, 110, 130, 155, 125, 120, 125],
        'pais': ['co', 'mx', 'co', 'mx', 'mx', 'ch', 'ch'],
        'genero': ['M', 'F', 'F', 'M', 'M', 'M', 'F'],
        'Q1': [5, 10, 8, np.nan, 7, 8, 3],
        'Q2': [7, 9, 9, 8, 8, 8, 9]
    }
    ic(dict_data)

    df = pd.DataFrame(dict_data)
    ic(df)
    # Write the DataFrame to an Excel file
    df.to_excel(dir_pandas.format('test_excel.xlsx'))
    # Write the DataFrame to an Excel file, dropping the DataFrame index
    df.to_excel(dir_pandas.format('test_excel_2.xlsx'), index=False)
    # Write the DataFrame to an Excel file, dropping the index and naming the sheet
    df.to_excel(dir_pandas.format('test_excel_3.xlsx'), index=False, sheet_name='Sheet 1')
    # Write the DataFrame to a JSON file
    df.to_json(dir_pandas.format('test_json.json'))
    # Write the DataFrame to a pickle file
    df.to_pickle(dir_pandas.format('test_pickle.pkl'))
    # Write the DataFrame to a Parquet file
    df.to_parquet(dir_pandas.format('test_parquet.parquet'))
    # Write the DataFrame to an HDF5 file; commented out because of problems
    # installing the libraries required to work with this format
    # df.to_hdf(dir_pandas.format('test_hadoop.h5'), key='data', format='table')

    """
    La lectura de los archivos atiende al tipo y es similar en todos
    los casos. No existe un archivo correcto o incorrecto, esto depende
    totalmente del uso y las condiciones con las cuales estaremos
    trabajando, ya que hay archivos que destacan por el entorno, por la
    usabilidad, el peso e incluso por la herramienta que los usará como
    insumo. En esencia, todos brindan la misma información del dataframe,
    eso es lo verdaderamente importante

    """
    ic(pd.read_excel(dir_pandas.format('test_excel.xlsx')))
    ic(pd.read_excel(dir_pandas.format('test_excel_2.xlsx')))
    ic(pd.read_json(dir_pandas.format('test_json.json')))
    ic(pd.read_pickle(dir_pandas.format('test_pickle.pkl')))
    ic(pd.read_parquet(dir_pandas.format('test_parquet.parquet')))
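run() relies on a dir_pandas path template plus the usual imports, none of which appear in the snippet; a minimal sketch of those assumptions:

import numpy as np
import pandas as pd
from icecream import ic

# Hypothetical output-directory template consumed by dir_pandas.format(...).
dir_pandas = './data/{}'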
Example #4
    def __init__(self, min=0, max=2):
        self.min_scale = min
        self.max_scale = max
        self.count = 0
        ic("USE BBOX_JITTER")
        ic(min, max)
Example #5
    def testSingleTupleArgument(self):
        with disableColoring(), captureStandardStreams() as (out, err):
            ic((a, b))

        pair = parseOutputIntoPairs(out, err, 1)[0][0]
        self.assertEqual(pair, ('(a, b)', '(1, 2)'))
Example #6
    def testReturnValue(self):
        with disableColoring(), captureStandardStreams() as (out, err):
            assert ic() is None
            assert ic(1) == 1
            assert ic(1, 2, 3) == (1, 2, 3)
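Because ic() passes its arguments through (a single value as-is, several as a tuple), it can wrap expressions in place without changing program behavior; a small illustration:

from icecream import ic

total = ic(2 + 3) * 10        # prints something like "ic| 2 + 3: 5"; total == 50
first, second = ic('a', 'b')  # several arguments come back as a tuple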
Example #7
    def testCallSurroundedByExpressions(self):
        with disableColoring(), captureStandardStreams() as (out, err):
            noop(); ic(a); noop()  # noqa
        assert parseOutputIntoPairs(out, err, 1)[0][0] == ('a', '1')
Example #8
    def testMultipleArguments(self):
        with disableColoring(), captureStandardStreams() as (out, err):
            ic(a, b)
        pairs = parseOutputIntoPairs(out, err, 1)[0]
        assert pairs == [('a', '1'), ('b', '2')]
Example #9
    def testWithoutArgs(self):
        with disableColoring(), captureStandardStreams() as (out, err):
            ic()
        assert lineIsContextAndTime(err.getvalue())
Example #10
if __name__ == "__main__":
    model = Model("simple-acquaintances")

    add_predicates(model)
    add_rules(model)

    # inference

    results = infer(model)
    write_results(results, model)

    # save intermediate results

    out_path = pathlib.Path("knows_obs.tsv")
    ic(out_path)
    df = trace_predicate("Knows", Partition.OBSERVATIONS, out_path)
    ic(df)
    ic(compare_predicate(out_path, df))

    out_path = pathlib.Path("lived_obs.tsv")
    ic(out_path)
    df = trace_predicate("Lived", Partition.OBSERVATIONS, out_path)
    ic(df)

    out_path = pathlib.Path("likes_obs.tsv")
    ic(out_path)
    df = trace_predicate("Likes", Partition.OBSERVATIONS, out_path)
    ic(df)

    out_path = pathlib.Path("knows_tar.tsv")
Example #11
import os
from icecream import ic
ic(os.getcwd())
ic(__file__)
ic(os.path.dirname(os.path.abspath(__file__)))
Example #12
        # Shift the Fibonacci pair one step down: (p, q) -> (q, p - q).
        temp = self.q
        self.q = self.p - self.q
        self.p = temp

    def search(self, sequence, element):
        self.start_init(sequence)
        result_index = -1
        while not self.stop:
            if self.i < 0:
                self.up_index()
            elif self.i >= len(sequence):
                self.down_index()
            elif sequence[self.i] == element:
                result_index = self.i
                break
            elif element < sequence[self.i]:
                self.down_index()
            elif element > sequence[self.i]:
                self.up_index()
        return result_index


if __name__ == '__main__':
    sequence = [-2, 0, 2, 4, 7, 10, 12, 45, 76]

    dude = FibonacciSearch()

    element = 7

    ic(dude.search(sequence, element))
Example #13
        logging.info("##############################\n"
                     + str(time.time()) + ", starting_trial, " + str(i)
                     + "\n##############################")

        task.trial_start()

        while task.trial_running:
            task.run()

    raise SystemExit

# graceful exit
except (KeyboardInterrupt, SystemExit):
    print(Fore.RED + Style.BRIGHT + 'Exiting now...' + Style.RESET_ALL)
    ic('about to call end_session()')
    task.end_session()
    ic('just called end_session()')
    # save dicts to disk
    scipy.io.savemat(session_info['file_basename'] + '_session_info.mat',
                     {'session_info': session_info})
    pickle.dump(
        session_info,
        open(session_info['file_basename'] + '_session_info.pkl', "wb"))
    pygame.quit()

# # exit because of error
# except (RuntimeError) as ex:
#     print(Fore.RED + Style.BRIGHT + 'ERROR: Exiting now' + Style.RESET_ALL)
#     # save dicts to disk
#     scipy.io.savemat(session_info['file_basename'] + '_session_info.mat', {'session_info' : session_info})
Example #14
                    default="./")
parser.add_argument("-o",
                    "--out",
                    help="a single pickle file name as an output",
                    default="outfile.pkl")
parser.add_argument("-s",
                    "--entry_stop",
                    help="entry_stop to stop reading the root file",
                    default=None)
args = parser.parse_args()

files = os.listdir(args.dirname)

print(files)

dfs = []

for f in files:
    df = pd.read_pickle(os.path.join(args.dirname, f))
    dfs.append(df)

result = pd.concat(dfs)

ic(result)

result.to_pickle(os.path.join(args.dirname, "merged_" + str(len(files)) + ".pkl"))

#df = pd.read_pickle(args.fname)

#ic(df)
Example #15
def run():
    df_meteoritos = pd.read_csv(dir_pandas.format('Meteorite_Landings.csv'))
    
    # Show the first 10 rows of the csv
    ic(df_meteoritos.head(10))

    # Show the last 10 rows of the csv
    ic(df_meteoritos.tail(10))

    # Show 10 random rows of the csv
    ic(df_meteoritos.sample(10))

    # Show the shape of the csv: in this case 45716 rows by 10 columns
    ic(df_meteoritos.shape)

    # Show the size of the csv: the total number of elements (rows x columns)
    ic(df_meteoritos.size)

    # Show the index of the csv: 0 through 45715 with a step of 1
    ic(df_meteoritos.index)

    # Show the names of the columns that make up the csv
    ic(df_meteoritos.columns)

    # Show general information about the csv: columns, dtypes and memory usage
    ic(df_meteoritos.info())

    # Show summary statistics for the numeric columns, a first look at the data
    ic(df_meteoritos.describe())

    # Show the same summary statistics, this time including non-numeric columns
    ic(df_meteoritos.describe(include='all'))

    # Show the data type of each column
    ic(df_meteoritos.dtypes)

    # Show each column's data type after pandas' own dtype-optimization pass
    ic(df_meteoritos.convert_dtypes().dtypes)
Example #16
def uploaded_files(filename):
    ic(app.config['UPLOAD_FOLDER'])
    ic(filename)
    return send_from_directory(app.config['UPLOAD_FOLDER'], filename)
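A hedged sketch of the app configuration this handler assumes; the route and folder values are assumptions:

import os

from flask import Flask, send_from_directory
from icecream import ic

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = os.path.abspath('./uploads')  # hypothetical

# The original route is not shown; '/uploads/<filename>' is an assumption.
app.add_url_rule('/uploads/<filename>', view_func=uploaded_files)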
Example #17
#print('sayi1: ',sayi1)
#print('sayi2: ', sayi2)

#ic(sayi1)
#ic(sayi2)

def topla(sayi):
    return sayi + 5

#ic(topla(4))
#ic(topla(5))

def giris(kullanici:bool):
    if kullanici:
        ic()
    else:
        ic()

#giris(kullanici=False)

def zamanGetir():
    return f'{datetime.now()} |>'
# Pass the function itself, not its result, so the prefix is re-evaluated on each call
ic.configureOutput(prefix=zamanGetir)

for _ in range(3):
    time.sleep(1)
    ic('Merhaba')

ic(topla(25))
ic(topla(-2))
Example #18
    def testSingleArgument(self):
        with disableColoring(), captureStandardStreams() as (out, err):
            ic(a)
        assert parseOutputIntoPairs(out, err, 1)[0][0] == ('a', '1')
Example #19
def giris(kullanici:bool):
    if kullanici:
        ic()
    else:
        ic()
Example #20
    def testMultipleCallsOnSameLine(self):
        with disableColoring(), captureStandardStreams() as (out, err):
            ic(a); ic(b, c)  # noqa
        pairs = parseOutputIntoPairs(out, err, 2)
        assert pairs[0][0] == ('a', '1')
        assert pairs[1] == [('b', '2'), ('c', '3')]
Example #21
from icecream import ic
if __name__ == '__main__':

    a = 'abcdefj'
    b = 'abc'

    if b in a:
        # 1: reverse the string using slicing
        a = a[::-1]
        ic(a)
        # 2: use the reversed() builtin; the downside is that it is slower
        a = ''.join(reversed(a))
        # a = ''.join(['a', 'b', 'c'])
        print(a, "+", type(a))
    #     for i in range(len(a), 0):
    #         tmp.append(a[i])
    #     a = str(tmp)
    #     print(a)


Example #22
    def testComments(self):
        with disableColoring(), captureStandardStreams() as (out, err):
            """Comment."""; ic(); # Comment.  # noqa
        assert lineIsContextAndTime(err.getvalue())
Example #23
from icecream import ic
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelBinarizer
# NeuralNetwork is a project-local class that is not shown in this snippet,
# e.g. from pyimagesearch.nn import NeuralNetwork


# Load the MNIST sample dataset and min/max-scale the pixel intensities to
# the range [0, 1] (each image is an 8*8 = 64-dim feature vector).
print("[INFO] loading MNIST (sample) dataset...")
digits = datasets.load_digits()
data = digits.data.astype("float")

# feature scaling: min/max normalizing.
data = (data - data.min()) / (data.max() - data.min())
print("[INFO] samples: {}, dim: {}".format(data.shape[0], data.shape[1]))

# Construct the training and testing splits.
(trainX, testX, trainY, testY) = train_test_split(data, digits.target, test_size=0.25)
ic(trainY)
# Convert the labels from integers to vectors, one-hot encoding.
trainY = LabelBinarizer().fit_transform(trainY)
ic(trainY)
testY = LabelBinarizer().fit_transform(testY)

# Train the network.
print("[INFO] training network...")
# 64(8*8 pixels)-32-16-10 architecture.
nn = NeuralNetwork([trainX.shape[1], 32, 16, 10])
print("[INFO] {}".format(nn))
nn.fit(trainX, trainY, epochs=1000)

# Evaluate the trained network.
print("[INFO] evaluating network...")
predictions = nn.predict(testX)
Example #24
    def testValues(self):
        with disableColoring(), captureStandardStreams() as (out, err):
            ic(3, 'asdf')

        pairs = parseOutputIntoPairs(out, err, 1)
        assert pairs == [[('3', None), ("'asdf'", None)]]
Example #25
def test_sum3():
    assert (30 == ic(my_sum(0, 5, 20)))
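The test_sum examples in this listing assert against a my_sum helper that is never shown. A minimal sketch consistent with all of the asserted values, assuming a Project-Euler-style "sum of multiples below target" function with defaults a=3, b=5 (the name, signature, and defaults are assumptions):

def my_sum(a=3, b=5, target=10):
    # Sum every n below target that is a multiple of a or of b;
    # a factor of 0 is treated as "no multiples" rather than dividing by zero.
    return sum(n for n in range(target)
               if (a and n % a == 0) or (b and n % b == 0))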
Example #26
    def testColoring(self):
        with captureStandardStreams() as (out, err):
            ic({1: 'str'})  # Output should be colored with ANSI control codes.

        assert hasAnsiEscapeCodes(err.getvalue())
Example #27
def test_sum4():
    assert (78 == ic(my_sum(target=20)))
Example #28
import pathlib
import time

import spacy
from icecream import ic

start_time = time.time()

for i in range(1):
    # Text to analyze
    text = pathlib.Path("./textos/bbb.txt").read_text()

    # Load spaCy with the Portuguese language model
    nlp = spacy.load("pt_core_news_sm")

    # Run the document through spaCy
    doc = nlp(text)

# Access the data
ic('==================== Grammatical analysis: ====================')
for token in doc:
    if token.pos_ == 'VERB':
        ic('---')
    ic(token.orth_, token.pos_)
    if token.pos_ == 'VERB':
        ic(token.lemma_)
        ic('---')

# ic('==================== Root analysis: ====================')
# for token in doc:
#   if token.pos_ == 'VERB':
#     ic(token, token.lemma_)

ic('==================== Execution time: ====================')
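The snippet initializes start_time but its final timing line is cut off; a plausible closing line (an assumption, not from the original):

ic(f'{time.time() - start_time:.2f} s')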
Example #29
def test_sum5():
    assert (0 == ic(my_sum(0, 0, 1)))
Example #30
    # convert list of pd.Series then stack it
    df = (df.set_index(['date', 'id'])['hashtags'].apply(
        pd.Series).stack().reset_index().drop(
            'level_2', axis=1).rename(columns={0: 'hashtag'}))
    # lowercase the hashtags and strip stray apostrophe artifacts
    df["hashtag"] = df["hashtag"].str.lower()
    df["hashtag"] = df["hashtag"].str.replace("'.", "")
    df["hashtag"] = df["hashtag"].str.replace("’.", "")

    return df


# Aggregate a frequency DF
def get_hashtag_frequencies(df):
    df = hashtag_per_row(df)
    # Add freq of hashtags by themselves in the dataset
    tweet_freq = pd.DataFrame({
        'nr_of_hashtags':
        df.groupby(['hashtag', 'date']).size()
    }).reset_index()

    return tweet_freq


df = pd.read_csv("../all_vis.csv")
df = df.sort_values(by='created_at').reset_index(drop=True)
ic(len(df))

freq_hashtags = get_hashtag_frequencies(df)

freq_hashtags.to_csv("all_hashtags.csv", index=False)
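On pandas 0.25+, the apply(pd.Series).stack() chain used in hashtag_per_row can be replaced with DataFrame.explode, which is clearer and faster; a hedged equivalent, assuming the 'hashtags' column holds lists:

def hashtag_per_row_explode(df):
    # One row per (date, id, hashtag); rows whose list was empty are dropped.
    out = (df[['date', 'id', 'hashtags']]
           .explode('hashtags')
           .rename(columns={'hashtags': 'hashtag'})
           .dropna(subset=['hashtag'])
           .reset_index(drop=True))
    out['hashtag'] = out['hashtag'].str.lower()
    return out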
Example #31
def test_sum1():
    assert (78 == ic(my_sum(3, 5, 20)))
Example #32
def get_xmltv():
    """
    Download XMLTV url and store channels and programs in the database.
    :return:
    None
    :return:
    """
    # http://wiki.xmltv.org/index.php/Main_Page/xmltvfileformat.html
    import urllib2
    import gzip
    import StringIO
    import xmltv
    url = cfg.TVGURL

    # Download XMLTV source
    out_file_path = url.split("/")[-1][:-3]
    print('Downloading TV program from: {}'.format(url))
    response = urllib2.urlopen(url)
    compressed_file = StringIO.StringIO(response.read())
    decompressed_file = gzip.GzipFile(fileobj=compressed_file)

    # Extract XMLTV
    with open(out_file_path, 'w') as outfile:
        outfile.write(decompressed_file.read())

    # Print XMLTV header
    xmltv_data = xmltv.read_data(open(out_file_path, 'r'))
    ic(xmltv_data)

    # Read xml channels
    xmlchannels = xmltv.read_channels(open(out_file_path, 'r'))
    print("Got {} channels from XMLTV source".format(len(xmlchannels)))

    # Drop content of XMLChannel
    XMLChannel.query.delete()
    db.session.commit()

    # Populate XMLChannel with channels from XMLTV source
    for xc in xmlchannels:
        xmlchannel = XMLChannel(id=int(xc['id']),
                                label=xc['display-name'][0][0].strip())
        db.session.add(xmlchannel)
    db.session.commit()

    programs = xmltv.read_programmes(open(out_file_path, 'r'))
    chunk = 1024
    index = 0
    for pr in programs:
        desc = ""
        try:
            desc = pr['desc'][0][0]
        except KeyError:
            pass
        a_category = Category.query.filter(
            Category.name == pr['category'][0][0]).first()
        if a_category:
            p = Program(channel=int(pr['channel']),
                        title=pr['title'][0][0],
                        start=duparse(pr['start']),
                        stop=duparse(pr['stop']),
                        desc=desc,
                        category_id=a_category.id)
            db.session.add(p)
        else:
            py = Category(name=pr['category'][0][0])
            Program(channel=int(pr['channel']),
                    title=pr['title'][0][0],
                    start=duparse(pr['start']),
                    stop=duparse(pr['stop']),
                    desc=desc,
                    category=py)
            db.session.add(py)
        index += 1
        if index % chunk == 0:
            db.session.commit()
    db.session.commit()

    categories = [x.name for x in Category.query.all()]
    ic(u', '.join(categories))
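get_xmltv above targets Python 2 (urllib2, StringIO). A Python 3 sketch of just the download-and-decompress step, under the same assumptions about the feed URL:

import gzip
import io
import urllib.request

def download_xmltv(url):
    # Fetch the gzipped XMLTV feed and write the decompressed XML beside it.
    out_file_path = url.split("/")[-1][:-3]
    with urllib.request.urlopen(url) as response:
        compressed = io.BytesIO(response.read())
    with gzip.GzipFile(fileobj=compressed) as gz, open(out_file_path, 'wb') as outfile:
        outfile.write(gz.read())
    return out_file_path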
Example #33
def test_sum2():
    assert (63 == ic(my_sum(3, 0, 20)))