Example #1
def main():
    args = get_args()
    files = args.files
    lang = args.lang
    mode = args.mode
    if mode == Mode.audio_file:
        parse_audio_files(files, lang)
    else:
        parse_live_speech(args)
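Like the other examples on this page, this one pulls its options from a project-local `get_args()` that the listing doesn't show. Below is a minimal sketch of the `get_args()` and `Mode` this example implies; the attribute names (`files`, `lang`, `mode`) come from the code above, while the flags, defaults, and enum values are assumptions:

import argparse
from enum import Enum

class Mode(Enum):
    audio_file = 'audio_file'
    live_speech = 'live_speech'

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('files', nargs='*', help='audio files to transcribe')
    parser.add_argument('--lang', default='en-US', help='recognition language')
    parser.add_argument('--mode', type=Mode, default=Mode.live_speech,
                        choices=list(Mode), help='audio_file or live_speech')
    return parser.parse_args()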
Example #2
    if args.optimizer == 'sgd':
        updates = lasagne.updates.sgd(loss, params, args.learning_rate)
    elif args.optimizer == 'momentum':
        updates = lasagne.updates.momentum(loss, params, args.learning_rate)
    train_fn = theano.function([word_x, word_mask, sent_mask, label_y],
                               loss,
                               updates=updates)

    prediction = lasagne.layers.get_output(network_output, deterministic=True)
    eval_fn = theano.function([word_x, word_mask, sent_mask], prediction)
    fn_check_attention = theano.function([word_x, word_mask, sent_mask],
                                         att_out)
    return fn_check_attention, eval_fn, train_fn, params


if __name__ == '__main__':
    args = ap.get_args()
    logging.basicConfig(level=logging.DEBUG,
                        format="%(asctime)s %(message)s",
                        datefmt="%m-%d %H:%M")
    logging.info(' '.join(sys.argv))
    # args.debug=True
    args.word_att = "dot"
    args.batch_size = 14
    # args.optimizer = "momentum"
    args.learning_rate = 0.2
    args.dropout_rate = 0.5
    main(args)
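The `if/elif` optimizer dispatch above scales awkwardly as options grow. A compact alternative is a lookup table, assuming the same `loss`, `params`, and `args` as in the listing (`lasagne.updates.adam` is added purely as an illustration):

UPDATE_RULES = {
    'sgd': lasagne.updates.sgd,
    'momentum': lasagne.updates.momentum,
    'adam': lasagne.updates.adam,
}
# All three update rules share the (loss, params, learning_rate) signature.
updates = UPDATE_RULES[args.optimizer](loss, params, args.learning_rate)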
Example #3
File: drawer.py Project: kpj/FoodGraph
import sys, json
from collections import defaultdict

import matplotlib.pyplot as plt
import networkx as nx

from arg_parser import drawer_argparse as get_args
from draw_config import config


args = vars(get_args())

sample_file = args['sample']
img_file = args['image']
drawing_mode = args['mode']


graph = nx.Graph()

with open(sample_file, 'r') as fd:
	# skip the fixed 11-character prefix that precedes the JSON payload
	data = json.loads(fd.read()[11:])
	tdata = defaultdict(list)

	for e in data['data']:
		w = e[2]*10  # scale the raw weight up for visible edge widths
		graph.add_edge(e[0], e[1], weight=w)
		tdata[w].append((e[0], e[1]))

plt.figure(1, figsize=(14, 14))
plt.axis('off')
plt.title('%s (%i ingredients)' % (data['props']['ftype'], data['props']['inum']))
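The listing stops right before the graph is rendered. A sketch of how the script could continue; `spring_layout` and the weight-scaled edge widths are assumptions, since the real drawing options live in the `draw_config.config` dict and depend on `drawing_mode`:

pos = nx.spring_layout(graph)
edge_widths = [graph[u][v]['weight'] for u, v in graph.edges()]
nx.draw_networkx(graph, pos, width=edge_widths, node_size=50, with_labels=False)
plt.savefig(img_file, bbox_inches='tight')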
Example #4
if __name__ == "__main__":
    """
    App that performs the following tasks:
    - Load a dataset
    - Process the dataset
    - Train a model
    - Evaluate the model
    - Save the model
    """
    spark = SparkSession \
        .builder \
        .appName("form_completion_rate") \
        .getOrCreate()

    args = arg_parser.get_args()

    if args.debug:
        print("Using debug mode")

    df = dataset.load_dataset(spark, args.datasetPath, args.debug)

    df_processed = processing.transform(df, args.debug)

    train_df, test_df = df_processed.randomSplit([0.8, 0.2], seed=42)

    cv = model_selection.cross_validation(train_df, args.debug)
    model = cv.bestModel

    model_path = OUTPUT_DIR.joinpath("model")
    print("Saving best model at {}".format(model_path))
Example #5
import random

import torch

# from transformers import AlbertConfig, AlbertTokenizer
# from transformers import XLNetConfig, XLNetTokenizer
from transformers import LongformerConfig, LongformerTokenizer

from utils import metrics
from arg_parser import get_args
from data_loader import DocDataset, PadDoc

from model import BertPoolLSTM, BertPoolConv, BertPoolLinear  # AlbertLinear, AlbertLSTM
from model_lfmr import LongformerLinear
# from model_xlnet import XLNetLinear, XLNetLSTM, XLNetConv
from train import train_evaluate, test

#%% Setting
# Get arguments from command line
args = get_args()

# random seed
random.seed(args.seed)
#np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)  # seeds every GPU; a separate manual_seed call would be redundant
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False  # This makes things slower

# device
if torch.cuda.device_count() > 1:
    device = torch.cuda.current_device()
    print('Use {} GPUs: '.format(torch.cuda.device_count()), device)
elif torch.cuda.device_count() == 1:
    # Assumed continuation; the original listing is cut off here.
    device = torch.device('cuda')
    print('Use 1 GPU:', device)
else:
    device = torch.device('cpu')
Example #6
def main():
    args = get_args()
    wt_gen = WorkingTimeGenerator(*args.month, args.range, args.worker, args.furlough, args.work)
    wt_gen.write_workbook()
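A sketch of the `get_args()` this example implies. The attribute names come from the constructor call above; `--month` taking two values is inferred from the `*args.month` unpacking, and all types and help strings are assumptions:

import argparse

def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('--month', nargs=2, type=int, metavar=('YEAR', 'MONTH'))
    parser.add_argument('--range', type=int, help='number of days to generate')
    parser.add_argument('--worker', help='worker name')
    parser.add_argument('--furlough', help='furlough days')
    parser.add_argument('--work', help='working-hours spec')
    return parser.parse_args()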
Example #7
File: InstaDL.py Project: kshvmdn/insta-dl
import os
from scraper import scrape
from file_dl import main as download
from arg_parser import main as get_args

args = get_args()

file_dir = os.path.expanduser("./downloads")
if not os.path.exists(file_dir):
    os.mkdir(file_dir)

count = 0

for file in args.files:
    file_name = download(scrape(file), file_dir)
    if file_name is not None:
        count += 1
        print('Downloaded {0} as {1}'.format(file, file_name))
    else:
        print('Could not download {0} - invalid IG code or file already exists'
              .format(file))
print('Downloaded {0} files to {1}'.format(count, file_dir))
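For context, a minimal sketch of the `arg_parser.main` this script aliases to `get_args`. Only the `files` attribute is known from the loop above; the description and help strings are assumptions:

import argparse

def main():
    parser = argparse.ArgumentParser(description='Download Instagram media.')
    parser.add_argument('files', nargs='+', help='IG shortcodes or URLs')
    return parser.parse_args()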
Example #8
import os
import re
import time

import tensorflow as tf

import alpha_vantage_key
import arg_parser
import plotter
#import weather

from alpha_vantage.timeseries import TimeSeries
from bokeh.models import LinearAxis
from bokeh.palettes import Spectral11
from bokeh.plotting import figure, show, output_file
from pprint import pprint
from sklearn import preprocessing

(config_folder, refresh, plot_inputs, plot_results, test_model, train_model,
 max_epoch, init_learning_rate, init_epoch, weather_data,
 ticker) = arg_parser.get_args()

cwd = os.getcwd()
if not config_folder and refresh:
    today = time.strftime('%d-%m-%Y')
    config_folder = f'{ticker}_{today}'
elif not config_folder and not refresh:
    # pick folders named like '<TICKER>_DD-MM-YYYY'; the newest one is used below
    all_subdirs = [d for d in os.listdir(cwd) if os.path.isdir(d) and re.match(r'.{1,6}_[0-9]{2}-[0-9]{2}-[0-9]{4}', d)]
    print(all_subdirs)
    config_folder = max(all_subdirs, key=os.path.getmtime)
config_path = os.path.join(cwd, config_folder, f'{ticker}_trader_config.pkl')
data_path = os.path.join(cwd, config_folder, f'{ticker}_data.pkl')
meta_data_path = os.path.join(cwd, config_folder, f'{ticker}_meta_data.pkl')
plot_path = os.path.join(cwd, 'plots')

latest_data_path = os.path.join(cwd, config_folder)
Example #9
def run():
    """
    Main process that:
     * Parses command-line arguments,
     * Parses the configuration file,
     * Initiates the logger,
     * Checks the Handle Service connection,
     * Runs the issue action.

    """
    try:
        # Get command-line arguments
        args = get_args()
        # init logging
        if args.version and args.log is not None:
            _init_logging(args.log, level='DEBUG')
        elif args.log is not None:
            _init_logging(args.log)
        else:
            _init_logging()
        if args.command == CHANGEPASS:
            if args.oldpass is not None and args.newpass is not None:
                _reset_passphrase(old_pass=args.oldpass, new_pass=args.newpass)
            else:
                _reset_passphrase()
        elif args.command == CREDSET:
            if args.username is not None and args.token is not None:
                _set_credentials(username=args.username, token=args.token)
            else:
                _set_credentials()
        elif args.command == CREDREMOVE:
            _reset_credentials()
        elif args.command == CREDTEST:
            _cred_test(args.institute, args.project, args.passphrase)

        elif args.command == CHECK:
            result = _check_pid(",".join(args.id), args.full, args.latest)
            # Result printing: bare print() for now; needs a better reporting method.
            for element in result:
                print(element)

        # The retrieve command behaves slightly differently from the rest, so it's singled out.
        elif args.command not in [RETRIEVE, CLOSE]:
            issue_file = _get_issue(args.issue)
            dataset_file = _get_datasets(args.dsets)
            process_command(command=args.command, issue_file=issue_file, dataset_file=dataset_file,
                            issue_path=args.issue, dataset_path=args.dsets)
        elif args.command == CLOSE:
            issue_file = _get_issue(args.issue)
            dataset_file = _get_datasets(args.dsets)
            process_command(command=args.command, issue_file=issue_file, dataset_file=dataset_file,
                            issue_path=args.issue, dataset_path=args.dsets, status=args.status)
        elif args.command == RETRIEVE:
            list_of_id = _prepare_retrieve_ids(args.id)
            if len(list_of_id) >= 1:
                process_command(command=RETRIEVE, issue_path=args.issues, dataset_path=args.dsets, list_of_ids=list_of_id)
            else:
                process_command(command=RETRIEVE_ALL, issue_path=args.issues, dataset_path=args.dsets)
    except KeyboardInterrupt:
        print('Keyboard interruption, exiting...')
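The helpers here (`_init_logging`, `process_command`, the command constants) are defined elsewhere in the project. For orientation, a hypothetical sketch of `_init_logging`, assuming it simply wires up the standard `logging` module; the real implementation may differ:

import logging
import os

def _init_logging(logdir=None, level='INFO'):
    # Hypothetical: log to a file when a directory is given, else to stderr.
    kwargs = {'level': getattr(logging, level),
              'format': '%(asctime)s %(levelname)s %(message)s'}
    if logdir is not None:
        kwargs['filename'] = os.path.join(logdir, 'errata.log')
    logging.basicConfig(**kwargs)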