Example #1
    def plotAndSaveToFile(self,
                          outFile='bandstructure.pdf',
                          silent_overwrite=False,
                          **kwargs):

        import matplotlib
        matplotlib.use("pdf")  # select the backend before importing pyplot
        from matplotlib import pyplot as plt
        from oppvasp.plotutils import prepare_canvas
        from matplotlib import rc
        rc('lines', linewidth=0.5)
        rc('font', **{'family': 'serif', 'serif': ['Palatino']})
        rc('font', family='serif')
        rc('text', usetex=True)
        prepare_canvas('10 cm')
        fig = plt.figure()
        p = [0.15, 0.15, 0.05, 0.05]  # margins: left, bottom, right, top
        ax0 = fig.add_axes([p[0], p[1], 1 - p[2] - p[0], 1 - p[3] - p[1]])
        self.plot(ax0, **kwargs)
        # Write to file:
        if not silent_overwrite and os.path.exists(outFile):
            print "\nWARNING: The file \"%s\" already exists." % outFile
            if query_yes_no("         Do you want to overwrite it?") == 'no':
                return

        sys.stdout.write("\nSaving band structure to %s... " % outFile)
        sys.stdout.flush()
        plt.savefig(outFile)
        sys.stdout.write("done!\n\n")
Example #2
    def plotAndSaveToFile(self, outFile='bandstructure.pdf', silent_overwrite=False, **kwargs):

        import matplotlib
        matplotlib.use("pdf")  # select the backend before importing pyplot
        from matplotlib import pyplot as plt
        from oppvasp.plotutils import prepare_canvas
        from matplotlib import rc
        rc('lines', linewidth=0.5)
        rc('font',**{'family':'serif','serif':['Palatino']})
        rc('font', family='serif')
        rc('text', usetex=True)
        prepare_canvas('10 cm')
        fig = plt.figure()
        p = [0.15, 0.15, 0.05, 0.05]  # margins: left, bottom, right, top
        ax0 = fig.add_axes([ p[0], p[1], 1-p[2]-p[0], 1-p[3]-p[1] ])
        self.plot(ax0, **kwargs)
        # Write to file:
        if not silent_overwrite and os.path.exists(outFile):
            print "\nWARNING: The file \"%s\" already exists." % outFile
            if query_yes_no("         Do you want to overwrite it?") == 'no':
                return

        sys.stdout.write("\nSaving band structure to %s... " % outFile)
        sys.stdout.flush()
        plt.savefig(outFile)
        sys.stdout.write("done!\n\n")
Example #3
def prompt_files():
    """Prompt for the data with Tkinter GUI.

    Returns:
        input_files (list): Full path to the files in a list.
    """

    while "dummy_var" not in locals():
        try:
            input_files = []
            root = Tkinter.Tk()
            list_of_files = tkFileDialog.askopenfiles(mode="r",
                                                      **config.dialog_files)
            root.destroy()

            for f in list_of_files:
                input_files.append(f.name)

            if not input_files:
                raise IndexError

            dummy_var = 0

        except IndexError:
            if not query_yes_no("You have not picked any files. Retry?"):
                sys.exit("No files selected.")

    return input_files
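
The '"dummy_var" not in locals()' sentinel above works, but the same retry flow reads more naturally with an explicit return inside a plain while-True loop. A behavior-equivalent sketch, reusing the names from the example above (Tkinter, tkFileDialog, config.dialog_files, query_yes_no):

def prompt_files():
    """Prompt for data files and retry until at least one is chosen."""
    while True:
        root = Tkinter.Tk()
        list_of_files = tkFileDialog.askopenfiles(mode="r", **config.dialog_files)
        root.destroy()
        input_files = [f.name for f in list_of_files]
        if input_files:
            return input_files  # success: no sentinel variable needed
        if not query_yes_no("You have not picked any files. Retry?"):
            sys.exit("No files selected.")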
Example #4
def prompt_folder():
    """Prompt for folder in which the data is stored.

    Returns:
        input_files (list): Full path to the files in a list.
        folder (str): The selected folder.
    """

    while "dummy_var" not in locals():
        try:
            input_files = []
            root = Tkinter.Tk()
            folder = tkFileDialog.askdirectory(
                initialdir=config.dialog_files["initialdir"])
            root.destroy()

            for p, dirs, files in os.walk(folder, topdown=True):
                dirs[:] = [d for d in dirs if not d.startswith("_")]
                for f in files:
                    if (f.endswith(tuple(config.allowed_file_types))
                            and p.split("/")[-1][0] != "_"):
                        input_files.append(os.path.join(p, f))

            if not input_files:
                raise IndexError

            return input_files, folder

        except IndexError:
            if not query_yes_no("You have not picked any folder. Retry?"):
                sys.exit("No folder selected.")
Example #5
def prompt_labjournal():
    """Prompt for Labjournal with Tkinter GUI.

    Returns:
        labj (Pandas DF): Labjournal imported from xlsx file.
        path_labj (str): Full path to the labjournal.
    """

    while "labj" not in locals():
        try:
            root = Tkinter.Tk()
            path_labj = tkFileDialog.askopenfiles(mode="rb",
                                                  **config.dialog_labj)
            root.destroy()
            labj = pandas.read_excel(
                path_labj[0],
                sheet_name=config.labj_ws_name,
                dtype=str,
                converters={"ID": str},
            )

        except IndexError:
            if not query_yes_no("You have not picked any files. Retry?"):
                sys.exit(
                    "No Labjournal specified! You may want to change your config."
                )

    return labj, path_labj[0].name
Example #6
def delete(config: dict, app_logger: logger.Logger) -> bool:
    """
    Delete a migration from disk and from the migrations database. Will ask for confirmation.

    :param config: pymigrate configuration.
    :param app_logger: pymigrate configured logger.

    :return: True on success, False otherwise.
    """
    app_logger.log_with_ts('Running delete action', logger.Levels.DEBUG)
    migration_id = config['MIGRATION_ID']
    if util.query_yes_no(
            'Are you sure you want to delete {0} ?'.format(migration_id)):
        migrations_directory_path = os.path.join(
            os.pardir, config['PROJECT_DIR'] + '/' + config['MIGRATIONS_DIR'])
        if migration.delete_migration(migration_id, migrations_directory_path,
                                      app_logger):
            print('Deleted {0}'.format(migration_id))
        else:
            return False
    else:
        print("Aborting")
        return True

    return True
Example #7
def main():
    args = parse_args()
    if args.random_seed is not None:
        # fixed random seeds for reproducibility
        np.random.seed(args.random_seed)
        torch.random.manual_seed(args.random_seed)

    # infer target label from image
    input_img = load_image(args.input_img, size=64)

    labels, confidences = send_query(input_img)
    target_idx = np.argmax(confidences)
    target_class = labels[target_idx]

    # ask the user whether to continue
    print(f'Inferred label: {target_class}, confidence of {np.round(confidences[target_idx], 3)}')
    if not query_yes_no('Continue ?'):
        print('Please choose an input image which the API classifies as your target class. ')
        sys.exit(0)

    # generate adversarial image
    else:
        if not args.color:
            target_img = image_to_grayscale(input_img)
        else:
            target_img = input_img

        print('Generating adversarial...')
        adversarial, conf, num_queries, conv_images, cppn = generate_adversarial(target_class=target_class,
                                                                                 target_image=target_img,
                                                                                 color=args.color,
                                                                                 target_conf=args.target_conf,
                                                                                 max_queries=args.max_queries,
                                                                                 init=args.init)

        if conf < args.target_conf:
            print(f'Failed to generate an adversarial image after {args.max_queries} queries.')
            # write_to_log('log.tsv', f'{target_class}\t{conf}\t{num_queries}\t{args.color}\t{args.init}')
            sys.exit(0)
        print(f'Found an adversarial image with > {args.target_conf} API confidence after {num_queries} queries.')

        output_dir = Path(args.output_dir)
        print(f'\tSaving results in: {output_dir}/')

        # save adversarial image
        adversarial_fname = str(output_dir / f'adversarial_{clean_filename(target_class)}_{conf}')
        save_image(adversarial_fname + '.png', adversarial)
        if args.high_res:
            cppn.set_img_size(2000)
            adversarial_high_res = cppn.render_image()
            save_image(adversarial_fname + '_HD.png', adversarial_high_res)
        # save convergence gif
        if not args.no_gif:
            conv_gif_fname = str(output_dir / f'convergence_{clean_filename(target_class)}_{conf}.gif')
            save_gif_from_images(conv_gif_fname, conv_images)

        # write_to_log('log.tsv', f'{target_class}\t{conf}\t{num_queries}\t{args.color}\t{args.init}')
        print('Finished.')
Example #8
def delete_comp(comp_num, conn):
    """Deletes a computation from the database; does not touch the actual data."""
    r = conn.execute("select * from comps where comp_key = ?", (comp_num,)).fetchone()
    print "Preparing to delete the row " + str(comp_num)
    print r
    if util.query_yes_no("are you sure you want to delete this entry? ", default='no') == 'yes':
        conn.execute("delete from comps where comp_key = ?", (comp_num,))
        conn.commit()
Example #9
def main():
    keep_running = True
    while keep_running:
        print("\nPick an offer to ignore")
        offers = Serialization.load(Amex.Offer.FILE)
        ignored = Serialization.load(Amex.Offer.IGNOREDFILE)
        visible = [x for x in offers if x not in ignored]
        printlist = [repr(x) for x in visible]
        _, index = Serialization.pick_from_list(printlist, sort=True)
        item = visible[index]

        if query_yes_no("\nAdd to ignore list?"):
            ignored.append(item)
            Serialization.save(ignored, Amex.Offer.IGNOREDFILE)
        print("Added\n")
Example #10
def create_migration(config: dict, app_logger: logger.Logger) -> bool:
    """
    Create directory for migration config['MIGRATION_ID'] and fill it with templates.

    :param config: pymigrate configuration
    :param app_logger: instance of configured logger
    """
    cur_timestamp = int(time.time())
    # try to extract timestamp from migration name
    try:
        ts = int(config['MIGRATION_ID'].split('-')[0])
        if abs(cur_timestamp - ts) <= 10000000:
            migration_id = config['MIGRATION_ID']
        else:
            if util.query_yes_no('Are you sure with timestamp? {0} '.format(ts)):
                migration_id = config['MIGRATION_ID']
            else:
                return False
    except ValueError:
        app_logger.log_with_ts('Unable to extract timestamp from provided MIGRATION_ID, '
                               'will append current timestamp', logger.Levels.DEBUG)
        migration_id = str(cur_timestamp) + '-' + config['MIGRATION_ID']
        # TODO: what about config dict immutability?
        config['MIGRATION_ID'] = migration_id

    migrations_directory_path = os.path.join(os.pardir, config['PROJECT_DIR'] + '/' + config['MIGRATIONS_DIR'])
    new_migration_path = migrations_directory_path + '/' + migration_id
    print("Generating migration {0}".format(migration_id))
    try:
        os.mkdir(new_migration_path, 0o775)
        # TODO: think about module paths for templates
        # TODO: do replacements in templates more elegantly
        with open(new_migration_path + '/README.md', mode='w') as f:
            f.write(templates.generate_readme_md(config))
        with open(new_migration_path + '/migrate.sh', mode='w') as f:
            f.write(templates.generate_migrate_sh(config))

        os.chmod(new_migration_path + '/migrate.sh', 0o775)

        if db_update(config, app_logger):
            print("Done!")
        else:
            app_logger.log_with_ts("Failed to update migrations db", logger.Levels.ERROR)
    except OSError:
        print("Failed to create migration {0}".format(migration_id))
        return False
Example #11
from mmeximporter import MMexDB, Settings, UserError
import util
import sys, os
import csv
import datetime
from models import Transaction, CategoryID, SubCategoryID

confirm = util.query_yes_no("This will recategorize all transactions, are you sure?")
if not confirm:
  sys.exit(-1)
  
settings = Settings.Settings()
categories = settings.getCategories()
db = MMexDB.MMexDb(settings)

accounts = db.get_all_accounts()

for account in accounts:
  print "Updating account: %s" % (account.ACCOUNTNAME)
  transactions = db.get_transactions(account.ACCOUNTNUM)
  for t in transactions:
    payee_name = db.get_payee_name(t.PAYEEID)
    cat_id, sub_cat_id = util.get_categoryid_for_payee(payee_name, categories, db)
    t.CATEGID = cat_id
    t.SUBCATEGID = sub_cat_id
    t.save()
Example #12
def gen_template(path):
    if util.query_yes_no("Would you like to create a new empty configuration file ?"):
        root_path = os.path.abspath(os.path.dirname(__file__))
        shutil.copyfile(root_path + '/config.json.template', path)
        print("Configuration file created: %s" % path)
Example #13
File: levi.py Project: leroix/Levi
    for k in s3.get_all_keys():
        print gen_param_str(k)

if subcmd == 'push':
    params = []
    print 'Configuration settings to be pushed:'
    for k in s3.get_all_keys():
        paramstr = gen_param_str(k)
        params.append(paramstr)
        print '\t' + paramstr

    print '\nServices they will be pushed to:'
    for svc in config.SERVICES:
        print '\t' + str(svc) + '\n'

    is_proceed = util.query_yes_no('Would you like to proceed?')

    if is_proceed:
        ps = []
        pipe = subprocess.PIPE
        for svc in config.SERVICES:
            args = [util.method2script(svc['method'])]
            args.extend(svc['args'])
            args.extend(params)
            p = subprocess.Popen(args, stdin=pipe, stdout=pipe, stderr=pipe)
            ps.append(p)
        
        while ps:
            p = ps.pop()
            if p.poll() is not None:
                stdouttext, stderrtext = p.communicate()
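
The snippet is cut off mid-loop, but note that the wait loop as shown pops a process and silently drops it when poll() returns None. Since Popen.communicate() blocks until the child exits, the polling can be avoided entirely; a sketch using the same ps list:

# communicate() waits for each child to finish, so no poll loop is needed
for p in ps:
    stdouttext, stderrtext = p.communicate()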
Example #14
from mmeximporter import MMexDB, Settings, UserError
import util
import sys, os
import csv
import datetime
from models import Transaction, CategoryID, SubCategoryID

confirm = util.query_yes_no(
    "This will recategorize all transactions, are you sure?")
if not confirm:
    sys.exit(-1)

settings = Settings.Settings()
categories = settings.getCategories()
db = MMexDB.MMexDb(settings)

accounts = db.get_all_accounts()

for account in accounts:
    print "Updating account: %s" % (account.ACCOUNTNAME)
    transactions = db.get_transactions(account.ACCOUNTNUM)
    for t in transactions:
        payee_name = db.get_payee_name(t.PAYEEID)
        cat_id, sub_cat_id = util.get_categoryid_for_payee(
            payee_name, categories, db)
        t.CATEGID = cat_id
        t.SUBCATEGID = sub_cat_id
        t.save()
Example #15
    def ask_and_send(self, question_text):
        ''' Query the user yes/no to send the file and send it. '''
        if query_yes_no(question_text):
            return self.send()
Example #16
def main(arguments):
    parser = argparse.ArgumentParser(description='')

    parser.add_argument(
        '--cuda',
        help='-1 if no CUDA, else gpu id (single gpu is enough)',
        type=int,
        default=0)
    parser.add_argument('--random_seed',
                        help='random seed to use',
                        type=int,
                        default=111)

    # Paths and logging
    parser.add_argument('--log_file',
                        help='file to log to',
                        type=str,
                        default='training.log')
    parser.add_argument('--store_root',
                        help='store root path',
                        type=str,
                        default='checkpoint')
    parser.add_argument('--store_name',
                        help='store name prefix for current experiment',
                        type=str,
                        default='sts')
    parser.add_argument('--suffix',
                        help='store name suffix for current experiment',
                        type=str,
                        default='')
    parser.add_argument('--word_embs_file',
                        help='file containing word embs',
                        type=str,
                        default='glove/glove.840B.300d.txt')

    # Training resuming flag
    parser.add_argument('--resume',
                        help='whether to resume training',
                        action='store_true',
                        default=False)

    # Tasks
    parser.add_argument('--task',
                        help='training and evaluation task',
                        type=str,
                        default='sts-b')

    # Preprocessing options
    parser.add_argument('--max_seq_len',
                        help='max sequence length',
                        type=int,
                        default=40)
    parser.add_argument('--max_word_v_size',
                        help='max word vocab size',
                        type=int,
                        default=30000)

    # Embedding options
    parser.add_argument('--dropout_embs',
                        help='dropout rate for embeddings',
                        type=float,
                        default=.2)
    parser.add_argument('--d_word',
                        help='dimension of word embeddings',
                        type=int,
                        default=300)
    parser.add_argument('--glove',
                        help='1 if use glove, else from scratch',
                        type=int,
                        default=1)
    parser.add_argument('--train_words',
                        help='1 if make word embs trainable',
                        type=int,
                        default=0)

    # Model options
    parser.add_argument('--d_hid',
                        help='hidden dimension size',
                        type=int,
                        default=1500)
    parser.add_argument('--n_layers_enc',
                        help='number of RNN layers',
                        type=int,
                        default=2)
    parser.add_argument('--n_layers_highway',
                        help='number of highway layers',
                        type=int,
                        default=0)
    parser.add_argument('--dropout',
                        help='dropout rate to use in training',
                        type=float,
                        default=0.2)

    # Training options
    parser.add_argument('--batch_size',
                        help='batch size',
                        type=int,
                        default=128)
    parser.add_argument('--optimizer',
                        help='optimizer to use',
                        type=str,
                        default='adam')
    parser.add_argument('--lr',
                        help='starting learning rate',
                        type=float,
                        default=1e-4)
    parser.add_argument(
        '--loss',
        type=str,
        default='mse',
        choices=['mse', 'l1', 'focal_l1', 'focal_mse', 'huber'])
    parser.add_argument('--huber_beta',
                        type=float,
                        default=0.3,
                        help='beta for huber loss')
    parser.add_argument('--max_grad_norm',
                        help='max grad norm',
                        type=float,
                        default=5.)
    parser.add_argument('--val_interval',
                        help='number of iterations between validation checks',
                        type=int,
                        default=400)
    parser.add_argument('--max_vals',
                        help='maximum number of validation checks',
                        type=int,
                        default=100)
    parser.add_argument('--patience',
                        help='patience for early stopping',
                        type=int,
                        default=10)

    # imbalanced related
    # LDS
    parser.add_argument('--lds',
                        action='store_true',
                        default=False,
                        help='whether to enable LDS')
    parser.add_argument('--lds_kernel',
                        type=str,
                        default='gaussian',
                        choices=['gaussian', 'triang', 'laplace'],
                        help='LDS kernel type')
    parser.add_argument('--lds_ks',
                        type=int,
                        default=5,
                        help='LDS kernel size: should be odd number')
    parser.add_argument('--lds_sigma',
                        type=float,
                        default=2,
                        help='LDS gaussian/laplace kernel sigma')
    # FDS
    parser.add_argument('--fds',
                        action='store_true',
                        default=False,
                        help='whether to enable FDS')
    parser.add_argument('--fds_kernel',
                        type=str,
                        default='gaussian',
                        choices=['gaussian', 'triang', 'laplace'],
                        help='FDS kernel type')
    parser.add_argument('--fds_ks',
                        type=int,
                        default=5,
                        help='FDS kernel size: should be odd number')
    parser.add_argument('--fds_sigma',
                        type=float,
                        default=2,
                        help='FDS gaussian/laplace kernel sigma')
    parser.add_argument('--start_update',
                        type=int,
                        default=0,
                        help='which epoch to start FDS updating')
    parser.add_argument(
        '--start_smooth',
        type=int,
        default=1,
        help='which epoch to start using FDS to smooth features')
    parser.add_argument('--bucket_num',
                        type=int,
                        default=50,
                        help='maximum bucket considered for FDS')
    parser.add_argument('--bucket_start',
                        type=int,
                        default=0,
                        help='minimum(starting) bucket for FDS')
    parser.add_argument('--fds_mmt',
                        type=float,
                        default=0.9,
                        help='FDS momentum')

    # re-weighting: SQRT_INV / INV
    parser.add_argument('--reweight',
                        type=str,
                        default='none',
                        choices=['none', 'sqrt_inv', 'inverse'],
                        help='cost-sensitive reweighting scheme')
    # two-stage training: RRT
    parser.add_argument(
        '--retrain_fc',
        action='store_true',
        default=False,
        help='whether to retrain last regression layer (regressor)')
    parser.add_argument(
        '--pretrained',
        type=str,
        default='',
        help='pretrained checkpoint file path to load backbone weights for RRT'
    )
    # evaluate only
    parser.add_argument('--evaluate',
                        action='store_true',
                        default=False,
                        help='evaluate only flag')
    parser.add_argument('--eval_model',
                        type=str,
                        default='',
                        help='the model to evaluate on; if not specified, '
                        'use the default best model in store_dir')

    args = parser.parse_args(arguments)

    os.makedirs(args.store_root, exist_ok=True)

    if not args.lds and args.reweight != 'none':
        args.store_name += f'_{args.reweight}'
    if args.lds:
        args.store_name += f'_lds_{args.lds_kernel[:3]}_{args.lds_ks}'
        if args.lds_kernel in ['gaussian', 'laplace']:
            args.store_name += f'_{args.lds_sigma}'
    if args.fds:
        args.store_name += f'_fds_{args.fds_kernel[:3]}_{args.fds_ks}'
        if args.fds_kernel in ['gaussian', 'laplace']:
            args.store_name += f'_{args.fds_sigma}'
        args.store_name += f'_{args.start_update}_{args.start_smooth}_{args.fds_mmt}'
    if args.retrain_fc:
        args.store_name += f'_retrain_fc'

    if args.loss == 'huber':
        args.store_name += f'_{args.loss}_beta_{args.huber_beta}'
    else:
        args.store_name += f'_{args.loss}'

    args.store_name += f'_seed_{args.random_seed}_valint_{args.val_interval}_patience_{args.patience}' \
                       f'_{args.optimizer}_{args.lr}_{args.batch_size}'
    args.store_name += f'_{args.suffix}' if len(args.suffix) else ''

    args.store_dir = os.path.join(args.store_root, args.store_name)

    if not args.evaluate and not args.resume:
        if os.path.exists(args.store_dir):
            if query_yes_no('overwrite previous folder: {} ?'.format(
                    args.store_dir)):
                shutil.rmtree(args.store_dir)
                print(args.store_dir + ' removed.\n')
            else:
                raise RuntimeError('Output folder {} already exists'.format(
                    args.store_dir))
        logging.info(f"===> Creating folder: {args.store_dir}")
        os.makedirs(args.store_dir)

    # Logistics
    logging.root.handlers = []
    if os.path.exists(args.store_dir):
        log_file = os.path.join(args.store_dir, args.log_file)
        logging.basicConfig(
            level=logging.INFO,
            format="%(asctime)s | %(message)s",
            handlers=[logging.FileHandler(log_file),
                      logging.StreamHandler()])
    else:
        logging.basicConfig(level=logging.INFO,
                            format="%(asctime)s | %(message)s",
                            handlers=[logging.StreamHandler()])
    logging.info(args)

    seed = random.randint(1,
                          10000) if args.random_seed < 0 else args.random_seed
    random.seed(seed)
    torch.manual_seed(seed)
    if args.cuda >= 0:
        logging.info("Using GPU %d", args.cuda)
        torch.cuda.set_device(args.cuda)
        torch.cuda.manual_seed_all(seed)
    logging.info("Using random seed %d", seed)

    # Load tasks
    logging.info("Loading tasks...")
    start_time = time.time()
    tasks, vocab, word_embs = build_tasks(args)
    logging.info('\tFinished loading tasks in %.3fs', time.time() - start_time)

    # Build model
    logging.info('Building model...')
    start_time = time.time()
    model = build_model(args, vocab, word_embs, tasks)
    logging.info('\tFinished building model in %.3fs',
                 time.time() - start_time)

    # Set up trainer
    iterator = BasicIterator(args.batch_size)
    trainer, train_params, opt_params = build_trainer(args, model, iterator)

    # Train
    if tasks and not args.evaluate:
        if args.retrain_fc and len(args.pretrained):
            model_path = args.pretrained
            assert os.path.isfile(
                model_path), f"No checkpoint found at '{model_path}'"
            model_state = torch.load(model_path,
                                     map_location=device_mapping(args.cuda))
            trainer._model = resume_checkpoint(trainer._model,
                                               model_state,
                                               backbone_only=True)
            logging.info(f'Pre-trained backbone weights loaded: {model_path}')
            logging.info('Retrain last regression layer only!')
            for name, param in trainer._model.named_parameters():
                if "sts-b_pred_layer" not in name:
                    param.requires_grad = False
            logging.info(
                f'Only optimize parameters: {[n for n, p in trainer._model.named_parameters() if p.requires_grad]}'
            )
            to_train = [(n, p) for n, p in trainer._model.named_parameters()
                        if p.requires_grad]
        else:
            to_train = [(n, p) for n, p in model.named_parameters()
                        if p.requires_grad]

        trainer.train(tasks, args.val_interval, to_train, opt_params,
                      args.resume)
    else:
        logging.info("Skipping training...")

    logging.info('Testing on test set...')
    model_path = os.path.join(
        args.store_dir,
        "model_state_best.th") if not len(args.eval_model) else args.eval_model
    assert os.path.isfile(model_path), f"No checkpoint found at '{model_path}'"
    logging.info(f'Evaluating {model_path}...')
    model_state = torch.load(model_path,
                             map_location=device_mapping(args.cuda))
    model = resume_checkpoint(model, model_state)
    te_preds, te_labels, _ = evaluate(model,
                                      tasks,
                                      iterator,
                                      cuda_device=args.cuda,
                                      split="test")
    if not len(args.eval_model):
        np.savez_compressed(os.path.join(args.store_dir,
                                         f"{args.store_name}.npz"),
                            preds=te_preds,
                            labels=te_labels)

    logging.info("Done testing.")
Example #17
def format_date(date):
    return date.strftime("%Y-%m-%d")


settings = Settings.Settings()

account_number = sys.argv[1]
file_to_import = sys.argv[2]

# Get sample of file
if not os.path.isfile(file_to_import):
    print "File '%s' does not exist." % (file_to_import)
    print "Usage: mmeximporter.py <account_number> <file_to_import>"
    sys.exit(-1)

confirm = util.query_yes_no("Importing %s into account: %s, ready?" %
                            (file_to_import, account_number))
if not confirm:
    sys.exit(-1)

db = MMexDB.MMexDb(settings)

reader = csv.reader(open(file_to_import, "r"))
schema = settings.getSchema(account_number)

categories = settings.getCategories()

if 'header' in schema and schema['header'] == "True":
    reader.next()

DATE = schema["date"]
DATE_FORMAT = schema['date_format']
Example #19
    print('Creating profile {}'.format(profile_name))

    # Delete the old profile directory so we can start from scratch.
    try:
        shutil.rmtree(PROFILE_DIR)
        print('Removed old profile directory.')
    except OSError as ex:
        if ex.errno == errno.ENOENT:
            print('No old profile directory found.')
        else:
            print(ex)
            print('Failed to remove old profile directory, exiting...')
            util.exit()
    util.print_separator()

    forge = util.query_yes_no('Would you like to use Forge?', default='no')
    if forge:
        if os.path.exists(FORGE_DIR):
            print('The required Forge version has been detected on your system.')
            message = 'reinstall'
        else:
            print('The required Forge version has not been detected on your system.')
            message = 'install'
        # Ask the user whether or not they need Forge.
        if util.query_yes_no('Do you need to {} Forge?'.format(message), default='no'):
            forge = util.MODS['mods']['forge']
            name = forge['name']
            version = forge['version']
            jarName = 'forge.jar'

            if sys.platform == 'win32' or sys.platform == 'cygwin':
Example #20
def format_date(date):
  return date.strftime("%Y-%m-%d")

settings = Settings.Settings()

account_number = sys.argv[1]
file_to_import = sys.argv[2]

# Get sample of file
if not os.path.isfile(file_to_import):
  print "File '%s' does not exist." % (file_to_import)
  print "Usage: mmeximporter.py <account_number> <file_to_import>"
  sys.exit(-1)

confirm = util.query_yes_no("Importing %s into account: %s, ready?" % (file_to_import, account_number))
if not confirm:
  sys.exit(-1)


db = MMexDB.MMexDb(settings)

reader = csv.reader(open(file_to_import, "r"))
schema = settings.getSchema(account_number)

categories = settings.getCategories()

if 'header' in schema and schema['header'] == "True":
  reader.next()
  
DATE = schema["date"]