Example #1
def drop_tables():
    """
    Drop all tables
    :return: None
    """

    connection = psycopg2.connect(
        host=Config.get('database', 'host'),
        port=Config.get('database', 'port'),
        database=Config.get('database', 'datastore_dbname'),
        user=Config.get('database', 'username'),
        password=Config.get('database', 'password')
    )

    if yesno('You are dropping all tables - all data will be deleted. Are you sure you want to continue?'):
        db_drop_table('table_updates', connection)
        # Delete all info in the module tables
        for task in [EcatalogueTask, ETaxonomyTask, EMultimediaTask]:
            db_drop_table(task.module_name, connection)

        for foreign_key in dataset_get_foreign_keys():
            db_drop_table(foreign_key.table, connection)

        connection.commit()
        connection.close()
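One caveat in the example above: if the user answers no, the connection is opened but never closed. A minimal variant that releases it either way (a sketch reusing the same names, with contextlib.closing from the standard library):

import contextlib

import psycopg2

def drop_tables():
    """
    Drop all tables
    :return: None
    """
    connection = psycopg2.connect(
        host=Config.get('database', 'host'),
        port=Config.get('database', 'port'),
        database=Config.get('database', 'datastore_dbname'),
        user=Config.get('database', 'username'),
        password=Config.get('database', 'password')
    )
    # closing() guarantees connection.close() even when the user declines.
    with contextlib.closing(connection):
        if yesno('You are dropping all tables - all data will be deleted. Are you sure you want to continue?'):
            db_drop_table('table_updates', connection)
            # Delete all info in the module tables
            for task in [EcatalogueTask, ETaxonomyTask, EMultimediaTask]:
                db_drop_table(task.module_name, connection)
            for foreign_key in dataset_get_foreign_keys():
                db_drop_table(foreign_key.table, connection)
            connection.commit()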
Example #2
def get_answers():
    questions = load_yaml(questions_file)
    default_vars_dict = load_yaml(defaults_vars_file)
    
    custom_vars_dict = dict()
    if path.exists(custom_vars_file):
        if yesno('Found custom vars file, load it ?', default='yes', suffix=':'):
            custom_vars_dict = load_yaml(custom_vars_file)

    vars_dict = default_vars_dict.copy()
    vars_dict.update(custom_vars_dict)

    result_dict = dict()

    for question in questions:
        question_var_name = question['var_name']
        question_type = question['type']
        result = ask_question(question, vars_dict)
        if question_type == 'free_text' and result != default_vars_dict[question_var_name]:
            result_dict[question_var_name] = result
        elif question_type == 'yesno' and not result:
            result_dict[question_var_name] = not default_vars_dict[question_var_name]

    # Save custom configuration file if different from the defaults
    if result_dict:
        try:
            with open(custom_vars_file, 'w') as file:
                file.write(dump(result_dict, default_flow_style=False))
        except Exception:
            # Best-effort save; ignore failures writing the custom vars file.
            pass
Example #3
def ask_question(question, vars_dict):
    default = vars_dict[question['var_name']]
    prompt_line = question['prompt_line']

    if question['type'] == 'free_text':
        return prompt(prompt_line, default=default, suffix=':')
    elif question['type'] == 'yesno':
        return yesno(prompt_line, default='yes' if default else 'no', suffix=':')
Example #4
def deploy_to_project(
    dst_project,
    no_confirm,
    secrets_local_dir,
    sets,
    all_services,
    secrets_src_project,
    env_files,
    template_dir,
    ignore_requires,
    scale_resources,
    custom_dir,
    pick,
    label,
    skip,
):
    if not custom_dir:
        path = appdirs_path / "custom"
        custom_dir = path if path.exists() else pathlib.Path(pathlib.os.getcwd()) / "custom"

    if not secrets_local_dir:
        path = appdirs_path / "secrets"
        secrets_local_dir = path if path.exists() else pathlib.Path(pathlib.os.getcwd()) / "secrets"

    if not dst_project:
        log.error("Error: no destination project given")
        sys.exit(1)

    verify_label(label)

    SecretImporter.local_dir = secrets_local_dir
    SecretImporter.source_project = secrets_src_project

    template_dir, specific_component, sets_selected, variables_data, confirm_msg = _parse_args(
        template_dir, all_services, sets, pick, dst_project, env_files
    )

    if not no_confirm and not prompter.yesno(confirm_msg):
        log.info("Aborted by user")
        sys.exit(0)

    switch_to_project(dst_project)

    DeployRunner(
        template_dir,
        dst_project,
        variables_data,
        ignore_requires=ignore_requires,
        service_sets_selected=sets_selected,
        resources_scale_factor=scale_resources,
        custom_dir=custom_dir,
        specific_component=specific_component,
        label=label,
        skip=skip.split(",") if skip else None,
        dry_run=False,
    ).run()

    list_routes(dst_project)
Example #5
def yesno(msg, default = True, **kwargs):
    import prompter

    d = 'yes' if default else 'no'
    r = prompter.yesno(msg, default = d, **kwargs)
    if not default:
        r = not r

    return r
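Context for the inversion above: prompter.yesno returns True when the answer matches the default, not when the answer is "yes" (which is also why the wipe() examples below treat a True result as "abort"). Assuming that convention, this wrapper normalizes the result so that True always means the user agreed:

# prompter.yesno('Delete?', default='no')  -> True when the user answers "no"
# yesno('Delete?', default=False)          -> True only when the user answers "yes";
#                                             the default only changes the suggested answer.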
Example #6
    def create_ckan_dataset(self):
        """
        Create a dataset on CKAN
        :return:
        """

        ckan = CKAN()

        pkg_dict = {
            'name': self.package_name,
            'notes': self.package_description,
            'title': self.package_title,
            'author': Config.get('ckan', 'dataset_author'),
            'license_id': Config.get('ckan', 'dataset_licence'),
            'resources': [
                {
                    'id': self.resource_id,
                    'name': self.resource_title,
                    'description': self.resource_description,
                    'format': self.resource_type,
                    'url': '_datastore_only_resource',
                    'url_type': 'dataset'
                }
            ],
            'dataset_category': Config.get('ckan', 'dataset_type'),
            'owner_org': Config.get('ckan', 'owner_org')
        }

        package = ckan.get_package(self.package_name)
        resource = ckan.get_resource(self.resource_id)
        # If we have a package, update resource modified date
        if package:
            logger.info('Updating CKAN dataset %s', self.package_name)
            package['last_modified'] = datetime.datetime.now().isoformat()
            ckan.update_package(package)
        else:
            if not yesno('Package {package_name} does not exist.  Do you want to create it?'.format(
                    package_name=self.package_name
            )):
                sys.exit("Import cancelled")

            # Check the resource doesn't already exist
            if resource:
                raise Exception('Resource {resource_title} ({resource_id}) already exists - package cannot be created'.format(
                    resource_title=self.resource_title, resource_id=self.resource_id))

            # Create the package
            ckan.create_package(pkg_dict)
Example #7
    def update_record(self, record, progress):
        """
        Update the record and save it back to Alma if any changes were made.
        Returns the number of changes made.
        """
        changes = 0
        for step in self.steps:
            changes += step.run(record.marc_record, progress)

        if changes == 0:
            return 0

        if self.interactivity == INTERACTIVITY_INCREASED and not yesno('Update this record?', default='yes'):
            return 0

        self.ils.put_record(record, interactive=self.interactivity != INTERACTIVITY_NONE, show_diff=self.show_diffs)

        return changes
Example #8
    def put_record(self, record, interactive=True, show_diff=False):
        """
        Store a Bib record to Alma

        :param show_diff: bool
        :param interactive: bool
        :type record: Bib
        """
        if record.cz_link is not None:
            log.warning(dedent(
                '''\
                Encountered a Community Zone record. Updating such records through the API will
                currently cause them to be de-linked from CZ, which is probably not what you want.
                Until Ex Libris fixes this, you're best off editing the record manually in Alma.\
                '''))

            if not interactive or yesno('Do you want to update the record and break CZ linkage?', default='no'):
                log.warning(' -> Skipping this record. You should update it manually in Alma!')
                return

            log.warning(' -> Updating the record. The CZ connection will be lost!')

        post_data = record.xml()
        diff = get_diff(record.orig_xml, post_data)
        additions = len([x for x in diff[2:] if x[0] == '+'])
        deletions = len([x for x in diff[2:] if x[0] == '-'])
        if show_diff:
            log.info('%d line(s) removed, %d line(s) added:\n%s', deletions, additions, format_diff(diff))
        else:
            log.debug('%d line(s) removed, %d line(s) added:\n%s', deletions, additions, format_diff(diff))

        if not self.dry_run:
            try:
                response = self.session.put(self.url('/bibs/{mms_id}', mms_id=record.id),
                                            data=BytesIO(post_data.encode('utf-8')),
                                            headers={'Content-Type': 'application/xml'})
                response.raise_for_status()
                record.init(response.text)

            except HTTPError:
                msg = '*** Failed to save record %s --- Please try to edit the record manually in Alma ***'
                log.error(msg, record.id)
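For reference, a get_diff along these lines would fit the counting above; this is only a sketch assuming difflib's unified_diff (whose first two output lines are the ---/+++ file headers that the diff[2:] slice skips), and the project's actual helper may differ:

import difflib

def get_diff(orig_xml, new_xml):
    # Returns a list of unified-diff lines. Hunk headers start with '@@',
    # so counting lines whose first character is '+' or '-' after the two
    # file headers yields the additions/deletions logged above.
    return list(difflib.unified_diff(
        orig_xml.splitlines(), new_xml.splitlines(), lineterm=''))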
Example #9
def _bulletlsf():
    parser = argparse.ArgumentParser(description="Lies")

    add_argument(parser , '-q' , '--queue' , environ_var='BSUB_QUEUE' , help_str='the queue to submit the job to')
    add_argument(parser , '-b' , '--bin'   , environ_var='BSUB_BIN'   , help_str='path to the MPI binary to run')
    add_argument(parser , '-s' , '--sla'   , environ_var='BSUB_SLA'   , help_str='the SLA to submit to', required=False)
    parser.add_argument('-c' , '--config', default='', help='the config file to use', required=False)
    
    add_argument(parser , '-p' , '--ptile'   , environ_var='BSUB_PTILE'   , help_str='the number of processors per host')
    
    parser.add_argument('-n', required=True, type=int, help='the number of cores')
    parser.add_argument('-W', '--Wait', default=3, type=int, help='the number of minutes before termination')
    parser.add_argument('-l', '--log', default='log.log', help='the log file to write to')
    
    args = parser.parse_args()
    
    optional_cmd = ''
    
    bincmd = args.bin
    if args.config != '':
        bincmd = bincmd + ' --file=' + args.config
    
    if args.queue == "bullet":
        queue    = "bulletmpi"
        args.sla = None

    elif args.queue == "oak":
        queue    = "beamphysics"
        args.sla = None
    else:
        queue = args.queue

    if args.sla is not None:
        optional_cmd += ' -sla {sla}'.format(sla=args.sla)

    command = "bsub -a mympi -q {queue}{optional_cmd} -R \"span[ptile={ptile}]\" -W {Wait} -oo {log} -n {n} {bin}".format(queue=queue, optional_cmd=optional_cmd, sla=args.sla, Wait=args.Wait, log=args.log, n=args.n, bin=bincmd, ptile=args.ptile)

    print(command)

    if prompter.yesno('Is this command good?'):
        subprocess.run(shlex.split(command))
Example #10
File: submit.py Project: noa/brute
def main():
    # Get command line arguments
    args, leftovers = get_brute_args()

    # Pop the script arguments from leftovers
    args.brute_script_arg = []
    while len(leftovers) > 0 and is_script_arg(leftovers[0]):
        args.brute_script_arg += [ leftovers.pop(0) ]

    # Read the configuration file, if any
    config = get_conf(args)

    # Get absolute workspace path
    args.brute_dir = os.path.abspath(args.brute_dir)
    print('workspace = ' + args.brute_dir)

    # Get absolute script path
    args.brute_script = os.path.abspath(args.brute_script)
    print('script = ' + args.brute_script)

    # Make sure the script exists
    assert os.path.isfile(args.brute_script), "script argument is not a file"

    # Get the product of parameters
    params = get_job_params(leftovers)

    print(str(len(params)) + ' tasks')

    if not args.brute_no_prompt:
        proceed = yesno('Submit?')
        if not proceed:
            sys.exit(0)

    # Run the grid search
    loader = MyLoader()
    MyLoader.args = args
    MyLoader.params = params
    MyLoader.config = config
    sys.exit(DoitMain(loader).run(['--backend', 'json']))
Example #11
def wipe(no_confirm, project, label):
    extra_msg = ""
    if label:
        extra_msg = " with label '{}'".format(label)

    if not no_confirm and prompter.yesno(
        "I'm about to delete everything in project '{}'{}.  Continue?".format(project, extra_msg),
        default="no",
    ):
        sys.exit(0)

    switch_to_project(project)

    if label:
        args = ["--selector", label]
    else:
        args = ["--all"]

    oc("delete", "all", *args, _exit_on_err=False)
    oc("delete", "configmap", *args, _exit_on_err=False)
    oc("delete", "secret", *args, _exit_on_err=False)
    oc("delete", "pvc", *args, _exit_on_err=False)
Example #12
def main():

    parser = argparse.ArgumentParser(description='Import media and feeds listing from the production machine into the local database.')

    parser.add_argument( '--api-key', required=True )

    args = parser.parse_args()
    api_key = args.api_key

    if not yesno('This will erase all data in the current media cloud database. Are you sure you want to continue?'):
        exit()


    mc = mediacloud.api.MediaCloud(api_key, all_fields=True)

    conn = mc_database.connect_to_database()
    print "truncating tables"

    truncate_tables( conn )
    update_db_sequences(conn)
    print "obtaining tag sets"
    all_tag_sets = get_tag_sets( mc )

    print "importing tag sets"
    add_tag_sets_to_database( conn, all_tag_sets )

    print "obtaining media"
    all_media = get_media( mc )

    print "importing media"
    add_media_to_database( conn, all_media )

    print "importing feeds from media"
    add_feeds_from_media_to_database( conn, mc, all_media )

    print "updating sequences"
    update_db_sequences(conn)
Example #13
    def do_addlabels(self, args):
        """add label(s)"""
        if not args.label_names:
            c_l_map = self._jw.component_labels_map
            issue_component = self._jw.get_component(self.issue).lower()
            if issue_component:
                args.label_names = _selector(
                    c_l_map[issue_component] if issue_component in c_l_map else
                    [], "Select label").split(' ')
            else:
                args.label_names = self.input("Enter label(s):").split(' ')
        current_labels = self.issue.fields.labels
        # use set to de-dupe but convert back to list for json serialization
        updated_labels = list(set(current_labels + args.label_names))
        try:
            self._jw.update_labels(self.issue, updated_labels)
        except InvalidLabelError as e:
            print(str(e))
            confirm = prompter.yesno("Add these labels anyway?")
            if confirm:
                try:
                    self._jw.update_labels(self.issue, updated_labels)
                except InvalidLabelError:
                    pass
Example #14
def wipe(no_confirm, project, label):
    server = get_server_info()
    extra_msg = ""
    if label:
        extra_msg = " with label '{}'".format(label)

    if not no_confirm and prompter.yesno(
            "I'm about to delete everything in project '{}'{} on server {} -- continue?"
            .format(project, extra_msg, server),
            default="no",
    ):
        sys.exit(0)

    switch_to_project(project)

    if label:
        args = ["--selector", label]
    else:
        args = ["--all"]

    oc("delete", "all", *args, _exit_on_err=False)
    oc("delete", "configmap", *args, _exit_on_err=False)
    oc("delete", "secret", *args, _exit_on_err=False)
    oc("delete", "pvc", *args, _exit_on_err=False)
Example #15
def rmtree_after_confirmation(path, force=False):
    if Path(path).exists():
        if not force and not prompter.yesno('Overwrite %s?' % path):
            sys.exit(0)
        else:
            shutil.rmtree(path)
Example #16
from prompter import yesno

done = False
while not done:
    try:
        i = int(raw_input("Integer? "))
        done = True
    except ValueError:
        continueit = yesno('Continue? ')
        if continueit:
            done = False
        else:
            done = True
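The done-flag juggling can be collapsed; an equivalent sketch of the same retry loop (keeping the example's Python 2 raw_input):

from prompter import yesno

done = False
while not done:
    try:
        i = int(raw_input("Integer? "))
        done = True
    except ValueError:
        # Stop retrying as soon as the user declines to continue.
        done = not yesno('Continue? ')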
Example #17
        except:
                print ("Checking...", end="")
                dot()
                print("[Done]")
                print("Oops!  That was no valid number.  Try again...")
                
print ("Applying...", end="")
dot()
print ("Setting Cryptomap to ", CRYPT)

ARPT = input("Please enter The Airport Code: ")
print ("Applying...", end="")
dot()
print ("Setting Airport Code to ", ARPT)

HA = yesno('Will this be a HA Pair?', default='no')
print ("Applying...", end="")
dot()
print ("Setting HA Pair in config")

DHCP = yesno('Enable DHCP for the DATA VLAN?', default='no')
print ("Applying...", end="")
dot()
print ("Setting DATA DHCP config")

VOICE = yesno('Enable DHCP for the VOICE VLAN?', default='no')
print ("Applying...", end="")
dot()
print ("Setting Voice interface in config")

while True:
Example #18
def check_and_update_customer_information(cr, customer_id):
    # If the customer is found, the tool compares the address in the database by
    # `<house number> <first token>` of the address. If there is a discrepancy, it
    # prompts the user to confirm the change.
    # If the phone number is a mismatch and there is a new one, it just replaces it
    # and moves the old one to the 2nd phone field in the customer record.
    # If the email is a mismatch and there is a new one, it just replaces it without
    # prompting and moves it to the 2nd email field in the customer record.

    customer = Customer.get(customer_id, qb=qb_client)

    #phone number update
    phone_new = cr.customer_phone

    primary_phone_obj = customer.PrimaryPhone
    #print(type(customer.PrimaryPhone))
    if phone_new is not None and primary_phone_obj is not None:
        phone_orig = customer.PrimaryPhone.FreeFormNumber
        formatted_new = phonenumbers.format_number(
            phonenumbers.parse(phone_new, "US"),
            phonenumbers.PhoneNumberFormat.NATIONAL)
        formatted_orig = phonenumbers.format_number(
            phonenumbers.parse(phone_orig, "US"),
            phonenumbers.PhoneNumberFormat.NATIONAL)

        if formatted_new != formatted_orig:
            #update the phone field in the customer
            logging.warning(
                "The database customer phone number:[{}] is different from the order: [{}]. Updating..."
                .format(formatted_orig, formatted_new))
            orig_phone_struct = PhoneNumber()
            orig_phone_struct.FreeFormNumber = formatted_orig
            customer.AlternatePhone = orig_phone_struct
            customer.PrimaryPhone.FreeFormNumber = formatted_new
            customer.save(qb_client)
    else:
        if phone_new is not None:
            formatted_new = phonenumbers.format_number(
                phonenumbers.parse(phone_new, "US"),
                phonenumbers.PhoneNumberFormat.NATIONAL)
            logging.warning(
                "The database customer phone number is empty from the order: [{}]. Updating..."
                .format(formatted_new))
            new_phone_struct = PhoneNumber()
            new_phone_struct.FreeFormNumber = formatted_new
            customer.PrimaryPhone = new_phone_struct
            customer.save(qb_client)

    #Customer email update
    customer = Customer.get(customer_id, qb=qb_client)
    email_new = cr.customer_email
    email_orig_obj = customer.PrimaryEmailAddr
    if email_new is not None and email_orig_obj is not None:
        email_orig = customer.PrimaryEmailAddr.Address

        if email_orig != email_new:
            # update the email field in the customer
            logging.warning(
                "The database customer email:[{}] is different from the order: [{}]. Updating..."
                .format(email_orig, email_new))
            customer.PrimaryEmailAddr.Address = email_new
            customer.save(qb_client)
    else:
        if email_new is not None:
            logging.warning(
                "The database customer email address is empty from the order: [{}]. Updating..."
                .format(email_new))
            new_email_struct = EmailAddress()
            new_email_struct.Address = email_new
            customer.PrimaryEmailAddr = new_email_struct
            customer.save(qb_client)

    #Customer address update
    customer = Customer.get(customer_id, qb=qb_client)
    address_line1_new = cr.customer_street
    address_line1_old_obj = customer.BillAddr
    if address_line1_new is not None and address_line1_old_obj is not None:
        address_line1_old = customer.BillAddr.Line1
        if address_line1_new != address_line1_old:
            # update the billing address in the customer
            logging.warning(
                "The database billing address:[{}] is different from the order: [{}]. Updating..."
                .format(address_line1_old, address_line1_new))
            answer = yesno(
                "Update the address from [{}] to [{}] for customer: [{}]".
                format(address_line1_old, address_line1_new,
                       customer.DisplayName))
            if answer:
                customer.BillAddr.Line1 = address_line1_new
                customer.BillAddr.City = cr.customer_city
                customer.BillAddr.CountrySubDivisionCode = cr.customer_state
                customer.BillAddr.PostalCode = cr.customer_zip
                customer.ShipAddr = customer.BillAddr
                try:
                    customer.save(qb_client)
                except ValidationException as ve:
                    print(ve.detail)
    else:
        if address_line1_new is not None:
            logging.warning(
                "The database customer billing address is empty from the order: [{}]. Updating..."
                .format(address_line1_new))
            new_address_struct = Address()
            new_address_struct.Line1 = address_line1_new
            new_address_struct.City = cr.customer_city
            new_address_struct.CountrySubDivisionCode = cr.customer_state
            new_address_struct.PostalCode = cr.customer_zip

            customer.BillAddr = customer.ShipAddr = new_address_struct
            customer.save(qb_client)
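Note that the header comment describes comparing addresses by `<house number> <first token>`, while the body above compares the full Line1 strings. A hypothetical helper for the looser comparison the comment describes (the name and usage are illustrative, not from the source):

def address_key(line1):
    # '123 N. Main St.' -> ('123', 'n.'): house number plus the first street token.
    tokens = line1.split()
    return (tokens[0], tokens[1].lower()) if len(tokens) >= 2 else tuple(tokens)

# Prompt only when the coarse keys differ:
# if address_key(address_line1_new) != address_key(address_line1_old): ...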
Example #19
def create_encoder(parser, context, args):

	parser.add_argument('--files', required=True, metavar='FILE', type=str,
		nargs='+',
		help='File to process')

	parser.add_argument('--vocabulary-size', type=int, default=50000,
		help='Maximum number of word in the vocabulary')

	parser.add_argument('--char-size', type=int, default=200,
		help='Maximum number of characters in the vocabulary')

	parser.add_argument('--char-count', type=int, default=1,
		help='Minimum count of characters in the vocabulary (all characters are\
			included by default)')

	parser.add_argument('--tokenizer', type=str, default='word',
		choices=('space', 'char', 'word'),
		help='Tokenizer flag (space, char, word)')

	parser.add_argument('--lowercase', action='store_true',
		help='Lowercase the data')

	parser.add_argument('--hybrid', action='store_true',
		help='Create a hybrid word/character vocabulary')

	parser.add_argument('--save-to', required=True, metavar='FILE', type=str,
		help='Output file name')

	args = parser.parse_args(args)

	from modules.text import TextEncoder

	if args.tokenizer == 'char':
		tokenize = lambda s: list(s.strip())
	elif args.tokenizer == 'space' or args.tokenizer == 'bpe':
		tokenize = str.split
	elif args.tokenizer == 'word':
		import nltk
		from nltk import word_tokenize as tokenize

	token_count = Counter()
	char_count = Counter()

	character = args.tokenizer == 'char'    

	for filename in args.files:
		log.info('Processing %s' % os.path.basename(filename))
		with open(filename, 'rt') as f:
			for line in f:
				line = line.lower() if args.lowercase else line
				tokens = tokenize(line)
				token_count.update(tokens)
				if args.hybrid:
					char_count.update(''.join(tokens))
			f.close()

	log.info('Creating %s encoder' % os.path.splitext(args.save_to)[0])
	if args.hybrid:
		char_encoder = TextEncoder(
				counts = char_count,
				min_count = args.char_count,
				max_vocab = args.char_size,
				special=('<UNK>',))
		encoder = TextEncoder(
				counts = token_count,
				max_vocab = args.vocabulary_size,
				sub_encoder = char_encoder)
	else:
		encoder = TextEncoder(
				counts=token_count, 
				max_vocab=args.vocabulary_size,
				min_count=args.char_count if args.tokenizer == 'char' else None,
				special=('<S>', '</S>') + (() if args.tokenizer == 'char' else ('<UNK>',))
			)

	if os.path.isfile(os.path.splitext(args.save_to)[0] + ".vocab"):
		if not yesno('Encoder already exists. Replace?', default='yes'):
			args.save_to = prompt('%s' % log('Enter new encoder name:'))

	log.info('Exporting %s encoder' % os.path.basename(args.save_to))
	with open(os.path.splitext(args.save_to)[0] + ".vocab", 'wb') as f:
		pickle.dump(encoder, f, -1)
		f.close()

	log.info('Success')
Example #20
def _setup_config():
    filename = None
    print("It looks like you have no config created.\n")
    if prompter.yesno("Create one now?"):
        filename = _create_config_file()
    return filename
Example #21
###################################################################
# Create the CSV from the selected data

with open(valueForShotgun, "w") as scoreFile:  # open the CSV
    scoreFileWriter = csv.writer(
        scoreFile, lineterminator='\n',
        dialect='excel')  # lineterminator avoids blank lines in between
    scoreFileWriter.writerow(
        docTemplateHeader)  # write the header template first
    for row in listNoHeader:
        listNumItem = len(row)
        listCut = row[4:]  # Discard useless info, start the list from the ID
        taskBriefStatus = row[0]
        if taskBriefStatus == "BRIEFED":  # Check if the task is ready to be copied to Shotgun
            if listNumItem != 0:  # just checking the list exists
                scoreFileWriter.writerow(listCut)

os.startfile(
    valueForShotgun)  # open the csv with the content to copy to shotgun

###################################################################
# Update the Gdoc with the SHOTGUN status
userInput = yesno(
    'Do you want to update your Gdoc %s Brief Status from BRIEFED to SHOTGUN?'
    % (sheetUser))
if userInput:
    searchReplace(separatorHeader + 1, rowNumber)

time.sleep(100)
Example #22
def make_seeds(conn):
    cur = conn.cursor()

    cur.execute(
        "create table if not exists users( id serial primary key, name character varying, age integer)"
    )

    cur.execute("INSERT INTO users (name, age) VALUES ('aaa', 111)")
    cur.execute("INSERT INTO users (name, age) VALUES ('bbb', 222)")
    cur.execute("INSERT INTO users (name, age) VALUES ('ccc', 333)")
    cur.execute("INSERT INTO users (name, age) VALUES ('eee', 444)")
    cur.execute("INSERT INTO users (name, age) VALUES ('fff', 555)")
    cur.execute("INSERT INTO users (name, age) VALUES ('ggg', 666)")
    cur.execute("INSERT INTO users (name, age) VALUES ('hhh', 777)")
    cur.execute("INSERT INTO users (name, age) VALUES ('iii', 888)")
    cur.execute("INSERT INTO users (name, age) VALUES ('jjj', 999)")
    cur.execute("INSERT INTO users (name, age) VALUES ('kkk', 123)")

    conn.commit()
    conn.close()


if __name__ == "__main__":
    conn = open_db("127.0.0.1", "6543", "postgres", "postgres", "postgres")
    if yesno('Does an old db exist?'):
        drop_db(conn, "my_db")
    create_db(conn, "my_db")

    conn = open_db("127.0.0.1", "6543", "postgres", "postgres", "my_db")
    make_seeds(conn)
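The ten INSERT statements could be collapsed with executemany; a sketch of the same seeding, assuming the cursor is psycopg2-style (%s placeholders):

def make_seeds(conn):
    cur = conn.cursor()
    cur.execute(
        "create table if not exists users( id serial primary key, name character varying, age integer)"
    )
    # One round-trip per row, but a single statement in the source code.
    rows = [('aaa', 111), ('bbb', 222), ('ccc', 333), ('eee', 444), ('fff', 555),
            ('ggg', 666), ('hhh', 777), ('iii', 888), ('jjj', 999), ('kkk', 123)]
    cur.executemany("INSERT INTO users (name, age) VALUES (%s, %s)", rows)
    conn.commit()
    conn.close()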
Example #23
    def register(self,
                 cloud_scene,
                 path_output_alignment,
                 distance_threshold,
                 init_with_global_features=True,
                 point_to_plane=True):
        def preprocess_cloud(pcd, voxel_size=0.005):
            print(":: Downsample with a voxel size %.3f." % voxel_size)
            pcd_down = open3d.geometry.voxel_down_sample(pcd, voxel_size)

            radius_normal = voxel_size * 2
            print(":: Estimate normal with search radius %.3f." %
                  radius_normal)
            open3d.geometry.estimate_normals(
                pcd_down,
                open3d.geometry.KDTreeSearchParamHybrid(radius=radius_normal,
                                                        max_nn=30))

            radius_feature = voxel_size * 5
            print(":: Compute FPFH feature with search radius %.3f." %
                  radius_feature)
            pcd_fpfh = open3d.registration.compute_fpfh_feature(
                pcd_down,
                open3d.geometry.KDTreeSearchParamHybrid(radius=radius_feature,
                                                        max_nn=100))
            return pcd_down, pcd_fpfh

        object_sampled, object_fpfh = preprocess_cloud(self.cloud)
        scene_sampled, scene_fpfh = preprocess_cloud(cloud_scene)

        if init_with_global_features is False:
            transformation = register_selected_points(self.cloud, cloud_scene,
                                                      True)
        else:
            print(":: Execute RANSAC alignment")
            transformation = open3d.registration.registration_ransac_based_on_feature_matching(
                object_sampled, scene_sampled, object_fpfh, scene_fpfh,
                distance_threshold,
                open3d.registration.TransformationEstimationPointToPoint(
                    False), 4,
                [
                    open3d.registration.CorrespondenceCheckerBasedOnEdgeLength(
                        0.9),
                    open3d.registration.CorrespondenceCheckerBasedOnDistance(
                        distance_threshold)
                ], open3d.registration.RANSACConvergenceCriteria(
                    4000000, 500)).transformation
            print(":: Result:\n", transformation)
            print(":: Visualize initial alignment ...")
            draw_registration_result(object_sampled, scene_sampled,
                                     transformation)
            if not prompter.yesno(
                    'Is this initial alignment good enough? (Otherwise select matching points manually)'
            ):
                transformation = register_selected_points(
                    self.cloud, cloud_scene, True)

        alignment_accepted = False
        if point_to_plane:
            icp_estimation_method = open3d.registration.TransformationEstimationPointToPlane(
            )
        else:
            icp_estimation_method = open3d.registration.TransformationEstimationPointToPoint(
            )
        while not alignment_accepted:
            print(":: Execute ICP alignment")
            transformation = open3d.registration.registration_icp(
                object_sampled, scene_sampled, distance_threshold,
                transformation, icp_estimation_method).transformation
            print(":: Result:\n", transformation)
            print(":: Visualize refined alignment ...")
            draw_registration_result(object_sampled, scene_sampled,
                                     transformation)

            if prompter.yesno(
                    'Is alignment good? (Otherwise select matching points manually and run ICP)'
            ):
                alignment_accepted = True
            else:
                transformation = register_selected_points(
                    self.cloud, cloud_scene, True)
                if prompter.yesno('Skip ICP?'):
                    alignment_accepted = True

        # Write alignment to file
        np.savetxt(str(path_output_alignment), transformation, delimiter=",")
Example #24
from prompter import yesno
from definitions import *



Path = "/Users/erezcohen/Desktop/uBoone/AnalysisTreesAna" if flags.worker=="erez" else "/uboone/app/users/ecohen/AnalysisTreesAna"
TestSampleName = Path+"/TestSamples/testsample_" + TestSample + ".csv"
TestSampleScoresName = Path+"/TestSamples/testsample_" + TestSample + "_with_predicted_scores.csv"
model_path = ("/Users/erezcohen/Desktop/uBoone/" if flags.worker=="erez" else "/uboone/app/users/ecohen/") +"AnalysisTreesAna/GBDTmodels"




# (A) load the test sample data and the model
# ---------------------------------------
DoPredict = yesno('predict on MC?')


if (DoPredict):

    data = pd.read_csv( TestSampleName )

    if flags.verbose>0:
        print "loaded data and model"
        print "data: \n",data

    # (B) predict on the MC data
    # ---------------------------------------
    print "feature_names: ",feature_names
    data_scores = predict_cosmic.predict_data( data , model_path + "/" + ModelName + ".bst" , feature_names )
    # now dump the run and event number to csv to use as input to larsoft filter
Example #25
def make_seeds(conn):
    cur = conn.cursor()

    cur.execute(
        "create table if not exists users( id serial primary key, name character varying, age integer)"
    )

    cur.execute("INSERT INTO users (name, age) VALUES ('aaa', 111)")
    cur.execute("INSERT INTO users (name, age) VALUES ('bbb', 222)")
    cur.execute("INSERT INTO users (name, age) VALUES ('ccc', 333)")
    cur.execute("INSERT INTO users (name, age) VALUES ('eee', 444)")
    cur.execute("INSERT INTO users (name, age) VALUES ('fff', 555)")
    cur.execute("INSERT INTO users (name, age) VALUES ('ggg', 666)")
    cur.execute("INSERT INTO users (name, age) VALUES ('hhh', 777)")
    cur.execute("INSERT INTO users (name, age) VALUES ('iii', 888)")
    cur.execute("INSERT INTO users (name, age) VALUES ('jjj', 999)")
    cur.execute("INSERT INTO users (name, age) VALUES ('kkk', 123)")

    conn.commit()
    conn.close()


if __name__ == "__main__":
    conn = open_db("127.0.0.1", "6543", "postgres", "postgres", "postgres")
    if yesno("Does an old db exist?"):
        drop_db(conn, "my_db")
    create_db(conn, "my_db")

    conn = open_db("127.0.0.1", "6543", "postgres", "postgres", "my_db")
    make_seeds(conn)
Example #26
def create_order(sr):
    #Get the sales receipt

    sales_receipt = SalesReceipt()
    sr_body = {
        "domain":
        "QBO",
        "Balance":
        0,
        "CustomerRef": {
            "name": "",
            "value": "6"
        },
        "sparse":
        "false",
        "Line": [{
            #"Description": "Custom Design",
            "DetailType": "SalesItemLineDetail",  #required
            "SalesItemLineDetail": {
                "Qty": 1,
                #"UnitPrice": 75,
                "ItemRef": {  #required
                    "value": "44"  #black mulch (1-9)
                }
            },
            "LineNum": 1,
            "Amount": 0,
        }],
        "CustomField": [{
            "DefinitionId": "1",
            "Name": "Scout Credit",
            "Type": "StringType",
            "StringValue": ""
        }],
        "PaymentMethodRef": {
            "value": sr.payment_method_ref
        },
        "DepositToAccountRef": {
            "value": sr.deposit_account_ref
        },
        "CustomerMemo": {
            "value": sr.memo
        },
    }
    #amys = Customer.filter(start_position=1, max_results=25, Active=True, FamilyName="Smith", qb=qb_client)
    #amys = Customer.query("SELECT * from Customers where FamilyName='Smith'", qb=qb_client)
    #amys = qb_client.query("select count(*) from Customer Where Active=true and DisplayName LIKE '%Smith'")

    #customer_street_number = sr.customer_street.split(' ')[0]
    query = "Active=true and DisplayName = '" + sr.customer_name.lower() + "'"
    customers_count = 0
    try:
        customers_count = Customer.count(query, qb=qb_client)
    except ValidationException as ve:
        print(ve.detail)

    if customers_count == 0:
        # create a new customer?
        if AUTO_CREATE_CUSTOMERS:
            answer = yesno(
                "Customer [{}] not found residing on [{}]. Create the customer?"
                .format(sr.customer_name, sr.customer_street))
            if answer:
                logging.info(
                    "Creating the customer [{}] in quickbooks.".format(
                        sr.customer_name))
                customer = create_customer(sr)
                if customer is not None:
                    customers_count = 1
        else:
            logging.warning(
                "Customer [{}] not found. Not creating customer due to settings."
                .format(sr.customer_name))

    if customers_count == 1:
        #we have found a customer

        customers = Customer.where("Active=true and DisplayName LIKE '%" +
                                   sr.customer_name.lower() + "'",
                                   qb=qb_client)
        customer_id = customers[0].Id
        customer_name = customers[0].DisplayName
        logging.debug("Customer id: {}".format(customer_id))

        if customer_id is not None:
            check_and_update_customer_information(sr, customer_id)

        sr_body['CustomerRef']['value'] = customer_id
        sr_body['CustomerRef']['name'] = customer_name
        sr_body['Line'][0]['Amount'] = sr.total_price
        product_id = lookup_product(sr.product_name)
        sr_body['Line'][0]['SalesItemLineDetail']['ItemRef'][
            'value'] = product_id
        sr_body['Line'][0]['SalesItemLineDetail']['Qty'] = sr.product_qty
        sr_body['Line'][0]['SalesItemLineDetail'][
            'UnitPrice'] = sr.product_price
        logging.debug("Revised Customer: {}".format(sr_body))
        #print("SR Body: {}".format(sr_body))

        #post a new one
        sales_receipt = sales_receipt.from_json(sr_body)
        sales_receipt.TxnDate = sr.date

        #check for duplicates
        #get all customer sales receipts
        duplicate = False
        srs = SalesReceipt.filter(CustomerRef=customer_id, qb=qb_client)
        for asr in srs:
            #get item ref info
            item = Item.get(
                asr.Line[0].SalesItemLineDetail['ItemRef']['value'],
                qb=qb_client)
            #print(asr.Line[0].SalesItemLineDetail['ItemRef']['name'])
            asr_date = str(parse(asr.TxnDate).date())
            sr_date = str(parse(sr.date).date())
            if item.Name == sr.product_name \
                and asr_date == sr_date \
                and asr.Line[0].SalesItemLineDetail['Qty'] == sr.product_qty \
                and float(asr.TotalAmt) == float(sr.total_price):
                logging.warning(
                    "found a duplicate for this customer: {} on {} for item: {}, qty: {}, total: {}. skipping..."
                    .format(sr.customer_name, sr.date, sr.product_name,
                            sr.product_qty, sr.total_price))
                duplicate = True
        #add the item
        if not duplicate:
            try:
                sales_receipt.save(qb_client)
                logging.debug("SentBody: {}".format(json.dumps(sr_body)))
                logging.info(
                    "Successful entry of SalesReceipt: [{}] into quickbooks. OrderId:[{}], Item:[{}], Qty:[{}], Total:[{}]"
                    .format(sr.customer_last, sales_receipt.Id,
                            sr.product_name, sr.product_qty, sr.total_price))
            except QuickbooksException as e:
                logging.error("An error saving the sales_receipt: {}".format(
                    e.detail))
    elif customers_count > 1:
        logging.warning(
            "More than one customer matches name: [{}]. Cannot process record. Skipping."
            .format(sr.customer_last))
    else:
        print("no customer found")
Example #27
def yesno_checker(default, value, expected_result):
    with patch('prompter.get_input', return_value=value):
        returned_value = yesno('Does this work?', default=default)
        assert_equals(returned_value, expected_result)
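A possible driver for this checker, sketched as a nose-style test generator; the expected values assume prompter's answer-matches-default return convention:

def test_yesno():
    cases = [
        ('yes', 'yes', True),   # answer matches the default
        ('yes', 'no', False),   # answer differs from the default
        ('no', 'no', True),
        ('no', 'yes', False),
    ]
    for default, value, expected in cases:
        yield yesno_checker, default, value, expected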
Example #28
    def start(self):

        if self.ils.name is not None:
            log.debug('Alma environment: %s', self.ils.name)

        log.debug('Planned steps:')
        for i, step in enumerate(self.steps):
            log.debug(' %d. %s' % ((i + 1), step))

        # ------------------------------------------------------------------------------------
        # Part 1: Search SRU to find all bibliographic records carrying the subject term.
        # We have to filter the result list afterwards because
        #  - there is no dedicated index for Realfagstermer, so we have to search `alma.subjects`
        #  - the search is not precise, so e.g. "Monstre" will also match "Mønstre"
        #
        # In the future, once all records have $0, we can use the `alma.authority_id`
        # index instead.

        valid_records = set()
        pbar = None

        try:
            for marc_record in self.sru.search(self.cql_query):
                if pbar is None and self.show_progress and self.sru.num_records > 50:
                    pbar = tqdm(total=self.sru.num_records,
                                desc='Filtering SRU results')

                log.debug('Checking record %s', marc_record.id)
                record_matching = False
                grep_matching = False
                for n, step in enumerate(self.steps):
                    step_matching = step.match(marc_record)

                    for field in marc_record.fields:
                        if self.grep is None or self.grep in str(
                                field).lower():
                            grep_matching = True

                    if step_matching:
                        log.debug('Step %d did match', n)
                        record_matching = True
                    else:
                        log.debug('Step %d did not match', n)

                if record_matching and grep_matching:
                    valid_records.add(marc_record.id)

                if pbar is not None:
                    pbar.update()
            if pbar is not None:
                pbar.close()

        except TooManyResults:
            log.error((
                'More than 10,000 results would have to be checked, but the Alma SRU service does '
                'not allow us to retrieve more than 10,000 results. Annoying? Go vote for this:\n'
                'http://ideas.exlibrisgroup.com/forums/308173-alma/suggestions/'
                '18737083-sru-srw-increase-the-10-000-record-retrieval-limi'))
            return []

        if len(valid_records) == 0:
            log.info('No matching catalog records found')
            return []
        elif self.action in ['interactive', 'list']:
            log.info('%d catalog records found', len(valid_records))
        else:
            log.info('%d catalog records to be changed', len(valid_records))

            if self.dry_run:
                log.warning(
                    'DRY RUN: No catalog records will actually be changed!')

            if not self.dry_run and self.interactivity == INTERACTIVITY_STANDARD and not yesno(
                    'Continue?', default='yes'):
                log.info('Job aborted')
                return []

        # ------------------------------------------------------------------------------------
        # Part 2: We now have a list of MMS IDs for the bibliographic records we want to change.
        # We go through them one by one, fetch each record with the Bib API, modify it and post it back.

        self.records_changed = 0
        self.changes_made = 0
        for idx, mms_id in enumerate(valid_records):
            if self.action not in ['list', 'interactive']:
                log.info('Record %d/%d: %s', idx + 1, len(valid_records),
                         mms_id)

            record = self.ils.get_record(mms_id)

            if self.list_options.get('show_titles'):
                utf8print('{}\t{}'.format(record.marc_record.id,
                                          record.marc_record.title()))

            if self.list_options.get('show_subjects'):
                for field in record.marc_record.fields:
                    if field.tag.startswith('6'):
                        if len(self.source_concepts) > 0 and field.sf(
                                '2') == self.source_concepts[0].sf['2']:
                            utf8print('  {}{}{}'.format(
                                Fore.YELLOW, field, Style.RESET_ALL))
                        else:
                            utf8print('  {}{}{}'.format(
                                Fore.CYAN, field, Style.RESET_ALL))

            c = self.update_record(record,
                                   progress={
                                       'current': idx + 1,
                                       'total': len(valid_records)
                                   })

            if c > 0:
                self.records_changed += 1
                self.changes_made += c

        return valid_records
Example #29
    def start(self):

        if self.ils.name is not None:
            log.debug('Alma environment: %s', self.ils.name)

        log.debug('Planned steps:')
        for i, step in enumerate(self.steps):
            log.debug(' %d. %s' % ((i + 1), step))

        # ------------------------------------------------------------------------------------
        # Part 1: Search SRU to find all bibliographic records carrying the subject term.
        # We have to filter the result list afterwards because
        #  - there is no dedicated index for Realfagstermer, so we have to search `alma.subjects`
        #  - the search is not precise, so e.g. "Monstre" will also match "Mønstre"
        #
        # In the future, once all records have $0, we can use the `alma.authority_id`
        # index instead.

        valid_records = set()
        pbar = None

        try:
            for marc_record in self.sru.search(self.cql_query):
                if pbar is None and self.show_progress and self.sru.num_records > 50:
                    pbar = tqdm(total=self.sru.num_records, desc='Filtering SRU results')

                log.debug('Checking record %s', marc_record.id)
                record_matching = False
                grep_matching = False
                for n, step in enumerate(self.steps):
                    step_matching = step.match(marc_record)

                    for field in marc_record.fields:
                        if self.grep is None or self.grep in str(field).lower():
                            grep_matching = True

                    if step_matching:
                        log.debug('Step %d did match', n)
                        record_matching = True
                    else:
                        log.debug('Step %d did not match', n)

                if record_matching and grep_matching:
                    valid_records.add(marc_record.id)

                if pbar is not None:
                    pbar.update()
            if pbar is not None:
                pbar.close()

        except TooManyResults:
            log.error((
                'More than 10,000 results would have to be checked, but the Alma SRU service does '
                'not allow us to retrieve more than 10,000 results. Annoying? Go vote for this:\n'
                'http://ideas.exlibrisgroup.com/forums/308173-alma/suggestions/'
                '18737083-sru-srw-increase-the-10-000-record-retrieval-limi'
            ))
            return []

        if len(valid_records) == 0:
            log.info('No matching catalog records found')
            return []
        elif self.action in ['interactive', 'list']:
            log.info('%d catalog records found', len(valid_records))
        else:
            log.info('%d catalog records to be changed', len(valid_records))

            if self.dry_run:
                log.warning('DRY RUN: No catalog records will actually be changed!')

            if not self.dry_run and self.interactivity == INTERACTIVITY_STANDARD and not yesno('Continue?', default='yes'):
                log.info('Job aborted')
                return []

        # ------------------------------------------------------------------------------------
        # Part 2: We now have a list of MMS IDs for the bibliographic records we want to change.
        # We go through them one by one, fetch each record with the Bib API, modify it and post it back.

        self.records_changed = 0
        self.changes_made = 0
        for idx, mms_id in enumerate(valid_records):
            if self.action not in ['list', 'interactive']:
                log.info('Record %d/%d: %s', idx + 1, len(valid_records), mms_id)

            record = self.ils.get_record(mms_id)

            if self.list_options.get('show_titles'):
                utf8print('{}\t{}'.format(record.marc_record.id, record.marc_record.title()))

            if self.list_options.get('show_subjects'):
                for field in record.marc_record.fields:
                    if field.tag.startswith('6'):
                        if len(self.source_concepts) > 0 and field.sf('2') == self.source_concepts[0].sf['2']:
                            utf8print('  {}{}{}'.format(Fore.YELLOW, field, Style.RESET_ALL))
                        else:
                            utf8print('  {}{}{}'.format(Fore.CYAN, field, Style.RESET_ALL))

            c = self.update_record(record, progress={'current': idx + 1, 'total': len(valid_records)})

            if c > 0:
                self.records_changed += 1
                self.changes_made += c

        return valid_records
Example #30
import cPickle as pickle
import os
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
from prompter import yesno

file_path = './analysis.out'

premises, hypotheses, es, corr, goldlabels, predicteds = pickle.load(
    open(file_path, 'rb'))

plot_size = int(raw_input("How many to plot at once?\n"))
plot_wrong = yesno('Prompt only wrong plots?')
num_plotted = 0

num_batches = len(premises)


def num_to_name(num):
    return {0: "entailment", 1: 'neutral', 2: 'contradiction'}[num]


for i in range(num_batches):
    premise_batch = premises[i]
    hypothesis_batch = hypotheses[i]
    e_batch = es[i]
    correct_batch = corr[i]
    labels_batch = goldlabels[i]
    predicteds_batch = predicteds[i]
Example #31
def deploy_to_project(
    dst_project,
    no_confirm,
    secrets_local_dir,
    sets,
    all_services,
    secrets_src_project,
    env_files,
    template_dir,
    ignore_requires,
    scale_resources,
    custom_dir,
    pick,
    label,
    skip,
    watch,
):
    if not custom_dir:
        path = appdirs_path / "custom"
        custom_dir = path if path.exists() else pathlib.Path(
            pathlib.os.getcwd()) / "custom"

    if not secrets_local_dir:
        path = appdirs_path / "secrets"
        secrets_local_dir = path if path.exists() else pathlib.Path(
            pathlib.os.getcwd()) / "secrets"

    if not dst_project:
        log.error("Error: no destination project given")
        sys.exit(1)

    verify_label(label)

    SecretImporter.local_dir = secrets_local_dir
    SecretImporter.source_project = secrets_src_project

    template_dir, specific_component, sets_selected, variables_data, confirm_msg = _parse_args(
        template_dir, all_services, sets, pick, dst_project, env_files)

    if not no_confirm and not prompter.yesno(confirm_msg):
        log.info("Aborted by user")
        sys.exit(0)

    switch_to_project(dst_project)

    if watch:
        event_watcher = start_event_watcher(dst_project)

    DeployRunner(
        template_dir,
        dst_project,
        variables_data,
        ignore_requires=ignore_requires,
        service_sets_selected=sets_selected,
        resources_scale_factor=scale_resources,
        custom_dir=custom_dir,
        specific_component=specific_component,
        label=label,
        skip=skip.split(",") if skip else None,
        dry_run=False,
    ).run()

    if watch and event_watcher:
        event_watcher.stop()

    list_routes(dst_project)
Example #32
    def activate_ssh(self, public_key):
        """
        sets the public key path and copies it to the SD card
        :param public_key: the public key location
        :return: True if successful
        """

        # set the keypath
        self.keypath = public_key
        if debug:
            print(self.keypath)
        if not os.path.isfile(self.keypath):
            ERROR("key does not exist", self.keypath)
            sys.exit()

        if dry_run:
            print("DRY RUN - skipping:")
            print("Activate ssh authorized_keys pkey:{}".format(public_key))
            return
        elif interactive:
            if not yesno("About to write ssh config. Please confirm:"):
                return

        # activate ssh by creating an empty ssh file in the boot drive
        pathlib.Path(self.filename("/ssh")).touch()
        # Write the content of the ssh rsa to the authorized_keys file
        key = pathlib.Path(public_key).read_text()
        ssh_dir = self.filename("/home/pi/.ssh")
        print(ssh_dir)
        if not os.path.isdir(ssh_dir):
            os.makedirs(ssh_dir)
        auth_keys = ssh_dir / "authorized_keys"
        auth_keys.write_text(key)

        # We need to fix the permissions on the .ssh folder but it is hard to
        # get this working from a host OS because the host OS must have a user
        # and group with the same pid and gid as the raspberry pi OS. On the PI
        # the pi uid and gid are both 1000.

        # All of the following do not work on OS X:
        # execute("chown 1000:1000 {ssh_dir}".format(ssh_dir=ssh_dir))
        # shutil.chown(ssh_dir, user=1000, group=1000)
        # shutil.chown(ssh_dir, user=1000, group=1000)
        # execute("sudo chown 1000:1000 {ssh_dir}".format(ssh_dir=ssh_dir))

        # Changing the modification attributes does work, but we can just handle
        # this the same way as the previous chown issue for consistency.
        # os.chmod(ssh_dir, 0o700)
        # os.chmod(auth_keys, 0o600)

        # /etc/rc.local runs at boot with root permissions - since the file
        # already exists modifying it shouldn't change ownership or permissions
        # so it should run correctly. One lingering question is: should we clean
        # this up later?

        new_lines = textwrap.dedent('''
                    # FIX298-START: Fix permissions for .ssh directory 
                    if [ -d "/home/pi/.ssh" ]; then
                        chown pi:pi /home/pi/.ssh
                        chmod 700 /home/pi/.ssh
                        if [ -f "/home/pi/.ssh/authorized_keys" ]; then
                            chown pi:pi /home/pi/.ssh/authorized_keys
                            chmod 600 /home/pi/.ssh/authorized_keys
                        fi
                    fi
                    # FIX298-END
                    ''')
        rc_local = self.filename("/etc/rc.local")
        new_rc_local = ""
        already_updated = False
        with rc_local.open() as f:
            for line in f:
                if "FIX298" in line:
                    already_updated = True
                    break
                if line == "exit 0\n":
                    new_rc_local += new_lines
                    new_rc_local += line
                else:
                    new_rc_local += line
        if not already_updated:
            with rc_local.open("w") as f:
                f.write(new_rc_local)
        self.disable_password_ssh()
Example #33
def create_model(parser, context, args):

	parser.add_argument('--source-encoder', type=str, metavar='FILE',
		default=None, required=True,
		help='load source vocabulary ')

	parser.add_argument('--target-encoder', type=str, metavar='FILE',
		default=None, required=True,
		help='load target vocabulary')

	parser.add_argument('--source-tokenizer', type=str,
		choices=('word', 'space', 'char', 'bpe'), default='word',
		help='Type of preprocessing for the source text')

	parser.add_argument('--target-tokenizer', type=str,
		choices=('word', 'space', 'char', 'bpe'), default='char',
		help='Type of preprocessing for the target text')

	parser.add_argument('--alpha', type=float, default=0.01, metavar='X',
		help='Length penalty weight during beam translation')

	parser.add_argument('--beta', type=float, default=0.4, metavar='X',
		help='Coverage penalty weight during beam translation')

	parser.add_argument('--gamma', type=float, default=1.0, metavar='X',
		help='Over attention penalty weight during beam translation')

	parser.add_argument('--decoder-gate', type=str,
		choices=('lstm', 'context'), default='lstm',
		help='Type of decoder gate (lstm or context)')

	parser.add_argument('--len-smooth', type=float, default=5.0, metavar='X',
		help='Smoothing constant for length penalty during beam translation')

	parser.add_argument('--word-embedding-dims', type=int, metavar='N',
		default=256, 
		help='Size of word embeddings')

	parser.add_argument('--target-embedding-dims', type=int, metavar='N',
		default=None, 
		help='Size of target embeddings (default: size of input word or char embedding)')

	parser.add_argument('--char-embedding-dims', type=int, metavar='N',
		default=64, 
		help='Size of character embeddings')

	parser.add_argument('--dropout', type=float, metavar='FRACTION',
		default=0.0, 
		help='Use dropout for non-recurrent connections with the given factor')

	parser.add_argument('--encoder-state-dims', type=int, metavar='N',
		default=256, 
		help='Size of encoder state')

	parser.add_argument('--decoder-state-dims', type=int, metavar='N',
		default=512, 
		help='Size of decoder state')

	parser.add_argument('--attention-dims', type=int, metavar='N',
		default=256, 
		help='Size of attention vectors')

	parser.add_argument('--alignment-loss', type=float, metavar='X',
		default=0.0, 
		help='Alignment cross-entropy contribution to loss function (DEPRECATED)')

	parser.add_argument('--alignment-decay', type=float, metavar='X',
		default=0.9999, 
		help='Decay factor of alignment cross-entropy contribution (DEPRECATED)')

	parser.add_argument('--layer-normalization', action='store_true',
		help='Use layer normalization')

	parser.add_argument('--recurrent-dropout', type=float, metavar='FRACTION',
		default=0.0, 
		help='Use dropout for recurrent connections with the given factor')
	
	parser.add_argument('--source-lowercase', action='store_true',
		help='Convert source text to lowercase before processing')

	parser.add_argument('--target-lowercase', action='store_true',
		help='Convert target text to lowercase before processing')

	parser.add_argument('--backwards', action='store_true',
		help='Reverse the order (token level) of all input data')

	parser.add_argument('--save-model', type=str, metavar='FILE',
		default=None, required=True,
		help='Output Model file')

	args = parser.parse_args(args)

	log.info('Loading Source language encoder')
	with open(args.source_encoder, 'rb') as f:
		args.source_encoder = pickle.load(f)

	log.info('Loading Target language encoder')
	with open(args.target_encoder, 'rb') as f:
		args.target_encoder = pickle.load(f)

	if args.target_embedding_dims is None:
		args.target_embedding_dims = (
			args.char_embedding_dims
			if args.target_tokenizer == 'char'
			else args.word_embedding_dims)

	log.info('Configuring model')
	config = {
		'ts_train': 0, 											# total training time in seconds
		'tn_epoch': 0, 											# total number of epochs
		'source_encoder': args.source_encoder,					#
		'target_encoder': args.target_encoder,					#
		'source_lowercase': args.source_lowercase,				# False
		'source_tokenizer': args.source_tokenizer,				# word
		'target_lowercase': args.target_lowercase,				# False
		'target_tokenizer': args.target_tokenizer,				# char
		'source_embedding_dims': args.word_embedding_dims,		# 256
		'source_char_embedding_dims': args.char_embedding_dims,	# 64
		'target_embedding_dims': args.target_embedding_dims,	# resolved above if None
		'char_embeddings_dropout': args.dropout,				# 0.0
		'embeddings_dropout': args.dropout,						# 0.0
		'recurrent_dropout': args.recurrent_dropout,			# 0.0
		'dropout': args.dropout,								# 0.0
		'encoder_state_dims': args.encoder_state_dims,			# 256
		'decoder_state_dims': args.decoder_state_dims,			# 512
		'attention_dims': args.attention_dims,					# 256
		'layernorm': args.layer_normalization,					# False
		'alignment_loss': args.alignment_loss,					# 0.0
		'alignment_decay': args.alignment_decay,				# 0.9999
		'backwards': args.backwards,							# False
		'decoder_gate': args.decoder_gate,						# lstm
		'alpha': args.alpha,									# 0.01
		'beta': args.beta,										# 0.4
		'gamma': args.gamma,									# 1.0
		'len_smooth': args.len_smooth,							# 5.0
		'encoder_layernorm': 'ba2' if args.layer_normalization else False,
		'decoder_layernorm': 'ba2' if args.layer_normalization else False
	}

	if not config['source_encoder'].sub_encoder:
		log.warning('Source encoder is not hybrid')

	log.info('Checking existence')
	if os.path.isfile(args.save_model):
		if not yesno('Model %s exists, replace?' % os.path.basename(args.save_model), default='yes'):
			args.save_model = prompt('New model name:')

	log.info('Creating model')
	model = NMT('nmt', config)
	
	log.info('Saving %s' % os.path.basename(args.save_model))
	with open(args.save_model, 'wb') as f:
		pickle.dump(config, f)
		model.save(f)

	log.info('Model Saved')
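
The existence check above is a common prompter idiom: yesno gates the destructive overwrite, and prompt collects a replacement name when the user declines. A stripped-down sketch of that guard (the helper name is ours, not the repo's):

import os

from prompter import prompt, yesno

def resolve_save_path(path):
    # Ask before clobbering an existing file; on "no", ask for a new name.
    if os.path.isfile(path):
        if not yesno('%s exists, replace?' % os.path.basename(path), default='yes'):
            path = prompt('New model name:')
    return path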
Ejemplo n.º 34
0
    def create(self,
               image,
               names,
               key,
               ips,
               ssid=None,
               psk=None,
               domain=None,
               bootdrive=None,
               rootdrive=None):
        """
        creates a repeated burn on all names specified,
        TODO: why is this not part of the previous class?

        :param ips: TODO
        :param domain: TODO
        :param image: TODO
        :param names: the hostnames, in hostlist format, to be burned
        :param key: the public key location # TODO: should be defaulted
                    to ~/.ssh/id_rsa.pub
        :param bootdrive: the boot drive # BUG: on linux we do not have a
                          boot drive, so this should not be a parameter and
                          needs to be autodiscovered
        :param rootdrive: # BUG: on linux we do not have a boot drive, so this
               should not be a parameter and needs to be autodiscovered
        :param ssid: # TODO: should be set to None and if its None we do not do it
                     # we actually do not need wifi, should be handled differently
        :param psk: # TODO: should be set to None and if its None we do not do
                    # it we actually do not need wifi, should be handled differently
        :return:
        """
        """
        TODO: The following commented-out code retrieves the
        USB drive name (the boot drive):
        DRIVE_TYPES = {
        0 : "Unknown",
        1 : "No Root Directory",
        2 : "Removable Disk",
        3 : "Local Disk",
        4 : "Network Drive",
        5 : "Compact Disc",
        6 : "RAM Disk"
        }

        c = wmi.WMI ()
        for drive in c.Win32_LogicalDisk ():
            print(drive.Caption, DRIVE_TYPES[drive.DriveType])
        
        """
        hosts = hostlist.expand_hostlist(names)
        iplist = hostlist.expand_hostlist(ips)
        # BUG: can we not discover the boot and root drives? Why do we have to
        # set them to I and G; can we not get the next free drive letter?
        # This only seems to be needed on Windows.
        # bootdrive = find_next_free_drive_letter()
        # rootdrive = find_next_free_drive_letter()
        # BUG: are the drives released after use?
        print(bootdrive)
        print(rootdrive)
        if bootdrive:
            self.set_boot_drive(bootdrive)
        if rootdrive:
            self.set_root_drive(rootdrive)
        device = self.detect_device()
        if domain is not None:
            self.domain = domain
        for host, ip in zip(hosts, iplist):
            print("Start Time - {currenttime}".format(
                currenttime=datetime.datetime.now()))
            print(columns * '-')
            print("Burning", host)
            # break
            print(columns * '-')
            if not yesno('Please insert the card for ' + host +
                         " and wait until it is recognized by the system." +
                         "\nReady to continue?"):
                break

            print("Beginning to burn image {image} to {device}".format(
                image=image, device=device))
            self.burn(image, device)
            # Sleep for 5 seconds to give the SD card time to be mounted
            # TODO: on OS X we can eject the card ourselves:
            # diskutil eject /dev/{device}
            if not yesno('Please eject the SD card and re-insert.'
                         '\nReady to continue?'):
                break
            time.sleep(5)
            self.set_ip(ip)
            print("Set IP - {ip}".format(ip=ip))
            self.write_hostname(host)
            print("Updating host - {name}".format(name=host))

            print("ssid - {id}".format(id=ssid))
            print("psk - {pwd}".format(pwd=psk))
            if ssid:
                self.configure_wifi(ssid, psk)
                print("Updating wifi")

            self.activate_ssh(key)
            print("Updating ssh")

            self.configure_static_ip()
            print("Updating Network - Static IP")

            self.unmount(device)
            print("Removed drive")

            print("Please remove the card for host", host)
            if not yesno("Ready to continue?"):
                break

            print("take the card out")
            print("End Time - {currenttime}".format(
                currenttime=datetime.datetime.now()))
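
Every pause point in the burn loop relies on yesno returning a falsy value for "no", so "if not yesno(...): break" lets the operator abort cleanly between cards. A minimal sketch of the same loop gate (the hostnames are illustrative):

from prompter import yesno

for host in ['red01', 'red02']:
    if not yesno('Please insert the card for ' + host + '. Ready to continue?'):
        break  # operator aborted; stop burning further cards
    print('burning image for', host)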
Ejemplo n.º 35
0
#!/usr/bin/env python3
# coding: utf-8

from subprocess import getoutput

from prompter import yesno

# List matching processes, excluding the grep process itself
psAEFdump = getoutput("ps -aef | grep " + input("Enter search string: ") + " | grep -v grep")
print(psAEFdump)

if yesno("Kill them all?"):
    for line in psAEFdump.split('\n'):
        if line.strip():
            getoutput("kill -9 " + line.split()[1])
Ejemplo n.º 36
0
from unittest.mock import patch
from nose.tools import assert_equals
from prompter import yesno

def yesno_checker(default, value, expected_result):
    with patch('prompter.get_input', return_value=value):
        returned_value = yesno('Does this work?', default=default)
        assert_equals(returned_value, expected_result)
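
A hedged usage sketch for the checker above, written as a nose-style generator test; the truth table assumes prompter's usual semantics, where an empty reply falls back to the default answer:

def test_yesno():
    for default, value, expected in [
        ('yes', '',  True),   # empty input takes the default
        ('yes', 'n', False),
        ('no',  '',  False),
        ('no',  'y', True),
    ]:
        yield yesno_checker, default, value, expected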
Ejemplo n.º 37
0
def deploy_to_project(
    dst_project,
    no_confirm,
    secrets_local_dir,
    sets,
    all_services,
    secrets_src_project,
    env_values,
    env_files,
    template_dir,
    ignore_requires,
    scale_resources,
    root_custom_dir,
    pick,
    label,
    skip,
    watch,
    concurrent,
    threadpool_size,
):
    root_custom_dir = get_dir(root_custom_dir,
                              "custom",
                              "custom scripts",
                              optional=True)

    if not dst_project:
        log.error("Error: no destination project given")
        sys.exit(1)

    verify_label(label)

    if secrets_local_dir:
        SecretImporter.local_dir = get_dir(secrets_local_dir,
                                           "secrets",
                                           "secrets",
                                           optional=True)
    if secrets_src_project:
        SecretImporter.source_project = secrets_src_project

    template_dir, env_config_handler, specific_components, sets_selected, confirm_msg = _parse_args(
        template_dir, env_values, env_files, all_services, sets, pick,
        dst_project)

    if not no_confirm and not prompter.yesno(confirm_msg):
        log.info("Aborted by user")
        sys.exit(0)

    switch_to_project(dst_project)

    if watch:
        event_watcher = start_event_watcher(dst_project)

    DeployRunner(
        template_dir,
        dst_project,
        env_config_handler,
        ignore_requires=ignore_requires,
        service_sets_selected=sets_selected,
        resources_scale_factor=scale_resources,
        root_custom_dir=root_custom_dir,
        specific_components=specific_components,
        label=label,
        skip=skip.split(",") if skip else None,
        dry_run=False,
        concurrent=concurrent,
        threadpool_size=threadpool_size,
    ).run()

    if watch and event_watcher:
        event_watcher.stop()

    list_routes(dst_project)
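
The no_confirm flag short-circuits the prompt, which is what keeps a yesno-guarded CLI usable from scripts and CI. A minimal sketch of that gate, assuming the same exit conventions as above:

import sys

import prompter

def confirm_or_exit(message, no_confirm=False):
    # Skip the interactive prompt entirely when running non-interactively.
    if not no_confirm and not prompter.yesno(message):
        print('Aborted by user')
        sys.exit(0)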
Ejemplo n.º 38
0
# Create the CSV from the selected data

with open(valueForShotgun, "w") as scoreFile:                                           # open the CSV
    scoreFileWriter = csv.writer(scoreFile, lineterminator='\n', dialect='excel')       # lineterminator avoids blank lines between rows
    scoreFileWriter.writerow(docTemplateHeader)                                         # write the header template first
    for row in listNoHeader:
        rowNumItems = len(row)
        rowCut = row[4:]                                                                # discard the leading columns; start the row at the ID
        taskBriefStatus = row[0]
        if taskBriefStatus == "BRIEFED":                                                # check the task is ready to be copied to Shotgun
            if rowNumItems != 0:                                                        # make sure the row is not empty
                scoreFileWriter.writerow(rowCut)

os.startfile(valueForShotgun)                                                           # open the CSV with the content to copy to Shotgun

###################################################################
# Update the Gdoc with the SHOTGUN status
userInput = yesno('Do you want to update your Gdoc %s Brief Status from BRIEFED to SHOTGUN?' % sheetUser)
if userInput:
    searchReplace(separatorHeader + 1, rowNumber)

time.sleep(100)