def opt_fg():
    # Check if queue.txt is usable
    log, ts, it_n, first_guess, queue_txt = fc.setup(True)

    # Get data and sample them
    one_queue, shift_queue = fc.sample_data(queue_txt, log)
    fc.print_pic_1_2(one_queue, shift_queue, queue_txt, it_n, ts, log)

    waiter, cooks = fc.allocation_fg(log, first_guess)

    fc.write_to_file(log, waiter, cooks, True)

    fc.print_3_fg(ts, log, first_guess)
    fc.print_3_im_new_all(ts, it_n, log, waiter, cooks)

    # Check for number of values file
    fc.finish(it_n, log)
Example #2
    def in_output(self):
        """Concourse resource `in` main """

        output = {
            "version": self.version,
            "metadata": self.metadata,
            "original_msg": self.original_msg
        }
        # Write response as bender.json for further use
        write_to_file(json.dumps(output),
                      '{}/bender.json'.format(self.working_dir))
        # Write template if specified
        if self.templated_string:
            write_to_file(
                self.templated_string, '{}/{}'.format(self.working_dir,
                                                      self.template_filename))

        # Print concourse output string
        print(json.dumps(output, indent=4, sort_keys=True))
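The `write_to_file` helper called above is not part of this snippet. Judging from the call sites (content first, destination path second), a minimal sketch could look like the following; the real resource's implementation may differ:

def write_to_file(content, path):
    # Sketch only: write the given string to the given path.
    # Assumes the parent directory (the Concourse working dir) already exists.
    with open(path, 'w') as f:
        f.write(content)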
def opt():
    # Set objectives
    obj_5ord = 0.7
    obj_8ord = 0.99
    obj_5kit = 0.7
    obj_8kit = 0.82

    # Set timestamp and iteration number; check if queue.txt is usable
    log, ts, it_n, exp_txt, queue_txt = fc.setup()

    # Analyse previous results
    mean_res, std_res = fc.analyse(exp_txt, log)

    # Check whether the objectives are fulfilled
    fc.obj_fulfill(mean_res, log)

    # Utilisation rates
    fc.ut_rates(mean_res, log)

    # Get data and sample them
    one_queue, shift_queue = fc.sample_data(queue_txt, log)

    # print 1_queue_Iteration and 2_shift_queue_iteration
    max_dia = fc.print_pic_1_2(one_queue, shift_queue, queue_txt, it_n, ts,
                               log)

    # Update staff allocation (1. get and allocate, 2. update queues, 3. update objectives)
    waiter, cooks = fc.allocation(log)
    waiter, cooks, co_ql = fc.upd_ql(log, it_n, waiter, cooks, shift_queue)
    waiter, cooks, co_obj = fc.upd_obj(log, it_n, waiter, cooks, shift_queue,
                                       mean_res)
    waiter, cooks, co_sm = fc.upd_sm(log, it_n, waiter, cooks, co_ql, co_obj)
    fc.print_changes(log, waiter, cooks, co_ql, co_obj, co_sm)
    print(waiter)
    print(cooks)
    # Write to files (allocation_staff.xlsx, waiter.txt, cook.txt)
    fc.write_to_file(log, waiter, cooks)

    # Produce staffing plot
    fc.print_3_im_new_all(ts, it_n, log, waiter, cooks)

    # Clean up and update the iteration number
    fc.finish(it_n, log)
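`fc.write_to_file` persists the new allocation; its body is not shown here. A minimal sketch covering only the two text files named in the comment (waiter.txt and cook.txt), assuming `waiter` and `cooks` are lists of per-shift staffing numbers, `log` is a standard logger, and the extra boolean passed in opt_fg() is a first-guess flag:

def write_to_file(log, waiter, cooks, first_guess=False):
    # Sketch only: dump the per-shift staffing lists to plain text files.
    # The allocation_staff.xlsx output mentioned above is omitted here.
    with open('waiter.txt', 'w') as f:
        f.write('\n'.join(str(w) for w in waiter))
    with open('cook.txt', 'w') as f:
        f.write('\n'.join(str(c) for c in cooks))
    log.info('Staff allocation written (first guess: %s)', first_guess)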
def username(playernum):
    # User(s) enter their desired usernames #
    instruct_num = str(leaderboard_len("data/leaderboard.json") + 1)
    if request.method == "POST":
        if not check_in_file("data/global_users.txt", request.form["username"].title()):
            # If not in global leaderboard #
            write_to_file("data/users.txt", request.form["username"].title() + "\n")
            write_to_file("data/global_users.txt", request.form["username"].title() + "\n")
            add_to_leaderboard(request.form["username"].title(), 'data/leaderboard.json')
            add_to_leaderboard_global(request.form["username"].title(), 'data/global_leaderboard.json')
            playernum = str(int(playernum) - 1)
        else:
            if not check_in_file("data/users.txt", request.form["username"].title()):
                # If in global but not in local leaderboard #
                write_to_file("data/users.txt", request.form["username"].title() + "\n")
                add_to_leaderboard(request.form["username"].title(), 'data/leaderboard.json')
                playernum = str(int(playernum) - 1)
            else:
                # If in global and local leaderboards #
                flash("Sorry Your Chosen Username Is Unavailable. Please Try Another")

        if int(playernum) == 0:
            qnumber = str(leaderboard_len("data/leaderboard.json") - 1)
            question = random_number_generator()
            return redirect(
                request.form["username"] + '/' + '1' + '/' + qnumber + '/' + question
            )
        else:
            return redirect("setup" + '/' + playernum)

    return render_template("username.html", player=instruct_num)
def test_write_to_file():
    out = StringIO()
    functions.write_to_file(["one", "two", "three", "four"], out)
    n.assert_equal(out.getvalue(), "1 one\n2 two\n3 three\n4 four\n")
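This test pins down the expected behaviour: each item goes on its own line, prefixed with its 1-based index. A sketch of a `functions.write_to_file` that satisfies it (not necessarily the original implementation):

def write_to_file(items, out):
    # Write each item on its own line, prefixed with its 1-based position.
    for i, item in enumerate(items, start=1):
        out.write("{} {}\n".format(i, item))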
Example #6
    except err.ProjectFilesError:
        txt.print_red(
            "ProjectFilesError: you cannot read or modify project files!!"
        )
    except FileNotFoundError:
        txt.print_red("File does not exist!")
    except PermissionError:
        txt.print_red(
            "You do not have permission to read this file. :/")
# if the command is 'write' then try to write to the provided filename
# and if the filename was not provided or the string was not provided,
# write an error message. If permission is denied, also write an error.
elif commands[0].lower() == "write" or commands[0].lower() == "wrt":
    try:
        if len(commands) >= 3:
            fn.write_to_file(commands)
        else:
            raise err.FieldNotProvidedError
    except err.FieldNotProvidedError:
        txt.print_red(
            "FieldNotProvidedError: Cannot execute: not enough arguments!"
        )
    except err.ProjectFilesError:
        txt.print_red(
            "ProjectFilesError: you cannot read or modify project files!!"
        )
    except PermissionError:
        txt.print_red(
            "You do not have permission to write to this file. :/")
elif commands[0].lower() == "append" or commands[0].lower() == "apd":
    try:

    def test_clear_text_file(self):
        # Test if function clears a txt file #
        write_to_file("data/users.txt", "data")
        self.assertEqual(clear_text_file("data/users.txt", 'data'), False)

    def test_write_to_file(self):
        # Test if function writes to a txt file #
        self.assertEqual(write_to_file("data/users.txt", 'Test'.title()), True)

    def test_check_in_file(self):
        # Test if function checks whether data is in a txt file #
        write_to_file("data/users.txt", 'Test'.title())
        self.assertEqual(check_in_file("data/users.txt", 'Test'.title()), True)
        self.assertEqual(check_in_file("data/users.txt", '0'), False)
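The tests above fix the helpers' contracts but not their bodies. Sketches that would satisfy the assertions, assuming the real implementations differ in detail (error handling, newline handling):

def write_to_file(path, data):
    # Sketch only: append the given text to the file and report success.
    with open(path, 'a') as f:
        f.write(data)
    return True

def check_in_file(path, value):
    # Sketch only: return True if any line of the file equals the value.
    with open(path) as f:
        return any(line.strip() == value for line in f)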
############################################
#
# Jose Marcelo Sandoval-Castaneda (jms1595)
# Artificial Intelligence, Fall 2018
# 01 Nov 2018
#
############################################

import classes
import functions

# Load graph from file input.txt.
graph = classes.Graph('input.txt')

# Write key and clauses onto key.txt and clauses.txt, respectively.
functions.write_to_file('key.txt', graph.make_key())
functions.write_to_file('clauses.txt', graph.make_clauses())
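In this project `functions.write_to_file` takes the destination filename first and the text to write second. A minimal sketch under that assumption:

def write_to_file(filename, text):
    # Sketch only: overwrite the file with the given text.
    with open(filename, 'w') as f:
        f.write(text)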
Example #11
    tic = time()

    print("Found {} job links in page {}: {}".format(len(extracted_links),
                                                     page_num, base_url))
    print("Time taken to extract page links: {}".format(tic - toc))
    print("Starting scape and writing to csv...\n")

    i = 0
    toc = time()
    for extracted_link in extracted_links:
        scraped_html = page_html(extracted_link)
        if scraped_html is not None:
            job_num = job_num + 1
            jobpage_info = jobpage_scrape(extracted_link, scraped_html)
            write_to_file(jobpage_info)
            i = i + 1
            if (job_num >= target_num):
                run = False
                print('\n')
                print("Job done! Exiting program...")
                break
            else:
                stdout.write("\rPage scrape progress: %d/ %d" %
                             (i, len(extracted_links)))
                stdout.flush()
                # stdout.write("\n") # move the cursor to the next line
        else:
            # print("Error detected in one link")
            continue
    tic = time()
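`write_to_file(jobpage_info)` is the only persistence step in the loop and its body is not shown. One way it could work, assuming `jobpage_info` is a flat sequence of field values and the output filename (`jobs.csv`) is hypothetical:

import csv

def write_to_file(jobpage_info, path='jobs.csv'):
    # Sketch only: append one scraped job record as a CSV row.
    with open(path, 'a', newline='') as f:
        csv.writer(f).writerow(jobpage_info)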
Example #12
############################################

import functions

# Load the Davis-Putnam output and the key.
dp = functions.load_dp_output('dp-output.txt')
key = functions.load_key('key.txt')

# Assign truth values to the names in the key.
output = []
for d in dp:
    for k in key:
        if d[0] == k[0]:
            output.append([k[1], d[1]])
            break

# Make a list only of the truth values to establish a path and sort it.
ans = []
for i in range(len(output)):
    if output[i][1]:
        ans.append(output[i])
ans.sort(key=lambda x: int(x[0][-1]))

# Write the path onto a string.
output_str = ''
for val in ans:
    output_str += val[0] + '\n'

# Write the string onto a file.
functions.write_to_file('output.txt', output_str)
import sys
reload(sys)  # Python 2 only: reload is needed before setdefaultencoding
sys.setdefaultencoding('utf8')
from path import add_parent_to_path
add_parent_to_path()
from functions import read_corpus, replace_compounds, reddy_ncs, pre_process, print_every, read_ncs, \
    write_to_file, get_preprocess_args
import logging
from config import logging_config
import nltk
nltk.download('punkt')
nltk.download('wordnet')

if __name__ == '__main__':
    logging.info('Reading train and evaluation ncs')
    args = get_preprocess_args()
    r_ncs, _ = reddy_ncs(args.p2ec)
    ncs = read_ncs(args.p2tc)
    ncs.extend(r_ncs)
    logging.info('Reading corpus')
    sentences = read_corpus(args.p2corp)
    lemmatizer = nltk.stem.WordNetLemmatizer()
    output = []
    logging.info('Replacing ncs in corpus')
    for i in range(0, len(sentences)):
        s = sentences[i]
        print_every(s, i + 1, 10000)
        output.append(replace_compounds(pre_process(s, lemmatizer), ncs))
    logging.info('Writing results in ' + args.p2out)
    write_to_file(output, args.p2out)
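Here `write_to_file` receives the list of processed sentences first and the output path second (it is imported from `functions` but not shown). A plausible sketch, assuming one sentence per line:

def write_to_file(lines, path):
    # Sketch only: write each processed sentence on its own line.
    with open(path, 'w') as f:
        f.write('\n'.join(lines) + '\n')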
Example #15
############################################
#
# Jose Marcelo Sandoval-Castaneda (jms1595)
# Artificial Intelligence, Fall 2018
# 01 Nov 2018
#
############################################

import functions

# Load clauses.
clauses = functions.load_clauses('clauses.txt')
# Execute Davis-Putnam.
result = functions.davis_putnam(clauses)
# Write results of Davis-Putnam onto a file.
functions.write_to_file('dp-output.txt', result)
Example #16
data_directory = "datasets/"
file_name = "DatafinitiElectronicsProductsPricingData.csv"
data = pd.read_csv(data_directory + file_name)

# limit to the first 500 items with prices.amountMin > 100, sorted by brand
df = data
df = df[['prices.amountMin', 'brand']]
df = df[(data['prices.amountMin'] > 100)]
df = df.sort_values(by='brand')
df = df.reset_index()
df = df.head(500)

count = len(df['brand'].value_counts())
print(len(df))

func.write_to_file(df, 'def', 'csv')
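`func.write_to_file(df, 'def', 'csv')` passes a DataFrame, a base filename and an extension; the helper itself is not shown. A sketch under that assumption, delegating to pandas:

def write_to_file(df, name, ext):
    # Sketch only: write the DataFrame to '<name>.<ext>'; only csv is handled here.
    if ext == 'csv':
        df.to_csv('{}.{}'.format(name, ext), index=False)
    else:
        raise ValueError('Unsupported extension: ' + ext)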


# plot graph
fig = plt.figure()
plot = fig.add_subplot(111)

"""
Single
"""

# x_points = []
# y_points = []
#
# for index, row in df.iterrows():
#     x_points.append(index)