Example #1
    def _test_appending(self, modified_input, expected_output):
        project = ProjectInDirectory(self.tmpdir)

        module_path = putfile(
            project.path, "module.py",
            read_data("appending_test_cases_module_initial.py"))
        test_module_path = putfile(
            project.path, "test_module.py",
            read_data("appending_test_cases_output_initial.py"))

        # Analyze the project with an existing test module.
        inspect_project(project)

        # Filesystem stat has a resolution of 1 second, and we don't want to
        # sleep in a test, so we just fake the original files' creation times.
        project["module"].created = 0
        project["test_module"].created = 0

        # Modify the application module and analyze it again.
        putfile(project.path, "module.py", read_data(modified_input))
        inspect_project(project)

        # Regenerate the tests.
        add_tests_to_project(project, [module_path], 'unittest')
        project.save()

        assert_length(project.get_modules(), 2)
        result = read_file_contents(test_module_path)
        expected_result = read_data(expected_output)
        assert_equal_strings(expected_result, result)
Example #2
    def _test_appending(self, modified_input, expected_output):
        project = ProjectInDirectory(self.tmpdir)

        module_path = putfile(project.path, "module.py", read_data("appending_test_cases_module_initial.py"))
        test_module_path = putfile(project.path, "test_module.py", read_data("appending_test_cases_output_initial.py"))

        # Analyze the project with an existing test module.
        inspect_project(project)

        # Filesystem stat has a resolution of 1 second, and we don't want to
        # sleep in a test, so we just fake the original files' creation times.
        project["module"].created = 0
        project["test_module"].created = 0

        # Modify the application module and analyze it again.
        putfile(project.path, "module.py", read_data(modified_input))
        inspect_project(project)

        # Regenerate the tests.
        add_tests_to_project(project, [module_path], 'unittest')
        project.save()

        assert_length(project.get_modules(), 2)
        result = read_file_contents(test_module_path)
        expected_result = read_data(expected_output)
        assert_equal_strings(expected_result, result)
Example #3
    def test_generates_test_stubs(self):
        expected_result = read_data("static_analysis_output.py")
        project = ProjectInDirectory(self.tmpdir)
        module_path = putfile(project.path, "module.py", read_data("static_analysis_module.py"))

        inspect_project(project)
        add_tests_to_project(project, [module_path], 'unittest')
        result = get_test_module_contents(project)

        assert_equal_strings(expected_result, result)
Example #4
    def execute_with_point_of_entry_and_assert(self, id):
        expected_result = read_data("%s_output.py" % id)
        project = ProjectInDirectory(self.tmpdir).with_points_of_entry(["poe.py"])
        module_path = putfile(project.path, "module.py", read_data("%s_module.py" % id))
        write_content_to_file(read_data("generic_acceptance_poe.py"), project.path_for_point_of_entry("poe.py"))

        inspect_project(project)
        add_tests_to_project(project, [module_path], 'unittest')
        result = get_test_module_contents(project)

        assert_equal_strings(expected_result, result)
Example #5
    def test_generates_test_stubs(self):
        expected_result = read_data("static_analysis_output.py")
        project = ProjectInDirectory(self.tmpdir)
        module_path = putfile(project.path, "module.py",
                              read_data("static_analysis_module.py"))

        inspect_project(project)
        add_tests_to_project(project, [module_path], 'unittest')
        result = get_test_module_contents(project)

        assert_equal_strings(expected_result, result)
Example #6
    def execute_with_point_of_entry_and_assert(self, id):
        expected_result = read_data("%s_output.py" % id)
        project = ProjectInDirectory(self.tmpdir).with_points_of_entry(
            ["poe.py"])
        module_path = putfile(project.path, "module.py",
                              read_data("%s_module.py" % id))
        write_content_to_file(read_data("generic_acceptance_poe.py"),
                              project.path_for_point_of_entry("poe.py"))

        inspect_project(project)
        add_tests_to_project(project, [module_path], 'unittest')
        result = get_test_module_contents(project)

        assert_equal_strings(expected_result, result)
Example #7
def prepareData(file_path, n, K):
    uic_lists = helper.read_data(file_path)
    print('read 5000 users done...')
    print('clustering users...This might take a minute...')
    cu = ClusterUsers(uic_lists, n, K)
    (centroids, groups) = cu.cluster()
    return (uic_lists, centroids, groups)
Example #8
def pre_pos():
    training_data = helper.read_data('../asset/test_words.txt')
    a = []
    for i in training_data:
        temp = word(i)
        a.append(temp)
    return a
Example #9
def request_evaluation(attackDetails):
    #Load the response system
    responseSystem = read_data('data/response_system.json')
    #Request for evaluation from the server
    client.sock.sendall(
        serialize('request_evaluation',
                  attackDetails + '!.!' + responseSystem[attackDetails]))
    #Wait for the ACK from server
    client.RESPONSE_ACK.wait()
Example #10
def split():
    line = helper.read_data("./asset/training_data.txt")

    train_file = open("./asset/train.txt", 'w+')
    test_file = open("./asset/test.txt", 'w+')
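    # random.randint(0, 9) is falsy only when it returns 0, so roughly 10% of
    # the lines end up in the test file and the rest in the training file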
    for i in line:
        if random.randint(0, 9):
            train_file.write(i + '\n')
        else:
            test_file.write(i + '\n')
Example #11
    def fit(self, sess):
        saver = tf.train.Saver(max_to_keep=None)
        check_dir = Helper.get_checkpoint_dir(self.p.name)

        if self.p.restore:
            save_path = os.path.join(check_dir, f"epoch_{self.p.restore_epoch}")
            saver.restore(sess, save_path)

        if not self.p.onlyTest:
            print("Start fitting")
            validation_data = Helper.read_data(self.p.valid_data, self.p.granularity)

            for epoch in range(self.p.max_epochs):
                loss = self.run_epoch(sess, self.data)

                if epoch % 50 == 0:
                    print(f"Epoch {epoch}\t Loss {loss}\t model {self.p.name}")

                if epoch % self.p.test_freq == 0 and epoch != 0:
                    ## -- check pointing -- ##
                    save_path = os.path.join(check_dir, f"epoch_{epoch}")
                    saver.save(sess=sess, save_path=save_path)

                    print("Validation started")
                    if self.p.mode == "temporal":
                        Pred.temp_test_against(self, sess, validation_data, "valid", epoch)
                    elif self.p.mode == "entity":
                        Pred.test_link_against(self, sess, validation_data, "valid", epoch)
                    else:
                        raise ValueError

                    print("Validation ended")
        else:
            print("Testing started")
            test_data = Helper.read_data(self.p.test_data, self.p.granularity)
            if self.p.mode == "temporal":
                Pred.temp_test_against(self, sess, test_data, "test", self.p.restore_epoch)
            elif self.p.mode == "entity":
                Pred.test_link_against(self, sess, test_data, "test", self.p.restore_epoch)
            else:
                raise ValueError
            print("Testing ended")
Example #12
def main():
    (data_set, n_neurons, iterations, k, plot_k, neighborhood, learning_rate,
     radius) = get_input()
    cities = read_data(data_set)
    scaling, cities = normalize(cities)

    neuron_count = len(cities) * n_neurons
    neurons = init_neurons(neuron_count)

    # TODO maybe distance param?
    som(neurons, cities, iterations, k, plot_k, neighborhood, learning_rate,
        radius, scaling)
Example #13
def convert():
    raw_data = helper.read_data('./asset/test.txt')
    result = []
    file = open("result.txt", "w+b")
    for i in raw_data:
        counter = 0
        for char in i:
            if char in '012':
                counter += 1
                if char == '1':
                    result.append(counter)
                    break
    pickle.dump(result, file)
Example #14
def response_system(attackDetails):
    #Under attack state is being processed, clear it
    UNDER_ATTACK.clear()
    if not specifications['is_collaborative']:
        #After detecting the attack -> request evaluation from the server
        evaluationThread = threading.Thread(
            target=request_evaluation, args=(attackDetails,))
        evaluationThread.start()
        #Wait for the thread to finish
        evaluationThread.join()
        #Clear the response ack
        client.RESPONSE_ACK.clear()
        #Load the response system
        responseSystem = read_data('data/response_system.json')
        responseTechnique = responseSystem[attackDetails]
        techniquePath = responseTechniquesData[
            responseTechnique]  # The path for the technique script
        running_response_technique(attackDetails, responseTechnique,
                                   techniquePath)
    else:
        #Assign car to busy and save it locally
        specifications['is_busy'] = True
        save_data('data/specifications.json', specifications)
        #Load the response system
        responseSystem = read_data('data/response_system.json')
        responseTechnique = responseSystem[attackDetails]
        #Run the response technique applied by the response system
        print('Under {} attack! Running {}!'.format(attackDetails,
                                                    responseTechnique))
        techniquePath = responseTechniquesData[
            responseTechnique]  # The path for the technique script
        #Generate a new thread and run the response technique script
        responseThread = threading.Thread(
            target=running_response_technique,
            args=(attackDetails, responseTechnique, techniquePath))
        responseThread.start()
        #Wait for the thread to finish
        responseThread.join()
        response_performed(attackDetails, responseTechnique, techniquePath)
Example #15
def response_performed(attackDetails, responseTechnique, techniquePath):
    #Send a message to server letting it know that the response technique is performed
    client.sock.send(
        serialize('ack', '{}!.!{}'.format('response performed',
                                          responseTechnique)))
    #The client waits for the server to finish the effectiveness test attack
    print('Waiting for the server to perform the effectiveness attack')
    client.FINISH_TESTING_EFFECTIVENESS.wait()
    #If the attack flag is still set, the response technique failed to stop the attack
    print('Server done with effectiveness attack')
    isEffective = not UNDER_ATTACK.is_set()
    #Clear flags
    UNDER_ATTACK.clear()
    client.FINISH_TESTING_EFFECTIVENESS.clear()
    #Load the saved file
    print('Loading the log file')
    logFile = read_data('data/logs/{}/{}.json'.format(attackDetails,
                                                      responseTechnique))
    #Add effective status to the json file
    logFile['is_effective'] = isEffective
    #Send the log so we don't keep the server waiting for log file, in case this was the last technique to be assessed
    print('Sending the log file')
    client.sock.send(
        send_log(
            'log', "{}!.!{}!.!{}!.!{}".format(
                specifications['software_version'],
                specifications['hardware_specifications'], attackDetails,
                responseTechnique), logFile))
    #After sending the log, delete the log file
    try:
        os.remove('data/logs/{}/{}.json'.format(attackDetails,
                                                responseTechnique))
    except:
        print("Error while deleting file: ", logFile)

    #Revert changes done when the response technique applied
    print('Running revert operation')
    subprocess.run(shlex.split("{} {}".format(techniquePath, "revert")))
    print('Revert operation successful')
    #After getting back to the original state, change busy status to false
    specifications['is_busy'] = False
    #Save changes
    save_data('data/specifications.json', specifications)
    #Inform the server that the car is no longer busy
    print(
        'Sending busy status to server. Setting it to false (the car is ready for more work)'
    )
    client.sock.send(
        serialize('ack', '{}!.!{}'.format('client busy status', 'false')))
Example #16
def main():

    # args = sys.argv[1:]
    #
    # if not len(args):
    #     sys.exit()
    #
    # array = [*map(cast_to_numeric, args)]

    if len(sys.argv) < 2:
        sys.exit()
    file_name = sys.argv[1]

    array = read_data(file_name)

    output = selection_sort(array)
    print(' '.join(str(elem) for elem in output))
Example #17
def evaluate_techniques(self, responseTechniques):
    print("Evaluating techniques...")
    #If one technique is not evaluated, then start the evaluation process
    isEvaluationRequired = False
    for technique in responseTechniques:
        if not responseTechniques[technique]['is_evaluated']:
            isEvaluationRequired = True
            break
    #Otherwise, no need for evaluation
    if not isEvaluationRequired : return
    #Update response techniques database
    import glob
    src = "data/logs/{}/{}/{}/".format(self.softwareVersion, self.hardwareSpecifications, self.attackDetails)
    files = glob.glob('{}/*'.format(src), recursive=False)
    # Loop through files
    for single_file in files:
        json_file = read_data(single_file)
        fileName = os.path.basename(single_file)
        techniqueName = fileName.replace('.json', '')
        isEffective = json_file['is_effective']
        responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][techniqueName]['is_effective'] = isEffective

    #Add every effective technique and its duration    
    effectiveTechniques = {}
    for technique in responseTechniques:
        if responseTechniques[technique]['is_effective']: 
            effectiveTechniques[technique] = responseTechniques[technique]['duration']
    #Get the lowest duration technique
    bestResponseTechnique = min(effectiveTechniques, key=effectiveTechniques.get)
    print("Effective techniques are: ",  effectiveTechniques)
    print("Best effective response technique is {}.".format(bestResponseTechnique))
    #Modify as necessary
    responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][bestResponseTechnique]['is_most_efficient'] = True
    responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][bestResponseTechnique]['is_evaluated'] = True
    for technique in responseTechniques:
        if not technique == bestResponseTechnique:
            responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][technique]['is_most_efficient'] = False
            responseTechniquesData[self.softwareVersion][self.hardwareSpecifications][self.attackDetails][technique]['is_evaluated'] = True

    #Save changes to the json file
    save_data('data/response_techniques_database.json', responseTechniquesData)
Example #18
def check_undergoing_assessment():
    #First off, check if the car status is busy to see if there are unsent log files
    if specifications['is_busy']:
        print('The car is in busy status! Checking previous logs...')
        #If still busy, then look for an un-sent log to the server. This happens when the client reboots, for example
        #Get folder names (attack names, basically)
        attackList = os.listdir('data/logs/')
        #Loop through each subdirectory (attack) and send the response log
        import glob
        for attackName in attackList:
            src = "data/logs/{}/*.json".format(attackName)
            files = glob.glob(src, recursive=False)
            # Loop through files
            for single_file in files:
                json_file = read_data(single_file)
                fileName = os.path.basename(single_file)
                techniqueName = fileName.replace('.json', '')
                #Continue after performing the response
                print('running response performed method now!')
                response_performed(attackName, techniqueName, single_file)
                break
Example #19
def main():

    # Read in data
    os.chdir(
        "/Users/MichaelChoie/Desktop/Data Science/deep-learning/embeddings")
    text = helper.read_data()

    # Process data
    words = helper.preprocess(text)
    vocab_to_int, int_to_vocab = helper.create_lookup_table(words)
    int_words = [vocab_to_int[word] for word in words]
    train_words = helper.subsampling(int_words)

    # Create directory for model checkpoints
    if not os.path.exists("checkpoints"):
        os.makedirs("checkpoints")

    # Build computational graph
    embed_mat = build_graph(train_graph, int_to_vocab, train_words)

    # Visualize network
    visualize(embed_mat)
Example #20
import numpy as np
import torch
from pyro.optim import Adam

from helper import read_data, get_train_test_seqs
import model

sequence_length = 50
engines_eval = [1, 2]
cuda = True
ftype = torch.cuda.FloatTensor if cuda else torch.FloatTensor
ltype = torch.cuda.LongTensor if cuda else torch.LongTensor

sensor_cols = ['s' + str(i) for i in range(1, 22)]
sequence_cols = ['cycle', 'setting1', 'setting2', 'setting3', 'cycle_norm']
sequence_cols.extend(sensor_cols)

train_df, test_df = read_data()
trainX, trainY, valX, valY, testX, testY = get_train_test_seqs(
    train_df, test_df, sequence_length)

engines = []
for engine_id in engines_eval:
    engines.append([])
    train_one_eng = train_df[train_df.id == engine_id]
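    # collect every prefix of this engine's sequence columns (lengths 0 .. n-1)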
    for i in range(train_one_eng.shape[0]):
        engines[-1].append(train_one_eng[sequence_cols].values[:i])

sensor_cols = ['s' + str(i) for i in range(1, 22)]
sequence_cols = ['cycle', 'setting1', 'setting2', 'setting3', 'cycle_norm']
sequence_cols.extend(sensor_cols)

trainX = np.vstack([trainX, valX])
Example #21
## import modules here
import helper
import submission
from sklearn.metrics import f1_score

from sklearn import model_selection
import matplotlib.pyplot as plt

training_data = helper.read_data('./asset/training_data.txt')
classifier_path = './asset/classifier.dat'
Y = submission.train(training_data, classifier_path)

test_str1 = './asset/tiny_test.txt'
test_str2 = './asset/testing_data1.txt'
test_data = helper.read_data(test_str1)
prediction = submission.test(test_data, classifier_path)

ground_truth = [1,1,2,1]
print(f1_score(ground_truth, prediction, average='micro'))

#k_range = range(1, 15)
#k_scores = []
#for k in k_range:
#    knn = KNeighborsClassifier(n_neighbors=k)
#    scores = model_selection.cross_val_score(knn, X, Y, cv=5, scoring='accuracy')
#    print("k=",k," scores are", scores)
#    k_scores.append(scores.mean())

#plt.plot(k_range, k_scores)
#plt.xlabel('Value of K for KNN')
#plt.ylabel('Cross-Validated Accuracy')
Example #22
import helper
import submission

training_data = helper.read_data('./asset/training_data.txt')
test_data = helper.read_data('./asset/test_words.txt')
classifier_path = './asset/Logistic.dat'
submission.train(training_data, classifier_path, multi=True, DEBUG=True)
submission.test(test_data, classifier_path, DEBUG=True)
Example #23
def draw(data):
    result = []
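    # i[-1][0] stores the index of the target element; collect its predecessor
    # and successor (or -1 at either boundary)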
    for i in data:
        if i[-1][0] == 0:
            pre = -1
        else:
            pre = i[i[-1][0] -1]
        if i[-1][0] == len(i) - 1:
            suff = -1
        else:
            suff = i[i[-1][0] +1]
        result.append((pre, suff))
    return result

ab = helper.read_data("./asset/training_data.txt")
b, vasual = pre_process(ab)
seprate_point = math.ceil(len(b)*0.8)
tran_data = b[:seprate_point]
test_data = b[seprate_point:]

# visual_data = {}
# for i in tran_data:
#     if i[-1][1] in visual_data:
#         visual_data[i[-1][1]].append(i)
#     else:
#         visual_data[i[-1][1]] = [i]
#
# for i in visual_data:
#     data = draw(i)
#     data_fram = pd.DataFrame(data,index=['pre', 'next'])
Example #24
import helper
import submission
from sklearn.metrics import f1_score

if __name__ == '__main__':
    training_data = helper.read_data('./asset/training_data.txt')
    classifier_path = './asset/classifier.dat'
    submission.train(training_data, classifier_path)

    test_data = helper.read_data('./asset/tiny_test.txt')
    prediction = submission.test(test_data, classifier_path)
    print(prediction)

    ground_truth = [1, 1, 2, 1]
    print(f1_score(ground_truth, prediction, average='micro'))
Example #25
def popular_artists():
    df = pd.read_csv('sentiment_data/filtered_grand_df.csv')
    df_normalized = read_data(df)
    chart_1 = sentiment_plot(df_normalized, 0.15)
    script_1, div_1 = components(chart_1)
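    # mean polarity per release year, used for the polarity_plot chart below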
    polarity = df.groupby('release_year')[['polarity']].mean().reset_index()
    polarity['text'] = round(polarity['polarity'], 2)
    chart_2 = polarity_plot(polarity, 0.1, 0.45)
    script_2, div_2 = components(chart_2)
    df_2000, df_2001, df_2002, df_2003, df_2004, df_2005, df_2006, df_2007, df_2008, df_2009, df_2010, df_2011, df_2012, df_2013, df_2014, df_2015, df_2016, df_2017, df_2018, df_2019 = topics_per_year(
    )
    artists = df.groupby('primary_artist')[[
        'anger', 'anticipation', 'disgust', 'fear', 'joy', 'positive',
        'negative', 'sadness', 'surprise', 'trust'
    ]].mean()
    artists = artists.reset_index()
    clusters = cluster_data(artists)
    clusters['artist'] = artists['primary_artist']
    chart_3 = artists_cluster(clusters)
    script_3, div_3 = components(chart_3)
    return render_template('popular_artists.html',
                           the_script_1=script_1,
                           the_div_1=div_1,
                           songs_no=df.shape[0],
                           the_script_2=script_2,
                           the_div_2=div_2,
                           topic_1=df_2000['words'][0],
                           topic_2=df_2000['words'][1],
                           topic_3=df_2000['words'][2],
                           topic_4=df_2001['words'][0],
                           topic_5=df_2001['words'][1],
                           topic_6=df_2001['words'][2],
                           topic_7=df_2002['words'][0],
                           topic_8=df_2002['words'][1],
                           topic_9=df_2002['words'][2],
                           topic_10=df_2003['words'][0],
                           topic_11=df_2003['words'][1],
                           topic_12=df_2003['words'][2],
                           topic_13=df_2004['words'][0],
                           topic_14=df_2004['words'][1],
                           topic_15=df_2004['words'][2],
                           topic_16=df_2005['words'][0],
                           topic_17=df_2005['words'][1],
                           topic_18=df_2005['words'][2],
                           topic_19=df_2006['words'][0],
                           topic_20=df_2006['words'][1],
                           topic_21=df_2006['words'][2],
                           topic_22=df_2007['words'][0],
                           topic_23=df_2007['words'][1],
                           topic_24=df_2007['words'][2],
                           topic_25=df_2008['words'][0],
                           topic_26=df_2008['words'][1],
                           topic_27=df_2008['words'][2],
                           topic_28=df_2009['words'][0],
                           topic_29=df_2009['words'][1],
                           topic_30=df_2009['words'][2],
                           topic_31=df_2010['words'][0],
                           topic_32=df_2010['words'][1],
                           topic_33=df_2010['words'][2],
                           topic_34=df_2011['words'][0],
                           topic_35=df_2011['words'][1],
                           topic_36=df_2011['words'][2],
                           topic_37=df_2012['words'][0],
                           topic_38=df_2012['words'][1],
                           topic_39=df_2012['words'][2],
                           topic_40=df_2013['words'][0],
                           topic_41=df_2013['words'][1],
                           topic_42=df_2013['words'][2],
                           topic_43=df_2014['words'][0],
                           topic_44=df_2014['words'][1],
                           topic_45=df_2014['words'][2],
                           topic_46=df_2015['words'][0],
                           topic_47=df_2015['words'][1],
                           topic_48=df_2015['words'][2],
                           topic_49=df_2016['words'][0],
                           topic_50=df_2016['words'][1],
                           topic_51=df_2016['words'][2],
                           topic_52=df_2017['words'][0],
                           topic_53=df_2017['words'][1],
                           topic_54=df_2017['words'][2],
                           topic_55=df_2018['words'][0],
                           topic_56=df_2018['words'][1],
                           topic_57=df_2018['words'][2],
                           topic_58=df_2019['words'][0],
                           topic_59=df_2019['words'][1],
                           topic_60=df_2019['words'][2],
                           the_script_3=script_3,
                           the_div_3=div_3)
Example #26
def compare_artists():
    try:
        artist_1 = flask.request.args['name_1']
        artist_2 = flask.request.args['name_2']
        stripped_artist_1 = artist_1.lower().replace(' ', '').replace(
            '&', '').replace('é', 'e')
        stripped_artist_2 = artist_2.lower().replace(' ', '').replace(
            '&', '').replace('é', 'e')
        df_1 = pd.read_csv(f'sentiment_data/{stripped_artist_1}.csv')
        df_2 = pd.read_csv(f'sentiment_data/{stripped_artist_2}.csv')
        normalized_df_1 = read_data(df_1)[[
            'anger', 'positive', 'negative', 'anticipation', 'disgust', 'fear',
            'joy', 'sadness', 'surprise', 'trust'
        ]].mean()
        normalized_df_2 = read_data(df_2)[[
            'anger', 'positive', 'negative', 'anticipation', 'disgust', 'fear',
            'joy', 'sadness', 'surprise', 'trust'
        ]].mean()
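        # signed percentage difference between the two artists for each emotion,
        # relative to artist 1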
        anger = (normalized_df_1['anger'] -
                 normalized_df_2['anger']) / normalized_df_1['anger'] * 100
        anticipation = (normalized_df_1['anticipation'] -
                        normalized_df_2['anticipation']
                        ) / normalized_df_1['anticipation'] * 100
        disgust = (normalized_df_1['disgust'] - normalized_df_2['disgust']
                   ) / normalized_df_1['disgust'] * 100
        fear = (normalized_df_1['fear'] -
                normalized_df_2['fear']) / normalized_df_1['fear'] * 100
        joy = (normalized_df_1['joy'] -
               normalized_df_2['joy']) / normalized_df_1['joy'] * 100
        sadness = (normalized_df_1['sadness'] - normalized_df_2['sadness']
                   ) / normalized_df_1['sadness'] * 100
        surprise = (normalized_df_1['surprise'] - normalized_df_2['surprise']
                    ) / normalized_df_1['surprise'] * 100
        trust = (normalized_df_1['trust'] -
                 normalized_df_2['trust']) / normalized_df_1['trust'] * 100
        positivity = (normalized_df_1['positive'] - normalized_df_2['positive']
                      ) / normalized_df_1['positive'] * 100
        topics_1 = pd.read_csv(f'topics_data/{stripped_artist_1}.csv',
                               index_col=0)
        topics_2 = pd.read_csv(f'topics_data/{stripped_artist_2}.csv',
                               index_col=0)
        most_frequent_words_1 = most_frequent_words(df_1)
        most_frequent_words_2 = most_frequent_words(df_2)
        return render_template('compare_artists.html',
                               artist_1=artist_1.upper(),
                               artist_2=artist_2.upper(),
                               artist_1_proper=artist_1,
                               artist_2_proper=artist_2,
                               positivity=round(positivity, 2),
                               anger=round(anger, 2),
                               anticipation=round(anticipation, 2),
                               disgust=round(disgust, 2),
                               fear=round(fear, 2),
                               joy=round(joy, 2),
                               sadness=round(sadness, 2),
                               surprise=round(surprise, 2),
                               trust=round(trust, 2),
                               topic_1=topics_1['words'][0],
                               topic_2=topics_1['words'][1],
                               topic_3=topics_1['words'][3],
                               topic_4=topics_2['words'][0],
                               topic_5=topics_2['words'][1],
                               topic_6=topics_2['words'][2],
                               freq_word_1=most_frequent_words_1[0],
                               freq_word_2=most_frequent_words_1[1],
                               freq_word_3=most_frequent_words_1[2],
                               freq_word_4=most_frequent_words_1[3],
                               freq_word_5=most_frequent_words_1[4],
                               freq_word_6=most_frequent_words_2[0],
                               freq_word_7=most_frequent_words_2[1],
                               freq_word_8=most_frequent_words_2[2],
                               freq_word_9=most_frequent_words_2[3],
                               freq_word_10=most_frequent_words_2[4])
    except:
        return render_template('inventory_error.html')
Example #27
    dataset = 'ShapeNet'
    category = 'airplane'
    mode = 'test'
    root = '/disk1/yicheng/ShapeNet'
    dump_root = '/disk1/yicheng/' + dataset + '_csk/'

    if dataset not in ['ModelNet10', 'ShapeNet', 'dfaust', 'faces', 'sunrgbd']:
        raise Exception('dataset error.')

    if dataset == 'ShapeNet':
        data_dir = root + mode + '_data_npy/' + category + '/'
        list_el = glob.glob(os.path.join(data_dir, '*.npy'))
    elif dataset == 'ModelNet10':
        data_dir = root + category + '/' + mode + '/'
        list_el = glob.glob(os.path.join(data_dir, '*.off'))

    dump_dir = dump_root + mode + '_data_npy/' + category + '/'

    if not os.path.exists(dump_dir):
        os.makedirs(dump_dir)

    for i, name in enumerate(list_el):
        pc = read_data(name, dataset)
        pc = normalize_data(pc)

        file_name = Path(name).stem
        np.save(os.path.join(dump_dir, file_name + '.npy'), pc)

    print('done!')
Example #28
def artist():
    try:
        artist = flask.request.args['name']
        stripped_artist = artist.replace(' ',
                                         '').replace('&',
                                                     '').replace('é', 'e')
        df = pd.read_csv(f'sentiment_data/{stripped_artist}.csv', index_col=0)
        df = df.drop_duplicates()
        polarity = round(df.polarity.mean(), 2)
        df['release_date'] = pd.to_datetime(df['release_date'],
                                            errors='coerce')
        df['release_year'] = df['release_date'].dt.year
        polarity_graph = df.groupby('release_year')[['polarity'
                                                     ]].mean().reset_index()
        polarity_graph['text'] = round(polarity_graph['polarity'], 2)
        chart_4 = polarity_plot(polarity_graph, -1.25, +2)
        normalized_df = read_data(df)
        chart_1 = sentiment_plot(normalized_df, 0.4)
        ts = total_sentiments(df)
        anger = round(float(ts.anger) * 100, 2)
        anticipation = round(float(ts.anticipation) * 100, 2)
        sadness = round(float(ts.sadness) * 100, 2)
        joy = round(float(ts.joy) * 100, 2)
        surprise = round(float(ts.surprise) * 100, 2)
        trust = round(float(ts.trust) * 100, 2)
        fear = round(float(ts.fear) * 100, 2)
        disgust = round(float(ts.disgust) * 100, 2)
        clusters = cluster_data(df)
        clusters['title'] = df['title']
        clusters['album'] = df['album']
        clusters = clusters.replace('None', 'Unknown')
        chart_2 = cluster_plot(clusters)
        albums = albums_data(df)
        chart_3 = view_albums(albums)
        script_1, div_1 = components(chart_1)
        script_2, div_2 = components(chart_2)
        script_3, div_3 = components(chart_3)
        script_4, div_4 = components(chart_4)
        image = f'static/images/word_clouds/{stripped_artist}.png'
        data_url = f'/artist_data?name={stripped_artist}'
        topics = pd.read_csv(f'topics_data/{stripped_artist}.csv', index_col=0)
        return render_template('artist.html',
                               the_script_1=script_1,
                               the_div_1=div_1,
                               the_script_2=script_2,
                               the_div_2=div_2,
                               polarity=polarity,
                               artist=artist.title(),
                               image=image,
                               the_script_3=script_3,
                               the_div_3=div_3,
                               data_url=data_url,
                               topic_1=topics['words'][0],
                               topic_2=topics['words'][1],
                               topic_3=topics['words'][2],
                               topic_4=topics['words'][3],
                               topic_5=topics['words'][4],
                               the_script_4=script_4,
                               the_div_4=div_4,
                               anger=anger,
                               anticipation=anticipation,
                               fear=fear,
                               joy=joy,
                               sadness=sadness,
                               trust=trust,
                               surprise=surprise,
                               disgust=disgust)
    except:
        return render_template('inventory_error.html')
Example #29
def update_and_attack(onRoadCar, attackDetails, responseTechnique):
    #Check for non busy collaborative cars with the same software version and hardware specs
    print("Checking for non-busy collaborative cars")
    car = find_car(onRoadCar)
    print("Car ID: {} will start assessing: {} now!".format(car.vin, responseTechnique))
    #Event to wait for an ACK message regarding the update
    car.UPDATE_ACK = threading.Event()
    print("Sending update...")
    print("-------------------------")
    #Send update with the response technique to use
    car.socket.send(serialize('update', attackDetails + " " + responseTechnique))
    #Wait for the car to ACK
    car.UPDATE_ACK.wait()
    #Clear the ACK flag
    car.UPDATE_ACK.clear()
    print("Update applied successfully!")
    print("-------------------------")

    #Create and add the task into the work list
    task = WorkTask(car, attackDetails, responseTechnique)
    workList.append(task)
    print("Simulating {} attack on car: {} using technique: {}".format(attackDetails, car.vin, responseTechnique))
    print("*************************")

    #Start the attack simulation process
    attackData = read_data('data/attack_simulations.json')
    attackPath = attackData[attackDetails]
    #Run the attack script
    print('car address is: {}'.format(car.address[0]))
    print(type(car.address[0]))
    print('attack path is: {}'.format(attackPath))
    subprocess.run(shlex.split("{} {}".format(attackPath, car.address[0])))
    
    #After initiating the attack, measure the duration
    task.startTime = time.time()
    print("start: ", task.startTime)
    
    #Wait for effectiveness ACK from client showing that the client has performed the response technique and waiting for the effectiveness test
    task.EFFECTIVENESS_ACK.wait()
    #Mark the elapsed time
    task.elapsedTime = time.time() - task.startTime

    #Re-initiate the attack simulation again to check for effectiveness
    #Keep in mind that the car might get disconnected, so rely on the VIN instead
    #Run the attack script
    print('Re-initiating the attack to test for effectiveness...')
    subprocess.run(shlex.split("{} {}".format(attackPath, task.car.address[0])))

    #After running the attack script, send a message to let the client know that the server has finished attacking
    task.car.socket.send(serialize('ack', 'finish testing effectiveness'))
    #Wait for ACK on receiving the log file
    print("Waiting for the log file...")
    task.LOG_ACK.wait()
    #Update the technique's data
    responseTechniquesData[task.car.softwareVersion][task.car.hardwareSpecifications][attackDetails][responseTechnique]['is_assessed'] = True
    responseTechniquesData[task.car.softwareVersion][task.car.hardwareSpecifications][attackDetails][responseTechnique]['duration'] = task.elapsedTime
    #Remove task from the work list
    workList.remove(task)
    #Check if this is the last task
    if not workList:
        ASSESSMENT_FLAG.set()
Example #30
"""
This module uses cross-validation to check the F1 score of our classifier
in order to pick a better classifier for the prediction problem.
"""

import helper
from submission import *
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score

if __name__ == '__main__':
    # First read in the training set and build the feature matrix and the corresponding label array.
    # Then use train_test_split to split it into training and test parts (this step only needs the
    # feature matrix and label array; the original data strings are no longer involved).
    raw_data = helper.read_data('./asset/training_data.txt')

    # Extract the desired feature matrix (features) and the corresponding label array (labels) here.
    features, labels = training_preprocess(raw_data)

    clf = get_selected_classifier()

    # Mode selection: enter 'y' to average over multiple cross-validation runs and reduce error;
    # any other input skips the averaging to shorten the running time.
    print("Do you want to test for multiple times? y/n [default:n]")
    # choice = input()
    choice = 'y'

    if choice != 'y':
        x_train, x_test, y_train, y_test = train_test_split(features,
                                                            labels,
Example #31
import maxSum as ms
import helper as hp


for i in range(1, 4):

    print('test ' + str(i) + ':')

    matrix, m, n = hp.read_data('../data/test' + str(i) + '.txt')
    print('finish reading test data')

    T = ms.maxSum(matrix, m, n)
    print('finish computing T')

    a, b, score = ms.findMaxScore(T, m, n)
    print('finish finding max score')

    stack = ms.backTrack(T, matrix, a, b)
    print('finish backTrack')

    output = hp.write_data(stack, score, '../data/test' + str(i) + 'grp12.txt')
    print('finish writing output')

Example #32
    tgt_results = []
    pbar = ProgressBar()
    for i in pbar(range(len(orig_data))):
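        # take a window of up to batch_size sentences on either side of index i,
        # clamped to the bounds of the candidate data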
        en_point = i - batch_size
        de_point = i + batch_size
        if en_point < 0:
            en_point = 0
        if de_point > len(simi_src_data) - 1:
            de_point = len(simi_src_data) - 1
        max_bleu, bleu[0], bleu[1] = get_max_bleu(
            orig_data[i], simi_src_data[en_point:de_point])
        tgt_results.append(' '.join(
            simi_tar_data[(en_point + bleu[0]):(en_point + bleu[1])]))

    print_result(orig_data, tgt_results)

    return tgt_results


helper.log_w("Reading data. Please wait...")
orig_data = helper.read_data(ORIG_FILE)
simi_src_data = helper.read_data(SIMI_FILE_SRC_LANG)
simi_tar_data = helper.read_data(SIMI_FILE_TAR_LANG)

helper.log_w("Compute similarity...")
out_tgt_data = similarize(orig_data, simi_src_data, simi_tar_data, 10)

helper.log_w("Writing new data...")
helper.write_data(DEST_FILE, out_tgt_data)

helper.log_w("Done.")
Example #33
from helper import parse_args, read_data
import numpy as np


def predict(X, W):
    return np.sign(np.sum(np.multiply(X, W)))


if __name__ == '__main__':
    input, output = parse_args()

    # read training data
    X, Y = read_data(input)

    n, d = X.shape  # number of training examples and features

    # initialize weights with 0
    W = np.zeros(d)

    result = np.empty(shape=(0, d), dtype=int)

    # repeat until convergence
    while True:
        converged = True
        for i in range(n):
            x, y = X[i], Y[i]
            y_pred = predict(x, W)

            if y * y_pred <= 0:  # misclassified?
                converged = False
                for j in range(d):
                    # standard perceptron update for a misclassified example
                    W[j] += y * x[j]

        if converged:
            break
Example #34
import helper as h
import numpy as np

data = h.read_data()
print(data)

def output_layer_fp(Input, W, b):
    # W shape: (1, input_size)
    # b shape: (1, 1)

    output = np.matmul(Input, W.T) + b
    return h.sigmoid(output)

def hidden_layer_fp(Input, W, b):
    # W shape: (hidden_size, feature_size)
    # b shape: (1, hidden_size)

    # Task 1: Perform sum(xi*wi) + b for every input value
    output = np.matmul(Input, W.T) + b  # weighted sum of the inputs plus bias
    return np.tanh(output)  # tanh activation from numpy

# Task 2: Prepare the network parameters
X = data.as_matrix(['x1', 'x2'])
hidden_size = 10

# One possible choice: small random weights and zero biases
W1 = np.random.randn(hidden_size, X.shape[1]) * 0.01  # (hidden_size, feature_size) weights
b1 = np.zeros((1, hidden_size))

W2 = np.random.randn(1, hidden_size) * 0.01  # (1, hidden_size) weights
b2 = np.zeros((1, 1))
Example #35
def apply_update(attackDetails, responseTechnique):
    print('Update received! Applying: {} for: {}'.format(responseTechnique, attackDetails))
    #Read and update the response_system file
    responseSystem = read_data('data/response_system.json')
    responseSystem[attackDetails] = responseTechnique
    save_data('data/response_system.json', responseSystem)