Example #1
File: api.py Project: kjgross/songs
def song_analysis_get(id):
    """ Get the analysis for a single song endpoint """
    # Get the song from the database
    song = session.query(models.Song).get(id)

    # Check whether the song exists
    # If not return a 404 with a helpful message
    if not song:
        message = "Could not find song with id {}".format(id)
        data = json.dumps({"message": message})
        return Response(data, 404, mimetype="application/json")

    # Fetch the file row for this song by primary key instead of indexing
    # into the full file list by position, which breaks once ids are not
    # sequential
    song_file = session.query(models.File).get(song.column)
    songname = song_file.filename
    analysis = analyse(songname)
    data = json.dumps(analysis)

    return Response(data, 200, mimetype="application/json")
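
These song endpoints (here and in Examples 3, 6, and 11) all lean on the same Flask + SQLAlchemy scaffolding that the snippets omit. Below is a minimal sketch of what that context could look like; the route path and model columns are assumptions, and only the names session, models.Song, models.File, and analyse come from the snippets themselves.

# Hypothetical scaffolding assumed by the song endpoints in this listing;
# each real project defines its own app, models, and analyse().
import json
from flask import Flask, Response
from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, relationship, sessionmaker

app = Flask(__name__)
Base = declarative_base()

class File(Base):
    __tablename__ = "files"
    id = Column(Integer, primary_key=True)
    filename = Column(String, nullable=False)

class Song(Base):
    __tablename__ = "songs"
    id = Column(Integer, primary_key=True)
    file_id = Column(Integer, ForeignKey("files.id"))
    file = relationship("File")  # lets handlers write song.file.filename

session = sessionmaker(bind=create_engine("sqlite:///:memory:"))()
Base.metadata.create_all(session.get_bind())

# A handler like the one above would then be registered along these lines
# (the URL is a guess):
# @app.route("/api/songs/<int:id>/analysis", methods=["GET"])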
Example #2
def display_state(self, state, cols=None):
    """Display state data for specified or default columns"""
    # Avoid a mutable default argument; fall back to the default columns
    if not cols:
        cols = self._default_columns
    state_data = self._data[self._data["state"] == state]
    state_freq_data = [an.analyse(state_data, col) for col in cols]
    graphs.display_freq(state_freq_data, filter=f"state: {state}")
Example #3
def analyze_song(id):
    song = session.query(models.Song).get(id)
    if not song:
        data = {"message": "Could not find song with id {}".format(id)}
        return Response(json.dumps(data), 422, mimetype="application/json")
    filename = song.file.filename
    data = json.dumps(analysis.analyse(upload_path(filename)))
    return Response(data, 200, mimetype="application/json")
Example #4
def output_results(self):
    """Generate a report of the simulation"""
    rg = HtmlReportGenerator(self.args)
    add_plots(self, rg)  # change add_plots to show different plots!
    rg.variables.update(analyse(self))
    rg.generate()
    rg.save()
    print("Report saved to {0}".format(self.args["filename_out"]))
Example #6
def song_analysis(id):
    song = session.query(models.Song).get(id)
    if not song:
        data = {"message": "Could not find song with id {}".format(id)}
        return Response(json.dumps(data), 404, mimetype="application/json")

    path = upload_path(song.file.filename)
    data = analysis.analyse(path)
    return Response(json.dumps(data), 200, mimetype="application/json")
Example #7
def graph_usa(self, cols=None, show=True, export=False):
    """Graphs the specified list of columns"""
    # Avoid a mutable default argument; fall back to the default columns
    if not cols:
        cols = self._default_columns
    freq_data = [an.analyse(self._data, col) for col in cols]
    graphs.graph_freq(freq_data,
                      title="Frequency of Digits for US",
                      export=export,
                      show=show,
                      export_path="USA")
Example #8
    def graph_state(self, state, cols=None, show=True, export=False):
        """Graph state data for specified or default columns"""
        # Avoid a mutable default argument; fall back to the default columns
        if not cols:
            cols = self._default_columns

        state_data = self._data[self._data["state"] == state]
        state_freq_data = [an.analyse(state_data, col) for col in cols]
        graphs.graph_freq(state_freq_data,
                          title=f"Frequency of Digits for {state}",
                          export=export,
                          show=show,
                          export_path=state)
Example #9
def cumulative(data, path):
    cols = [' New_cases', ' Cumulative_cases', ' New_deaths', ' Cumulative_deaths']
    analysis = [an.analyse(data, col) for col in cols]
    with pd.ExcelWriter(path) as writer:
        pd.DataFrame().to_excel(writer)

    with pd.ExcelWriter(path, mode='a') as writer:
        for col, freq in analysis:
            header = pd.DataFrame()
            header["title"] = [col]
            if len(freq["digit"] > 0):
                header.to_excel(writer, index=False, sheet_name=col, startcol=0)
                freq.to_excel(writer, sheet_name=col, startcol=1)
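
The loops above unpack an.analyse(data, col) as a (col, freq) pair whose freq frame carries a "digit" column, and the related examples title their graphs "Frequency of Digits", which points at a leading-digit (Benford-style) count. Here is a sketch of an analyse with that return shape; the shape is inferred from the callers, while the implementation itself is a guess that assumes integer-valued counts.

import pandas as pd

def analyse(data, col):
    """Hypothetical sketch: leading-digit frequencies for one column,
    returned as the (col, freq) pair the callers above unpack."""
    values = pd.to_numeric(data[col], errors="coerce")
    values = values[values > 0]  # a leading digit of 0 is meaningless
    digits = values.astype(int).astype(str).str[0].astype(int)
    freq = (digits.value_counts(normalize=True)
                  .rename_axis("digit")
                  .reset_index(name="frequency")
                  .sort_values("digit"))
    return col, freq

An empty column then yields an empty freq, which is exactly what the len(freq["digit"]) > 0 guard above checks for.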
Example #10
    def __init__(self, block_sizeGB=1, swap=False, pool_dtype='float32'):

        # Default RAM based data array:
        self.data = data_array(dtype='float32',
                               block_sizeGB=block_sizeGB,
                               swap=swap,
                               pool_dtype=pool_dtype)

        # Common classes for the volume and for the projections:
        self.io = io(self)
        self.display = display(self)
        self.analyse = analyse(self)

        # Specific to the projection data or the volume data:
        self.process = None
        self.meta = None
Example #11
def song_analyze(id):
    # check whether a song with the given id exists
    song = session.query(models.Song).get(id)
    if not song:
        message = "Could not find song with id {}".format(id)
        data = json.dumps({"message": message})
        return Response(data, 404, mimetype="application/json")
    # get the file record for the song from the database
    song_file = session.query(models.File).filter_by(id=song.file_id).first()
    # build the path to the uploaded file with upload_path()
    song_file_path = upload_path(song_file.filename)
    # call the analyse function, passing in the path of the uploaded file
    file_analysis = analysis.analyse(song_file_path)
    data = json.dumps(file_analysis)
    # return the results of the analysis as a JSON object
    return Response(data, 200, mimetype="application/json")
Example #13
def translate(sentence, model, log=None):
    tokens = tokenize(sentence)
    if log:
        print(' tokens = {}\n'.format(tokens), file=log)

    analysis = analyse(tokens, model)

    if log:
        print(' analysis = {}\n'.format(analysis), file=log)

    chuncks = chuncker(analysis[0])

    if log:
        print(' chuncks = {}\n'.format(chuncks), file=log)

    translation = to_arabic(chuncks)

    if log:
        print(' translation = {}\n'.format(translation), file=log)

    return translation
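
The pipeline here is tokenize → analyse → chuncker → to_arabic, with optional tracing of each stage. A hypothetical call, assuming the project supplies a trained model object:

# Hypothetical usage; `model` comes from wherever the project trains or loads it.
import sys

translation = translate("the weather is nice", model, log=sys.stderr)
print(translation)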
Example #14
def indiv_countries(data, path):
    country_col = " Country_code"
    countries = data[country_col].unique()
    cols = [' New_cases', ' Cumulative_cases', ' New_deaths', ' Cumulative_deaths']
    with pd.ExcelWriter(path) as writer:
        pd.DataFrame().to_excel(writer)

    progress = 0
    for country in countries:
        country_data = data[data[country_col] == country]

        analysis = [an.analyse(country_data, col) for col in cols]

        i = 0
        with pd.ExcelWriter(path, mode='a') as writer:
            for col, freq in analysis:
                header = pd.DataFrame()
                header["title"] = [col]
                if len(freq["digit"] > 0):
                    header.to_excel(writer, index=False, sheet_name=country, startcol=0, startrow=i)
                    freq.to_excel(writer, sheet_name=country, startcol=1, startrow=i)
                i += 10
        progress += 1
        print(f"Progress: {( progress / len(countries) * 100 ):.2f}%\t\tCountries: {progress} / {len(countries)}")
Example #15
def task(quarter, k):
    """
    quarter : pandas.DataFrame
    k : int
    """
    quarter_normalized = pandas.DataFrame(
        minmax_scale(quarter.values.astype(numpy.float64)),
        columns=quarter.columns)
    quarter_normalized = reweight(quarter_normalized)
    points = quarter_normalized.values
    kdtree = KDTree(points)
    dist, ind = kdtree.query(points, k=k + 1)
    dist = dist[:, 1:]
    ind = ind[:, 1:]

    results = []
    for main_i, indices in enumerate(ind):
        indices = numpy.append([main_i], indices)
        knearest = pandas.DataFrame(quarter.values[indices],
                                    columns=quarter.columns)

        results.append(analyse(knearest))

    return results
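
Note that kdtree.query(points, k=k + 1) returns every point as its own nearest neighbour at distance zero, which is why the first column of dist and ind is sliced off; each knearest frame then holds a row plus its k nearest neighbours in the original, un-normalized units. A hypothetical driver under those assumptions follows; reweight and analyse belong to the project, so trivial stand-ins are used here, and the KDTree is assumed to be scikit-learn's:

import numpy
import pandas
from sklearn.neighbors import KDTree
from sklearn.preprocessing import minmax_scale

def reweight(df):
    # stand-in: the real project rescales selected columns
    return df

def analyse(knearest):
    # stand-in: the real project computes statistics over the neighbourhood
    return knearest.mean()

quarter = pandas.DataFrame(numpy.random.rand(50, 3), columns=list("abc"))
results = task(quarter, k=5)  # one entry per row: the row plus its 5 neighbours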
Example #16
def main(Scraper):
    #    Scraper = Scraper()
    #    reddit = Scraper.make_reddit_obj()
    #    Scraper.scrape("emojipasta", limit = 1000)
    analyse(posts_dir=r"../posts/", results_dir="../results/", chunk_size=None)
    return
Example #17
def run(main_folder, tstart, teind, weight=10, growth=2, shrink=2):
    """
    Main function that controls the circle analysis and passes the results to
    the analysis file.
    """
    import time

    t1 = time.time()
    directory = f"{main_folder}_analysed"
    path = os.path.join(main_folder, directory)
    os.makedirs(path, exist_ok=True)
    output_name = f"{main_folder}_results.xlsx"
    output_path = os.path.join(path, output_name)

    import xlsxwriter

    export = xlsxwriter.Workbook(output_path)
    bold = export.add_format({"bold": True})
    red = export.add_format({"font_color": "red"})
    export_sheet = export.add_worksheet("Surface area")
    export_sheet_2 = export.add_worksheet("Slope")
    column = 0

    import importlib

    import analysis

    # imp is deprecated (removed in Python 3.12); use importlib instead
    importlib.reload(analysis)
    print("reloaded analysis")

    for i, sub_folder in enumerate(os.listdir(main_folder)):  # open main folder
        img_folder = f"Circles_{sub_folder}"
        img_path = os.path.join(path, img_folder)
        os.makedirs(img_path, exist_ok=True)
        route = os.path.join(main_folder, sub_folder)
        results = files(route, img_path)

        _, stepsize = np.linspace(tstart[i], teind[i], len(results), retstep=True)
        x = [
            e - stepsize
            for e in np.linspace(tstart[i], teind[i] + stepsize, len(results))
        ]
        if x[0] != tstart[i]:
            x = [
                e - 2 * stepsize
                for e in np.linspace(tstart[i], teind[i] + 2 * stepsize, len(results))
            ]

        export_sheet.write(column + 2, 0, "Removed data points [hour]", bold)
        #        for ind, e in enumerate(removed):
        #            export_sheet.write(column+2, ind+1, np.around(x[e-1], decimals=1), red)

        analysis.analyse(
            results,
            export,
            export_sheet,
            export_sheet_2,
            column,
            sub_folder,
            weight,
            x,
            path,
        )
        column += 4
    export.close()
    t2 = time.time()
    dt = t2 - t1
    print("Deze analyse duurde totaal %.3f minuten" % (dt / 60))
Example #18
XXX, yyy = import_wine_review(nbr_class=5, subset_size=10000)
X_train, X_test, y_train, y_test = train_test_split(XXX,
                                                    yyy,
                                                    test_size=0.15,
                                                    random_state=0)
fold = 10
print("----------Done loading dataset----------")
#endregion

#region[purple] DECISION TREE
#%%
clf = tree.DecisionTreeClassifier(criterion='entropy',
                                  max_leaf_nodes=200,
                                  max_depth=10)

clf = analyse(clf, fold, XXX, yyy, X_train, y_train, X_test, y_test)

# print("The depth is : {}".format(clf.get_depth()))
# print("The number of leaves is : {}".format(clf.get_n_leaves()))

# dot_data = io.StringIO()
# tree.export_graphviz(clf, out_file=dot_data, filled=True)
# graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
# graph.write_pdf("DT_WQ_PRUNED.pdf")

#%%
if verbose:
    print("\n\n----------Maximum depth----------")
max_depths = [1, 2, 3, 4, 5, 6, 7, 8, 10, 12, 16, 20, 25, 30]

acc_min = []
acc_avg = []
Example #19
def main():
    """The brains of the bot
    Finds all the races
    """
    betfairClient = bfclient.BfClient()
    eventResponse = None
    race_list = None
    cardGenTime = 0
    login_timer = 0

    while True:
        login_rsp = auth.login(betfairClient)
        # Generate the list of available races. Limit the result to 100
        (race_list, cardGenTime) = gen_race_list(cardGenTime, race_list, betfairClient)
        # get the exchangeId, marketId for the next race and race_queue
        # refactor - turn into class, return object.
        (exchangeId, marketId, race_queue, race_list) = get_next_race(race_list)
        # determine race status, proceed if it is active.
        market_price = qwertymarket.getMarketPricesCompressed(
            betfairClient, exchangeId, marketId)
        market_status = qwertymarket.marketStatus(market_price)
        if market_status == 'CLOSED':
            logger.debug('Oops market is closed, finding the next race/market')
            time.sleep(2)
            continue
        elif market_status == 'SUSPENDED':
            logger.debug('Market is now suspended, finding the next race/market')
            time.sleep(5)
            continue
        # Great, the next race is active, let's find out when it starts
        market_info = qwertymarket.getMarketInfo(
            betfairClient, exchangeId, marketId)
        TTL = qwertymarket.timeTillRaceStarts(market_info)
        logger.info('Time till live: %s', TTL)
        if TTL > 240:  # if the race starts more than 4 minutes from now
            handle_long_ttl(cardGenTime, race_list, market_info)
        elif market_price.marketPrices.delay > 0:
            logger.info("-------------Market is in-play---------------")
            time.sleep(10)
            continue
        # nested while True: we don't want to change races while one is active
        while True:
            try:
                collectData.collectData(betfairClient, exchangeId,
                                        marketId)
                timeId = collectData.Datastore[marketId]['timeId']
                IDgap = collectData.Datastore[marketId]['IDgap']
                if IDgap == 0:
                    time.sleep(2)
                    logger.debug('IDgap is zero')
                    continue #don't run anything else
            except IndexError:
                if handle_IndexError(betfairClient, exchangeId, marketId):
                    continue
                else:
                    break
            except ZeroDivisionError:
                logger.warning('Zero Division Exception caught.')
                break
            except bfpy.bferror.BfNetworkError:
                logger.error("Network timed out...retrying...")
                continue
            except Exception as e:
                logger.exception('Collecting data at timeId: %s', timeId)
                raise e
            try:
                play.inPlay(betfairClient, exchangeId, marketId, timeId)
                play.pastPlay(betfairClient, exchangeId, marketId, timeId)
            except play.InPlayClause as e:
                logger.debug('In Play is greater than 30 seconds, look for the next race')
                break
            except Exception as e:
                logger.exception('Running inPlay or pastPlay')
                raise e
            try:
                for fillInId in range(timeId-IDgap+1, timeId+1):
                    for selectionId in collectData.Datastore[marketId]['runnerKeys']:
                        if fillInId != timeId:
                            collectData.fillInMissingData(
                                marketId, selectionId, fillInId, timeId)
                        if fillInId >= 30:
                            selection.calculateTPS(marketId, selectionId, fillInId)
            except IndexError:
                if handle_IndexError(betfairClient, exchangeId, marketId):
                    continue
                else:
                    break
            except ZeroDivisionError:
                logger.warning('Zero Division Exception caught.')
                break
            except Exception as e:
                logger.exception('Filling missing data or calculating TPS at timeId: %s', timeId)
                raise e
            try:
                if timeId > 1:
                    selection.selectionCriteria(marketId, timeId)
                for selectionId in collectData.Datastore[marketId]['selectedRunnerIds']:
                    if 'max30BP' not in collectData.Datastore[marketId][selectionId][0].keys():
                        for backCalc in range(0, timeId - IDgap+1):
                            analysis.analyse(marketId, selectionId, backCalc)
                            if backCalc == timeId - IDgap:
                                logger.info('Successful back analysis of %s to timeId %s',
                                            collectData.Datastore[marketId][selectionId]['HorseName'],
                                            backCalc)
                                collectData.Datastore[marketId][selectionId]['backCalc'] = backCalc
                    for fillInId in range(timeId-IDgap+1, timeId+1):
                        analysis.analyse(marketId, selectionId, fillInId)
            except IndexError:
                if handle_IndexError(betfairClient, exchangeId, marketId):
                    continue
                else:
                    break
            except ZeroDivisionError:
                logger.warning('Zero Division Exception caught.')
                break
            except Exception as e:
                logger.exception('Error: Running Analysis or selecting horses at timeId: %s',
                                 timeId)
                logger.exception("timeId: %s", timeId)
                raise e
Example #20
def display_usa(self, cols=None):
    """Display USA data for specified or default columns"""
    # Avoid a mutable default argument; fall back to the default columns
    if not cols:
        cols = self._default_columns
    freq_data = [an.analyse(self._data, col) for col in cols]
    graphs.display_freq(freq_data)
Example #21
        if print_trees:
            print(source, file=log_out)
            print(target, file=log_out)
        use, auto_ptb, auto_schema = (False, None, None)
        if 'method' in args:
            method_name = args.split('method=')[1].split()[0]
            ans = methods[method_name](source, sys.argv, log_out)
            use, auto_ptb, auto_schema = ans
        else:
            ans = trivial.convert(source, sys.argv, log_out)
            use, auto_ptb, auto_schema = ans

        if not use:
            print("Not being included", file=log_out)
        if auto_schema is not None:
            analysis.analyse(source, target, auto_ptb, auto_schema,
                             analysis_out)
        if tree_out is not None:
            if use:
                print(target.one_line_repr(), file=gold_out)
                print(auto_ptb.one_line_repr(), file=tree_out)
            elif not only_parsed:
                print(target.one_line_repr(), file=gold_out)
                print(file=tree_out)

        if print_trees:
            print(auto_ptb, file=log_out)
            if colour_out is not None:
                print(source, file=colour_out)
                print(auto_ptb.repr_with_corrections(target), file=colour_out)

        scores = score_count(target, auto_ptb)
Example #22
def submit():

    global color
    global bar_color

    depression_mark = 0

    name = request.form["name"]
    day = request.form["day"]
    previousDay = request.form["previousDay"]
    time = request.form["time"]
    future = request.form["future"]

    answers = [name, day, previousDay, time, future]
    answers = [x.strip() for x in answers if x.strip()]
    print(answers)

    with open("./answers.txt", "w") as answers_file:
        answers_file.write("\n".join(answers))

    absolute_self_centrism, rel_self_centrism, rel_swear_counter, \
            rel_absolute_counter, final_afinn_score, sentiment = analyse("answers.txt")

    first_person = "You referenced to yourself " + str(
        rel_self_centrism * 100) + "% of time. "
    if rel_self_centrism > 0.05:
        first_person += "So, you are quite focused on yourself."
        depression_mark += 1
    else:
        first_person += "Good news: that corresponds to the average use of first person pronouns."

    absolute = str(round(rel_absolute_counter * 100,
                         2)) + "% of words were absolute. "
    # Threshold taken from Al-Mosaiwi (2018)
    if rel_absolute_counter > 0.012:
        absolute = absolute + "You seem to interpret your experiences in a very black or white kind of way. " \
                              "It is possible that you see all your experiences as either perfect or terrible. "
        depression_mark += 1
    else:
        absolute = absolute + "Good news: that corresponds to the average use of absolute words."

    swearSentence = str(round(rel_swear_counter * 100,
                              2)) + "% of words were swear words. "
    if rel_swear_counter > 0.002:
        swearSentence += "You are cursing much more than an average person. " \
                        "Perhaps you are in a negative state of mind or angry at something."
        depression_mark += 1
    else:
        swearSentence += "Good news: you are cursing not very much."

    if final_afinn_score <= 0:
        afinnString = "The afinn score is " + str(final_afinn_score) + \
                      ". That means that you seem to be thinking very negatively."
        depression_mark += 1
    else:
        afinnString = "The afinn score is " + str(final_afinn_score) + \
                      ". You're seeing your experiences in a neutral to positive view."

    SentimentString = "The sentiment analysis score is " + str(sentiment)
    if sentiment > 0.5:
        SentimentString += ". Good news: you are thinking rather positively."
    else:
        SentimentString += ". That means you are thinking rather negatively."
        depression_mark += 1

    mark = "Your overall depression mark is " + str(depression_mark)
    depr_sentence = "The greater is your overall depression mark, " \
                    "the more likely you have depression symptoms. " \
                    "0 means you have none. 5 means most probably you should go to the therapist."

    session['data'] = [
        final_afinn_score, sentiment, rel_swear_counter, rel_absolute_counter,
        rel_self_centrism, depression_mark
    ]

    relation = depression_mark / 5 * 100

    if depression_mark >= 3:
        color = "#726E6D"
        bar_color = "#8C001A"
    else:
        color = "#6AFB92"
        bar_color = "#FA69D1"

    return render_template("results.html",
                           sentimen_analysis=SentimentString,
                           absolute=absolute,
                           afinn=afinnString,
                           swear=swearSentence,
                           first_person_pronouns=first_person,
                           speaker_name=name,
                           color=color,
                           depression_mark=depression_mark,
                           relation=relation,
                           bar_color=bar_color)
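
analyse("answers.txt") is unpacked into six values at the top of submit(), so whatever the project's implementation does, it has to return, in order: an absolute first-person count, the relative first-person, swear-word, and absolute-word rates, an AFINN score, and a 0-1 sentiment value. A stand-in with that contract, handy for exercising the route without the real text analysis; the word list and the constant scores are placeholders:

def analyse(path):
    """Hypothetical stand-in matching the six-value contract used above."""
    with open(path) as f:
        words = f.read().split()
    n = max(len(words), 1)
    first_person = sum(w.lower() in {"i", "me", "my", "myself"} for w in words)
    # The real rates and scores come from the project's word lists and the
    # AFINN lexicon; fixed placeholder values are returned here.
    return first_person, first_person / n, 0.0, 0.0, 0, 0.5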
Example #23
X_clust = np.concatenate((X_clust, labs), axis=1)
#endregion



#%% region[green] EXPECTATION MAXIMIZATION
km = KMeans(n_clusters=10, random_state=0).fit(X_clust)
labs = np.array([km.labels_]).transpose()
scaler = preprocessing.StandardScaler()
labs = scaler.fit_transform(labs)
X_clust = np.concatenate((X_clust, labs), axis=1)
#endregion



# %% region[black] NEURAL NETWORK
X_clust_train, X_clust_test, y_clust_train, y_clust_test = train_test_split(X_clust, yyy, test_size=0.1, random_state=0)
network = (12,12,12)
print("For the network {}".format(network))
nn = MLPClassifier(hidden_layer_sizes=network, activation='relu', verbose=False, max_iter=4000)
nn = analyse(nn, fold, XXX, yyy, X_clust_train, y_clust_train, X_clust_test, y_clust_test)
winsound.Beep(frequency, duration)
#endregion
Example #25
                              random_state=None,
                              schedule=mlr.ExpDecay())

nn_model1.fit(X_train_scaled, y_train)

y_train_pred = nn_model1.predict(X_train_scaled)
y_train_accuracy = accuracy_score(y_train, y_train_pred)
print(y_train_accuracy)

y_test_pred = nn_model1.predict(X_test_scaled)
y_test_accuracy = accuracy_score(y_test, y_test_pred)
print(y_test_accuracy)

winsound.Beep(frequency, duration)

analyse(nn_model1, fold, XXX, yyy, X_train, y_train, X_test, y_test)

winsound.Beep(frequency, duration)
#endregion

#%% #region[black] NN OPTIMIZATION WITH GA

nn_model1 = mlr.NeuralNetwork(hidden_nodes=[100],
                              activation='relu',
                              algorithm='genetic_alg',
                              max_iters=4000,
                              bias=True,
                              is_classifier=True,
                              learning_rate=0.75,
                              early_stopping=True,
                              clip_max=5,
Example #26
    current_resigning[0].to_csv("../results/" + str(month_no) +
                                "/Resigning_Billable.csv",
                                index=False)
    current_resigning[1].to_csv("../results/" + str(month_no) +
                                "/Resigning_Bench.csv",
                                index=False)
    billable.to_csv("../results/" + str(month_no) + "/Billable_Resources.csv",
                    index=False)
    bench.to_csv("../results/" + str(month_no) + "/Benched_Resources.csv",
                 index=False)
    new_hires[month_no + 2].to_csv("../results/" + str(month_no) +
                                   "/SkillLists_To_Hire.csv",
                                   index=False)

    analysis = analyse(details, revenue_per_billable_r, cost_per_r)

    print("-" * 25)
    print("Details")
    for key in details.keys():
        print(key, ":", details[key])

    print("-" * 25)
    print("Analysis")
    for key in analysis.keys():
        print(key, ":", analysis[key])

    details['Month'] = month_no
    analysis['Month'] = month_no

    # DataFrame.append was removed in pandas 2.0; concatenate a one-row frame instead
    year_details = pd.concat([year_details, pd.DataFrame([details])], ignore_index=True)
Example #27
                                                    "scorecards-actors",
                                                    args.start_date,
                                                    args.end_date)

        if not os.path.exists("scorecards-ransomware"):
            os.makedirs("scorecards-ransomware")
        print("Generating Ransomware scorecards")
        scorecards.generate_ransomware_scorecards(misp_data,
                                                  "scorecards-ransomware",
                                                  args.start_date,
                                                  args.end_date)

    elif args.analyse:
        # Perform some basic analysis on the MISP data, which can be useful
        # for learning what is present in the data
        analysis.analyse(misp_data)

    elif args.list_actors:
        # List the threat actors present in the data
        #
        threat_actors = utility.identify_threat_actors(misp_data, initial={})
        for actor in threat_actors:
            print(actor)

    else:
        # Generate the desired heat maps
        #
        if not os.path.exists("heatmaps"):
            os.makedirs("heatmaps")
        if args.num_days != 0 and args.bin_size != 0:
            print("Generating custom heatmaps")