Example 1
def case_age_model():
    group = [
        ['S_kids', 'S_normal', 'S_risk'],
        ['E_kids', 'E_normal', 'E_risk'],
        ['I_kids', 'I_normal', 'I_risk'],
        ['I2'],
        ['I3'],
        ['R'],
        ['D']
    ]
    label = {
        "S_kids": "S",
        "E_kids": "E",
        "I_kids": "I_1",
        "I2": "I_2",
        "I3": "I_3"
    }
    data = get_results(
        simulation_age_model, (N, B_KIDS, G_KIDS, P_KIDS, B_NORMAL, G_NORMAL,
                               P_NORMAL, B_NORMAL, G_NORMAL, P_NORMAL), NUM_SIM
    )
    times, avg, std = fill_around_std(
        kolor_palette, data, 1, states=group, labels=label
    )
    save_data([times, avg, std], (N, NUM_SIM,))
Example 2
    def _get_visitorid(self):
        visitor_id = util.load_data(self.addon, self.VISITOR_FILE)
        if visitor_id is None:
            visitor_id = str(randint(0, 0x7fffffff))
            util.save_data(self.addon, self.VISITOR_FILE, visitor_id)

        return visitor_id
Example 3
async def introStartCommand(id,guildID):
	global currentintrosessionReport
	
	currentintrosessionReport[str(id)] = {}
	currentintrosessionReport[str(id)]['confirmed'] = "False"
	currentintrosessionReport[str(id)]['guildID'] = str(guildID)
	currentintrosessionReport[str(id)]['createdTime'] = str(time.time())
	#currentintrosessionReport[str(id)]['createdTime'] = "9999999999999"
	currentintrosessionReport[str(id)]['answers'] = []
	currentintrosessionReport[str(id)]['lastAnswerTime'] = str(int(time.time()))
	
	guild = discord.utils.get(botAcc.guilds, id=int(guildID))
	member = discord.utils.get(guild.members, id=int(id))
	
	if member is None:
		print("Member none")
		return False
	
	try:
		await member.send("Please answer each question.")
	except:
		return False
		
	messageToSendRAW = questions[0]
	messageToSend = messageToSendRAW.split("{")[0]
	messageSent = await member.send(messageToSend)
	util.save_data(currentintrosessionReport, "currentintrosessionReport")
Example 4
def get_data_and_show(kind, limit, use_index, is_show, dis_tick, data_kind,
                      xlabel, ylabel, line_color, fig_color, funciton, x_str,
                      y_str, title, figsize, *args, **kwargs):
    data, orient = get_data(data_kind, limit, *args, **kwargs)
    if is_show and not data.empty:
        dd = DataDisplay()
        if kind == 'pie' and isinstance(data, pd.core.frame.DataFrame):
            return dumps({'error_kind': "DataFrame don't support pie"})
        dd.show_graphic(data,
                        kind,
                        use_index,
                        xlabel=xlabel,
                        ylabel=ylabel,
                        line_color=line_color,
                        fig_color=fig_color,
                        funciton=funciton,
                        x_str=x_str,
                        y_str=y_str,
                        title=title,
                        figsize=figsize,
                        dis_tick=dis_tick)
    if request.args.get('save'):
        save_data(data, data_kind, request.args.get('save'),
                  request.args.get('pt'))
    return data.to_json(orient=orient)
Example 5
def minify(filepath=None):
    """Minify the file by removing unnecessary whitespace characters.
    Also remove properties which are not necessary.
    Currently will ignore properties which are identical to
    the default properties of an item."""
    if filepath is None:
        filepath = LATEST_DATA_SET_PATH
    data_set = load_data(filepath)

    basic_data = data_set["basic"]
    items = data_set["data"]

    smaller_items = dict()

    for item, data in items.items():
        smaller_item = dict()
        for key, value in data.items():
            if key in basic_data and value == basic_data[key]:
                # This property is the same as the default
                # It doesn't need to be re-specified
                continue
            smaller_item[key] = value

        smaller_items[item] = smaller_item

    data_set["data"] = smaller_items

    filename, extension = filepath.rsplit(".", 1)
    new_filepath = filename + "_" + FILE_AMMENDMENT + "." + extension

    save_data(data_set, new_filepath)
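
A quick way to see what the loop above does is to run the default-stripping step on toy data. The sketch below is self-contained and uses invented values rather than the real data set loaded from LATEST_DATA_SET_PATH.

# Minimal sketch of the default-stripping step in minify(); data is invented.
basic_data = {"color": "grey", "stackable": True}
items = {
    "rock": {"color": "grey", "stackable": True, "weight": 3},
    "gem": {"color": "blue", "stackable": True},
}

smaller_items = {}
for item, data in items.items():
    smaller_items[item] = {
        key: value for key, value in data.items()
        if not (key in basic_data and value == basic_data[key])
    }

print(smaller_items)  # {'rock': {'weight': 3}, 'gem': {'color': 'blue'}}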
Example 6
def get_visitorid():
    visitor_id = util.load_data(addon, VISITOR_FILE)
    if visitor_id is False:
        from random import randint
        visitor_id = str(randint(0, 0x7fffffff))
        util.save_data(addon, VISITOR_FILE, visitor_id)

    return visitor_id
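
The pattern here is load-or-create: return a cached visitor id, or generate one and persist it on a miss. Below is a hedged, file-based sketch of the same idea that does not rely on the Kodi util/addon helpers; the path and function name are placeholders.

import json
import os
from random import randint

VISITOR_FILE = "/tmp/visitor_id.json"  # placeholder path, not the addon's storage

def get_visitor_id():
    # Return the cached visitor id, creating and persisting one if missing.
    if os.path.exists(VISITOR_FILE):
        with open(VISITOR_FILE) as fh:
            return json.load(fh)["visitor_id"]
    visitor_id = str(randint(0, 0x7fffffff))
    with open(VISITOR_FILE, "w") as fh:
        json.dump({"visitor_id": visitor_id}, fh)
    return visitor_id

print(get_visitor_id())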
Example 7
def case_no_intervention():
    label = {
        "I1": "I_1",
        "I2": "I_2",
        "I3": "I_3"
    }
    data = get_results(simulation_no_intervention, (N,), NUM_SIM)
    times, avg, std = fill_around_std(color_palette, data, 1, labels=label)
    save_data([times, avg, std], (N, NUM_SIM))
Example 8
def case_average_second_wave():
    data = get_results(simulation_second_wave, tuple(), NUM_SIM)
    labels = {
        "I1": "I_{1}", "I2": "I_{2}", "I3": "I_{3}"
    }
    times, avg, std = fill_around_std(
        color_palette, data, 1, labels=labels
    )
    save_data([times, avg, std], (N, NUM_SIM))
Example 9
def hk(s, op_eps, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulates the model of Hegselmann-Krause.

    This model does not require an adjacency matrix. Connections between
    nodes are calculated depending on the proximity of their opinions.

    Args:
        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        op_eps: ε parameter of the model

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN vector of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        for i in range(N):
            # The node chooses only those with a close enough opinion
            friends_i = np.abs(z_prev - z_prev[i]) <= op_eps
            z[i] = np.mean(z_prev[friends_i])
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Hegselmann-Krause converged after {t} rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'hk' + timeStr
        save_data(simid,
                  N=N,
                  max_rounds=max_rounds,
                  eps=eps,
                  rounds_run=t + 1,
                  s=s,
                  op_eps=op_eps,
                  opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
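
The inner update is simply "average the opinions of everyone within op_eps of me". A self-contained toy run of that rule, without the module's helpers (preprocessArgs, trange, save_data) and with invented values:

import numpy as np

# Toy Hegselmann-Krause loop: two opinion clusters form and then converge.
z = np.array([0.05, 0.10, 0.15, 0.80, 0.90])
op_eps, eps = 0.2, 1e-6

for t in range(1, 1000):
    z_prev = z.copy()
    for i in range(len(z)):
        friends_i = np.abs(z_prev - z_prev[i]) <= op_eps
        z[i] = np.mean(z_prev[friends_i])
    if np.linalg.norm(z - z_prev, np.inf) < eps:
        break

print(t, z)  # two clusters around 0.10 and 0.85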
Example 10
def case_average_time():
    t = 5
    output = get_results(
        simulation_max_time, (N, t), NUM_SIM
    )
    timepoints, avg, std = fill_around_std(
        color_palette, output, 1
    )
    save_data([timepoints, avg, std], (N, NUM_SIM, t))
Example 11
def friedkinJohnsen(A, s, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulates the Friedkin-Johnsen (Kleinberg) Model.

    Runs a maximum of max_rounds rounds of the Friedkin-Johnsen model. If the
    model converges sooner, the function returns. The stubbornness matrix of
    the model is extracted from the diagonal of matrix A.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN vector of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    B = np.diag(np.diag(A))  # Stubbornness matrix of the model
    A_model = A - B  # Adjacency matrix of the model

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = z

    for t in trange(1, max_rounds):
        z = A_model.dot(z) + B.dot(s)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Friedkin-Johnsen converged after {t} rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'fj' + timeStr
        save_data(simid,
                  N=N,
                  max_rounds=max_rounds,
                  eps=eps,
                  rounds_run=t + 1,
                  A=A,
                  s=s,
                  opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
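
The rule being iterated is z(t+1) = (A - B) z(t) + B s, where B is the diagonal (stubbornness) part of A. A minimal standalone sketch with a toy row-stochastic matrix, assuming nothing from the module above:

import numpy as np

# Toy Friedkin-Johnsen iteration; each diagonal entry of A is that
# node's stubbornness weight and every row of A sums to 1.
A = np.array([[0.6, 0.4, 0.0],
              [0.3, 0.4, 0.3],
              [0.0, 0.5, 0.5]])
s = np.array([0.0, 0.5, 1.0])  # intrinsic beliefs
B = np.diag(np.diag(A))
A_model = A - B
z = s.copy()

for t in range(1, 1000):
    z_new = A_model.dot(z) + B.dot(s)
    if np.linalg.norm(z_new - z, np.inf) < 1e-6:
        break
    z = z_new

print(t, z)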
Example 12
 def stop_recording(self):
     print("Stopping recording.")
     self.recording = False
     
     self.data["duration"] = int(time.time()-self.startTime)
     self.data["videoFile"] = self.videoData
     self.data["__type__"] = "irVideoRecording"
     self.camera.stop_recording()
     util.save_data(self.data, self.recordingFolder)
     self.data = {}
Example 13
def get_detail(shopID='l4k6FPkNSo8IQyF7', shopname='海底捞火锅'):
    shop_url = "http://www.dianping.com/shop/" + shopID + "/review_all/"
    for i in range(1, 2):
        url = shop_url + 'p' + str(i)
        print(url)
        html = getHTMLText(url)
        infoList = parsePage(html, shopID, shopname)
        for info in infoList:
            util.save_data(info)
        # "Successfully scraped page {}; it contains {} reviews"
        print('成功爬取第{}页数据,有评论{}条'.format(i, len(infoList)))
Example 14
def meetFriend_matrix_nomem(A, max_rounds, norm_type=2, save=False):
    '''Simulates the random meeting model (matrix version).

    Runs a maximum of max_rounds rounds of the "Meeting a Friend" model. If the
    model converges sooner, the function returns. The stubbornness matrix of
    the model is extracted from the diagonal of matrix A. The function returns
    the distance from the equilibrium of the Friedkin-Johnsen model when the
    process is complete. Uses less memory than the full model.

    Args:
        A (NxN numpy array): Weights matrix (its diagonal is the stubbornness)

        max_rounds (int): Maximum number of rounds to simulate

        norm_type: The norm type used to calculate the difference from the
        equilibrium

        save (bool): Save the simulation data into text files

    Returns:
        The final distance from the equilibrium of the Friedkin-Johnsen model,
        using the specified norm.

    '''

    max_rounds = int(max_rounds)

    N = A.shape[0]
    B = np.diag(np.diag(A))

    nonzero_ids = [np.nonzero(A[i, :])[0] for i in xrange(A.shape[0])]

    equilibrium_matrix = np.dot(inv(np.eye(N) - (A - B)), B)
    R, Q = rand_matrices(A, 1, nonzero_ids)
    R = R + Q

    for t in trange(2, max_rounds + 2):
        A_t, B_t = rand_matrices(A, t, nonzero_ids)
        R = A_t.dot(R) + B_t

    distance = norm(R - equilibrium_matrix, ord=norm_type)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'mf' + timeStr
        # Note: this function has no eps parameter, so eps is not saved here.
        save_data(simid,
                  N=N,
                  max_rounds=max_rounds,
                  rounds_run=max_rounds,
                  A=A,
                  distance=distance,
                  norm=norm_type)

    return distance
Example 15
def case_max_time():
    time = np.linspace(0, 15, 30)
    group = ["I3", "I1", "I2"]
    arguments = [(simulation_max_time, (N, t), group) for t in time]
    with Pool() as p:
        y = p.starmap(max_sim, arguments)
    save_data([time.tolist(), y], (N, NUM_SIM))
    plt.xlabel("point in time social distancing takes place")
    plt.ylabel("maximum count of infected people")
    plt.scatter(time, y, c="red")
    plt.show()
Example 16
def prettify(filepath=None):
    """Minifies the file by removing unnecessary whitespace
    Also remove properties which are the same as the default properties of an item"""
    if filepath is None:
        filepath = LATEST_DATA_SET_PATH
    data_set = load_data(filepath)

    filename, extension = filepath.rsplit(".", 1)
    new_filepath = filename + "_" + FILE_AMMENDMENT + "." + extension

    save_data(data_set, new_filepath, pretty=True)
Example 17
    def OnFormatChoice(self, event):
        img_format = event.GetString()
        shared.options['format'] = img_format
        save_data(shared.options)

        if self.img_panel:
            self.img_panel.Hide()
        self.img_panel = self.img_gui_dict.get(img_format)
        if self.img_panel:
            self.img_panel.Show()
            self.sizer.Layout()
            self.Refresh()
Example 18
def deGroot(A, s, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulates the DeGroot Model.

    Runs a maximum of max_rounds rounds of the DeGroot model. If the model
    converges sooner, the function returns.

    Args:
        A (NxN numpy array): Adjacency matrix

        s (1xN numpy array): Initial opinions vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN vector of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        z = A.dot(z)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('DeGroot converged after {t} rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'dg' + timeStr
        save_data(simid,
                  N=N,
                  max_rounds=max_rounds,
                  eps=eps,
                  rounds_run=t + 1,
                  A=A,
                  s=s,
                  opinions=opinions[0:t + 1, :])

    return opinions[0:t + 1, :]
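
DeGroot is the simplest of these models: repeated averaging z(t+1) = A z(t) with a row-stochastic A. A self-contained toy version of the loop above, leaving out preprocessArgs, trange and save_data:

import numpy as np

# Toy DeGroot iteration on a 3-node row-stochastic matrix.
A = np.array([[0.50, 0.50, 0.00],
              [0.25, 0.50, 0.25],
              [0.00, 0.50, 0.50]])
z = np.array([0.0, 0.5, 1.0])

for t in range(1, 1000):
    z_new = A.dot(z)
    if np.linalg.norm(z_new - z, np.inf) < 1e-6:
        break
    z = z_new

print(t, z)  # the opinions reach a consensus value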
Example 19
def hk(s, op_eps, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulates the model of Hegselmann-Krause.

    This model does not require an adjacency matrix. Connections between
    nodes are calculated depending on the proximity of their opinions.

    Args:
        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        op_eps: ε parameter of the model

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN vector of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        for i in range(N):
            # The node chooses only those with a close enough opinion
            friends_i = np.abs(z_prev - z_prev[i]) <= op_eps
            z[i] = np.mean(z_prev[friends_i])
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Hegselmann-Krause converged after {t} rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'hk' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                  rounds_run=t+1, s=s, op_eps=op_eps,
                  opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
Example 20
	async def abort(self,ctx):
		
		global currentintrosessionReport
		
		if not ctx.message.guild:
				
			if currentintrosessionReport[str(ctx.message.author.id)]['confirmed'] == "False":
				currentintrosessionReport[str(ctx.message.author.id)]['confirmed'] = 'Aborted'
				await ctx.message.author.send("You have now aborted your questions")
				util.save_data(currentintrosessionReport, "currentintrosessionReport")
		else:
			await ctx.message.author.send("Please answer the questions in the DM")
			await ctx.message.delete()
Example 21
def case_max_class():
    class_size = [5, 10, 15, 20, 25, 30]
    group = ["I3", "I1", "I2"]
    arguments = [
        (simulation_max_class, (N, cs, None, None), group) for cs in class_size
    ]
    with Pool() as p:
        y = p.starmap(max_sim, arguments)
    save_data([class_size, y], (N, NUM_SIM))
    plt.xlabel("average class size")
    plt.ylabel("maximum count of infected people")
    plt.scatter(class_size, y, c="red")
    plt.show()
Example 22
def case_max_half_edges():
    percentage = np.linspace(0, 1, 21)
    y = []
    group = ["I3", "I1", "I2"]
    arguments = [(simulation_max_time, (N, p), group) for p in percentage]
    with Pool() as p:
        y = p.starmap(max_sim, arguments)
    save_data([percentage.tolist(), y], (N, NUM_SIM))
    plt.xlabel("percentage of active edges")
    plt.ylabel("maximum count of infected people")
    plt.scatter(percentage, y, c="red")
    plt.gcf().canvas.set_window_title(sys.argv[1])
    plt.show()
Example 23
 def OnFolder(self, event, mode):
     '''Open a folder selection dialog'''
     dlg = wx.DirDialog(self, 'Choose a folder', '', wx.DD_DEFAULT_STYLE)
     path = shared.options.get(mode)
     dlg.SetPath(path if (path is not None) else '')
     
     if dlg.ShowModal() == wx.ID_OK:
         path = dlg.GetPath()
         shared.options[mode] = path
         self.textDict[mode].SetLabel(path)
         self.Layout()
         save_data(shared.options)
     dlg.Destroy()
Example 24
def friedkinJohnsen(A, s, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulates the Friedkin-Johnsen (Kleinberg) Model.

    Runs a maximum of max_rounds rounds of the Friedkin-Johnsen model. If the
    model converges sooner, the function returns. The stubbornness matrix of
    the model is extracted from the diagonal of matrix A.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN vector of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    B = np.diag(np.diag(A))  # Stubbornness matrix of the model
    A_model = A - B  # Adjacency matrix of the model

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = z

    for t in trange(1, max_rounds):
        z = A_model.dot(z) + B.dot(s)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Friedkin-Johnsen converged after {t} rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'fj' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
Example 25
def meetFriend_matrix_nomem(A, max_rounds, norm_type=2, save=False):
    '''Simulates the random meeting model (matrix version).

    Runs a maximum of max_rounds rounds of the "Meeting a Friend" model. If the
    model converges sooner, the function returns. The stubbornness matrix of
    the model is extracted from the diagonal of matrix A. The function returns
    the distance from the equilibrium of the Friedkin-Johnsen model when the
    process is complete. Uses less memory than the full model.

    Args:
        A (NxN numpy array): Weights matrix (its diagonal is the stubbornness)

        max_rounds (int): Maximum number of rounds to simulate

        norm_type: The norm type used to calculate the difference from the
        equilibrium

        save (bool): Save the simulation data into text files

    Returns:
        The final distance from the equilibrium of the Friedkin-Johnsen model,
        using the specified norm.

    '''

    max_rounds = int(max_rounds)

    N = A.shape[0]
    B = np.diag(np.diag(A))

    nonzero_ids = [np.nonzero(A[i, :])[0] for i in xrange(A.shape[0])]

    equilibrium_matrix = np.dot(inv(np.eye(N) - (A - B)), B)
    R, Q = rand_matrices(A, 1, nonzero_ids)
    R = R + Q

    for t in trange(2, max_rounds+2):
        A_t, B_t = rand_matrices(A, t, nonzero_ids)
        R = A_t.dot(R) + B_t

    distance = norm(R - equilibrium_matrix, ord=norm_type)

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'mf' + timeStr
        # Note: this function has no eps parameter, so eps is not saved here.
        save_data(simid, N=N, max_rounds=max_rounds,
                  rounds_run=max_rounds, A=A, distance=distance,
                  norm=norm_type)

    return distance
Example 26
def contact_tracing(p):
    time = [5, 7.5, 10]
    data = get_results(simulation_contact_tracing, (N, p, time), NUM_SIM)
    states = [
        ["S"], ["E"], ["I1"], ["I2"], ["I3"], ["R"], ["D"], ["Q1", "Q2", "Q3"],
        ["QS", "QE"]
    ]
    labels = {
        "Q1": "Q_I", "QS": "Q_{S, E}",
        "I1": "I_1", "I2": "I_2", "I3": "I_3"
    }
    times, avg, std = fill_around_std(
        quolor_palette, data, 1, states=states, labels=labels
    )
    save_data([times, avg, std], (N, NUM_SIM, time, p))
Example 27
def case_max_class_mod():
    class_sizes = [5, 10, 15, 20, 25, 30]
    degree = [1.25, 2.5, 3.75, 5, 6.25, 7.5]
    p = 0.3
    group = ["I3", "I1", "I2"]
    arguments = [
        (simulation_max_class, (N, cs, d, p), group)
        for cs, d in zip(class_sizes, degree)
    ]
    with Pool() as pool:
        y = pool.starmap(max_sim, arguments)
    save_data([class_sizes, y], (N, NUM_SIM))
    plt.xlabel("average class size")
    plt.ylabel("maximum count of infected people")
    plt.scatter(class_sizes, y, c="red")
    plt.show()
Example 28
def deGroot(A, s, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulates the DeGroot Model.

    Runs a maximum of max_rounds rounds of the DeGroot model. If the model
    converges sooner, the function returns.

    Args:
        A (NxN numpy array): Adjacency matrix

        s (1xN numpy array): Initial opinions vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN vector of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        z = A.dot(z)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('DeGroot converged after {t} rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'dg' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
Example 29
def case_average_quarantine_rates():
    q1 = 0.5
    output = get_results(
        simulation_max_quarantine_rates, (N, q1, 0, 0), NUM_SIM
    )
    labels = {
        "I1": "I_1", "I2": "I_2", "I3": "I_3",
        "Q1": "Q", "Q2": "Q_2", "Q3": "Q_3",
    }
    groups = [
        ["Q1", "Q2", "Q3"]
    ]
    complete_group(groups, qolor_palette)
    timepoints, avg, std = fill_around_std(
        qolor_palette, output, 1, labels=labels, states=groups
    )
    save_data([timepoints, avg, std], (N, NUM_SIM, q1))
Example 30
def obfuscate_keystrokes(name, strategy, param):
    """

    """
    df = load_data(name)
    df = df.groupby(level=[0, 1]).apply(keystrokes2events).reset_index(level=[2, 3], drop=True)

    if strategy == 'delay':
        df = df.groupby(level=[0, 1]).apply(lambda x: delay_mix(x, param))
    elif strategy == 'interval':
        df = df.groupby(level=[0, 1]).apply(lambda x: interval_mix(x, param))
    else:
        raise Exception('Unknown masking strategy')

    df = df.groupby(level=[0, 1]).apply(events2keystrokes).reset_index(level=[2, 3], drop=True)
    save_data(df, name, masking=(strategy, param))
    return
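
The heavy lifting here is the groupby(level=[0, 1]).apply(...) pattern: every (user, session) group is transformed independently and the results are glued back together. A small self-contained sketch of that pattern with a toy frame and a stand-in masking function (add_delay below is illustrative, not the source's delay_mix or interval_mix):

import pandas as pd

# Toy frame with a two-level (user, session) index, mirroring the
# level=[0, 1] groupby used above.
df = pd.DataFrame(
    {"latency": [100, 120, 90, 110]},
    index=pd.MultiIndex.from_tuples(
        [("u1", 1), ("u1", 1), ("u1", 2), ("u2", 1)],
        names=["user", "session"]),
)

def add_delay(group, param):
    # Shift every latency in the group by a constant amount.
    out = group.copy()
    out["latency"] = out["latency"] + param
    return out

masked = df.groupby(level=[0, 1], group_keys=False).apply(lambda g: add_delay(g, 25))
print(masked)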
Example 31
	async def on_message(self,message: discord.Message):
		
		global currentintrosessionReport
		
		if not message.author.bot:
			#print("Message is not a command")
			if not message.guild:
				#print("Message is a DM")
				if str(message.author.id) in currentintrosessionReport:
					if not currentintrosessionReport[str(message.author.id)]['confirmed'] == "True":
						if 'lastAnswerTime' in currentintrosessionReport[str(message.author.id)]:
							if (int(currentintrosessionReport[str(message.author.id)]['lastAnswerTime']) + 300) < int(time.time()):
								currentintrosessionReport[str(message.author.id)]['confirmed'] = 'Aborted'
								util.save_data(currentintrosessionReport, "currentintrosessionReport")
							else:
								await answer(message,message.content)
						else:
							await answer(message,message.content)
Example 32
    def refresh_login(self):
        """Deletes persisted cookies which forces a login attempt with current credentials"""

        s = requests.Session()
        empty_cookie_jar = s.cookies
        if util.save_data(addon, self.COOKIE_FILE_NAME, empty_cookie_jar):
            xbmcgui.Dialog().ok(addonname, "Cleared cookies. Please exit the addon and open it again.")
        else:
            xbmcgui.Dialog().ok(addonname, "Could not refresh lynda session cookies")
Example 33
async def confirm(authorID,serverID):
		
	global currentintrosessionReport
	global reportChannelID
	
	print("Doing confirm command")
		
	if len(currentintrosessionReport[str(authorID)]['answers']) >= len(questions):
		if currentintrosessionReport[str(authorID)]['confirmed'] == "False":
			#print(orderID)
			
			guild = discord.utils.get(botAcc.guilds, id=int(serverID))
			member = discord.utils.get(guild.members, id=int(authorID))
			
			if not member is None:
				
				guild = discord.utils.get(botAcc.guilds, id=int(currentintrosessionReport[str(authorID)]['guildID']))
				newIntroChannel = discord.utils.get(guild.channels, id=int(reportChannelID))
				
				introID = len(currentintrosessionReport) +1
				currentintrosessionReport[str(authorID)]['introID'] = str(introID)
				currentintrosessionReport[str(authorID)]['confirmedTime'] = str(time.time())
				currentintrosessionReport[str(authorID)]['confirmed'] = 'True'
				currentintrosessionReport[str(authorID)]['lastAnswerTime'] = str(int(time.time()))
				
				embed = discord.Embed(title=str(member)+" Report")
				#messageToSend = ""
				counter = 0
				for messageAnswer in currentintrosessionReport[str(authorID)]['answers'][:-1]:
					#messageToSend = messageToSend + questions[counter].split("{")[0] + ": " + str(messageAnswer)+"\n"
					
					counter = counter +1
				counter = 0
				for messageAnswer in currentintrosessionReport[str(authorID)]['answers']:
					if not messageAnswer == "":
						embed.add_field(name=questions[counter], value=str(messageAnswer),inline=False)
					counter = counter +1
				newIntroMessage = await newIntroChannel.send(embed=embed)
						
				currentintrosessionReport[str(authorID)]['newIntroMessageID'] = str(newIntroMessage.id)
						
				await member.send("Thank you. The answers have been sent to the GFS moderators.")
				util.save_data(currentintrosessionReport, "currentintrosessionReport")
Example 34
def obfuscate_keystrokes(name, strategy, param):
    """

    """
    df = load_data(name)
    df = df.groupby(level=[0, 1]).apply(keystrokes2events).reset_index(
        level=[2, 3], drop=True)

    if strategy == 'delay':
        df = df.groupby(level=[0, 1]).apply(lambda x: delay_mix(x, param))
    elif strategy == 'interval':
        df = df.groupby(level=[0, 1]).apply(lambda x: interval_mix(x, param))
    else:
        raise Exception('Unknown masking strategy')

    df = df.groupby(level=[0, 1]).apply(events2keystrokes).reset_index(
        level=[2, 3], drop=True)
    save_data(df, name, masking=(strategy, param))
    return
Example 35
    def router(self, paramstring):
        """Router function that calls other functions depending on the provided paramstrings"""

        # Parse a URL-encoded paramstring to the dictionary of {<parameter>: <value>} elements
        params = dict(parse_qsl(paramstring[1:]))

        if params:
            if params['action'] != 'play':
                self.ga.track(params['action'])

            # Cookiejar should definitely exist by now. Even if empty
            cookiejar = util.load_data(addon, self.COOKIE_FILE_NAME)
            self.api = LyndaApi(cookiejar)

            if params['action'] == 'search':
                self.search()
            elif params['action'] == 'list_course_chapters':
                self.list_course_chapters(int(params['course_id']))
            elif params['action'] == 'list_chapter_videos':
                self.list_chapter_videos(int(params['course_id']),
                                         int(params['chapter_id']))
            elif params['action'] == 'play':
                # Log the video as being played if user is logged in
                if self.api.logged_in:
                    self.api.log_video(int(params['video_id']))
                self.play_video(int(params['course_id']),
                                int(params['video_id']))
            elif params['action'] == 'show_access_error':
                self.show_access_error()
            elif params['action'] == 'list_my_courses':
                self.list_my_courses()
            elif params['action'] == 'refresh_login':
                self.refresh_login()
        else:
            self.ga.track('list_root_options')

            cookiejar = util.load_data(addon, self.COOKIE_FILE_NAME)
            if cookiejar:
                self.api = LyndaApi(cookiejar)
            else:
                self.api = LyndaApi()

            auth_type = xbmcplugin.getSetting(__handle__, "auth_type")

            # Try to log the user in if necessary
            if not self.api.logged_in and auth_type != 'None':
                self.login(auth_type)

            # Save cookie jar to disk so other screens in addon can resume session
            if not util.save_data(addon, self.COOKIE_FILE_NAME,
                                  self.api.get_cookies()):
                xbmcgui.Dialog().ok(addonname,
                                    "Could not save lynda session cookies")

            self.list_root_options()
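
Stripped down, the routing logic is: parse the URL-encoded paramstring into a dict, then dispatch on the action key, falling back to the root listing when no parameters are given. A bare-bones, self-contained sketch of that dispatch (the handlers dict and the sample paramstring are invented for illustration):

from urllib.parse import parse_qsl

def route(paramstring, handlers):
    params = dict(parse_qsl(paramstring[1:]))  # drop the leading '?'
    if not params:
        return handlers["root"]()
    return handlers[params["action"]](params)

handlers = {
    "root": lambda: "list_root_options",
    "search": lambda p: "searching for " + p.get("query", ""),
}
print(route("?action=search&query=python", handlers))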
Example 36
def case_infectious_tracing():
    time = [5, 7.5, 10]
    p = [0.7, 1]
    data = get_results(
        simulation_infectious_tracing, (N, p[0], p[1], time), NUM_SIM
    )
    states = [
        ["S"], ["E"], ["I1"], ["I2"], ["I3"], ["R"], ["D"], ["Q1", "Q2", "Q3"],
        ["QS", "QE"]
    ]
    labels = {
        "Q1": "Q_{I}", "QS": "Q_{S, E}",
        "I1": "I_{1}", "I2": "I_{2}", "I3": "I_{3}"
    }
    times, avg, std = fill_around_std(
        quolor_palette, data, 1, states=states, labels=labels
    )
    save_data([times, avg, std], (N, NUM_SIM, time, p))
Example 37
    def fit_all_models(self, solver_params=None):
        """Fits parameters for all models then saves to disk.

        Parameters
        ----------
        solver_params : dict
            Dict of parameters for minimizer algorithm.

        """
        if not solver_params:
            print("Solver params not set, using preconfigured defaults")
            solver_params = {
                "niter": 200,
                "stepsize": 100,
                "interval": 10,
                "method": "TNC",
                "use_jac": True,
            }
        cell_fits = {}
        cell_lls = {}

        for cell in self.cell_range:
            print("Fitting cell {0}".format(cell))
            cell_fits[cell] = {}
            cell_lls[cell] = {}
            for model in self.model_dict:
                model_instance = self.model_dict[model][cell]
                if model_instance.bounds is None:
                    raise ValueError(
                        "model \"{0}\" bounds not yet set".format(model))

                print("Fitting {0}".format(model))
                model_instance.fit_params(solver_params)
                # Build dict for json dump, json requires list instead of ndarray
                param_dict = {param: model_instance.fit.tolist()[index]
                              for index, param in enumerate(model_instance.param_names)}
                cell_fits[cell][model_instance.__class__.__name__] = param_dict
                cell_lls[cell][model_instance.__class__.__name__] = model_instance.fun

            util.save_data({cell:cell_fits[cell]}, "cell_fits", cell=cell)
            util.save_data({cell:cell_lls[cell]}, "log_likelihoods", cell=cell)
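
One detail worth keeping in mind from the code above is the ndarray-to-dict conversion before saving: json cannot serialize numpy arrays, so the fitted values are turned into a plain list and zipped with the parameter names. A tiny standalone illustration with invented names and values:

import json
import numpy as np

# Sketch of the json-friendly parameter dict built in fit_all_models().
param_names = ["a", "b", "c"]
fit = np.array([0.5, 1.2, -0.3])
param_dict = {name: value for name, value in zip(param_names, fit.tolist())}
print(json.dumps(param_dict))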
Example 38
def test(model_filename, img_type, dir_name):
    print("Testing {}".format(model_filename))
    model = torch.load(os.path.join(dir_name, model_filename))
    base_dirs = ['KKI', 'NeuroIMAGE', 'OHSU', 'Peking_1', 'Peking_2', 'Peking_3', 'Pittsburgh','WashU']
    model.eval()
    total_attempts = 0
    total_correct = 0
    res = collections.defaultdict(dict)
    for base_dir in base_dirs:
        d = dataset.get_data_loader(base_dir, img_type, batch_size=util.BATCH_SIZE, train=False)
        dataset_correct = 0
        dataset_attempt = 0
        for idx, (img, label, err) in enumerate(d):
            err_exists = sum(err) != 0
            if err_exists:
                print("Error in loading {}".format(base_dir))
                continue
            label = label.cuda()
            img = Variable(img.float()).cuda().contiguous()
            img = img.view(img.shape[0], 1, img.shape[1], img.shape[2], img.shape[3])
            # print(img.shape)
            with torch.no_grad():
                out = model(img)
            prediction = out.data.max(1)[1]
            correct = float(prediction.eq(label.data).sum())
            dataset_attempt += len(label)
            dataset_correct += correct
        if dataset_attempt != 0:
            accuracy = (dataset_correct / float(dataset_attempt)) * 100.0
            res[base_dir]['correct'] = dataset_correct
            res[base_dir]['attempt'] = dataset_attempt
            res[base_dir]['accuracy'] = accuracy
            total_correct += dataset_correct
            total_attempts += dataset_attempt
            print('Test Accuracy {}: {}/{}={}'.format(base_dir, dataset_correct, dataset_attempt, accuracy))
    res['summary']['correct'] = total_correct
    res['summary']['attempt'] = total_attempts
    res['summary']['accuracy'] = float(total_correct)/total_attempts * 100.0
    util.save_data(res, os.path.join(dir_name, 'test_result.pckl'))
    print('Total Accuracy: {}'.format(res['summary']['accuracy']))
    return res
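
Leaving the torch specifics aside, the bookkeeping above is a per-dataset tally of correct/attempted counts that is then rolled up into a summary entry. A toy version of just that part, with invented counts:

import collections

# Per-dataset accuracy tally mirroring the res/summary structure above.
res = collections.defaultdict(dict)
counts = {"KKI": (45, 60), "OHSU": (30, 40)}  # dataset -> (correct, attempted)
total_correct = total_attempts = 0

for name, (correct, attempted) in counts.items():
    res[name]["correct"] = correct
    res[name]["attempt"] = attempted
    res[name]["accuracy"] = correct / attempted * 100.0
    total_correct += correct
    total_attempts += attempted

res["summary"]["correct"] = total_correct
res["summary"]["attempt"] = total_attempts
res["summary"]["accuracy"] = float(total_correct) / total_attempts * 100.0
print(dict(res))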
Example 39
    def run(self):

        # Create folder to save images in
        imagesFolder = os.path.join(allImagesFolder, (str(int(time.time())))+util.rand_str())
        os.makedirs(imagesFolder)
        # Save images
        imageIndex = 0
        for i in self.images:
            imageName = str(imageIndex).zfill(6) + '.jpg'
            imageIndex += 1
            cv2.imwrite(os.path.join(imagesFolder, imageName), np.uint8(i))

        # Render into avi
        inputF = os.path.join(imagesFolder, "%06d.jpg")
        outputF = os.path.join(imagesFolder, "file.avi")
        command = "/usr/local/bin/ffmpeg -r 5 -i {i} {o}".format(
            i = inputF, o = outputF)
        print(command)
        os.system(command)
        util.save_data(self.data, outputF)
        shutil.rmtree(imagesFolder)
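
The shell command built above can also be run without os.system. A safer equivalent using subprocess, with the same ffmpeg flags but a placeholder folder and assuming ffmpeg is on the PATH:

import os
import subprocess

images_folder = "/tmp/frames"  # placeholder for the folder created above
input_pattern = os.path.join(images_folder, "%06d.jpg")
output_file = os.path.join(images_folder, "file.avi")

# Same "-r 5 -i <pattern> <output>" invocation, as an argument list instead of a shell string.
subprocess.run(["ffmpeg", "-r", "5", "-i", input_pattern, output_file],
               check=True)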
Example 40
    def accepted(self):
        login = self.ui.loginEdit.text()
        password = self.ui.passwordEdit.text()
        dbname = self.ui.dbEdit.text()

        dbase = QtSql.QSqlDatabase.addDatabase("QMYSQL")
        dbase.setHostName("localhost")
        dbase.setDatabaseName(dbname)
        dbase.setUserName(login)
        dbase.setPassword(password)

        if not dbase.open():
            dbase.close()
            for name in QtSql.QSqlDatabase.connectionNames():
                QtSql.QSqlDatabase.removeDatabase(name)
            # "Authorization error": "Could not connect to the DBMS with the given
            # parameters. Make sure the DBMS is running and the login details are correct."
            QtWidgets.QMessageBox.critical(self, "Ошибка авторизации", "Не удалось установить соединение с СУБД с заданными параметрами. Убедитесь, что СУБД запущена, а данные для входа введены верно.")
        else:
            LoginDialog.wnd = MainWindow(dbase)
            LoginDialog.wnd.show()
            password = password if self.ui.rememberPassword.isChecked() else ""
            save_data(login, password, dbname)
            self.accept()
Example 41
def router(paramstring):
    """
    Router function that calls other functions depending on the provided
    paramstrings
    """

    # Parse a URL-encoded paramstring to the dictionary of
    # {<parameter>: <value>} elements
    params = dict(parse_qsl(paramstring[1:]))

    # Check the parameters passed to the plugin
    if params:
        s = util.load_data(addon, "lynda_session")
        if s == False:
            xbmcgui.Dialog().ok(addonname, "Could not load session data")
            return

        if params['action'] != 'play':
            ga_track(params['action'])

        if params['action'] == 'search':
            keyboard = xbmc.Keyboard("", "Search", False)
            keyboard.doModal()
            if keyboard.isConfirmed() and keyboard.getText() != "":
                query = keyboard.getText()
                # xbmcgui.Dialog().ok(addonname, query)
                courses = scrape.course_search(s, query)
                list_courses(s, courses)

        elif params['action'] == 'list_my_courses':
            courses = scrape.get_my_courses(s)
            list_courses(s, courses)

        elif params['action'] == 'list_categories_letters':
            list_categories_letters(s)

        elif params['action'] == 'list_category_letter_contents':
            list_category_letter_contents(s, params['letter'])

        elif params['action'] == 'list_category_courses':
            link = params['link']
            courses = scrape.courses_for_category(s, link)
            list_courses(s, courses)

        elif params['action'] == 'list_course_chapters':
            list_course_chapters(s, params['courseId'])

        elif params['action'] == 'list_course_chapter_videos':
            list_course_chapter_videos(s, params['courseId'], params['chapterIndex'])

        elif params['action'] == 'play':
            # Play a video from a provided URL.
            play_video(s, params['videoId'])
    else:
        ga_track('list_root_options')
        s = auth.initSession()
        name = None

        DEBUG = xbmcplugin.getSetting(__handle__, "debug")
        DEBUG = DEBUG == 'true'
        print("DEBUG VAR: ", DEBUG)

        auth_type = xbmcplugin.getSetting(__handle__, "auth_type")
        if auth_type == "Organisation":
            username = xbmcplugin.getSetting(__handle__, "username")
            password = xbmcplugin.getSetting(__handle__, "password")
            org_url = xbmcplugin.getSetting(__handle__, "org_url")

            ret = auth.org_login(s,
                                 username=username,
                                 password=password,
                                 orgURL=org_url,
                                 LDEBUG=DEBUG)

            if ret != False:
                name = ret
            else:
                xbmcgui.Dialog().ok(addonname,
                                    "Could not login.",
                                    "Please check your credentials are correct.")
        elif auth_type == "Library":
            libraryCardNum = xbmcplugin.getSetting(__handle__, "libraryCardNum")
            libraryCardPin = xbmcplugin.getSetting(__handle__, "libraryCardPin")
            org_url = xbmcplugin.getSetting(__handle__, "org_url")

            ret = auth.library_login(s,
                                     libCardNum=libraryCardNum,
                                     libCardPin=libraryCardPin,
                                     orgDomain=org_url,
                                     LDEBUG=DEBUG)

            if ret != False:
                name = ret
            else:
                xbmcgui.Dialog().ok(addonname,
                                    "Could not login.",
                                    "Please check your credentials are correct.")

        if not util.save_data(addon, "lynda_session", s):
            xbmcgui.Dialog().ok(addonname, "Could not save requests session")

        list_root_options(name)
Example 42
def kNN_static(A, s, K, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulates the static K-Nearest Neighbors Model.

    In this model, each node chooses his K-Nearest Neighbors during the
    averaging of his opinion.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        K (int): The number of the nearest neighbors to listen to

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN vector of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    # All nodes must listen to themselves for the averaging to work
    A_model = A + np.eye(N)

    # The matrix contains 0/1 values
    A_model = A_model.astype(np.int8)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        for i in range(N):
            # Find neighbors in the underlying social network
            neighbor_i = A_model[i, :] > 0
            # Sort the nodes by opinion distance
            sorted_dist = np.argsort(abs(z_prev - z_prev[i]))
            # Change the order of the logical neighbor_i array
            neighbor_i = neighbor_i[sorted_dist]
            # Keep only sorted neighbors
            friends_i = sorted_dist[neighbor_i]
            # In case that we have less than K friends numpy
            # will return the whole array (< K elements)
            k_nearest = friends_i[0:K]
            z[i] = np.mean(z_prev[k_nearest])
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('K-Nearest Neighbors (static) converged after {t} '
                  'rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'kNNs' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s, K=K,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
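
The trickiest lines above are the argsort-then-mask steps that pick, for node i, the K neighbors whose opinions are closest to its own. A standalone toy run of just those steps, with invented values:

import numpy as np

z = np.array([0.1, 0.2, 0.9, 0.4, 0.5])
neighbors = np.array([1, 1, 1, 0, 1], dtype=bool)  # neighbors of node i = 0 (incl. itself)
K = 2

order = np.argsort(np.abs(z - z[0]))  # all nodes sorted by opinion distance from node 0
friends = order[neighbors[order]]     # keep only actual neighbors, in that order
print(np.mean(z[friends[:K]]))        # average of the K closest neighbor opinions -> 0.15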
Example 43
def hk_local_nomem(A, s, op_eps, max_rounds, eps=1e-6, conv_stop=True,
                   save=False):
    '''Simulates the model of Hegselmann-Krause with an Adjacency Matrix

    Contrary to the standard Hegselmann-Krause Model, here we make use of
    an adjacency matrix that represents an underlying social structure
    independent of the opinions held by the members of the society. This
    variant does not store the intermediate opinions and as a result uses
    much less memory.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        op_eps: ε parameter of the model

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        t, z where t is the convergence time and z the vector of the
        final opinions.

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    # All nodes must listen to themselves for the averaging to work
    A_model = A + np.eye(N)

    z_prev = z.copy()

    for t in trange(1, max_rounds):
        for i in range(N):
            # Neighbors in the underlying social network
            neighbor_i = A_model[i, :] > 0
            opinion_close = np.abs(z_prev - z_prev[i]) <= op_eps
            # The node listens to those who share a connection with him
            # in the underlying network and also have an opinion
            # which is close to his own
            friends_i = np.logical_and(neighbor_i, opinion_close)
            z[i] = np.mean(z_prev[friends_i])
        if conv_stop and \
           norm(z - z_prev, np.inf) < eps:
            print('Hegselmann-Krause (Local Knowledge) converged after {t} '
                  'rounds'.format(t=t))
            break
        z_prev = z.copy()

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'hkloc' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                  rounds_run=t+1, A=A, s=s, op_eps=op_eps, opinions=z)

    return t, z
Example 44
 def OnCheckBox(self, event, attr):
     shared.options[attr] = event.IsChecked()
     save_data(shared.options)
Example 45
def kNN_dynamic_nomem(A, s, K, max_rounds, eps=1e-6, conv_stop=True,
                      save=False):
    '''Simulates the dynamic K-Nearest Neighbors Model. Reduced Memory.

    In this model, each node chooses his K-Nearest Neighbors during the
    averaging of his opinion. Opinions over time are not saved.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        K (int): The number of the nearest neighbors to listen to

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        t, z, Q where t is the convergence time, z the vector of the
        final opinions and Q the final adjacency matrix of the network

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    # All nodes must listen to themselves for the averaging to work
    A_model = A + np.eye(N)

    # The matrix contains 0/1 values
    A_model = A_model.astype(np.int8)

    z_prev = z.copy()

    for t in trange(1, max_rounds):
        Q = np.zeros((N, N))
        # TODO: Verify that this contains the original paths of A
        A_squared = A_model.dot(A_model)
        for i in range(N):
            # Find 2-neighbors in the underlying social network
            neighbor2_i = A_squared[i, :] > 0
            # Sort the nodes by opinion distance
            sorted_dist = np.argsort(abs(z_prev - z_prev[i]))
            # Change the order of the logical neighbor2_i array
            neighbor2_i = neighbor2_i[sorted_dist]
            # Keep only sorted neighbors
            friends_i = sorted_dist[neighbor2_i]
            # In case that we have less than K friends numpy
            # will return the whole array (< K elements)
            k_nearest = friends_i[0:K]
            Q[i, k_nearest] = 1/k_nearest.size
            z[i] = np.mean(z_prev[k_nearest])
        A_model = Q.copy()
        if conv_stop and \
           norm(z - z_prev, np.inf) < eps:
            print('K-Nearest Neighbors (dynamic) converged after {t} '
                  'rounds'.format(t=t))
            break
        z_prev = z.copy()

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'kNNd' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s, K=K, opinions=z, Q=Q)

    return t, z, Q
Example 46
 def OnTextCtrl(self, e, attr, ctrl):
     self.GetParent().SetFocus()
     shared.options[attr] = ctrl.GetValue()
     save_data(shared.options)
Example 47
 def OnEvent(self, event, attr, func):
     if self.format not in shared.format_dict:
         shared.format_dict[self.format] = {}
     shared.format_dict[self.format][attr] = func()
     save_data(shared.format_dict, file=self.savefile)
Example 48
 def OnCheckBox(self, event, attr, ctrl, related=[]):
     checked = ctrl.IsChecked()
     shared.options[attr] = checked
     for item in related:
         item.Enable(checked)
     save_data(shared.options)
Example 49
 def OnChoice(self, event, attr, edict=None):
     shared.options[attr] = edict[event.GetString()]
     save_data(shared.options)
Example 50
def meetFriend_nomem(A, s, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulates the random meeting model.

    Runs a maximum of max_rounds rounds of the "Meeting a Friend" model. If the
    model converges sooner, the function returns. The stubbornness matrix of
    the model is extracted from the diagonal of matrix A. This function does
    not save the opinions over time and cannot generate a plot. However, it uses
    very little memory and is useful for determining the final opinions and
    the convergence time of the model.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        t, z where t is the convergence time and z the vector of the
        final opinions.

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    nonzero_ids = [np.nonzero(A[i, :])[0] for i in xrange(A.shape[0])]

    z_prev = z.copy()

    if np.size(np.nonzero(A.sum(axis=1))) != N:
        raise ValueError("Matrix A has one or more zero rows")

    for t in trange(1, max_rounds):
        # Update the opinion for each node
        for i in range(N):
            r_i = rchoice(A[i, :], nonzero_ids[i])
            if r_i == i:
                op = s[i]
            else:
                op = z_prev[r_i]
            z[i] = (op + t*z_prev[i]) / (t+1)
        if conv_stop and \
           norm(z - z_prev, np.inf) < eps:
            print('Meet a Friend converged after {t} rounds'.format(t=t))
            break
        z_prev = z.copy()

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'mf' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s, opinions=z)

    return t, z
Example 51
def meetFriend(A, s, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulates the random meeting model.

    Runs a maximum of max_rounds rounds of the "Meeting a Friend" model. If the
    model converges sooner, the function returns. The stubbornness matrix of
    the model is extracted from the diagonal of matrix A.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN vector of the opinions of the nodes over time

    '''

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    nonzero_ids = [np.nonzero(A[i, :])[0] for i in xrange(A.shape[0])]

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    # Cannot allow zero rows because rchoice() will fail
    if np.size(np.nonzero(A.sum(axis=1))) != N:
        raise ValueError("Matrix A has one or more zero rows")

    for t in trange(1, max_rounds):
        # Update the opinion for each node
        for i in range(N):
            r_i = rchoice(A[i, :], nonzero_ids[i])
            if r_i == i:
                op = s[i]
            else:
                op = z_prev[r_i]
            z[i] = (op + t*z_prev[i]) / (t+1)
        z_prev = z.copy()
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Meet a Friend converged after {t} rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'mf' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s,
                      opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
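
The per-node step above draws one neighbor from the weights in row i of A (rchoice) and folds the sampled opinion into a running average, z_i(t+1) = (op + t*z_i(t)) / (t+1). A toy, self-contained version of a single such step, with numpy's weighted choice standing in for rchoice and invented values:

import numpy as np

rng = np.random.default_rng(0)

A_row = np.array([0.2, 0.5, 0.0, 0.3])  # weights of node i = 0; A[i, i] is the self/stubbornness weight
z_prev = np.array([0.1, 0.8, 0.3, 0.6])
s_i, i, t = 0.1, 0, 3

r = rng.choice(len(A_row), p=A_row / A_row.sum())
op = s_i if r == i else z_prev[r]       # meeting yourself means falling back to your intrinsic belief
z_i = (op + t * z_prev[i]) / (t + 1)
print(r, z_i)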
Example 52
def reducer(a, b):
    highlighted_image, detected = detect_motion(a, b)
    if detected:
        save_data(highlighted_image)
    return b
Example 53
def ga(A, B, s, max_rounds, eps=1e-6, conv_stop=True, save=False, **kwargs):
    '''Simulates the Generalized Asymmetric Coevolutionary Game.

    In this model the weights between the nodes are recalculated in every
    round, depending on the proximity of their opinions.

    Args:
        A (NxN numpy array): Adjacency matrix (its diagonal is the stubbornness)

        B (NxN numpy array): The stubbornness of each node

        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

        **kwargs: Arguments c, eps, and p for the dynamic_weights function (eps
        and p need to be specified only if c='pow') (default: c='linear')

    Returns:
        A txN vector of the opinions of the nodes over time

    '''

    # Check if c function was specified
    if kwargs:
        c = kwargs['c']
        # Extra parameters for pow function
        eps_c = kwargs.get('eps', 0.1)
        p_c = kwargs.get('p', 2)
    else:
        # Otherwise use linear as default
        c = 'linear'
        eps_c = None
        p_c = None

    N, z, max_rounds = preprocessArgs(s, max_rounds)

    # The matrix contains 0/1 values
    A_model = A.astype(np.int8)

    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        Q = dynamic_weights(A_model, s, z, c, eps_c, p_c) + B
        Q = row_stochastic(Q)
        B_temp = np.diag(np.diag(Q))
        Q = Q - B_temp
        z = Q.dot(z) + B_temp.dot(s)
        opinions[t, :] = z
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('G-A converged after {t} rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'ga' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                      rounds_run=t+1, A=A, s=s, B=B, c=c, eps_c=eps_c,
                      p_c=p_c, opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]
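
row_stochastic() is not shown in this listing, but the step it is assumed to perform is simply rescaling every row of Q so it sums to 1 before the stubbornness diagonal is split off again. A one-line sketch with a toy matrix:

import numpy as np

Q = np.array([[2.0, 1.0, 1.0],
              [0.5, 0.5, 0.0],
              [1.0, 3.0, 0.0]])
Q = Q / Q.sum(axis=1, keepdims=True)  # each row now sums to 1
print(Q.sum(axis=1))                  # -> [1. 1. 1.]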
Example 54
def hk_rand(s, K, op_eps, max_rounds, eps=1e-6, conv_stop=True, save=False):
    '''Simulate the model of Hegselmann-Krause with random sampling.

    In each round every node chooses K other nodes uniformly at random and
    updates his opinion to be the average of the opinions of those K nodes
    that have an opinion distance at most equal to op_eps.

    Args:
        s (1xN numpy array): Initial opinions (intrinsic beliefs) vector

        K (int): The number of nodes which will be randomly chosen in each
        round.

        op_eps: ε parameter of the model

        max_rounds (int): Maximum number of rounds to simulate

        eps (double): Maximum difference between rounds before we assume that
        the model has converged (default: 1e-6)

        conv_stop (bool): Stop the simulation if the model has converged
        (default: True)

        save (bool): Save the simulation data into text files

    Returns:
        A txN vector of the opinions of the nodes over time

    '''
    N, z, max_rounds = preprocessArgs(s, max_rounds)

    z_prev = z.copy()
    opinions = np.zeros((max_rounds, N))
    opinions[0, :] = s

    for t in trange(1, max_rounds):
        for i in range(N):
            # Choose K random nodes as temporary "neighbors"
            rand_sample = np.array(stdrand.sample(xrange(N), K))
            neighbors_i = np.zeros(N, dtype=bool)
            neighbors_i[rand_sample] = 1
            # Always choose yourself
            neighbors_i[i] = 1
            # The node chooses only those with a close enough opinion
            friends_i = np.abs(z_prev - z_prev[i]) <= op_eps
            friends_i = np.logical_and(neighbors_i, friends_i)
            z[i] = np.mean(z_prev[friends_i])
        opinions[t, :] = z
        z_prev = z.copy()
        if conv_stop and \
           norm(opinions[t - 1, :] - opinions[t, :], np.inf) < eps:
            print('Hegselmann-Krause (random) converged after {t}'
                  ' rounds'.format(t=t))
            break

    if save:
        timeStr = datetime.now().strftime("%m%d%H%M")
        simid = 'hk' + timeStr
        save_data(simid, N=N, max_rounds=max_rounds, eps=eps,
                  rounds_run=t+1, s=s, op_eps=op_eps,
                  opinions=opinions[0:t+1, :])

    return opinions[0:t+1, :]