コード例 #1
0
 def __init__(self,
              populationSize,
              vaccPercentage,
              virus,
              initialInfected=1):
     """Set up the simulation state and seed the initial population.

     Args:
         populationSize: total number of people in the simulation.
         vaccPercentage: fraction of the population that starts vaccinated.
         virus: the virus object driving the outbreak (its ``name`` is
             used as the graph title).
         initialInfected: how many people start out infected (default 1).
     """
     self.population = []
     self.logger = Logger('log.txt')
     # NOTE(review): keeps the original misspelling "itterations" because
     # other code outside this view may read the attribute by that name.
     self.itterations = 0
     # Fix: this was assigned twice in the original (duplicate removed).
     self.currentlyAlive = populationSize
     self.virus = virus
     self.vacc_percentage = vaccPercentage
     self.currentlyInfected = 0
     self.currentlyVaccinated = 0
     self.popSize = populationSize
     self.graph = Grapher(['Alive', 'Vaccinated', 'Infected', 'Dead'],
                          virus.name)
     self.initializePopulation(initialInfected)
コード例 #2
0
def main():
    """Main pygame loop and instantiations for the spread simulation."""

    dims = (800, 400)
    screen = pygame.display.set_mode(dims)
    pygame.display.set_caption("Covid spread simulation")

    sim = particle.Simulation(dims)
    graph = Grapher()

    sim.add_particle(n=150, speed=6, freezed=93, killer=4)
    sim.add_wall(add=False)

    clock = pygame.time.Clock()
    elapsed = 0
    history = []

    running = True
    while running:
        # Exit the loop when the window is closed.
        for ev in pygame.event.get():
            if ev.type == pygame.QUIT:
                running = False

        # Frame time in seconds since the previous tick.
        dt = clock.tick() / 1000

        sim.update(dt)
        screen.fill(sim.colour)

        # Render every particle as a circle.
        for p in sim.particles:
            centre = (int(p.x), int(p.y))
            pygame.draw.circle(screen, p.colour, centre, p.size, p.thickness)

        # Render the wall, if the simulation has one.
        if isinstance(sim.wall, particle.Wall):
            rect = (sim.wall.x, sim.wall.y, sim.wall.width, sim.wall.height)
            pygame.draw.rect(screen, sim.wall.colour, rect,
                             sim.wall.thickness)
        pygame.display.flip()
        elapsed += dt
        history.append(elapsed)
コード例 #3
0
# Quick sanity check on the loaded data set.
logging.debug('tail data set')
logging.debug(training_data.tail(2))

#####
# Calculate basic stats
#####
# Count labels with elementwise comparisons; == True/False is deliberate
# here so non-bool dtypes (e.g. object arrays) still produce a mask.
sentiments = np.array(training_data['sentiments'])
count_negative_reviews = (sentiments == False).sum()  # noqa: E712
count_positive_reviews = (sentiments == True).sum()  # noqa: E712
# Lazy %-args: the message is only formatted if INFO is enabled.
logging.info('training set:  neg reviews %s and pos reviews %s',
             count_negative_reviews, count_positive_reviews)

Grapher().show_bar(["Negative", "Positive"],
                   [count_negative_reviews, count_positive_reviews],
                   xaxislabel='Sentiments',
                   yaxislabel='Count of reviews',
                   title='Movie review sentiments')

# In interactive mode, pause until the user presses ENTER.
if args.interactive:
    input()

# Character length of each review text, kept as a new column.
training_data['review_length'] = training_data['data'].str.len()

# Per-class views used for later analysis.
positive_reviews_df = training_data.loc[lambda df: df.sentiments == True]  # noqa: E712
logging.debug('positive review df')
logging.debug(positive_reviews_df.head())

negative_reviews_df = training_data.loc[lambda df: df.sentiments == False]  # noqa: E712
logging.debug('negative review df')
logging.debug(negative_reviews_df.head())
コード例 #4
0
ファイル: nprnn.py プロジェクト: Nintorac/Numpy-RNNs
		ix = result
	return ''.join(output).rstrip()
	
# initial states 
# h_prev: one (BATCH_SIZE, HIDDEN_LAYER_SIZE) hidden-state matrix per
# stacked layer; p is the current read offset into the training text.
# NOTE(review): xrange implies this is Python 2 code.
h_prev = np.array([np.zeros((BATCH_SIZE, HIDDEN_LAYER_SIZE)) for d in xrange(DEPTH)])
p = 0

# momentum for Adagrad
# Per-parameter Adagrad accumulators, seeded at 0.1 (presumably to avoid
# a divide-by-zero in the first update — confirm against the update step,
# which is outside this view). The first layer's Wxh takes the one-hot
# vocabulary input; deeper layers take the previous layer's hidden state.
mWxh = [np.zeros((VOCABULARY_SIZE if d == 0 else HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE)) + 0.1 for d in xrange(DEPTH)]
mWhh = [np.zeros((HIDDEN_LAYER_SIZE, HIDDEN_LAYER_SIZE)) + 0.1 for d in xrange(DEPTH)]
mbh = [np.zeros((1, HIDDEN_LAYER_SIZE)) + 0.1 for d in xrange(DEPTH)]
mWhy = np.zeros((HIDDEN_LAYER_SIZE, VOCABULARY_SIZE)) + 0.1
mby = np.zeros((1, VOCABULARY_SIZE)) + 0.1

# Loss history keyed by iteration, plus a live training-loss plot.
losses = {}
graph = Grapher('Train Loss')
# training loop
for iteration in xrange(MAX_ITERATIONS):
	# get inputs for current iteration
	if p == 0:
		h_prev = np.array([np.zeros((BATCH_SIZE, HIDDEN_LAYER_SIZE)) for d in xrange(DEPTH)])
	targets, _ = util.batch_input_seq(input, p + 1, SEQ_SIZE, BATCH_SIZE, char_to_index)
	t, _ = util.batch_input_seq(input, p, SEQ_SIZE, BATCH_SIZE, char_to_index)
	inputs, p = util.batch_input_one_hot_seq(input, p, SEQ_SIZE, BATCH_SIZE, VOCABULARY_SIZE, char_to_index)
	

	loss, dWxh, dWhh, dbh, dWhy, dby, h_prev = forward_backward(inputs, targets, h_prev)

	# update model parameters
	all_d = dWxh + dWhh + dbh + [dWhy, dby]
	all_m = mWxh + mWhh + mbh + [mWhy, mby]
コード例 #5
0
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from graph import Grapher
from math import pi

# Instantiate the plotting helper and render every configured graph.
grapher = Grapher()
grapher.graph_all()
コード例 #6
0
ファイル: main.py プロジェクト: Synirrr/SigmaDraconisMap
def main():
    """Fetch solar-body records from a Google Sheet and render them as an
    interactive 3-D plotly star map (shown and saved to Solar_system.html)."""
    input("Welcome to SigmaDraconis Map! Press ENTER to start!")
    gr = Grapher()
    ########################## Setup google credentials and API access ##########################################
    scope = [
        'https://spreadsheets.google.com/feeds',
        'https://www.googleapis.com/auth/drive'
    ]

    credentials = ServiceAccountCredentials.from_json_keyfile_name(
        'src/main/starmap-310623-baf93c6b9991.json', scope)

    gc = gspread.authorize(credentials)
    #############################################################################################################

    # Get records from spread sheet
    wks = gc.open('CSV-SDW-GPS').sheet1

    records_dict = wks.get_all_records()
    objects = []
    annots = []
    for record in records_dict:
        # Make solar bodies
        # Only rows flagged Visible == 'Y' are drawn; ShowName == 'Y'
        # additionally places a text label just above the sphere
        # (Z offset of twice the body's size).
        if record['Visible'] == 'Y':
            objects.append(
                gr.spheres(record['Size'], record['Colour'], record['X'],
                           record['Y'], record['Z'], record['Name']))
            if record["ShowName"] == 'Y':
                annots.append(
                    gr.annot(record['X'], record['Y'],
                             record['Z'] + record['Size'] * 2, record['Name']))

    # Dark-themed scene; all three axes share the same fixed range and a
    # 1:1:1 aspect ratio so distances are not visually distorted.
    layout = go.Layout(
        title='Solar System',
        showlegend=False,
        paper_bgcolor='grey',
        scene=dict(
            xaxis=dict(title='X',
                       titlefont_color='black',
                       range=[-10000000, 10000000],
                       backgroundcolor='black',
                       color='black',
                       gridcolor='black'),
            yaxis=dict(title='Y',
                       titlefont_color='black',
                       range=[-10000000, 10000000],
                       backgroundcolor='black',
                       color='black',
                       gridcolor='black'),
            zaxis=dict(title='Z',
                       titlefont_color='black',
                       range=[-10000000, 10000000],
                       backgroundcolor='black',
                       color='white',
                       gridcolor='black'),
            aspectmode=
            'manual',  #this string can be 'data', 'cube', 'auto', 'manual'
            #a custom aspectratio is defined as follows:
            aspectratio=dict(x=1, y=1, z=1),
            annotations=annots))

    # Add every sphere trace, then show interactively and save to disk.
    fig = go.Figure(layout=layout)
    for item in objects:
        fig.add_trace(item)

    fig.show()
    fig.write_html("Solar_system.html")
コード例 #7
0
def get_data():
    """
    Build one big batch of unconnected receipt graphs, save it, and return it.

    Each receipt contributes an individual graph with:
    - x (Tensor) – node feature matrix [num_nodes, num_node_features]
    - y (Tensor) – integer node labels (1..6, where 6 = 'undefined')
    - text – the raw text of each node
    - train/val/test boolean masks (500 / 63 / 63 receipt split)

    Returns:
        The batched ``torch_geometric`` data object. It is also saved to
        '../../data/processed/data_withtexts.dataset'.
    """
    path = "../../data/raw/box/"
    stems = sorted(name.split('.')[0] for name in os.listdir(path))
    # Drops the first sorted entry — presumably the empty stem produced by
    # a hidden file such as '.DS_Store'; TODO confirm against the data dir.
    all_files = stems[1:]

    list_of_graphs = []
    # Shuffle a copy to create the train/test/val split.
    files = all_files.copy()
    random.shuffle(files)
    # Resulting in 500 receipts for training, 63 receipts for validation,
    # and 63 for testing.
    training, testing, validating = files[:500], files[500:563], files[563:]

    # Mapping from the textual label to its numeric class id.
    label_map = {
        'company': 1,
        'address': 2,
        'invoice': 3,
        'date': 4,
        'total': 5,
        'undefined': 6,
    }

    for file in all_files:

        connect = Grapher(file)
        G, _, _ = connect.graph_formation()
        df = connect.relative_distance()
        individual_data = from_networkx(G)

        feature_cols = ['rd_b', 'rd_r', 'rd_t', 'rd_l', 'line_number',
                        'n_upper', 'n_alpha', 'n_spaces', 'n_numeric',
                        'n_special']

        features = torch.tensor(df[feature_cols].values.astype(np.float32))

        # Strip whitespace on string columns; numeric columns raise
        # AttributeError and are left untouched.
        for col in df.columns:
            try:
                df[col] = df[col].str.strip()
            except AttributeError:
                pass

        # Unlabelled nodes default to 'undefined'; anything outside
        # label_map becomes NaN and is caught by the assert below.
        df['labels'] = df['labels'].fillna('undefined')
        df['num_labels'] = df['labels'].map(label_map)

        assert not df['num_labels'].isnull().values.any(), \
            f'labeling error! Invalid label(s) present in {file}.csv'
        # np.int was removed in NumPy 1.24; int64 keeps the previous dtype.
        labels = torch.tensor(df['num_labels'].values.astype(np.int64))
        text = df['Object'].values

        individual_data.x = features
        individual_data.y = labels
        individual_data.text = text

        # Create masks: each receipt belongs to exactly one split, so its
        # nodes get uniform boolean masks.
        n_nodes = df.shape[0]
        in_train = file in training
        in_val = file in validating
        individual_data.train_mask = torch.tensor([in_train] * n_nodes)
        individual_data.val_mask = torch.tensor([in_val] * n_nodes)
        individual_data.test_mask = torch.tensor(
            [not in_train and not in_val] * n_nodes)

        print(f'{file} ---> Success')
        list_of_graphs.append(individual_data)

    data = torch_geometric.data.Batch.from_data_list(list_of_graphs)
    data.edge_attr = None

    save_path = "../../data/processed/"
    torch.save(data, save_path + 'data_withtexts.dataset')
    print('Data is saved!')
    # The docstring promises the batched graph; return it so callers do
    # not have to reload it from disk (previously this returned None).
    return data