Example #2
def map_data(filename, rows, cols):
    # Read rows*cols whitespace-separated integers from filename into a 2D list.
    instream = InStream(filename)
    data = []
    for n in range(rows):
        data.append([])
        for m in range(cols):
            data[n].append(instream.readInt())
    return data
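A quick usage sketch (the file name and contents are hypothetical; readInt is assumed to read whitespace-separated integers, as in the other examples here):

# Suppose 'grid.txt' (hypothetical) contains: 1 2 3 4 5 6
data = map_data('grid.txt', 2, 3)
print(data)  # [[1, 2, 3], [4, 5, 6]]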
Example #3
 def __init__(self, filename=None, delimiter=None):
     self._e = 0
     self._adj = dict()
     if filename is not None:
         instream = InStream(filename)
         while instream.hasNextLine():
             line = instream.readLine()
             names = line.split(delimiter)
             for i in range(1, len(names)):
                 self.addEdge(names[0], names[i])
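The constructor above leans on addEdge; a minimal sketch of how it might look, assuming an undirected graph stored as a dict mapping each vertex to a set of neighbors (the real class may differ):

 def addEdge(self, v, w):
     # Create adjacency sets on first sight of either vertex.
     if v not in self._adj:
         self._adj[v] = set()
     if w not in self._adj:
         self._adj[w] = set()
     # Count the edge once, then link both directions (undirected).
     if w not in self._adj[v]:
         self._e += 1
     self._adj[v].add(w)
     self._adj[w].add(v)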
Example #4
 def __init__(self, filename=None, delimiter=None):
     self._e = 0
     self._adj = dict()
     if filename is not None:
         instream = InStream(filename)
         while instream.hasNextLine():
             line = instream.readLine()
             names = line.split(delimiter)
             for i in range(1, len(names)):
                 self.addEdge(names[0], names[i])
Example #5
def main():
    instream = InStream('misspellings.txt')
    lines = instream.readAllLines()
    misspellings = SymbolTable()
    for line in lines:
        tokens = line.split(' ')
        misspellings[tokens[0]] = tokens[1]
    while not stdio.isEmpty():
        word = stdio.readString()
        if word in misspellings:
            stdio.write(word + '-->' + misspellings[word])
Example #6
 def __init__(self, fileName):
     inStream = InStream(fileName)
     self._name = inStream.readLine()      # Customer name
     self._cash = inStream.readFloat()     # Cash balance
     self._stockCount = inStream.readInt() # Number of stocks
     # Stock symbols
     self._stocks = stdarray.create1D(self._stockCount, 0)
     # Share counts
     self._shares = stdarray.create1D(self._stockCount, 0)
     for i in range(self._stockCount):
         self._shares[i] = inStream.readInt()
         self._stocks[i] = inStream.readString()
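The read calls imply a simple token layout: a name line, a cash balance, a stock count, then one "shares symbol" pair per stock. A hypothetical portfolio file consistent with them:

# more portfolio.txt            (hypothetical)
# Alice Doe
# 1000.50
# 2
# 150 ADBE
# 60 GOOG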
Example #7
 def __init__(self, filename):
     instream = InStream(filename)
     n = instream.readInt()
     r = instream.readFloat()
     stddraw.setXscale(-r, r)
     stddraw.setYscale(-r, r)
     self._bodies = []
     for i in range(n):
         rx = instream.readFloat()
         ry = instream.readFloat()
         vx = instream.readFloat()
         vy = instream.readFloat()
         mass = instream.readFloat()
         name = instream.readString()
         r = Vector([rx, ry])
         v = Vector([vx, vy])
         b = Body(r, v, mass, name)
         print('Body created!')
         self._bodies.append(b)
Example #8
 def __init__(self, filename):
     instream = InStream(filename)
     n = instream.readInt()
     radius = instream.readFloat()
     stddraw.setXscale(-radius, +radius)
     stddraw.setYscale(-radius, +radius)
     self._bodies = stdarray.create1D(n)
     for i in range(n):
         rx = instream.readFloat()
         ry = instream.readFloat()
         vx = instream.readFloat()
         vy = instream.readFloat()
         mass = instream.readFloat()
         r = Vector([rx, ry])
         v = Vector([vx, vy])
         self._bodies[i] = Body(r, v, mass)
Example #9
 def __init__(self, filename):
     instream = InStream(filename)
     n = instream.readInt()
     radius = instream.readFloat()
     stddraw.setXscale(-radius, +radius)
     stddraw.setYscale(-radius, +radius)
     self._bodies = stdarray.create1D(n)
     for i in range(n):
         rx   = instream.readFloat()
         ry   = instream.readFloat()
         vx   = instream.readFloat()
         vy   = instream.readFloat()
         mass = instream.readFloat()
         r = Vector([rx, ry])
         v = Vector([vx, vy])
         self._bodies[i] = Body(r, v, mass)
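Examples #7-#9 all parse the same layout: a body count, a display radius, then one line of position, velocity, and mass per body (Example #7 additionally reads a name). A hypothetical two-body input file:

# more universe2.txt            (hypothetical)
# 2
# 5.0e10
#  0.0e00  4.5e10  1.0e04  0.0e00  1.5e30
#  0.0e00 -4.5e10 -1.0e04  0.0e00  1.5e30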
Example #10
import sys
import stdarray
from instream import InStream
from outstream import OutStream

#-----------------------------------------------------------------------

# Accept string fileName and integer fieldCount as command-line
# arguments. Split the file whose name is fileName.csv, by field,
# into fieldCount files named fileName0.txt, fileName1.txt, etc.

DELIM = ','

fileName = sys.argv[1]
fieldCount = int(sys.argv[2])

# Create the input stream.
inStream = InStream(fileName + '.csv')

# Create output streams.
outStreams = stdarray.create1D(fieldCount)
for i in range(fieldCount):
    outStreams[i] = OutStream(fileName + str(i) + '.txt')

# Read lines from the input stream and write them to the
# output stream.
while inStream.hasNextLine():
    line = inStream.readLine()
    fields = line.split(DELIM)
    for i in range(fieldCount):
        outStreams[i].writeln(fields[i])
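A hypothetical run, following the usage-comment convention of the cat.py example later in this list (file name and contents are illustrative):

# more data.csv
# Alice,42,red
# Bob,17,blue

# python split.py data 3

# more data0.txt
# Alice
# Bob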
Example #11
import sys
import smallworld
import stdio
from graph import Graph
from instream import InStream

# Accept the name of a movie-cast file and a delimiter as command-line
# arguments and create the associated performer-performer graph. Write
# to standard output the number of vertices, the average degree,
# the average path length, and the clustering coefficient of the graph.
# Assume that the performer-performer graph is connected so that the
# average path length is defined.

file = sys.argv[1]
delimiter = sys.argv[2]

graph = Graph()
instream = InStream(file)
while instream.hasNextLine():
    line = instream.readLine()
    names = line.split(delimiter)
    for i in range(1, len(names)):
        for j in range(i + 1, len(names)):
            graph.addEdge(names[i], names[j])

degree = smallworld.averageDegree(graph)
length = smallworld.averagePathLength(graph)
cluster = smallworld.clusteringCoefficient(graph)

stdio.writef('number of vertices     = %d\n', graph.countV())
stdio.writef('average degree         = %7.3f\n', degree)
stdio.writef('average path length    = %7.3f\n', length)
stdio.writef('clustering coefficient = %7.3f\n', cluster)
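A hypothetical invocation, assuming the script is saved as performer.py and movies.txt lists one movie per line followed by its performers, separated by '/'; the four writef lines then report the graph statistics:

# python performer.py movies.txt "/"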
Example #12
import sys
import stdio
from instream import InStream
from hashst import SymbolTable

#-----------------------------------------------------------------------

# Accept string inStream, integer keyField, and integer valField as
# command-line arguments. inStream is the name of a file. Each line
# of the file contains fields separated by commas. keyField identifies
# which field of each line is a key, and valField identifies which
# field of each line is a value. Use the keys and values to create a
# symbol table. Then read a key from standard input, search
# the symbol table for the key, and write to standard output the
# key's value or 'not found' as appropriate. Repeat until end-of-file
# of standard input.

inStream = InStream(sys.argv[1])
keyField = int(sys.argv[2])
valField = int(sys.argv[3])

# Build a database from inStream.
database = inStream.readAllLines()

# Extract keys and values from the database and add them to st.
st = SymbolTable()
for line in database:
    tokens = line.split(',')
    key = tokens[keyField]
    val = tokens[valField]
    st[key] = val

# Read keys, search st, and write values (this loop is reconstructed from
# the behavior described in the header comment above).
while not stdio.isEmpty():
    key = stdio.readString()
    if key in st:
        stdio.writeln(st[key])
    else:
        stdio.writeln('not found')
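A hypothetical session (file name, field indices, and input are illustrative):

# more pets.csv              (hypothetical)
# dog,Hund
# cat,Katze

# python lookup.py pets.csv 0 1
# dog
# Hund
# bird
# not found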
Example #13
def _readHTML(stockSymbol):
    WEBSITE = 'https://finance.yahoo.com/quote/'
    page = InStream(WEBSITE + stockSymbol)
    html = page.readAll()
    return html
Example #14
#-----------------------------------------------------------------------
# cat.py
#-----------------------------------------------------------------------

import sys
from instream import InStream
from outstream import OutStream

# Copy files or web pages whose names are given by sys.argv[1:n-2]
# to the file whose name is given by sys.argv[n-1].

inFilenames = sys.argv[1:len(sys.argv) - 1]
outFilename = sys.argv[len(sys.argv) - 1]
outstream = OutStream(outFilename)
for filename in inFilenames:
    instream = InStream(filename)
    s = instream.readAll()
    outstream.write(s)

#-----------------------------------------------------------------------

# more in1.txt
# This is

# more in2.txt
# a tiny
# test.

# python cat.py in1.txt in2.txt out.txt

# more out.txt
Example #15
snow = [
    W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W,
    W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W, W,
    W, W, W, W, W, W, W, W, W, W, W, W, W, W
]

useSensor = True

while True:
    events = sense.stick.get_events()
    for event in events:
        if event.action == 'pressed' and event.direction != 'middle':
            if useSensor:
                print('Using data from openweathermap.org')
                stream = InStream(
                    'http://api.openweathermap.org/data/2.5/weather?q=Jena&units=metric&lang=de&APPID=7fa8d8270c50a5274bc055415e448b35'
                )
                weather = json.loads(stream.readAll())['main']
            else:
                print('Using data from Raspberry Pi sensors')
            useSensor = not useSensor
    if useSensor:
        if sense.humidity > 80 and sense.temp > 20:
            sense.set_pixels(rainbow)
        elif sense.humidity <= 80 and sense.temp > 20:
            sense.set_pixels(sunny)
        elif sense.humidity > 80 and sense.temp < 0:
            sense.set_pixels(snow)
        else:
            sense.clear()
    else:
Example #17
def _readHTML(stockSymbol):
    WEBSITE = 'http://finance.yahoo.com/q?s='
    page = InStream(WEBSITE + stockSymbol)
    html = page.readAll()
    return html
Example #18
def _readHTML(city):
    WEBSITE = 'https://15tianqi.cn/'
    page = InStream(WEBSITE + city + '15tian')
    html = page.readAll()
    return html
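Both _readHTML variants return raw markup that still needs scraping. A minimal extraction sketch, assuming the caller supplies a site-specific marker string that immediately precedes the value of interest (the marker is hypothetical; real pages need site-specific anchors):

def _extract(html, marker):
    # Find the marker, then take the text up to the next tag.
    start = html.find(marker)
    if start == -1:
        return None
    start += len(marker)
    end = html.find('<', start)
    return html[start:end]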
Example #19
class Dispatcher(object):
    def __init__(self, sys_exec_profile, sys_comm_profile, sys_comm_req,
                 mapping, arrival_rate):
        """
        sys_exec_profile: dictionary {node number: {task: exec time}}.
        sys_comm_profile: 2D matrix, element [i][j] specifies the comm cost between node i and j.
        sys_comm_req: dictionary {task: dict({next task: comm_req})}
        mapping: dictionary {task: [list of node numbers]}
        arrival_rate: input jobs arrival rate
        """
        self.mapping = mapping
        self.sys_exec_profile = sys_exec_profile
        self.sys_comm_profile = sys_comm_profile
        self.sys_comm_req = sys_comm_req

        self.node_list = []  # NOTE: nodes are not ordered by their node_num
        self.init_nodes = []  # task nodes (numbers) with inputs
        self.end_nodes = []  # Nodes with final task

        # Construct and initialize nodes and instream destinations
        self.generate_nodes()

        util.echo("Node list indexes {}".format(
            [node.node_num for node in self.node_list]))
        util.echo("Initial task nodes: {}".format(self.init_nodes))
        util.echo("End task nodes: {}".format(
            [node.node_num for node in self.end_nodes]))

        self.instream = InStream(arrival_rate)  # Input job generator
        self.set_input_nodes()

    def set_input_nodes(self):
        # Set the destinations for the input stream => initial task nodes.
        # Nodes must be looked up by node_num, since node_list is not
        # ordered by it (see the NOTE in __init__).
        for node_idx_list in self.init_nodes:
            node_list = []
            for node_num in node_idx_list:
                assigned_node_num_l = np.array(
                    [n.node_num for n in self.node_list])
                node_idx = int(
                    np.where(assigned_node_num_l == node_num)[0])
                node_list.append(self.node_list[node_idx])
            self.instream.add_dests(node_list)

    def generate_nodes(self):
        """
        Generate parents dictionary of lists
        parents: {task: [list of parent tasks]}
        """
        self.parents = {}
        for task in self.mapping:
            self.parents[task] = []
            for pre_task in self.sys_comm_req:
                if self.sys_comm_req[pre_task] and task in self.sys_comm_req[pre_task]:
                    # print("{} is a pre-task of task {}".format(pre_task, task))
                    self.parents[task].append(pre_task)

            # Identify the tasks where an input queue is required
            if not self.parents[task]:
                self.parents[task].append("Input")
                self.init_nodes.append(self.mapping[task])

        ### e.g. parents = {A: [], B: [A], C:[A], D:[B,C]}
        # print("Parents dict:", self.parents)

        # Generate nodes and append in node list
        for node_num in self.sys_exec_profile.keys():
            for task in self.mapping:
                if node_num in self.mapping[task]:
                    # Node requires: (node_num, task, comm_req, exec_times,
                    #                 sys_comm_profile, parent_list)
                    self.node_list.append(
                        Node(node_num,
                             task,
                             self.sys_comm_req[task],
                             self.sys_exec_profile[node_num],
                             self.sys_comm_profile,
                             self.parents[task]))
                    break

        # Set destinations when all nodes are generated
        for node in self.node_list:
            for next_task in self.sys_comm_req[node.task]:
                task_nodes = []
                for node_num in self.mapping[next_task]:
                    assigned_node_num_l = np.array(
                        [n.node_num for n in self.node_list])
                    node_idx = int(
                        np.where(assigned_node_num_l == node_num)[0])
                    task_nodes.append(self.node_list[node_idx])
                node.add_dests(task_nodes)

            if not node.dest:
                # Destination list is empty => end node
                self.end_nodes.append(node)

        return

    def start_simulation(self, total_jobs):
        self.total_jobs = total_jobs
        util.echo("Starting simulation with {} jobs".format(self.total_jobs))

        for node in self.node_list:
            util.echo("Starting node {}".format(node.node_num))
            # Start each node on a separate thread
            t = threading.Thread(target=node.run)
            t.daemon = True
            t.start()

        # Start generating inputs
        # Generate twice as many inputs as jobs to avoid a corner case where
        # the end task runs short of one parent task.
        t = threading.Thread(target=self.instream.generate_inputs,
                             args=(self.total_jobs * 2, ))
        t.daemon = True
        t.start()

        while True:
            if self.check_end_status():  # Check if all jobs executed
                util.echo("=== Ending the simulation ===")
                return self.end_simulation()

    def end_simulation(self):
        # Turn off all the nodes
        for node in self.node_list:
            # This causes the function thread to die
            node.set_status(False)

        # Calculate the average times for each task
        self.avg_times = dict()
        for task in self.mapping:
            total_exec_time = 0
            total_jobs = 0
            for node_num in self.mapping[task]:
                assigned_node_num_l = np.array(
                    [n.node_num for n in self.node_list])
                node_idx = int(np.where(assigned_node_num_l == node_num)[0])
                total_exec_time = max(total_exec_time,
                                      self.node_list[node_idx].total_exec_time)
                total_jobs += self.node_list[node_idx].num_finished_tasks
            self.avg_times[task] = total_exec_time / total_jobs

        # Average finish time over all tasks
        end_times = []
        for node in self.end_nodes:
            end_times += node.finish_times

        sum_time = 0
        for start, end in zip(self.instream.start_times, end_times):
            sum_time += (end - start).total_seconds()
        # Average over all jobs; the local total_jobs above only counted the
        # last task, so use self.total_jobs here.
        self.avg_finish_time = sum_time / self.total_jobs

        return (self.avg_finish_time, self.avg_times)

    def check_end_status(self):
        # Check whether the jobs executed by the final nodes sum to the
        # total number of jobs.
        completed_jobs = 0
        for node in self.end_nodes:
            completed_jobs += node.num_finished_tasks
        ### Known issue: duplicated end nodes can over-count completed jobs
        return completed_jobs == self.total_jobs
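The constructor docstring pins down the argument shapes; a hypothetical instantiation for two nodes running a task chain A -> B (all names and numbers are illustrative only):

sys_exec_profile = {0: {'A': 1.0}, 1: {'B': 2.0}}  # per-node task exec times
sys_comm_profile = [[0.0, 0.3],                    # [i][j] = comm cost node i to j
                    [0.3, 0.0]]
sys_comm_req = {'A': {'B': 10}, 'B': {}}           # A feeds B; B is the final task
mapping = {'A': [0], 'B': [1]}                     # task -> node numbers
dispatcher = Dispatcher(sys_exec_profile, sys_comm_profile,
                        sys_comm_req, mapping, arrival_rate=0.5)
avg_finish_time, avg_task_times = dispatcher.start_simulation(total_jobs=100)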
Example #20
# -----------------------------------------------------------------------
# cat.py
# -----------------------------------------------------------------------

import sys
from instream import InStream
from outstream import OutStream

# Copy files or web pages whose names are given by sys.argv[1:n-2]
# to the file whose name is given by sys.argv[n-1].

inFilenames = sys.argv[1 : len(sys.argv) - 1]
outFilename = sys.argv[len(sys.argv) - 1]
outstream = OutStream(outFilename)
for filename in inFilenames:
    instream = InStream(filename)
    s = instream.readAll()
    outstream.write(s)

# -----------------------------------------------------------------------

# more in1.txt
# This is

# more in2.txt
# a tiny
# test.

# python cat.py in1.txt in2.txt out.txt

# more out.txt
Example #21
#-----------------------------------------------------------------------

# Accept integers k and d as command-line arguments. Read a document
# list from standard input, compute profiles based on k-gram
# frequencies for all the documents, and write a matrix of similarity
# measures between all pairs of documents. d is the dimension of the
# profiles.

import sys
import stdio
import stdarray
from instream import InStream
from sketch import Sketch  # module name for the Sketch class assumed

k = int(sys.argv[1])
d = int(sys.argv[2])

filenames = stdio.readAllStrings()
sketches = stdarray.create1D(len(filenames))

for i in range(len(filenames)):
    text = InStream(filenames[i]).readAll()
    sketches[i] = Sketch(text, k, d)

stdio.write('    ')
for i in range(len(filenames)):
    stdio.writef('%8.4s', filenames[i])
stdio.writeln()

for i in range(len(filenames)):
    stdio.writef('%.4s', filenames[i])
    for j in range(len(filenames)):
        stdio.writef('%8.2f', sketches[i].similarTo(sketches[j]))
    stdio.writeln()

#-----------------------------------------------------------------------
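A hypothetical run, assuming the script is saved as compare.py and the document names arrive on standard input; it writes one similarity score per document pair:

# echo 'a.txt b.txt c.txt' | python compare.py 5 10000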