Example no. 1
    def spellLight(self):
        # cast the light spell only if a charge is left in the magic inventory
        if len(self.magicList[0]) > 0:
            self.magicList[0].pop(-1)
            self.light = Lightning(self.x, self.y, self.dir, self.bg)
            game_world.add_object(self.light, 1)
Example no. 2
import os

import torch
import pytorch_lightning as pl
from pytorch_lightning.callbacks import ModelCheckpoint, EarlyStopping
from pytorch_lightning.loggers import TensorBoardLogger
# `Lightning` below is the project's own LightningModule subclass (import not shown);
# the Trainer keyword arguments follow an older pytorch-lightning API.


def Train(opt):
    # init the Lightning model from its config section
    light = Lightning(**opt['light_conf'])

    # create the experiment checkpoint directory
    checkpoint_path = os.path.join(opt['resume']['path'],
                                   opt['resume']['checkpoint'])
    os.makedirs(checkpoint_path, exist_ok=True)
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='val_loss',
                                 mode='min',
                                 save_top_k=1,
                                 verbose=True,
                                 save_last=True)

    # Early Stopping
    early_stopping = False
    if opt['train']['early_stop']:
        early_stopping = EarlyStopping(monitor='val_loss',
                                       patience=opt['train']['patience'],
                                       mode='min',
                                       verbose=True)

    # Don't request GPUs if none are available.
    if torch.cuda.is_available():
        gpus = len(opt['gpu_ids'])
    else:
        gpus = None
    # default TensorBoard logger used by the trainer
    logger = TensorBoardLogger(save_dir='./logger',
                               version=1,
                               name='lightning_logs')
    # Trainer
    trainer = pl.Trainer(
        max_epochs=opt['train']['epochs'],
        checkpoint_callback=checkpoint,
        early_stop_callback=early_stopping,
        default_root_dir=checkpoint_path,
        gpus=gpus,
        distributed_backend=opt['train']['distributed_backend'],
        train_percent_check=1.0,  # set < 1.0 to subsample the training data for quick experiments
        gradient_clip_val=5.,
        logger=logger)

    trainer.fit(light)
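
For reference, a minimal sketch of the `opt` dictionary that `Train` expects; the keys mirror the lookups made in the function above, while the concrete values are only placeholders.

opt = {
    'light_conf': {},                      # kwargs forwarded to the Lightning model
    'resume': {'path': './experiments', 'checkpoint': 'checkpoints'},
    'gpu_ids': [0],
    'train': {
        'epochs': 100,
        'early_stop': True,
        'patience': 5,
        'distributed_backend': None,
    },
}
Train(opt)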
Example no. 3
    def __init__(self,
                 ncenters=3,
                 ndims=2,
                 std=0.2,
                 seed=None,
                 update='drift',
                 interval=15,
                 transition=None):
        """
        Set up parameters for a streaming kmeans algorithm demo.

        Parameters
        ----------
        ncenters : int, or array-like (ncenters, ndims)
          Number of clusters as an integer, or an array of starting cluster centers.
          If given as an integer, cluster centers will be determined randomly.

        ndims : int
          Number of dimensions

        std : scalar
          Cluster standard deviation

        seed : int, optional
          Random seed used when generating cluster centers
        """
        multiprocessing.Process.__init__(self)
        self.stop_event = multiprocessing.Event()

        np.random.seed(seed)
        if np.size(ncenters) == 1:
            centers = np.random.randn(ncenters, ndims) * 2
        else:
            centers = np.asarray(ncenters)
            ncenters = centers.shape[0]
        self.centers = centers
        self.ncenters = ncenters
        self.ndims = ndims
        self.npoints = 50
        self.std = std
        self.update = update
        self.interval = interval
        self.transition = transition
        self.lgnAddress = "http://localhost:3010"
        self.lgn = Lightning(self.lgnAddress)
        self.lgn.create_session('kafka-streaming-kmeans')
        self.lgn.session.open()
        self.vizPanel = None
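
A short instantiation sketch for the process above; the class name `StreamingKMeansDemo` is a placeholder (the snippet only shows `__init__`), and a Lightning server is assumed to be running at the hard-coded http://localhost:3010 address.

demo = StreamingKMeansDemo(ncenters=3, ndims=2, std=0.2, update='drift')  # hypothetical class name
demo.start()            # multiprocessing.Process.start(): run the demo in a child process
# ... later ...
demo.stop_event.set()   # ask the child process to shut down
demo.join()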
Example no. 4
import os
import sys
import time

import numpy as np
from lightning import Lightning
# addData() is a word-count helper defined elsewhere in the same script


def main():
    targets = sys.argv[1:]
    prefix = "./wordCountSplit/"
    foldersIni = list(filter(lambda x: x[0] != ".", os.listdir(prefix)))
    count = []
    numOfWords = len(sys.argv) - 1
    numOfFiles = 0

    #initialize the array
    for folder in foldersIni:
        fileName = prefix + folder + "/part-00000"
        count.append(addData(fileName, targets, numOfWords))
        numOfFiles += 1

    # plot the initialized array
    lgn = Lightning()
    numOfFiles = min(numOfFiles, 10)

    series = np.array(count[:numOfFiles]).reshape((numOfWords, numOfFiles))
    viz = lgn.linestreaming(
        series,
        max_width=15,
        xaxis="Window No. (each window is 60 sec with a 5 sec update interval)",
        yaxis="Word Frequency")

    time.sleep(4)
    for c in count[numOfFiles:]:
        viz.append(np.array(c).reshape((numOfWords, 1)))
        time.sleep(0.3)

    # append the new data generated by Spark Streaming
    while True:
        folders = filter(lambda x: x[0] != ".", os.listdir(prefix))
        for folder in folders:
            if folder not in foldersIni:
                time.sleep(5)
                fileName = prefix + folder + "/part-00000"
                newData = addData(fileName, targets, numOfWords)
                viz.append(np.array(newData).reshape((numOfWords, 1)))
                foldersIni.append(folder)  # mark the window as processed
                time.sleep(0.3)
Example no. 5
feeder_params = {"linger_time": -1, "max_files": 10, "poll_time": 5}

test_data_params = {
    "prefix": "input_",
    "num_files": 10,
    "approx_file_size": 10.0,
    "records_per_file": 512 * 512,
    "copy_period": 10
}

##########################################
# Analysis configuration stuff starts here
##########################################

# TODO Need to insert the Lightning client here
lgn = Lightning("http://kafka1.int.janelia.org:3000/")
lgn.create_session('test')

image_viz = lgn.image(zeros((512, 512)))
line_viz = lgn.linestreaming(zeros((10, 1)))

analysis1 = Analysis.SeriesMeanAnalysis(
    input=dirs['input'],
    output=os.path.join(dirs['output'], 'images'),
    prefix="output",
    format="text").toImage(dims=(512, 512)).toLightning(image_viz,
                                                        only_viz=True)
#analysis2 = Analysis.SeriesFiltering2Analysis(input=dirs['input'], output=os.path.join(dirs['output'], 'filtered_series'), prefix="output", format="text").toSeries().toLightning(line_viz, only_viz=True)

#analysis2.receive_updates(analysis1)
Example no. 6
def main():

    # parse arguments
    parser = argparse.ArgumentParser(description='Spark Logistic Regression.')
    parser = baseargs(parser)
    '''
    parser.add_argument('-nc', '--ncenters', type=int, default=3, required=False, 
        help='Number of cluster centers')
    parser.add_argument('-nd', '--ndims', type=int, default=2, required=False, 
        help='Number of dimensions')
    parser.add_argument('-rs', '--randomseed', type=int, default=None, required=False,
        help='Random seed')
    parser.add_argument('-sd', '--std', type=float, default=0.3, required=False,
        help='Standard deviation of points')
    parser.add_argument('-up', '--update', type=str, choices=('jump', 'drift', 'none'), default='drift', required=False,
        help='Update behavior')
    '''
    parser.add_argument(
        '-a',
        '--autoopen',
        type=bool,  # note: argparse's type=bool treats any non-empty string as True
        choices=(True, False),
        default=True,
        required=False,
        help='Whether to automatically open the Lightning session in a browser')
    parser.add_argument('-laddr',
                        '--lightningAddr',
                        type=str,
                        choices=('http://acm:3010',),
                        default='http://acm:3010',
                        required=False,
                        help='Lightning server address')
    args = parser.parse_args()
    # basic setup
    sparkhome = findspark()
    jar = findjar()

    # set up lightning
    print("lgn address", args.lightningAddr)
    lgn = Lightning(args.lightningAddr)
    lgn.create_session('spark-logistic-regression')
    if args.autoopen:
        lgn.session.open()

    # set temp path
    path = args.path
    if not path:
        path = tempfile.gettempdir()
    tmpdir = os.path.join(path, 'sparklogisticregression')

    # setup the demo
    #s = StreamingDemo.make('kmeans', npoints=args.npoints, nbatches=args.nbatches)
    #s.setup(tmpdir, overwrite=args.overwrite)
    #s.params(ncenters=args.ncenters, ndims=args.ndims, std=args.std, seed=args.randomseed, update=args.update)

    # setup the spark job
    sparkSubmit = sparkhome + "/bin/spark-submit"
    sparkArgs = [
        "--class", "spark.classification.LogisticRegressionWithLBFGSExample",
        jar
    ]
    #demoArgs = [s.datain, s.dataout, str(args.batchtime), str(args.ncenters), str(args.ndims), str(args.halflife), str(args.timeunit)]
    demoArgs = []
    cmd = [sparkSubmit] + sparkArgs + demoArgs

    try:
        # start the spark job
        p = subprocess.Popen(cmd)
        # wait for spark streaming to start up
        time.sleep(4)
        # start the demo
        #s.run(lgn)

    finally:
        pass
Example no. 7
import networkx as nx
import numpy as np
from lightning import Lightning

lgn = Lightning(host='192.168.99.100')


def show(G):
    # adjacency matrix and node ordering of the graph
    mat, labels = nx.attr_matrix(G)
    # node degrees, used to group nodes in the force layout
    # (G.degree().values() assumes the networkx 1.x dict API)
    g = np.array(list(G.degree().values()))
    return lgn.force(mat, group=g, labels=list(labels))
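
A possible usage sketch for `show`, assuming a small networkx graph (`lgn` already points at the server configured above):

G = nx.karate_club_graph()   # small built-in example graph
viz = show(G)                # force-directed layout, nodes grouped by degree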
Example no. 8
#!/usr/bin/env python

from lightning import Lightning
from time import sleep
from numpy import random
from cassandra.cluster import Cluster
from scipy.ndimage.filters import gaussian_filter
from pandas import Series

#initialize cassandra connectivity
cluster = Cluster()
cass = cluster.connect()
cass.set_keyspace("sparkml")

#initialize lightning viz. server
lgn = Lightning(host="https://spark-streaming-ml.herokuapp.com")
lgn.create_session('streaming-kmeans')
lgn.session.open()

while True:
    accuracy = cass.execute(
        "select unixTimestampOf(event_time) as tm, mse, rmse from accuracy")
    tm = [row.tm for row in accuracy]
    rmse = [row.rmse for row in accuracy]
    obj = Series(rmse, index=tm)
    pts = obj.sort_index(ascending=True).values
    #viz = lgn.line(gaussian_filter(pts, 10))
    viz = lgn.line(pts)
    sleep(0.25)
"""rows = cass.execute("select * from predictions")
prediction = [row.prediction for row in rows]
Example no. 9
import re

import numpy as np
import pandas as pd
from lightning import Lightning


def force(indf, dic, allengraph, lblid, thr, k, name):
    # build a force-directed connectivity graph from the region sums and save it as HTML
    lgn = Lightning(ipython=True, local=True)

    #     means = indf.Mean.values
    means = indf.Sum.values
    zeros = np.zeros((means.shape[0] + 1, means.shape[0] + 1))
    zeros[:-1, -1] = means
    zeros[-1, :-1] = means
    df = pd.DataFrame(zeros)
    lbls = np.append(indf.Label.values, lblid)

    lblsdf = pd.DataFrame(lbls)
    lblsdf.columns = ['lbls']
    lblsdf = lblsdf.replace(dic)

    df.columns = lblsdf.lbls.values
    df.index = lblsdf.lbls.values

    # threshold for force graph
    dfthr = df.copy()
    thrval = indf.Sum.mean() * thr
    #     thrval = indf.Mean.mean() * thr
    dfthr[dfthr < thrval] = 0

    # drop zeros
    dfthr = dfthr[(dfthr.T != 0).any()]
    dfthr = dfthr.loc[:, (dfthr != 0).any(axis=0)]

    parents = []

    lbls = dfthr.index.values

    # get parent ids
    for l, lbl in enumerate(lbls):

        # path id
        path = allengraph.structure_id_path[allengraph.acronym == lbls[l]]

        # remove /
        numpath = re.sub("[/]", ' ', str(path))

        # get digits
        digpath = [int(s) for s in numpath.split() if s.isdigit()]
        digpath = digpath[1:]  # drop 1st index

        # get great grand parent
        if len(path) == 0:
            parent = allengraph.id[allengraph.acronym == lbls[l]]
            if len(parent) == 0:
                parent = 688
        elif len(digpath) < 3:
            parent = digpath[0]
        else:
            parent = digpath[2]

        parents.append(parent)

    sizes = (dfthr.PL.values + 1) * k
    sizes[-1] = np.max(sizes)

    #     return lgn.force(dfthr,group=parents,labels=dfthr.index.values,values=dfthr.max(),size=sizes,width=2000,height=1500,colormap=cmap)

    f = lgn.force(dfthr, labels=dfthr.index.values, size=sizes, width=2500, height=1500)
    f.save_html('%s_conn_force.html' % name, overwrite=True)
Example no. 10
        self.CAN_SAVE = can_save
        # additional config options for database connection or filename(s)
        self.HAS_CONFIG = has_config

    def parse(self, addressbook, conf):
        '''load file / open database connection'''
        # XXX: set addressbook in __init__?
        self.ab = addressbook

    def add(self, name, birthday):
        '''save new birthday to file/database (only if CAN_SAVE == true)'''
        pass

    def save_config(self, conf):
        '''record current entries in config menu into configuration'''
        pass
    
    def create_config(self, vbox, conf):
        '''create additional pygtk config in config menu'''
        pass

from csv import CSV
from evolution import Evolution
from lightning import Lightning
from mysql import MySQL
from sunbird import Sunbird

mysql_db = MySQL()
DATABASES = [CSV(), Evolution(), Lightning(), mysql_db, Sunbird()]
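
A hypothetical driver sketch for the list above; `addressbook` and `conf` stand in for the application's real objects, and the date format passed to `add` is only illustrative.

addressbook, conf = {}, {}                 # placeholders for the application's own objects
for db in DATABASES:
    db.parse(addressbook, conf)            # each backend loads its entries
    if db.CAN_SAVE:
        db.add('Ada Lovelace', '1815-12-10')   # only writable backends persist new birthdays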
Example no. 11
    def createLightning(self, position):
        # factory helper: create a Lightning effect at the given position
        return Lightning(position)
Example no. 12
"""
tools for making plots
"""

import time
import algorithms
import numpy as np
from lightning import Lightning

lgn = Lightning(host='http://psdb3:3000')


class RunPlots(object):
    def __init__(self, run_num, qs):

        self.run_num = run_num
        self.qs = qs
        self.session = lgn.create_session('Run %d' % run_num)

        self.las_diff = lgn.line(
            np.zeros_like(qs),
            index=qs,
            xaxis='q / A^{-1}',
            yaxis='Intensity',
            description='Run %d laser on minus laser off' % run_num)
        self.las_on_off = lgn.line(
            [
                np.zeros_like(qs),
            ] * 2,
            index=qs,
            xaxis='q / A^{-1}',
Example no. 13
import numpy as np
from lightning import Lightning
lgn = Lightning(local=True)
connections = np.matrix("0 0 0 1;0 0 0 0;0 0 0 0;0 0 0 0")
lgn.circle(connections)
Example no. 14
from lightning import Lightning
from numpy import random

lgn = Lightning()

x = random.randn(100)
y = random.randn(100)
group = (random.rand(100) * 5).astype('int')
size = random.rand(100) * 20 + 5

lgn.scatter(x, y, group=group, size=size)
Example no. 15
    def lgn(self, host):
        # connect to the given Lightning server and open a session for the image tests
        lgn = Lightning(host)
        lgn.create_session("test-images")
        return lgn
Example no. 16
import numpy as np
import pandas as pd
from lightning import Lightning


def createconnectogram(num_out_lbl, heatmap, annot_csv, uniq_lbls, targ, dic):
    """ Generates & saves the connectome graph of the connectivity matrix
    """

    print("\n Computing & saving the interactive connectivity graph (connectogram)")

    lgn = Lightning(ipython=True, local=True)

    # create circle connectome
    connections = np.zeros(((2 * num_out_lbl) + 1, (2 * num_out_lbl) + 1))

    connections[1:(num_out_lbl + 1), 0] = uniq_lbls.T
    connections[num_out_lbl + 1:, 0] = targ.T

    connections[0, 1:(num_out_lbl + 1)] = uniq_lbls.T
    connections[0, (num_out_lbl + 1):] = targ.T

    # propagate connections

    alllbls = connections[1:, 0]

    for l, lbl in enumerate(alllbls):

        for t, tart in enumerate(alllbls):
            iind = heatmap[:-1, 0] == lbl
            jind = heatmap[num_out_lbl, :] == tart
            val = heatmap[np.where(iind), np.where(jind)]

            connections[l + 1, t + 1] = val if val > 0 else 0

    # threshold connections
    thr = 0.1
    connections[connections < thr] = 0

    # lbls abrv
    alllbls_abrv = pd.DataFrame(alllbls)
    alllbls_abrv = alllbls_abrv.replace(dic)
    alllbls_abrv = np.array(alllbls_abrv[0])

    # get grand parents ids for groups
    ggp_parents = np.array([
        annot_csv['parent_structure_id'][annot_csv['id'] == lbl].item()
        for l, lbl in enumerate(alllbls)
    ])

    parent_grps = ggp_parents

    for i in range(2):
        parent_grps = np.array([
            annot_csv['parent_structure_id'][annot_csv['id'] == lbl].item() if
            (lbl != 997) else 997 for l, lbl in enumerate(parent_grps)
        ])

    # make dic
    repl = np.unique(ggp_parents)
    np.place(repl, repl > 0, range(len(repl)))
    uniq_parents = np.unique(ggp_parents)
    parents_dic = dict(zip(uniq_parents, repl))

    # replace
    ggp_parents = pd.DataFrame(ggp_parents)
    groups = ggp_parents.replace(parents_dic)
    groups = np.array(groups[0])

    # make dic
    repl2 = np.unique(parent_grps)
    np.place(repl2, repl2 > 0, range(len(repl2)))
    uniq_parents2 = np.unique(parent_grps)
    parents_dic2 = dict(zip(uniq_parents2, repl2))

    parent_grps = pd.DataFrame(parent_grps)
    parent_groups = parent_grps.replace(parents_dic2)
    parent_groups = np.array(parent_groups[0])

    justconn = connections[1:, 1:]

    c = lgn.circle(justconn,
                   labels=alllbls_abrv,
                   group=[parent_groups, groups],
                   width=1000,
                   height=1000)

    c.save_html('connectogram_grouped_by_parent_id_%d_labels.html' %
                num_out_lbl,
                overwrite=True)
Example no. 17
            temp_county = row[2]
            temp_value = row[5]

            if temp_state != "" and temp_county != "" and temp_value != "":
                for i in range(0, d):
                    if states[i] == temp_state:
                        county[i].append(temp_county)
                        value[i].append(float(temp_value) / 100)

    for i in range(0, d):
        if len(value[i]) == 1:
            vec1.append(len(county[i]))
            vec2.append(0.0)
        else:
            vec1.append(len(county[i]))
            vec2.append(statistics.stdev(value[i]))

    colors = np.random.rand(d)
    area = vec2  # marker sizes follow the per-state std deviation of county values

    plt.scatter(states, vec1, s=area, c=colors, alpha=0.5)
    plt.xlabel("Code of state")
    plt.ylabel("Number of counties per state")
    plt.show()

    lgn = Lightning(ipython=True, local=True)

    values = random.randn(len(states))

    lgn.map(states, vec2, colormap='Blues')