Example #1
def main():
    # Uses GLUT (template: https://compgraphics.info/OpenGL/template_glut.php)
    # via the Python bindings, PyOpenGL: https://wiki.python.org/moin/PyOpenGL

    obj_file = ObjLoader(f"models/{Config.model}.obj")
    obj_file.add_plane(plane(0.2, 0.1))

    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(Config.width, Config.height)
    glutCreateWindow("HW3, Shadows, Tankov Vladislav")

    # glEnable(GL_DEBUG_OUTPUT)
    # glDebugMessageCallback(GLDEBUGPROC(cb_dbg_msg), None)
    glEnable(GL_TEXTURE_2D)
    glEnable(GL_DEPTH_TEST)

    Model.create_buffers(obj_file.prepared_vertices,
                         obj_file.prepared_tex_coords,
                         obj_file.prepared_normals)
    ShadowMap.create()

    ShadowProgram.create()
    ShadowProgram.attach_shader(
        Shader.load("shader/shadow_vertex.glsl", GL_VERTEX_SHADER))
    ShadowProgram.attach_shader(
        Shader.load("shader/shadow_fragment.glsl", GL_FRAGMENT_SHADER))
    ShadowProgram.link()
    ShadowProgram.use()

    Program.create()
    Program.attach_shader(
        Shader.load("shader/main_vertex.glsl", GL_VERTEX_SHADER))
    Program.attach_shader(
        Shader.load("shader/main_fragment.glsl", GL_FRAGMENT_SHADER))
    Program.link()
    Program.use()

    Camera.update_gl()
    Lightning.update_gl()

    glutDisplayFunc(
        Display.display(obj_file.prepared_vertices, obj_file.prepared_normals))
    glutReshapeFunc(Display.reshape)

    glutIdleFunc(Display.idle)

    glutMouseFunc(Controls.mouse)
    glutMotionFunc(Controls.motion)

    glutMainLoop()
Example #2
    def setUp(self):
        logging.basicConfig(
            level='ERROR',
            format='[%(asctime)s] (%(levelname)s) %(module)s.%(funcName)s:%(lineno)d %(message)s',
            datefmt='%Y-%m-%d %I:%M:%S %p'
        )

        self.start_redis()
        self.config = {
            'sql_connection': 'dsn=SQLServer;uid=fakeuser;pwd=fakepassword;database=LIGHTNING_TEST;driver={SQL Server Native Client 10.0}',
            'environment': 'local',
            'redis_host': 'localhost',
            'redis_port': self.redis_port,
        }

        @defer.inlineCallbacks
        def on_build(app):
            self.app = app
            yield self.reset_db(app.db)
            if self.use_networking:
                self.listeners = [
                    reactor.listenTCP(0, self.app.site),
                ]


        # XXX - something goes wrong in the teardown process related to the
        # redis connection pool that we can't figure out.  Fortunately, none
        # of our tests actually need that connection pool, so we added this
        # do_connect_redis parameter to skip its creation when running tests.
        return Lightning.build(self.config, do_connect_redis=False).addCallback(on_build)
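
# A minimal, self-contained sketch of the do_connect_redis gate described in
# the comment above (an assumption for illustration; FakeApp is hypothetical
# and this is not Lightning's real build implementation):
class FakeApp(object):
    @classmethod
    def build(cls, config, do_connect_redis=True):
        app = cls()
        if do_connect_redis:
            # stand-in for building a real redis connection pool
            app.redis_pool = {'host': config['redis_host'],
                              'port': config['redis_port']}
        else:
            # tests pass do_connect_redis=False to skip pool creation entirely
            app.redis_pool = None
        return app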
Example #3
 def spellLight(self):
     if self.magicList[0]:
         self.magicList[0].pop(-1)
         self.light = Lightning(self.x, self.y, self.dir, self.bg)
         game_world.add_object(self.light, 1)
Example #5
def main():
    # Uses GLUT (template: https://compgraphics.info/OpenGL/template_glut.php)
    # via the Python bindings, PyOpenGL: https://wiki.python.org/moin/PyOpenGL

    obj_file = ObjLoader(f"models/{Config.model}.obj")

    glutInit(sys.argv)
    glutInitDisplayMode(GLUT_RGB | GLUT_DOUBLE | GLUT_DEPTH)
    glutInitWindowSize(Config.width, Config.height)
    glutCreateWindow("HW2, Object, Tankov Vladislav")

    Program.prepare(obj_file.prepared_vertices, obj_file.prepared_normals)

    Program.create()
    # forward tex coord to fragment
    Program.attach_shader(
        Shader.load("shader/vertex_noise.glsl", GL_VERTEX_SHADER))
    # apply noise
    Program.attach_shader(
        Shader.load("shader/fragment_noise.glsl", GL_FRAGMENT_SHADER))
    Program.link()

    Texture.create_2d(obj_file.prepared_tex_coords)

    Program.use()

    Camera.update_gl()
    Lightning.update_gl()

    glutDisplayFunc(Display.display(obj_file.prepared_vertices))
    glutReshapeFunc(Display.reshape)

    glutIdleFunc(Display.idle)

    glutMouseFunc(Controls.mouse)
    glutMotionFunc(Controls.motion)

    glutCreateMenu(Controls.menu)
    glutAddMenuEntry("Disable Dissolve", 0)
    glutAddMenuEntry("Enable Dissolve", 1)
    glutAttachMenu(GLUT_RIGHT_BUTTON)

    glutMainLoop()
Example #6
import os
import sys
import time

import numpy as np
from lightning import Lightning


def main():
    targets = sys.argv[1:]
    prefix = "./wordCountSplit/"
    foldersIni = list(filter(lambda x: x[0] != ".", os.listdir(prefix)))
    count = []
    numOfWords = len(sys.argv) - 1
    numOfFiles = 0

    #initialize the array
    for folder in foldersIni:
        fileName = prefix + folder + "/part-00000"
        count.append(addData(fileName, targets, numOfWords))
        numOfFiles += 1

    #plot the initialized array
    lgn = Lightning()
    numOfFiles = min(numOfFiles, 10)

    series = np.array(count[:numOfFiles]).reshape((numOfWords, numOfFiles))
    viz = lgn.linestreaming(
        series,
        max_width=15,
        xaxis="Window No. (each window is 60 sec with 5 sec update interval)",
        yaxis="Word Frequency")

    time.sleep(4)
    for c in count[numOfFiles:]:
        viz.append(np.array(c).reshape((numOfWords, 1)))
        time.sleep(0.3)

    # update the new data generated by Spark Streaming
    while True:
        folders = filter(lambda x: x[0] != ".", os.listdir(prefix))
        for folder in folders:
            if folder not in foldersIni:
                time.sleep(5)
                fileName = prefix + folder + "/part-00000"
                newData = addData(fileName, targets, numOfWords)
                viz.append(np.array(newData).reshape((numOfWords, 1)))
                time.sleep(0.3)
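
# addData is not shown in this excerpt. A minimal sketch, assuming the
# original helper reads a Spark word-count part-file of "word count" lines
# and returns the counts for the target words (an assumption, not the
# project's actual implementation):
def addData(fileName, targets, numOfWords):
    counts = [0] * numOfWords
    with open(fileName) as f:
        for line in f:
            parts = line.split()
            if len(parts) >= 2 and parts[0] in targets:
                counts[targets.index(parts[0])] = int(parts[1])
    return counts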
Example #7
def Train(opt):
    # init Lightning Model
    light = Lightning(**opt['light_conf'])

    # create the experiment checkpoint directory
    checkpoint_path = os.path.join(opt['resume']['path'],
                                   opt['resume']['checkpoint'])
    os.makedirs(checkpoint_path, exist_ok=True)
    checkpoint = ModelCheckpoint(checkpoint_path,
                                 monitor='val_loss',
                                 mode='min',
                                 save_top_k=1,
                                 verbose=1,
                                 save_last=True)

    # Early Stopping
    early_stopping = False
    if opt['train']['early_stop']:
        early_stopping = EarlyStopping(monitor='val_loss',
                                       patience=opt['train']['patience'],
                                       mode='min',
                                       verbose=1)

    # Don't request GPUs if none are available.
    if torch.cuda.is_available():
        gpus = len(opt['gpu_ids'])
    else:
        gpus = None
    # default logger used by the trainer
    logger = TensorBoardLogger(save_dir='./logger',
                               version=1,
                               name='lightning_logs')
    # Trainer
    trainer = pl.Trainer(
        max_epochs=opt['train']['epochs'],
        checkpoint_callback=checkpoint,
        early_stop_callback=early_stopping,
        default_root_dir=checkpoint_path,
        gpus=gpus,
        distributed_backend=opt['train']['distributed_backend'],
        train_percent_check=1.0,  # Useful for fast experiment
        gradient_clip_val=5.,
        logger=logger)

    trainer.fit(light)
Example #8
        self.CAN_SAVE = can_save
        # additional config options for database connection or filename(s)
        self.HAS_CONFIG = has_config

    def parse(self, addressbook, conf):
        '''load file / open database connection'''
        # XXX: set addressbook in __init__?
        self.ab = addressbook

    def add(self, name, birthday):
        '''save new birthday to file/database (only if CAN_SAVE == true)'''
        pass

    def save_config(self, conf):
        '''record current entries in config menu into configuration'''
        pass
    
    def create_config(self, vbox, conf):
        '''create additional pygtk config in config menu'''
        pass

from csv import CSV
from evolution import Evolution
from lightning import Lightning
from mysql import MySQL
from sunbird import Sunbird

mysql_db = MySQL()
DATABASES = [CSV(), Evolution(), Lightning(), mysql_db, Sunbird()]
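
# A minimal sketch of a hypothetical backend implementing the interface
# above (InMemory is illustrative only and not one of the real DATABASES):
class InMemory(object):
    CAN_SAVE = True
    HAS_CONFIG = False

    def __init__(self):
        self.entries = {}

    def parse(self, addressbook, conf):
        '''nothing to load for an in-memory backend'''
        self.ab = addressbook

    def add(self, name, birthday):
        '''allowed because CAN_SAVE is true'''
        self.entries[name] = birthday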
Example #9
 def idle():
     Lightning.update_gl()
     DissolveAnimation.update_gl()
     glutPostRedisplay()
Example #10
ax = sns.barplot(x="title",
                 y="total_earnings",
                 data=sorted_salaries,
                 palette='muted')
plt.xlabel('Meslek', fontsize=16)
plt.ylabel("Ortalama MaaÅŸ", fontsize=16)
ax.set_xticklabels(ax.get_xticklabels(), rotation=90)
ax.set_title('Meslek Ortalama Maaşları')

# Import the Lightning library and explore its visualizations.

from lightning import Lightning

lgn = Lightning(ipython=True, local=True)

# Scatter plot demo.
# Brushing is possible with the Shift key.

c = [100, 200, 100]
lgn.scatter(sal7['dept_average'],
            sal7['total_earnings'],
            alpha=0.8,
            brush=True,
            color=c,
            size=5)

Example #11
from lightning import Lightning
from numpy import random

lgn = Lightning()

x = random.randn(100)
y = random.randn(100)
group = (random.rand(100) * 5).astype('int')
size = random.rand(100) * 20 + 5

lgn.scatter(x, y, group=group, size=size)
Example #12
from lightning import Lightning
from numpy import random

lgn = Lightning()

series = random.randn(5,10)

viz = lgn.linestreaming(series)

for _ in range(100):
    viz.append(random.randn(5, 1))
Example #13
#coding:utf-8
import os
from lightning import Lightning
lgn = Lightning(host="http://rocky-river-8489.herokuapp.com/")
lgn.create_session('么么哒')

lgn.line([1,2,3,4,5,6,7,8,0,-2,2])
lgn.scatter([1,2,3],[2,9,4])
lgn.scatter([1,2,3],[2,9,4], label=[1,2,3], size=[5,10,20])
lgn.plot(data={"series": [1,2,3]}, type='line')
Example #14
import argparse
import subprocess
import tempfile
import time

# baseargs, findspark and findjar are project helpers not shown in this excerpt


def main():

    # parse arguments
    parser = argparse.ArgumentParser(description='Spark Logistic Regression.')
    parser = baseargs(parser)
    '''
    parser.add_argument('-nc', '--ncenters', type=int, default=3, required=False, 
        help='Number of cluster centers')
    parser.add_argument('-nd', '--ndims', type=int, default=2, required=False, 
        help='Number of dimensions')
    parser.add_argument('-rs', '--randomseed', type=int, default=None, required=False,
        help='Random seed')
    parser.add_argument('-sd', '--std', type=float, default=0.3, required=False,
        help='Standard deviation of points')
    parser.add_argument('-up', '--update', type=str, choices=('jump', 'drift', 'none'), default='drift', required=False,
        help='Update behavior')
    '''
    parser.add_argument(
        '-a',
        '--autoopen',
        type=bool,
        choices=(True, False),
        default=True,
        required=False,
        help='Whether to automatically open Lightning session on a browser')
    parser.add_argument('-laddr',
                        '--lightningAddr',
                        type=str,
                        choices=('http://acm:3010',),
                        default='http://acm:3010',
                        required=False,
                        help='Lightning server address')
    args = parser.parse_args()
    # basic setup
    sparkhome = findspark()
    jar = findjar()

    # set up lightning
    print("lgn address", args.lightningAddr)
    lgn = Lightning(args.lightningAddr)
    lgn.create_session('spark-logistic-regression')
    if args.autoopen:
        lgn.session.open()

    # set temp path
    path = args.path
    if not path:
        path = tempfile.gettempdir()
    tmpdir = os.path.join(path, 'sparklogisticregression')

    # setup the demo
    #s = StreamingDemo.make('kmeans', npoints=args.npoints, nbatches=args.nbatches)
    #s.setup(tmpdir, overwrite=args.overwrite)
    #s.params(ncenters=args.ncenters, ndims=args.ndims, std=args.std, seed=args.randomseed, update=args.update)

    # setup the spark job
    sparkSubmit = sparkhome + "/bin/spark-submit"
    sparkArgs = [
        "--class", "spark.classification.LogisticRegressionWithLBFGSExample",
        jar
    ]
    #demoArgs = [s.datain, s.dataout, str(args.batchtime), str(args.ncenters), str(args.ndims), str(args.halflife), str(args.timeunit)]
    demoArgs = []
    cmd = [sparkSubmit] + sparkArgs + demoArgs

    try:
        # start the spark job
        p = subprocess.Popen(cmd)
        # wait for spark streaming to start up
        time.sleep(4)
        # start the demo
        #s.run(lgn)

    finally:
        pass
Example #15
from lightning import Lightning
from numpy import random

lgn = Lightning()

x = random.randn(10)
y = random.randn(10)
mat = random.rand(10, 10)
mat[mat > 0.75] = 0
group = (random.rand(10) * 5).astype('int')

lgn.graph(x, y, mat, group=group)
Example #16
 def lgn(self, host):
     lgn = Lightning(host)
     lgn.create_session("test-plots")
     return lgn
Example #17
import re

import numpy as np
import pandas as pd
from lightning import Lightning


def force(indf, dic, allengraph, lblid, thr, k, name):
    # force graph
    lgn = Lightning(ipython=True, local=True)

    #     means = indf.Mean.values
    means = indf.Sum.values
    zeros = np.zeros((means.shape[0] + 1, means.shape[0] + 1))
    zeros[:-1, -1] = means
    zeros[-1, :-1] = means
    df = pd.DataFrame(zeros)
    lbls = np.append(indf.Label.values, lblid)

    lblsdf = pd.DataFrame(lbls)
    lblsdf.columns = ['lbls']
    lblsdf = lblsdf.replace(dic)

    df.columns = lblsdf.lbls.values
    df.index = lblsdf.lbls.values

    # threshold for force graph
    dfthr = df.copy()
    thrval = indf.Sum.mean() * thr
    #     thrval = indf.Mean.mean() * thr
    dfthr[dfthr < thrval] = 0

    # drop zeros
    dfthr = dfthr[(dfthr.T != 0).any()]
    dfthr = dfthr.loc[:, (dfthr != 0).any(axis=0)]

    parents = []

    lbls = dfthr.index.values

    # get parent ids
    for l, lbl in enumerate(lbls):

        # path id
        path = allengraph.structure_id_path[allengraph.acronym == lbls[l]]

        # remove /
        numpath = re.sub("[/]", ' ', str(path))

        # get digits
        digpath = [int(s) for s in numpath.split() if s.isdigit()]
        digpath = digpath[1:]  # drop 1st index

        # get great grand parent
        if len(path) == 0:
            parent = allengraph.id[allengraph.acronym == lbls[l]]
            if len(parent) == 0:
                parent = 688
        elif len(digpath) < 3:
            parent = digpath[0]
        else:
            parent = digpath[2]

        parents.append(parent)

    sizes = (dfthr.PL.values + 1) * k
    sizes[-1] = np.max(sizes)

    #     return lgn.force(dfthr,group=parents,labels=dfthr.index.values,values=dfthr.max(),size=sizes,width=2000,height=1500,colormap=cmap)

    f = lgn.force(dfthr, labels=dfthr.index.values, size=sizes, width=2500, height=1500)
    f.save_html('%s_conn_force.html' % name, overwrite=True)
Example #18
            temp_county = row[2]
            temp_value = row[5]

            if temp_state != "" and temp_county != "" and temp_value != "":
                for i in range(0, d):
                    if states[i] == temp_state:
                        county[i].append(temp_county)
                        value[i].append(float(temp_value) / 100)

    for i in range(0, d):
        if len(value[i]) == 1:
            vec1.append(len(county[i]))
            vec2.append(0.0)
        else:
            vec1.append(len(county[i]))
            vec2.append(statistics.stdev(value[i]))

    colors = np.random.rand(d)
    area = vec2  # 0 to 15 point radii

    plt.scatter(states, vec1, s=area, c=colors, alpha=0.5)
    plt.xlabel("Code of state")
    plt.ylabel("Number of counties per state")
    plt.show()

    lgn = Lightning(ipython=True, local=True)

    lgn.map(states, vec2, colormap='Blues')
Example #19
# imports inferred from this excerpt's call sites
import multiprocessing
import time

import numpy as np
from kafka import KafkaConsumer
from lightning import Lightning
from sklearn.datasets import make_blobs


class Consumer(multiprocessing.Process):
    def __init__(self,
                 ncenters=3,
                 ndims=2,
                 std=0.2,
                 seed=None,
                 update='drift',
                 interval=15,
                 transition=None):
        """
        Set up parameters for a streaming kmeans algorithm demo.

        Parameters
        ----------
        ncenters : int, or array-like (ncenters, ndims)
          Number of clusters as an integer, or an array of starting cluster centers.
          If given as an integer, cluster centers will be determined randomly.

        ndims : int
          Number of dimensions

        std : scalar
          Cluster standard deviation
        """
        multiprocessing.Process.__init__(self)
        self.stop_event = multiprocessing.Event()

        np.random.seed(seed)
        if np.size(ncenters) == 1:
            centers = np.random.randn(ncenters, ndims) * 2
        else:
            centers = np.asarray(ncenters)
            ncenters = centers.shape[0]
        self.centers = centers
        self.ncenters = ncenters
        self.ndims = ndims
        self.npoints = 50
        self.std = std
        self.update = update
        self.interval = interval
        self.transition = transition
        self.lgnAddress = "http://localhost:3010"
        self.lgn = Lightning(self.lgnAddress)
        self.lgn.create_session('kafka-streaming-kmeans')
        self.lgn.session.open()
        self.vizPanel = None

    def stop(self):
        self.stop_event.set()

    def notifyLgn(self, model):
        time.sleep(1)
        # plot an update (if we got a valid model)
        #if len(model) == self.ncenters:

        pts, labels = make_blobs(self.npoints,
                                 self.ndims,
                                 self.centers,
                                 cluster_std=self.std)

        clrs = labels
        order = np.argsort(labels)
        clrs = clrs[order]
        pts = pts[order]
        s = np.ones(self.npoints) * 10

        if self.ndims == 1:
            pts = np.vstack((pts, model[:, None]))
        else:
            print("halil:", pts.shape, model.shape)
            pts = np.vstack((pts, model))
        clrs = np.hstack((clrs, np.ones(self.ncenters) * 5))
        s = np.hstack((s, np.ones(self.ncenters) * 10))

        # wait a few iterations before plotting
        # scatter plot for two dimensions
        if self.ndims == 2:
            if self.vizPanel is None:
                self.vizPanel = self.lgn.scatterstreaming(pts[:, 0],
                                                          pts[:, 1],
                                                          labels=clrs,
                                                          size=s)
            else:
                self.vizPanel.append(pts[:, 0], pts[:, 1], labels=clrs, size=s)

        # line plot for one dimension
        elif self.ndims == 1:
            if self.vizPanel is None:
                self.vizPanel = self.lgn.linestreaming(pts,
                                                       labels=clrs,
                                                       size=s / 2)
            else:
                self.vizPanel.append(pts, labels=clrs, size=s / 2)

        else:
            raise Exception('Plotting only supported with 1 or 2 dimensions')

    def processModelPrediction(self, msg):
        model_, prediction_ = msg.value.split("-----")
        try:
            model = np.fromstring(model_, dtype=float, sep=",")
        except ValueError:
            # fall back to a dummy model if parsing fails
            model = np.zeros(shape=(1, 2))
        print("notify lightning server")
        self.notifyLgn(model)

    def run(self):
        #value_deserializer=lambda m: json.loads(m.decode('utf-8')),
        consumer = KafkaConsumer(
            bootstrap_servers='acm:9092',
            auto_offset_reset='earliest',
            value_deserializer=lambda m: m.decode('utf-8'),
            consumer_timeout_ms=1000)
        consumer.subscribe(['kmeans-output-topic'])

        while not self.stop_event.is_set():
            for message in consumer:
                #print(message)
                self.processModelPrediction(message)
                if self.stop_event.is_set():
                    break

        consumer.close()
Example #20
import numpy as np
from lightning import Lightning
lgn = Lightning(local=True)
connections = np.matrix("0 0 0 1;0 0 0 0;0 0 0 0;0 0 0 0")
lgn.circle(connections)
Example #23
#!/usr/bin/env python

from lightning import Lightning
from time import sleep
from numpy import random
from cassandra.cluster import Cluster
from scipy.ndimage.filters import gaussian_filter
from pandas import Series

#initialize cassandra connectivity
cluster = Cluster()
cass = cluster.connect()
cass.set_keyspace("sparkml")

#initialize lightning viz. server
lgn = Lightning(host="https://spark-streaming-ml.herokuapp.com")
lgn.create_session('streaming-kmeans')
lgn.session.open()

while True:
    accuracy = cass.execute(
        "select unixTimestampOf(event_time) as tm, mse, rmse from accuracy")
    tm = [row.tm for row in accuracy]
    rmse = [row.rmse for row in accuracy]
    obj = Series(rmse, index=tm)
    pts = obj.sort_index(ascending=True).values
    #viz = lgn.line(gaussian_filter(pts, 10))
    viz = lgn.line(pts)
    sleep(0.25)
"""rows = cass.execute("select * from predictions")
prediction = [row.prediction for row in rows]
Example #24
from lightning import Lightning
from numpy import random

lgn = Lightning()

connections = random.rand(50,50)
connections[connections<0.98] = 0

g1 = (random.rand(50) * 3).astype('int')
g2 = (random.rand(50) * 3).astype('int')
group = [g2, g1]

lgn.circle(connections, group=group)
Example #26
 def lgn(self, host):
     lgn = Lightning(host)
     lgn.create_session("test-images")
     return lgn
Example #27
"""
tools for making plots
"""

import time
import algorithms
import numpy as np
from lightning import Lightning

lgn = Lightning(host='http://psdb3:3000')


class RunPlots(object):
    def __init__(self, run_num, qs):

        self.run_num = run_num
        self.qs = qs
        self.session = lgn.create_session('Run %d' % run_num)

        self.las_diff = lgn.line(
            np.zeros_like(qs),
            index=qs,
            xaxis='q / A^{-1}',
            yaxis='Intensity',
            description='Run %d laser on minus laser off' % run_num)
        self.las_on_off = lgn.line(
            [
                np.zeros_like(qs),
            ] * 2,
            index=qs,
            xaxis='q / A^{-1}',
Example #28
 def idle():
     Lightning.update_angle()
     glutPostRedisplay()
Example #29
from lightning import Lightning
from numpy import random

lgn = Lightning()

mat = random.rand(10,10)
mat[mat>0.75] = 0
group = (random.rand(10) * 5).astype('int')

lgn.force(mat, group=group)
Example #30
from lightning import Lightning
from sklearn import datasets

lgn = Lightning()

imgs = datasets.load_sample_images()['images']

lgn.imagepoly(imgs[0])
Example #31
 def createLightning(self, position):
     lightning = Lightning(position)
     return lightning
Example #32
from lightning import Lightning
from numpy import random
 
lgn = Lightning()
 
x = random.randn(100)
y = random.randn(100)
 
viz = lgn.scatterstreaming(x, y)
 
for _ in range(100):
    x = random.randn(100)
    y = random.randn(100)
    viz.append(x, y)
Example #33
from lightning import Lightning
from sklearn import datasets

lgn = Lightning()

imgs = datasets.load_sample_images()['images']

lgn.gallery(imgs)
Example #34
import networkx as nx
import numpy as np
from lightning import Lightning

lgn = Lightning(host='192.168.99.100')


def show(G):
    mat, labels = nx.attr_matrix(G)
    # node degrees as group labels (dict() works on both networkx 1.x and 2.x)
    g = np.array(list(dict(G.degree()).values()))
    return lgn.force(mat, group=g, labels=[i for i in labels])
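
# Usage sketch: render a small sample graph (assumes a Lightning server is
# reachable at the host configured above).
G = nx.karate_club_graph()
show(G)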
Example #36
import os

from numpy import zeros

# Analysis and dirs are provided by the surrounding project (not shown)

feeder_params = {"linger_time": -1, "max_files": 10, "poll_time": 5}

test_data_params = {
    "prefix": "input_",
    "num_files": 10,
    "approx_file_size": 10.0,
    "records_per_file": 512 * 512,
    "copy_period": 10
}

##########################################
# Analysis configuration stuff starts here
##########################################

# TODO Need to insert the Lightning client here
lgn = Lightning("http://kafka1.int.janelia.org:3000/")
lgn.create_session('test')

image_viz = lgn.image(zeros((512, 512)))
line_viz = lgn.linestreaming(zeros((10, 1)))

analysis1 = Analysis.SeriesMeanAnalysis(
    input=dirs['input'],
    output=os.path.join(dirs['output'], 'images'),
    prefix="output",
    format="text").toImage(dims=(512, 512)).toLightning(image_viz,
                                                        only_viz=True)
#analysis2 = Analysis.SeriesFiltering2Analysis(input=dirs['input'], output=os.path.join(dirs['output'], 'filtered_series'), prefix="output", format="text").toSeries().toLightning(line_viz, only_viz=True)

#analysis2.receive_updates(analysis1)
Example #37
from lightning import Lightning
from numpy import random, ceil, array

lgn = Lightning()

series = random.randn(5, 50)

lgn.line(series)
Example #38
import numpy as np
import pandas as pd
from lightning import Lightning


def createconnectogram(num_out_lbl, heatmap, annot_csv, uniq_lbls, targ, dic):
    """ Generates & saves the connectome graph of the connectivity matrix
    """

    print(
        "\n Computing & saving the interactive connectivity graph (connectogram) "
    )

    lgn = Lightning(ipython=True, local=True)

    # create circle connectome
    connections = np.zeros(((2 * num_out_lbl) + 1, (2 * num_out_lbl) + 1))

    connections[1:(num_out_lbl + 1), 0] = uniq_lbls.T
    connections[num_out_lbl + 1:, 0] = targ.T

    connections[0, 1:(num_out_lbl + 1)] = uniq_lbls.T
    connections[0, (num_out_lbl + 1):] = targ.T

    # propagate connections

    alllbls = connections[1:, 0]

    for l, lbl in enumerate(alllbls):

        for t, tart in enumerate(alllbls):
            iind = heatmap[:-1, 0] == lbl
            jind = heatmap[num_out_lbl, :] == tart
            val = heatmap[np.where(iind == True), np.where(jind == True)]

            connections[l + 1, t + 1] = val if val > 0 else 0

    # threshold connections
    thr = 0.1
    connections[connections < thr] = 0

    # lbls abrv
    alllbls_abrv = pd.DataFrame(alllbls)
    alllbls_abrv = alllbls_abrv.replace(dic)
    alllbls_abrv = np.array(alllbls_abrv[0])

    # get grand parents ids for groups
    ggp_parents = np.array([
        annot_csv['parent_structure_id'][annot_csv['id'] == lbl].item()
        for l, lbl in enumerate(alllbls)
    ])

    parent_grps = ggp_parents

    for i in range(2):
        parent_grps = np.array([
            annot_csv['parent_structure_id'][annot_csv['id'] == lbl].item() if
            (lbl != 997) else 997 for l, lbl in enumerate(parent_grps)
        ])

    # make dic
    repl = np.unique(ggp_parents)
    np.place(repl, repl > 0, range(len(repl)))
    uniq_parents = np.unique(ggp_parents)
    parents_dic = dict(zip(uniq_parents, repl))

    # replace
    ggp_parents = pd.DataFrame(ggp_parents)
    groups = ggp_parents.replace(parents_dic)
    groups = np.array(groups[0])

    # make dic
    repl2 = np.unique(parent_grps)
    np.place(repl2, repl2 > 0, range(len(repl2)))
    uniq_parents2 = np.unique(parent_grps)
    parents_dic2 = dict(zip(uniq_parents2, repl2))

    parent_grps = pd.DataFrame(parent_grps)
    parent_groups = parent_grps.replace(parents_dic2)
    parent_groups = np.array(parent_groups[0])

    justconn = connections[1:, 1:]

    c = lgn.circle(justconn,
                   labels=alllbls_abrv,
                   group=[parent_groups, groups],
                   width=1000,
                   height=1000)

    c.save_html('connectogram_grouped_by_parent_id_%d_labels.html' %
                num_out_lbl,
                overwrite=True)