Example #1
def test_template_definitions(modelFile="tests/brusselator1d_bound_U.json"):
    '''
    DESCRIPTION:
    Generate cpp code for centrals from the
    template.

    template in:
       'criminal/templates/definitions.template'

    out will be in:
       'tests/introduction/src/from_test_template_definitions.cpp'

    '''
    if isinstance(modelFile, str):
        model = get_model_for_tests(modelFile)
    else:
        model = modelFile

    # model = get_model_for_tests(modelFile)

    params = Params()
    cppGen = CppOutGen()

    # parameters for definitions
    params.set_params_for_definitions(model)
    
    out = cppGen.get_out_for_definitions(params)

    to_file(out, 'from_test_template_definitions.cpp')

    return out
Example #2
def test_matchzoo():
    
    params = Params()
    config_file = 'config/qalocal.ini'    # define dataset in the config
    params.parse_config(config_file)
    params.network_type = "anmm.ANMM"
    
    reader = qa.setup(params)
    qdnn = models.setup(params)
    model = qdnn.getModel()
    
    
    model.compile(loss = params.loss,
                optimizer = units.getOptimizer(name=params.optimizer,lr=params.lr),
                metrics=['accuracy'])
    model.summary()
    
#    generators = [reader.getTrain(iterable=False) for i in range(params.epochs)]
#    q,a,score = reader.getPointWiseSamples()
#    model.fit(x = [q,a],y = score,epochs = 1,batch_size =params.batch_size)
    
    def gen():
        while True:
            for sample in reader.getPointWiseSamples(iterable = True):
                yield sample
    model.fit_generator(gen(),epochs = 2,steps_per_epoch=1000)
Example #3
  def test_roi (self):
    """Upload an roi and test it's fields"""

    # Make a skeleton
    makeAnno (p, 9)

    # test the parent
    parent = random.randint (0,65535)
    f = setField(p, 'parent', parent)
    f = getField(p, 'parent')
    assert parent == int(f.content)

    # make a bunch of children ROIs
    q = Params()
    q.token = 'unittest'
    q.resolution = 0
    q.channels = ['unit_anno']

    childids = []
    for i in range(0,4):
      makeAnno ( q, 9)
      f = setField(q, 'parent', p.annoid)
      childids.append(q.annoid)

    # Test children
    f = getField(p, 'children')
    rchildids = f.content.split(',')
    for cid in rchildids:
      assert int(cid) in childids
    assert len(rchildids) == 4
Example #4
    def __init__(self):
        self.model = Model()
        self.params = Params()
        self.screen = Screen()

        self.fzf = FzfPrompt()
        self.info = ''
Example #5
  def test_node (self):
    """Upload a skeleton node and test it's fields"""

    # Make a node
    makeAnno (p, 7)

    # test the nodetype
    nodetype = random.randint (0,100)
    f = setField(p, 'nodetype', nodetype)
    f = getField(p, 'nodetype')
    assert nodetype == int(f.content)

    # test the skeletonid
    skeletonid = random.randint (0,65535)
    f = setField(p, 'skeletonid', skeletonid)
    f = getField(p, 'skeletonid')
    assert skeletonid == int(f.content)

    # test the pointid
    pointid = random.randint (0,65535)
    f = setField(p, 'pointid', pointid)
    f = getField(p, 'pointid')
    assert pointid == int(f.content)

    # test the parentid
    parentid = random.randint (0,65535)
    f = setField(p, 'parentid', parentid)
    f = getField(p, 'parentid')
    assert parentid == int(f.content)

    # test the radius
    radius = random.random()
    f = setField(p, 'radius', radius)
    f = getField(p, 'radius')
    assert abs(radius - float(f.content)) < 0.001

    # test the location
    location = [random.random(), random.random(), random.random()]
    f = setField(p, 'location', ','.join([str(i) for i in location]))
    f = getField(p, 'location')
    assert ','.join([str(i) for i in location]) == f.content

    # make a bunch of children
    q = Params()
    q.token = 'unittest'
    q.resolution = 0
    q.channels = ['unit_anno']

    childids = []
    for i in range(0,4):
      makeAnno ( q, 9)
      f = setField(q, 'parent', p.annoid)
      childids.append(q.annoid)

    # Test children
    f = getField(p, 'children')
    rchildids = f.content.split(',')
    for cid in rchildids:
      assert int(cid) in childids
    assert len(rchildids) == 4
Example #6
    def __init__(self, answers, APP, nodeps = False, update = False, target_path = None, dryrun = False, **kwargs):
        run_path = os.path.dirname(os.path.realpath(__file__))
        self.dryrun = dryrun
        recursive = not nodeps

        app = APP #FIXME

        self.params = Params(recursive, update, target_path)
        self.utils = utils.Utils(self.params)

        if os.path.exists(app):
            logger.info("App path is %s, will be populated to %s" % (app, target_path))
            app = self.utils.loadApp(app)
        else:
            logger.info("App name is %s, will be populated to %s" % (app, target_path))
            
        if not target_path:
            if self.params.app_path:
                self.params.target_path = self.params.app_path
            else: 
                self.params.target_path = os.getcwd()

        self.params.app = app

        self.answers_file = answers
Example #7
    def __init__(self,
                 autogenerated,
                 entity_grid,
                 pos_x=None,
                 pos_y=None,
                 strength=None,
                 energy=None,
                 creature_id=None,
                 general_nn=None,
                 inherit_nn=None):
        self.params = Params()
        if self.params.seed:
            seed(self.params.seed)
        self.life_time = 1

        self.pos_x = pos_x
        self.pos_y = pos_y

        self.strength = strength
        self.energy = energy
        self.creature_id = creature_id

        self.inherit_nn = inherit_nn
        self.neural_net = general_nn

        if autogenerated:
            self.spawn_random(entity_grid)
Example #8
def main():
    # Read Params
    parser = argparse.ArgumentParser()
    parser.add_argument("-s" , "--solver", default="Jacobi", help="one of Jacobi, Gauss, SOR")
    parser.add_argument("-w" , "--omega" , default="1.0", help="w for SOR")
    parser.add_argument("-x" , "--length" , default="1.0", help="Length of one side of domain")
    args = parser.parse_args()
    P = Params("params.txt")
    P.solver = args.solver
    P.Lx = P.Ly = P.Lz = float(args.length)
    P.omega = float(args.omega)
    P.set_dependent()

    # Initialize Domain
    dom_initial = conditions.initial_domain()

    # Assign Solver
    solver = {'Jacobi': solvers.Jacobi, 'Gauss': solvers.Gauss, 'SOR': solvers.SOR}[P.solver]

    # Assign Driver
    driver = drivers.CN(solver)

    # Assign Boundary
    boundary = boundaries.Dirichlet()

    # Run it!
    tic = time.clock()
    dom_final, meaniters = evolve(dom_initial, driver, boundary)
    print (time.clock() - tic) / P.nSteps, meaniters

    # Plot it!
    if (P.plot):
        plot(dom_initial, dom_final)
Example #9
    def test_roi(self):
        """Upload an roi and test it's fields"""

        # Make a skeleton
        makeAnno(p, 9)

        # test the parent
        parent = random.randint(0, 65535)
        f = setField(p, 'parent', parent)
        f = getField(p, 'parent')
        assert parent == int(f.content)

        # make a bunch of children ROIs
        q = Params()
        q.token = 'unittest'
        q.resolution = 0
        q.channels = ['unit_anno']

        childids = []
        for i in range(0, 4):
            makeAnno(q, 9)
            f = setField(q, 'parent', p.annoid)
            childids.append(q.annoid)

        # Test children
        f = getField(p, 'children')
        rchildids = f.content.split(',')
        for cid in rchildids:
            assert int(cid) in childids
        assert len(rchildids) == 4
Example #10
    def test_node(self):
        """Upload a skeleton node and test it's fields"""

        # Make a node
        makeAnno(p, 7)

        # test the nodetype
        nodetype = random.randint(0, 100)
        f = setField(p, 'nodetype', nodetype)
        f = getField(p, 'nodetype')
        assert nodetype == int(f.content)

        # test the skeletonid
        skeletonid = random.randint(0, 65535)
        f = setField(p, 'skeletonid', skeletonid)
        f = getField(p, 'skeletonid')
        assert skeletonid == int(f.content)

        # test the pointid
        pointid = random.randint(0, 65535)
        f = setField(p, 'pointid', pointid)
        f = getField(p, 'pointid')
        assert pointid == int(f.content)

        # test the parentid
        parentid = random.randint(0, 65535)
        f = setField(p, 'parentid', parentid)
        f = getField(p, 'parentid')
        assert parentid == int(f.content)

        # test the radius
        radius = random.random()
        f = setField(p, 'radius', radius)
        f = getField(p, 'radius')
        assert abs(radius - float(f.content)) < 0.001

        # test the location
        location = [random.random(), random.random(), random.random()]
        f = setField(p, 'location', ','.join([str(i) for i in location]))
        f = getField(p, 'location')
        assert ','.join([str(i) for i in location]) == f.content

        # make a bunch of children
        q = Params()
        q.token = 'unittest'
        q.resolution = 0
        q.channels = ['unit_anno']

        childids = []
        for i in range(0, 4):
            makeAnno(q, 9)
            f = setField(q, 'parent', p.annoid)
            childids.append(q.annoid)

        # Test children
        f = getField(p, 'children')
        rchildids = f.content.split(',')
        for cid in rchildids:
            assert int(cid) in childids
        assert len(rchildids) == 4
Example #11
	def __init__(self):
		Params.__init__(self)
		self.PrivateUrl = "https://poloniex.com/tradingApi" #base url for private API methods
		self.nonce = None
		self.repeat = False #if it is true, the downloading cycle repeats; used for requests with errors
		self.numberOfRepeats = 0
		self.maxNumberOfRepeats = 10 #max number of attempts at the same request
Example #12
 def __init__(self):
     Params.__init__(self)
     self.number_of_pairs = len(self.currencyPairs)
     # self.Spred = None unnecessary?
     self.epochCounter = 0  #counts for tensorboard
     self.logs_path = 'tensorboard/' + strftime("%Y_%m_%d_%H_%M_%S",
                                                gmtime())
Example #13
def main():
    dict_ = pickle.load(open(Params.data_dir + "dictionary.pkl","r"))
    vocab_size = dict_.vocab_size
    model = Model(is_training = True, vocab_size = vocab_size); print("Built model")
    init = False
    devdata, dev_ind = get_dev()
    if not os.path.isfile(os.path.join(Params.logdir,"checkpoint")):
        init = True
        glove = np.memmap(Params.data_dir + "glove.np", dtype = np.float32, mode = "r")
        glove = np.reshape(glove,(vocab_size,Params.emb_size))
    with model.graph.as_default():
        config = tf.ConfigProto()
        config.gpu_options.allow_growth = True
        sv = tf.train.Supervisor(logdir=Params.logdir,
                        save_model_secs=0,
                        global_step = model.global_step,
                        init_op = model.init_op)
        with sv.managed_session(config = config) as sess:
            if init: sess.run(model.emb_assign, {model.word_embeddings_placeholder:glove})
            pr = Params()
            pr.dump_config(Params.__dict__)
            for epoch in range(1, Params.num_epochs+1):
                train_loss = []
                if sv.should_stop(): break
                for step in tqdm(range(model.num_batch), total = model.num_batch, ncols=70, leave=False, unit='b'):
                    _, loss = sess.run([model.train_op, model.mean_loss],
                                        feed_dict={model.dropout: Params.dropout if Params.dropout is not None else 0.0})
                    train_loss.append(loss)
                    if step % Params.save_steps == 0:
                        gs = sess.run(model.global_step)
                        sv.saver.save(sess, Params.logdir + '/model_epoch_%d_step_%d'%(gs//model.num_batch, gs%model.num_batch))
                    if step % Params.dev_steps == 0:
                        EM_ = []
                        F1_ = []
                        dev = []
                        for i in range(Params.dev_batchs):
                            sample = np.random.choice(dev_ind, Params.batch_size)
                            feed_dict = {data: devdata[i][sample] for i,data in enumerate(model.data)}
                            index, dev_loss = sess.run([model.output_index, model.mean_loss], feed_dict = feed_dict)
                            F1, EM = 0.0, 0.0
                            for batch in range(Params.batch_size):
                                f1, em = f1_and_EM(index[batch], devdata[-1][sample][batch], devdata[0][sample][batch], dict_)
                                F1 += f1
                                EM += em
                            F1 /= float(Params.batch_size)
                            EM /= float(Params.batch_size)
                            EM_.append(EM)
                            F1_.append(F1)
                            dev.append(dev_loss)
                        EM_ = np.mean(EM_)
                        F1_ = np.mean(F1_)
                        dev = np.mean(dev)
                        sess.run(model.metric_assign,{model.F1_placeholder: F1_, model.EM_placeholder: EM_, model.dev_loss_placeholder: dev})
                        print("\nTrain_loss: {}\nDev_loss: {}\nDev_Exact_match: {}\nDev_F1_score: {}".format(np.mean(train_loss),dev,EM_,F1_))
                        train_loss = []
Example #14
 def test_serialization(self):
     """Tests serialization methods."""
     p = Params()
     p.define("x", 0, "x desc")
     p.define("y", 1, "")
     p.y = 2
     self.assertEqual(p, Params.loads(p.dumps()))
Example #15
def submit_env(request):
    params = Params( request.POST )
    
    w2sDict = { 'X7_Q':'X7_Q_W2S', 'X7_E':'X7_E_W2S', 'X7_RK':'X7_PK_W2S' }
    client = MqClient( w2sDict )
    client.connect()
    client.send( params.json() )      
    s2wDict = { 'X7_Q':'X7_Q_S2W', 'X7_E':'X7_E_S2W', 'X7_RK':'X7_PK_S2W' }
    srv = MqServer(None,s2wDict)
    srv.connect()
    return render_to_response('init/progress.html')
Example #16
    def __init__(self):
        self.model = Model()
        self.task = Task()
        self.tag = Tag()
        self.config = Config()
        self.params = Params()
        self.screen = Screen()

        self.fzf = FzfPrompt()

        self.parser = argparse.ArgumentParser('ztm')
        self.parser.add_argument('-c', '--current', dest='active', action='store_true')
        self.parser.set_defaults(active=False)
Example #17
 def MakeModel(self, layer):
   params = Params()
   params.num_scales = self.NUM_SCALES
   params.s1_num_orientations = self.NUM_ORIENTATIONS
   model = Model(params = params)
   L = model.Layer
   if layer in (L.S2, L.C2, L.IT):
     # Make uniform-random S2 kernels
     kernel_shape = (self.NUM_PROTOTYPES,) + model.s2_kernel_shape
     kernels = np.random.uniform(0, 1, size = kernel_shape)
     for k in kernels:
       k /= np.linalg.norm(k)
     model.s2_kernels = kernels
   return model
Example #18
def relabel_tab(tab_label_store: str):
    """ If the setttings have any renaming for tab labels, apply them """
    preventupdate_if_empty(tab_label_store)
    params = Params(**json.loads(tab_label_store))
    params.fill_defaults()

    return [
        params.co_label,
        params.cu_label,
        params.ds_label,
        params.ex_label,
        params.pe_label,
        params.sa_label,
    ]
Example #19
class Vk_object:
    def __init__(self, method):
        self.method = method
        self.params = Params()
        self.manager = Manager(self.method)
        self.list = 'Try to get_batch or get_all'

    def _get_batch_(self, count=100, offset=0):

        self.params.add(count=count)
        self.params.add(offset=offset)
        batch = self.manager.get(self.params)
        self.params.pop('count')
        self.params.pop('offset')
        return batch

    def get_all(self, count=-1):
        if count == -1:
            count = self.count
        if self.method in ('groups.getMembers', 'friends.get', 'wall.get',
                           'wall.getComments'):
            self.list = []
            for i in range((count // 100) + 1):
                self.list.append(self._get_batch_(count=100, offset=i * 100))
            return self.list
        else:
            print('Nothing to get. Method "' + self.method +
                  '" does not have id to get as list.')
Example #20
 def test_description(self):
     """Tests description method."""
     p = Params()
     p.define("x", 0, "x desc")
     p.define("y", 1, "")
     self.assertEqual(p.description("x"), "x desc")
     self.assertEqual(p.description("y"), "")
     self.assertRaises(AttributeError, lambda: p.description("z"))
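The Params class exercised by test_serialization (Example #14) and test_description (Example #20) is not included in this listing. A minimal sketch that would satisfy those two tests, assuming pickle-based dumps/loads and a per-field description table, is shown below; it is illustrative only, not the project's actual implementation.

import pickle

class Params(object):
    """Illustrative stand-in for the Params class these two tests exercise."""

    def __init__(self):
        self._descriptions = {}

    def define(self, name, default, description):
        # Register a field with a default value and a human-readable description.
        setattr(self, name, default)
        self._descriptions[name] = description

    def description(self, name):
        # Unknown fields raise AttributeError, as test_description expects.
        if name not in self._descriptions:
            raise AttributeError(name)
        return self._descriptions[name]

    def dumps(self):
        # Serialize all fields (including the description table) to bytes.
        return pickle.dumps(self.__dict__)

    @staticmethod
    def loads(data):
        # Rebuild a Params instance from the output of dumps().
        p = Params()
        p.__dict__.update(pickle.loads(data))
        return p

    def __eq__(self, other):
        return isinstance(other, Params) and self.__dict__ == other.__dict__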
Example #21
    def __init__(self, address, protocol, vehicle):
        self.address = address
        self.vehicle = vehicle
        self.protocol = protocol

        # Queue is filled with incoming messages (receive_thread) and emptied by receive worker (receive_task_thread)
        self.msg_queue = Queue.Queue()

        # Loads the initial vehicle's values according to class Params
        self.vehicle_params = Params(network=self, vehicle=vehicle)

        # For collision avoidance purposes
        self.drones = []
        self.context = {'mode': None, 'mission': None, 'next_wp': None}
        self.priority = None

        # First entry in drones list will be our own vehicle
        self.drones.append(self.vehicle_params)

        # Normally it is kept as is until the first receive_thread
        # self.populate_drones('two_dummies')

        self.sock_send = None
        self.sock_receive = None

        # Create transceiver and worker threads
        self.t_send = SendThread(self, self.address)
        self.t_receive = ReceiveThread(self, self.msg_queue)
        self.t_task = ReceiveTaskThread(self, self.msg_queue)

        self.receive_count = self.t_receive.count
        self.task_count = self.t_task.count
Example #22
def AE(X, AE_Model, AE_Model_var):
    model = AE_Model_var
    if model is None:
        print('training')
        print('AE_Model', AE_Model)
        params = Params("hparams.yaml", AE_Model)
        net = getattr(models, params.model_name)
        input_dim = X.shape[-1]
        model = net(input_dim, l1_factor=params.l1_factor)
        X = X.astype('float32').values
        model.compile(loss=losses.mean_squared_error,
                      optimizer=Adam(lr=params.lr),
                      metrics=['mse'])
        training_logs = model.fit(X,
                                  X,
                                  epochs=params.num_epochs,
                                  verbose=0,
                                  batch_size=params.batch_size,
                                  shuffle=True)
    else:
        print('already trained')
        X = X.astype('float32').values

    preds = np.array(model.predict(X))
    mse = np.mean((X - preds)**2, axis=1)

    return mse, model
Example #23
def make_sorted_gt():
    # Only keep video snapshots that contain at least 10 face crops from each video.
    # The sorted gt will only include ground truth from videos that satisfy the minimal requirement.
    # im_list is a variable that saves a list of images, sorted by the ground truth's video sequence.
    # You may edit im_list to make an image list for caffe, or other frameworks.

    p = Params(config_path='params.cfg')
    cha_raw_gt = pd.read_pickle(p.cha_gt_file)
    new_gt = pd.DataFrame(columns=list(cha_raw_gt.columns.values))
    video_list = cha_raw_gt.index.tolist()

    face_img_names = listdir(p.crop_face_im_dir)
    im_list, im_count_list = [], []
    for cur_ind, cur_video in enumerate(video_list):
        if cur_ind % 100 == 0:
            print '{}-th video out of {}...'.format(cur_ind, len(video_list))
        pattern = cur_video[:-4]
        subset = fnmatch.filter(face_img_names, pattern+'*.jpg')
        if len(subset) > p.lower_bound_frame_num:
            im_list.extend(subset)
            im_count_list.append(len(subset))
            new_gt.loc[cha_raw_gt.index[cur_ind]] = cha_raw_gt.iloc[cur_ind]
        else:
            print 'video {} only contains {} image snapshots. Not included in the selection'.format(cur_video, len(subset))

    new_gt['img_frame_num'] = pd.Series(im_count_list, index=new_gt.index)
    new_gt.to_pickle(p.mapped_gt_file)
    print 'New gt length = {}'.format(len(new_gt))  # how many videos are kept.

    return
Example #24
    def __init__(self):
        self.params = Params()

        if self.params.seed:
            seed(self.params.seed)

        self.action_space = [i for i in range(self.params.action_size)]
        self.state_size = (self.params.vision_grid, self.params.vision_grid,
                           self.params.state_features)

        self.experience_replay = deque(maxlen=self.params.memory_size)

        self.run_counter = 1  # creature counter
        self.align_counter = 1  # batch retrain counter
        self.q_eval = DeepQNetwork()
        self.q_next = DeepQNetwork()

        # tensorboard
        if self.params.tensorboard:
            self.writer = self.board()
        self.cum_reward = 0

        self.align_target()
        self.random_action = True
        self.loss = 0
        self.agent_hash = randint(1, 10000000)
Example #25
 def makeEvolutionMatrix(age):
     params = Params(age)
     matrix = EvolutionMatrix()
     matrix._matrix[State.EXPOSED][
         State.PRODROMIC_INFECTIOUS] = 1 / params['inv_epsilon']
     matrix._matrix[State.PRODROMIC_INFECTIOUS][
         State.ASYMPTOMATIC_INFECTIOUS] = params['p_a'] / params['inv_mu_p']
     matrix._matrix[State.PRODROMIC_INFECTIOUS][State.MILD_INFECTIOUS] = (
         1 - params['p_a']) * params['p_ms'] / params['inv_mu_p']
     matrix._matrix[State.PRODROMIC_INFECTIOUS][State.SEVERE_INFECTIOUS] = (
         1 - params['p_a']) * params['p_ss'] / params['inv_mu_p']
     matrix._matrix[State.PRODROMIC_INFECTIOUS][
         State.PAUCYSYMPTOMATIC_INFECTIOUS] = (
             1 - params['p_a']) * params['p_ps'] / params['inv_mu_p']
     matrix._matrix[State.ASYMPTOMATIC_INFECTIOUS][
         State.RECOVERED] = 1 / params['inv_mu']
     matrix._matrix[State.MILD_INFECTIOUS][
         State.RECOVERED] = 1 / params['inv_mu']
     matrix._matrix[State.PAUCYSYMPTOMATIC_INFECTIOUS][
         State.RECOVERED] = 1 / params['inv_mu']
     matrix._matrix[State.SEVERE_INFECTIOUS][
         State.HOSPITAL] = 1 / params['inv_mu'] * (1 - params['p_ICU'])
     matrix._matrix[State.SEVERE_INFECTIOUS][
         State.ICU] = 1 / params['inv_mu'] * (params['p_ICU'])
     matrix._matrix[State.HOSPITAL][State.DEAD] = params['lambda_H_D']
     matrix._matrix[State.HOSPITAL][State.RECOVERED] = params['lambda_H_R']
     matrix._matrix[State.ICU][State.DEAD] = params['lambda_ICU_D']
     matrix._matrix[State.ICU][State.RECOVERED] = params['lambda_ICU_R']
     matrix.set_diagonal()
     return matrix
Example #26
    def check_yaml(path: str) -> Params:
        import yaml

        yml = yaml.safe_load(open(path, 'r'))

        if (Checker.url(yml['url']) and Checker.tag_type(yml['tag_type'])
                and Checker.tag_identifier(yml['tag_identifier'])
                and Checker.tag_value(yml['tag_value'])
                and Checker.freq_val(yml['freq_val'])
                and Checker.freq_type(yml['freq_type'])):
            return Params(yml)
        else:
            if not Checker.url(yml['url']):
                print(f"URL '{yml['url']}' was not valid.")

            if not Checker.tag_type(yml['tag_type']):
                print(f"Tag type '{yml['tag_type']}' was not valid.")

            if not Checker.tag_identifier(yml['tag_identifier']):
                print(
                    f"Tag identifier '{yml['tag_identifier']}' was not valid.")

            if not Checker.tag_value(yml['tag_value']):
                print(f"'{yml['tag_value']}' was not valid.")

            if not Checker.freq_val(yml['freq_val']):
                print(f"Frequency '{yml['freq_val']}' was not valid.")

            if not Checker.freq_type(yml['freq_type']):
                print(f"Time measure '{yml['freq_type']}' was not valid.")
Example #27
class TestRegionMapper(unittest.TestCase):
    params = Params()
    params.retina_enabled = True
    params.retina_kwidth = 15
    params.s1_kwidth = 11
    params.s1_scaling = 2
    params.c1_kwidth = 5
    params.c1_scaling = 2
    params.s2_kwidth = 7
    params.c2_kwidth = 3
    params.c2_scaling = 2
    rm = RegionMapper(params)

    def testRetinaToImage(self):
        self.assertEqual(slice(0, 15), self.rm.MapRetinaToImage(slice(0, 1)))

    def testS1ToImage(self):
        self.assertEqual(slice(0, 25), self.rm.MapS1ToImage(slice(0, 1)))

    def testC1ToImage(self):
        self.assertEqual(slice(0, 33), self.rm.MapC1ToImage(slice(0, 1)))

    def testS2ToImage(self):
        self.assertEqual(slice(0, 57), self.rm.MapS2ToImage(slice(0, 1)))

    def testC2ToImage(self):
        self.assertEqual(slice(0, 73), self.rm.MapC2ToImage(slice(0, 1)))
Example #28
def run_evaluation(model_type):
    params = Params('params/' + model_type + '.json')
    
    score, scores = evaluate_actor_critic(params, 'models/' + model_type + '.pt')

    print('Average reward after 100 episodes: {0:.2f}'.format(score))
    plot_performance(scores)
Example #29
    def __init__(self):
        self.params = Params()
        self.y_boundary = None
        self.o_height = 400
        self.o_width = 400
        self.height = 400
        self.width = 400

        self.quit_command = False

        self.color = Color()

        self.click_pos_x = None
        self.click_pos_y = None

        self.gap = None
        self.box_size = None
        self.padding_width = None
        self.padding_height = None
        self.hovered_x = None
        self.hovered_y = None

        pygame.init()
        self.canvas = pygame.display.set_mode((self.width, self.height), pygame.RESIZABLE)
        pygame.display.set_caption('Ecosystem Simulator')

        self.mouse_pos_x = pygame.mouse.get_pos()[0]
        self.mouse_pos_y = pygame.mouse.get_pos()[1]
Example #30
def sample_all_video():
    start_time = time.time()
    p = Params(config_path='params.cfg')
    generate_mp4_list(p)

    if not os.path.exists(p.crop_full_im_dir):
        os.makedirs(p.crop_full_im_dir)

    if not os.path.exists(p.crop_face_im_dir):
        os.makedirs(p.crop_face_im_dir)

    with open(p.mp4_list_file) as f:  # Iteratively process all 6000 videos.
        file_list = f.readlines()

    face_counts_per_video = []
    time_points = np.linspace(0, p.video_total_msec, p.upper_bound_frame_num)
    face_cascade = cv2.CascadeClassifier(
        p.face_detector_file)  # initialize face detector
    for i, cur_v_path in enumerate(file_list):
        v_path = cur_v_path.strip()
        face_count = sample_one_video(v_path, p, time_points, face_cascade)
        face_counts_per_video.append(face_count)
        if i % 20 == 0:
            print 'Current video num {} out of {}. Total elapsed time: {} seconds. \n'.\
                format(i, len(file_list), time.time() - start_time)

    df = pd.DataFrame({
        'filename': file_list,
        'image_count': face_counts_per_video
    })
    df.to_pickle(p.sample_num_record_file)
    return
Example #31
    def __init__(self, answers, APP, dryrun = False, debug = False, **kwargs):

        self.debug = debug
        self.dryrun = dryrun
        self.kwargs = kwargs
        if "answers_output" in kwargs:
            self.answers_output = kwargs["answers_output"]

        if os.environ and "IMAGE" in os.environ:
            self.app_path = APP
            APP = os.environ["IMAGE"]
            del os.environ["IMAGE"]

        if APP and os.path.exists(APP):
            self.app_path = APP
        else:
            self.app_path = os.getcwd()
            install = Install(answers, APP, dryrun = dryrun, target_path = self.app_path)
            install.install()

        self.params = Params(target_path=self.app_path)
        if "ask" in kwargs:
            self.params.ask = kwargs["ask"]

        self.utils = Utils(self.params)

        self.answers_file = answers
        self.plugin = Plugin()
        self.plugin.load_plugins()
Example #32
def main():
    logging.basicConfig(filename='load_balancer.log', level=logging.INFO)
    logging.info('Started')

    #This takes input and validates it
    params = Params()

    #This is how you access the required params
    print params.vip_interface
    print params.vip_port
    print params.vip_ip
    print params.target_ip #do remember that the string elements of the list are in unicode
    print params.load_balancer_algorithm

    #After opening the socket, and receiving the entire packet in sampleFrame
    sampleFrame = '\x00\x02\x157\xa2D\x00\xae\xf3R\xaa\xd1\x08\x00E\x00\x00C\x00\x01\x00\x00@\x06x<\xc0\xa8\x05\x15B#\xfa\x97\x00\x14\x00P\x00\x00\x00\x00\x00\x00\x00\x00P\x02 \x00\xbb9\x00\x00GET /index.html HTTP/1.0 \n\n'
    
    #Instantiate the frame
    frame = Frame(sampleFrame)


    try:
        s = socket.socket( socket.AF_PACKET , socket.SOCK_RAW , socket.ntohs(0x0003))
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    except ValueError:
        print "cant open socket"
    print 'socket opened'

    s.bind(("virtual0",0))
    print 'socket bound to virtual0'
    
    logging.info('Finished')
Example #33
def menu():
    url = load_wsdl_url()
    while True:
        print("Выберите операцию:")
        print("1. Сложение")
        print("2. Вычитание")
        print("3. Умножение")
        print("4. Деление")
        print("0. Выход")
        option = int(input())

        if option not in range(5):
            print("Неверный ввод!")
            continue
        if option == 0:
            break

        operation = ''
        if option == 1:
            operation = 'add'
        if option == 2:
            operation = 'sub'
        if option == 3:
            operation = 'mul'
        if option == 4:
            operation = 'div'
        print("Введите 1 операнд:")
        arg1 = input()
        print("Введите 2 операнд:")
        arg2 = input()
        params = Params(arg1, arg2, operation)
        print(f"Результат: {soap_request(url, params)}")
        print('---------------------------------')
Example #34
    def on_press(self):
        _findchild = self.parent.findChild  # for leaner code

        url = _findchild(UrlInput, UrlInput.name).text()
        tag_type = _findchild(TagType, TagType.name).currentText()
        tag_identifier = _findchild(TagIdentifier,
                                    TagIdentifier.name).currentText()
        tag_value = _findchild(TagIdentifierValue,
                               TagIdentifierValue.name).text()
        freq_type = _findchild(FreqType, FreqType.name).currentText()
        freq_val = _findchild(FreqValue, FreqValue.name).value()

        if (Checker.url(url) and Checker.tag_type(tag_type)
                and Checker.tag_identifier(tag_identifier)
                and Checker.tag_value(tag_value) and Checker.freq_val(freq_val)
                and Checker.freq_type(freq_type)):
            # start monitoring
            params = Params({
                'url': url,
                'tag_type': tag_type,
                'tag_identifier': tag_identifier,
                'tag_value': tag_value,
                'freq_type': freq_type,
                'freq_val': freq_val,
            })
            monitor = Monitor(params)
            monitor.start_thread(self.on_html_tag_change)

            # insert result label
            self.result_lab = ResultLab(self.parent)
            _findchild(QHBoxLayout, 'row5').addWidget(self.result_lab)

            # start animation
            self.result_lab.animate()
        else:
            # handle error
            error_msg = "Your input had the following errors:\n"
            if not Checker.url(url):
                error_msg += f"- URL '{url}' was not valid. Make sure it's not empty, does not contain spaces and starts with 'http://'\n"

            if not Checker.tag_type(tag_type):
                error_msg += f"- Tag type '{tag_type}' is not a valid HTML tag.\n"

            if not Checker.tag_identifier(tag_identifier):
                error_msg += f"- Tag identifier '{tag_identifier}' must be either 'id' or 'class'.\n"

            if not Checker.tag_value(tag_value):
                error_msg += f"- Tag identifier '{tag_value}' must not be empty and must not contain spaces.\n"

            if not Checker.freq_val(freq_val):
                error_msg += f"- Frequency '{freq_val}' must be at least 1.\n"

            if not Checker.freq_type(freq_type):
                error_msg += f"- Time measure '{freq_type}' must be one of 'seconds', 'minutes' or 'hours'. Shortcuts 's', 'm' and 'h' are also accepted.\n"

            error_dialog = QMessageBox()
            error_dialog.setIcon(QMessageBox.Critical)
            error_dialog.setText(error_msg)
            error_dialog.setWindowTitle("Error!")
            error_dialog.exec_()
Example #35
def run_inference(model_type):
    params = Params('params/' + model_type + '.json')
    
    score, scores = actor_critic_inference(params, 'models/' + model_type + '.pt')

    print('Total score: {0:.2f}'.format(score))
    plot_performance(scores)
Example #36
def read_test_result_from_metrics_file(path: str) -> Result:
    els = path.split('/')
    filename = els[-1]
    classifier = els[-2]
    dirname = els[-3]
    stacked = '_on_' in dirname
    balanced = 'balanced_' in classifier
    to_binary = 'binary_' in classifier
    use_only_ab = '_using_only_ab' in dirname
    features_str = filename[:-len('_metrics.txt')]
    features = features_str.split('__')
    with open(path, 'r', encoding="utf-8") as f:
        lines = f.readlines()
        accuracy = float(lines[0].split(': ')[1])
        f1_score = None
        balanced_accuracy_adjusted = None
        for line in lines:
            if 'f1_score' in line:
                f1_score = float(line.split(': ')[1])
            if 'balanced_accuracy_adjusted' in line:
                balanced_accuracy_adjusted = float(line.split(': ')[1])

        metrics = Metrics(accuracy, balanced_accuracy_adjusted=balanced_accuracy_adjusted, f1_score=f1_score)
        # TODO read f1 scores and more here as well
        # TODO pca is hardset to False

        return Result(metrics=metrics,
                      params=Params(features=features, classifier=classifier, dirname=dirname, stacked=stacked,
                                    params={'balanced': balanced}, to_binary=to_binary, use_only_ab=use_only_ab,
                                    pca=False))
Example #37
    def __init__(self, name, schema = None, dryrun = False):
        self.name = name
        self.app_id = self._nameToId(name)
        self.dryrun = dryrun
        self.schema_path = schema

        if not self.schema_path:
            self.schema_path = SCHEMA_URL

        self.params = Params()
        self.params.app = self.app_id
Example #38
  def test_neuron (self):
    """Upload a neuron and test it's fields"""

    # Make a neuron
    makeAnno (p, 5)

    # make a bunch of segments and add to the neuron
    q = Params()
    q.token = 'unittest'
    q.resolution = 0
    q.channels = ['unit_anno']

    segids = []
    for i in range(0,5):
      makeAnno ( q, 4)
      f = setField(q, 'neuron', p.annoid)
      segids.append(q.annoid)

    # Test segments
    f = getField(p, 'segments')
    rsegids = f.content.split(',')
    for sid in rsegids:
      assert int(sid) in segids
    assert len(rsegids) == 5
Example #39
def gibbs_doc(model, doc, params = None, callback = None):
  """Runs Gibbs iterations on a single document, by sampling with a prior constructed from each sample in the given Model. params applies to each sample, so should probably be much more limited than usual - the default if its undefined is to use 1 run and 1 sample and a burn in of only 500. Returns a DocModel with all the relevant samples in."""
  
  # Initialisation stuff - handle params, create the state and the DocModel object, plus a reporter...
  if params is None:
    params = Params()
    params.runs = 1
    params.samples = 1
    params.burnIn = 500

  state = State(doc, params)
  dm = DocModel()
  reporter = ProgReporter(params,callback,model.sampleCount())

  # Iterate and run for each sample in the model...
  for sample in model.sampleList():
    tempState = State(state)
    tempState.setGlobalParams(sample)
    tempState.addPrior(sample)
    gibbs_run(tempState,reporter.next)
    dm.addFrom(tempState.getModel())

  # Return...
  return dm
Example #40
    def __init__(self, answers, APP, dryrun = False, debug = False, **kwargs):

        self.debug = debug
        self.dryrun = dryrun
        self.kwargs = kwargs
        if "answers_output" in kwargs:
            self.answers_output = kwargs["answers_output"]

        if APP and os.path.exists(APP):
            self.app_path = APP
        else:
            raise Exception("App path %s does not exist." % APP)

        self.params = Params(target_path=self.app_path)
        if "ask" in kwargs:
            self.params.ask = kwargs["ask"]

        self.utils = Utils(self.params)

        self.answers_file = answers
        self.plugin = Plugin()
        self.plugin.load_plugins()
Example #41
class Install():
    dryrun = False
    params = None
    answers_file = None

    def __init__(self, answers, APP, nodeps = False, update = False, target_path = None, dryrun = False, **kwargs):
        run_path = os.path.dirname(os.path.realpath(__file__))
        self.dryrun = dryrun

        app = APP #FIXME

        self.params = Params(nodeps, update, target_path)
        self.utils = utils.Utils(self.params)

        if os.path.exists(app):
            logger.info("App path is %s, will be populated to %s" % (app, target_path))
            app = self.utils.loadApp(app)
        else:
            logger.info("App name is %s, will be populated to %s" % (app, target_path))

        if not target_path:
            if self.params.app_path:
                self.params.target_path = self.params.app_path
            else: 
                self.params.target_path = os.getcwd()

        self.params.app = app

        self.answers_file = answers

    def _copyFromContainer(self, image):
        image = self.utils.getImageURI(image)

        name = "%s-%s" % (self.utils.getComponentName(image), ''.join(random.sample(string.letters, 6)))
        logger.debug("Creating a container with name %s" % name)
        
        create = ["docker", "create", "--name", name, image, "nop"]
        subprocess.call(create)
        cp = ["docker", "cp", "%s:/%s" % (name, utils.APP_ENT_PATH), self.utils.tmpdir]
        logger.debug(cp)
        if not subprocess.call(cp):
            logger.debug("Application entity data copied to %s" % self.utils.tmpdir)

        rm = ["docker", "rm", name]
        subprocess.call(rm)

    def _populateApp(self, src = None, dst = None):
        logger.info("Copying app %s" % self.utils.getComponentName(self.params.app))
        if not src:
            src = os.path.join(self.utils.tmpdir, APP_ENT_PATH)

        if not dst:
            dst = self.params.target_path
        distutils.dir_util.copy_tree(src, dst, update=(not self.params.update))
        self.utils.checkAllArtifacts()

    def install(self):
        self.params.loadAnswers(self.answers_file)

        if self.params.app_path and not self.params.target_path == self.params.app_path:
            logger.info("Copying content of directory %s to %s" % (self.params.app_path, self.params.target_path))
            self._populateApp(src=self.params.app_path)

        mainfile_path = os.path.join(self.params.target_path, MAIN_FILE)

        if not self.params.app_path and (self.params.update or not os.path.exists(self.utils.getComponentDir(self.params.app))):
            self.utils.pullApp(self.params.app)
            self._copyFromContainer(self.params.app)
            mainfile_path = os.path.join(self.utils.getTmpAppDir(), MAIN_FILE)
            logger.debug("%s path for pulled image: %s" % (MAIN_FILE, mainfile_path))
            self.params.loadMainfile(mainfile_path)
            logger.debug("App ID: %s" % self.params.app_id)

            self._populateApp()
        else:
            logger.info("Component data exist in %s, skipping population..." % self.utils.getComponentDir(self.params.app))

        if not self.params.mainfile_data:
            self.params.loadMainfile(mainfile_path)

        values = {}
        if not self.params.nodeps:
            logger.info("Installing dependencies for %s" % self.params.app_id)
            values = self._installDependencies()

        logger.debug(values)
        self.params.loadAnswers(values)
        logger.debug(self.params.answers_data)
        if self.params.write_sample_answers:
            print("blah")
            self.params.writeAnswersSample()

        return values

    def _installDependencies(self):
        values = {}
        for component, graph_item in self.params.mainfile_data["graph"].iteritems():
            if not self.utils.isExternal(graph_item):
                values[component] = self.params.getValues(component, skip_asking = True)
                logger.debug("Component %s is part of the app" % component)
                logger.debug("Values: %s" % values)
                continue

            logger.info("Component %s is external dependency" % component)

            image_name = self.utils.getSourceImage(graph_item)
            component_path = self.utils.getExternalAppDir(component)
            logger.debug("Component path: %s" % component_path)
            if not component == self.params.app_id and (not os.path.isdir(component_path) or self.params.update): #not self.params.app_path or  ???
                logger.info("Pulling %s" % image_name)
                component_app = Install(self.params.answers_data, image_name, self.params.nodeps, self.params.update, component_path, self.dryrun)
                values = self.params._update(values, component_app.install())
                logger.info("Component installed into %s" % component_path)
            else:
                logger.info("Component %s already exists at %s - remove the directory or use --update option" % (component, component_path))

        return values
Example #42
import random
import h5py

sys.path += [os.path.abspath("../django")]
import OCP.settings

os.environ["DJANGO_SETTINGS_MODULE"] = "OCP.settings"

from params import Params
from postmethods import getURL, postURL, putAnnotation
import makeunitdb
import site_to_test

SITE_HOST = site_to_test.site

p = Params()
p.token = "unittest"
p.resolution = 0
p.channels = ["unit_anno"]


class Test_Annotation_Json:
    def setup_class(self):
        """Setup Parameters"""
        makeunitdb.createTestDB(p.token, readonly=0)

    def teardown_class(self):
        """Teardown Parameters"""
        makeunitdb.deleteTestDB(p.token)

    def test_basic_json(self):
Example #43
import random
import csv
import numpy as np
import pytest
import httplib
from contextlib import closing

from params import Params
from postmethods import putAnnotation, getAnnotation, getURL, postURL
import kvengine_to_test
import site_to_test
import makeunitdb
SITE_HOST = site_to_test.site


p = Params()
p.token = 'unittest'
p.resolution = 0
p.channels = ['unit_anno']


def H5AnnotationFile ( annotype, annoid, kv=None ):
  """Create an HDF5 file and populate the fields. Return a file object.
      This is a support routine for all the RAMON tests."""

  # Create an in-memory HDF5 file
  tmpfile = tempfile.NamedTemporaryFile()
  h5fh = h5py.File ( tmpfile.name )

  # Create the top level annotation id namespace
  idgrp = h5fh.create_group ( str(annoid) )
Example #44
import tempfile
import h5py
import urllib2
import zlib
import cStringIO
import blosc
import time

sys.path += [os.path.abspath('../django/')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'ocpblaze.settings'

from ocplib import MortonXYZ
from params import Params

p = Params()
p.token = "blaze"
p.resolution = 0
p.channels = ['image']
p.window = [0,0]
p.channel_type = "image"
p.datatype = "uint32"
SIZE = 1024
ZSIZE = 16

def generateURL(zidx):
  """Run the Benchmark."""

  i = zidx
  [x,y,z] = MortonXYZ(i)
  p.args = (x*SIZE, (x+1)*SIZE, y*SIZE, (y+1)*SIZE, z*ZSIZE, (z+1)*ZSIZE)
Example #45
# limitations under the License.

import pytest
import random
import numpy as np
import makeunitdb
from ndlib.ndtype import *
from params import Params
from postmethods import *
from test_settings import *

# Test_Blosc
# 1 - test_get_blosc
# 2 - test_post_blosc

p = Params()
p.token = 'unittest'
p.resolution = 0
p.channels = ['IMAGE1', 'IMAGE2']
p.window = [0,500]
p.channel_type = TIMESERIES
p.datatype = UINT8
p.voxel = [4.0,4.0,3.0]

class Test_Blosc:

  def setup_class(self):

    makeunitdb.createTestDB(p.token, channel_list=p.channels, channel_type=p.channel_type, channel_datatype=p.datatype)

  def teardown_class(self):
Example #46
class Run():
    debug = False
    dryrun = False
    params = None
    answers_data = {GLOBAL_CONF: {}}
    tmpdir = None
    answers_file = None
    provider = DEFAULT_PROVIDER
    installed = False
    plugins = []
    update = False
    app_path = None
    target_path = None
    app_id = None
    app = None
    answers_output = None
    kwargs = None

    def __init__(self, answers, APP, dryrun = False, debug = False, **kwargs):

        self.debug = debug
        self.dryrun = dryrun
        self.kwargs = kwargs
        if "answers_output" in kwargs:
            self.answers_output = kwargs["answers_output"]

        if APP and os.path.exists(APP):
            self.app_path = APP
        else:
            raise Exception("App path %s does not exist." % APP)

        self.params = Params(target_path=self.app_path)
        if "ask" in kwargs:
            self.params.ask = kwargs["ask"]

        self.utils = Utils(self.params)

        self.answers_file = answers
        self.plugin = Plugin()
        self.plugin.load_plugins()

    def _dispatchGraph(self):
        if not "graph" in self.params.mainfile_data:
            raise Exception("Graph not specified in %s" % MAIN_FILE)

        for component, graph_item in self.params.mainfile_data["graph"].iteritems():
            if self.utils.isExternal(graph_item):
                component_run = Run(self.answers_file, self.utils.getExternalAppDir(component), self.dryrun, self.debug, **self.kwargs)
                ret = component_run.run()
                if self.answers_output:
                    self.params.loadAnswers(ret)
            else:
                self._processComponent(component, graph_item)

    def _applyTemplate(self, data, component):
        template = Template(data)
        config = self.params.getValues(component)
        logger.debug("Config: %s " % config)

        output = None
        while not output:
            try:
                logger.debug(config)
                output = template.substitute(config)
            except KeyError as ex:
                name = ex.args[0]
                logger.debug("Artifact contains unknown parameter %s, asking for it" % name)
                config[name] = self.params._askFor(name, {"description": "Missing parameter '%s', provide the value or fix your %s" % (name, MAIN_FILE)})
                if not len(config[name]):
                    raise Exception("Artifact contains unknown parameter %s" % name)
                self.params.loadAnswers({component: {name: config[name]}})

        return output

    def _processComponent(self, component, graph_item):
        logger.debug("Processing component %s" % component)
        
        data = None
        artifacts = self.utils.getArtifacts(component)
        artifact_provider_list = []
        if not self.params.provider in artifacts:
            raise Exception("Data for provider \"%s\" are not part of this app" % self.params.provider)
        
        dst_dir = os.path.join(self.utils.tmpdir, component)
        for artifact in artifacts[self.params.provider]:
            artifact_path = self.utils.sanitizePath(artifact)
            with open(os.path.join(self.app_path, artifact_path), "r") as fp:
                data = fp.read()

            logger.debug("Templating artifact %s/%s" % (self.app_path, artifact_path))
            data = self._applyTemplate(data, component)
        
            artifact_dst = os.path.join(dst_dir, artifact_path)
            
            if not os.path.isdir(os.path.dirname(artifact_dst)):
                os.makedirs(os.path.dirname(artifact_dst))
            with open(artifact_dst, "w") as fp:
                logger.debug("Writing artifact to %s" % artifact_dst)
                fp.write(data)

            artifact_provider_list.append(artifact_path)

        provider_class = self.plugin.getProvider(self.params.provider)
        provider = provider_class(self.params.getValues(component), artifact_provider_list, dst_dir, self.dryrun)
        if provider:
            logger.info("Using provider %s for component %s" % (self.params.provider, component))
        else:
            raise Exception("Something is broken - couldn't get the provider")
        provider.init()
        provider.deploy()

    def run(self):
        self.params.loadMainfile(os.path.join(self.params.target_path, MAIN_FILE))
        self.params.loadAnswers(self.answers_file)

        self.utils.checkArtifacts()
        config = self.params.get()
        if "provider" in config:
            self.provider = config["provider"]

        self._dispatchGraph()


#Think about this a bit more probably - it's (re)written for all components...
        if self.answers_output:
            self.params.writeAnswers(self.answers_output)
            return self.params.answers_data

        return None
Example #47
# 2 - test_yz
# 3 - test_xz
# 4 - test_xy_incorrect

# Test_Image_Post
# 1 - test_npz 
# 2 - test_npz_incorrect_region
# 3 - test_npz_incorrect_datatype
# 4 - test_hdf5
# 5 - test_hdf5_incorrect_region
# 6 - test_hdf5_incorrect_datatype
# 7 - test_npz_incorrect_channel
# 8 - test_hdf5_incorrect_channel


p = Params()
p.token = 'unittest'
p.resolution = 0
p.channels = ['CHAN1', 'CHAN2']
p.window = [0,500]
p.channel_type = IMAGE
p.datatype = FLOAT32
#p.args = (3000,3100,4000,4100,500,510)


class Test_Probability_Slice:

  def setup_class(self):

    makeunitdb.createTestDB(p.token, channel_list=p.channels, channel_type=p.channel_type, channel_datatype=p.datatype)
Example #48
'''
This implements the standard SGD algorithm
'''

from params import Params
from latent_model import LatentModel
from mm_data_file import MMDataFile
from sgd_mf_machine import SGDMachine
import time
import numpy as np


if __name__ == "__main__":
    # Parameters/Model initialization
    params = Params()
    params.parse_args("SGD method.")
    params.print_params()

    model = LatentModel(params)

    # Handling I/O things
    data = MMDataFile()
    data.read_file(params)
    print "File load successful..."
    
    # Run the actual training program
    program = SGDMachine()
    start = time.clock()
    program.train(params, model, data)
    proc_time = (time.clock() - start)
    
Example #49
application = get_wsgi_application()

from postmethods import getURL, postNPZ, getNPZ
from ocptype import PROPAGATED, NOT_PROPAGATED, UNDER_PROPAGATION, ISOTROPIC, READONLY_TRUE, READONLY_FALSE
from params import Params
import kvengine_to_test
import site_to_test
import makeunitdb

SITE_HOST = site_to_test.site

# Test_Propagate
#
# 1 - test_update_propagate - Test the propagate service set values

p = Params()
p.token = 'unittest'
p.resolution = 0
p.channels = ['chan1']
p.channel_type = "image"
p.datatype = "uint8"

class Test_Image_Zslice_Propagate:
  """Test image propagation"""

  def setup_class(self):
    """Create the unittest database"""
    makeunitdb.createTestDB(p.token, public=True, channel_list=p.channels, channel_type=p.channel_type, channel_datatype=p.datatype, ximagesize=1000, yimagesize=1000, zimagesize=10)

  def teardown_class (self):
    """Destroy the unittest database"""
Example #50
# 2 - test_yz
# 3 - test_xz
# 4 - test_xy_incorrect

# Test_Image_Post
# 1 - test_npz
# 2 - test_npz_incorrect_region
# 3 - test_npz_incorrect_datatype
# 4 - test_hdf5
# 5 - test_hdf5_incorrect_region
# 6 - test_hdf5_incorrect_datatype
# 7 - test_npz_incorrect_channel
# 8 - test_hdf5_incorrect_channel


p = Params()
p.datatype = FLOAT32


class Test_Probability_Slice:

  def setup_class(self):

    makeunitdb.createTestDB(p.token, channel_list=p.channels, channel_type=p.channel_type, channel_datatype=p.datatype)

  def teardown_class(self):
    makeunitdb.deleteTestDB(p.token)

  def test_xy (self):
    """Test the xy slice cutout"""
Example #51
import numpy as np
import pytest
from contextlib import closing

import makeunitdb
from params import Params
from postmethods import postNPZ, getNPZ 
import kvengine_to_test
import site_to_test
SITE_HOST = site_to_test.site

from ocptype import UINT8, UINT16, UINT32, ANNOTATION, IMAGE 

from OCP import celery_app 

p = Params()
p.token = 'unittest'
p.channels = ['testchannel']
p.args = (0,1024,0,1024,1,11)

class Test_Histogram8:

  def setup_class(self):
    """Create the unittest database"""
    makeunitdb.createTestDB(p.token, p.channels, channel_type=IMAGE, channel_datatype=UINT8, public=True, ximagesize=1024, yimagesize=1024, zimagesize=10, xvoxelres=1.0, yvoxelres=1.0, zvoxelres=10.0, readonly=0)

  def teardown_class (self):
    """Destroy the unittest database"""
    makeunitdb.deleteTestDB(p.token)

  def test_genhistogram (self):
Example #52
 def permutations(self):
   p = Params()
   for c in self.c:
     p.setC(c)
     for rebalance in self.rebalance:
       p.setRebalance(rebalance)
       for kernel in self.kernel:
         p.setKernel(kernel)
         for p1 in self.p1:
           p.setP1(p1)
           for p2 in self.p2:
             p.setP2(p2)
             yield copy(p)
Example #53
class Create(object):
    name = None
    app_id = None
    dryrun = False
    schema = None
    def __init__(self, name, schema = None, dryrun = False):
        self.name = name
        self.app_id = self._nameToId(name)
        self.dryrun = dryrun
        self.schema_path = schema

        if not self.schema_path:
            self.schema_path = SCHEMA_URL

        self.params = Params()
        self.params.app = self.app_id

    def _loadSchema(self):
        if not os.path.isfile(self.schema_path):
            response = urllib2.urlopen(self.schema_path)
            with open(os.path.basename(self.schema_path), "w") as fp:
                fp.write(response.read())
                self.schema_path = os.path.basename(self.schema_path)

        with open(self.schema_path, "r") as fp:
            self.schema = json.load(fp)

    def create(self):
        self._loadSchema()
        if self.schema and "elements" in self.schema:
            self._writeFromSchema(self.schema["elements"])
        else:
            print("Corrupted schema, couldn't create app")

    def build(self, tag):
        if not tag:
            tag = self.app_id

        cmd = ["docker", "build", "-t", tag, "."]
        if self.dryrun:
            print("Build: %s" % " ".join(cmd))
        else:
            subprocess.call(cmd)


    def _writeFromSchema(self, elements):
        for element in elements:
            value = element["value"]
            if not element["contents"] and not value:
                continue
            if element["name"] == "application":
                value = self.app_id
            print("Writing %s" % element["name"])
            if element["type"] == "directory":
                if value:
                    os.mkdir(value)
                    os.chdir(value)
                    self._writeFromSchema(element["contents"])
                    os.chdir("..")
                else:
                    logger.debug("No value for directory %s", element["name"])
            elif element["type"] == "file":
                with open(value, "w") as fp:
                    logger.debug(element)
                    if element["contents"]:
                        if isinstance(element["contents"], str) or isinstance(element["contents"], unicode):
                            fp.write(element["contents"])
                        elif isinstance(element["contents"], collections.Mapping):
                            fp.write(anymarkup.serialize(self._generateContents(element["contents"]), format='yaml'))
#                        elif element["contentType"] == "application/json":
#                            if element["name"] == "Atomicfile":
#                                element["contents"] = self._updateAtomicfile(element["contents"])

#                            fp.write(json.dumps(element["contents"]))

    def _pickOne(self, what, info, options):
        options_text = ""
        for i, option in enumerate(options):
            options_text += "%s. %s\n" % (i+1, option)

        required = False

        if "required" in info:
            required = info["required"]

        value = raw_input("%s (%s)\n Options:\n%s\nYour choice (default: 1): " % (what, info["description"], options_text))
        if len(value) == 0:
            value = 1
        elif int(value) == 0 and not required:
            return None

        return options[int(value)-1]

    def _getName(self, element, content, path = None):
        name = None
        if not "name" in content:
            name = element
        elif not content["name"]:
            name = self._generateValue(path)
            if not name:
                name = self.params.askFor(element, content)
        elif type(content["name"]) is list:
            name = self._pickOne(element, content, content["name"])
        else:
            name = content["name"]

        logger.debug(name)

        return name

    def _generateContents(self, contents, path="root"):
        result = {}
        for element, content in contents.iteritems():

            local_path = "%s.%s" % (path, element)
            name = self._getName(element, content, local_path)

            print("Filling %s" % name)
            if not content["required"]:
                skip = self.params.askFor("Element %s not required, do you want to skip it?" % name, {"description": "Type y or n", "default": "Y"})
                if isTrue(skip):
                    continue
            #logger.debug("Key: %s, value %s", element, content["value"])

            if content["type"] == "object":
                result[name] = self._generateContents(content["value"], local_path)
            elif content["type"] == "list":

                tmp_results = []
                while True:
                    value = self.params.askFor(content["value"].keys()[0], content["value"][content["value"].keys()[0]])
                    if len(value) == 0:
                        break
                    tmp_results.append(value)

                result[name] = tmp_results
            else:
                if not content["value"]:
                    logger.debug(local_path)
                    value = self._generateValue(local_path)
                    if not value:
                        value = self.params.askFor(element, content)
                    logger.debug(value)
                else:
                    value = content["value"]
                result[name] = value

        return result

    def _generateValue(self, element):
        if element == "root.id":
            return self.app_id
        elif element == "root.metadata.name":
            return self.name
        elif element == "root.graph.component":
            return self.app_id

        return None

    def _nameToId(self, name):
        return name.strip().lower().replace(" ", "-")

    def _updateAtomicfile(self, contents):
        print(contents)
        if "name" in contents:
            contents["name"] = self.name
        if "id" in contents:
            contents["id"] = self.app_id
        if "graph" in contents:
            component = {"repository": "", "name": self.app_id}
            contents["graph"].append(component)

        return contents
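# Hedged usage sketch for the Create class above; the application name and
# schema file name are illustrative, and dryrun=True keeps build() from
# actually invoking docker. The class targets Python 2 (urllib2, raw_input).
app = Create("My Sample App", schema="schema.json", dryrun=True)
print(app.app_id)     # "my-sample-app", derived by _nameToId()
app.create()          # loads the schema and writes files/directories from its "elements"
app.build(None)       # tag falls back to app_id; with dryrun=True only the docker command is printed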
Example #54
0
  def test_skeleton (self):
    """Upload a skeleton and test it's fields"""

    # Make a skeleton
    makeAnno (p, 8)

    # test the nodetype
    skeletontype = random.randint (0,100)
    f = setField(p, 'skeletontype', skeletontype)
    f = getField(p, 'skeletontype')
    assert skeletontype == int(f.content)

    # test the rootnode
    rootnode = random.randint (0,65535)
    f = setField(p, 'rootnode', rootnode)
    f = getField(p, 'rootnode')
    assert rootnode == int(f.content)

    # add some nodes to the skeleton and query them
    # make a bunch of child nodes
    q = Params()
    q.token = 'unittest'
    q.resolution = 0
    q.channels = ['unit_anno']

    r = Params()
    r.token = 'unittest'
    r.resolution = 0
    r.channels = ['unit_anno']
    # params for the child nodes

    s = Params()
    s.token = 'unittest'
    s.resolution = 0
    s.channels = ['unit_anno']
    # params for the grandchild nodes

    skelids = []

    # make a root node
    makeAnno ( q, 7)
    setField(p, 'rootnode', q.annoid)
    setField(q, 'skeleton', p.annoid)
    skelids.append(q.annoid)

    # Make two children and four grandchildren
    for i in range(0,2):
      makeAnno ( r, 7)
      f = setField(r, 'parent', q.annoid)
      f = setField(r, 'skeleton', p.annoid)
      skelids.append(r.annoid)
      for i in range(0,2):
        makeAnno ( s, 7)
        f = setField(s, 'parent', r.annoid)
        f = setField(s, 'skeleton', p.annoid)
        skelids.append(s.annoid)

    # Test skeleton
    f = getField(p, 'nodes')
    rskelids = f.content.split(',')
    for sid in rskelids:
      assert int(sid) in skelids
    assert len(rskelids) == 7
Example #55
0
from postmethods import getURL
from params import Params
import kvengine_to_test
import site_to_test
import makeunitdb

SITE_HOST = site_to_test.site


# Test_Info
# 1 - test_public_tokens - Test the public tokens interface
# 2 - test_info - Test the json info interface
# 3 - test_projinfo - Test the hdf5 info interface
# 4 - test_reserve - Test the reserve tokens interface

p = Params()
p.token = 'unittest'
p.channels = ['unit_anno']

class Test_Info:
  """Other interfaces to OCPCA that don't fit into other categories"""

  def setup_class(self):
    """Create the unittest database"""
    makeunitdb.createTestDB(p.token, public=True)

  def teardown_class (self):
    """Destroy the unittest database"""
    makeunitdb.deleteTestDB(p.token)

  def test_public_tokens (self):
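    # Hedged sketch of a possible body for this truncated test; the
    # public-tokens URL path and the shape of getURL's return value are
    # assumptions, not taken from this snippet.
    import json
    f = getURL('https://{}/ocpca/public_tokens/'.format(SITE_HOST))
    tokens = json.loads(f.content if hasattr(f, 'content') else f.read())
    assert p.token in tokens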
Example #56
0
import os
import sys
import random

sys.path += [os.path.abspath('../django')]
import OCP.settings
os.environ['DJANGO_SETTINGS_MODULE'] = 'OCP.settings'

from ocptype import IMAGE, UINT8
from params import Params
from jsonproj import createJson
from postmethods import getURL, postURL, postNPZ, getNPZ
import makeunitdb
import site_to_test

SITE_HOST = site_to_test.site

p = Params()
p.token = 'unittest'
p.resolution = 0
p.channels = ['CHAN1', 'CHAN2']
p.channel_type = IMAGE
p.datatype = UINT8
p.dataset = 'unittest_ds'

class Test_Project_Json():

  def setup_class(self):
    """Setup Parameters"""
    pass

  def teardown_class(self):
    """Teardown Parameters"""
Example #57
0
# Mount a volume

import re, os.path, time

from stack import Stack,Mount
from layers import Layer
from params import Params


# Mount parameters
Params.add_option(
    "--no_auto_mount", "--no-auto-mount",
    help=("don't automatically try to automount the final device; "
          "mount must be specified explicitly"))


class MountPartition(Layer,Mount):
    name = 'part'
    mount_cmd = '/bin/mount'
    umount_cmd = '/bin/umount'

    def __init__(self,arg_str,params,parent_layer):

        super(MountPartition,self).__init__(arg_str,params,parent_layer)

        self.mount_db = None


    def print_info(self):
        self.infomsg("Initialized mount object parameters:")
        # Don't print parent_device; this invokes unwanted mdadm runs
Example #58
0
import pytest
from contextlib import closing
import networkx as nx
import time

import makeunitdb
from ocptype import ANNOTATION, UINT32
from params import Params
from ramon import H5AnnotationFile, setField, getField, queryField, makeAnno, createSpecificSynapse
from postmethods import putAnnotation, getAnnotation, getURL, postURL
import kvengine_to_test
import site_to_test
#from ocpgraph import genGraphRAMON
SITE_HOST = site_to_test.site

p = Params()
p.token = 'unittest'
p.resolution = 0
p.channels = ['ANNO1']
p.channel_type = ANNOTATION
p.datatype = UINT32

class Test_GraphGen:

  def setup_class(self):
    """Create the unittest database"""
    makeunitdb.createTestDB(p.token, channel_list=p.channels, public=True, readonly=0)

    cutout1 = "0/2,5/1,3/0,2"
    cutout2 = "0/1,3/4,6/2,5"
    cutout3 = "0/4,6/2,5/5,7"
Example #59
0
def compute_inside(weights, data, dropout=True):
    # assumes module-level access to Params, nn_types, and network
    params = Params()
    params.feed(weights)              # load the provided weights into the Params object
    nn_types.USE_DROPOUT = dropout    # toggle dropout for this forward pass
    return network.forward_pass(data, params)
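# Hedged usage sketch: an evaluation-time forward pass with dropout disabled.
# `load_weights` and `load_batch` are hypothetical helpers, and the
# surrounding `network`/`nn_types` modules are assumptions about the host
# project, not shown in this snippet.
weights = load_weights("checkpoint.npz")     # hypothetical helper returning a weight vector
batch = load_batch("validation")             # hypothetical helper returning input data
predictions = compute_inside(weights, batch, dropout=False)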