Example No. 1
def main():

    save_key = os.path.basename(__file__).split('.')[0]

    ### DEFINE MODEL ###
    input_shape = (None, None, 6)
    inputs = Input(shape=input_shape)

    y1 = Conv2D(128, (5, 5), padding='same', activation='relu',
                name='bendy1')(inputs)
    y2 = Conv2D(64, (5, 5), padding='same', activation='relu',
                name='bendy2')(y1)
    y3 = Conv2D(64, (3, 3), padding='same', activation='relu',
                name='smoothy1')(y2)
    y4 = Conv2D(64, (3, 3), padding='same', activation='relu',
                name='smoothy2')(y3)
    y5 = Conv2D(8, (70, 3), padding='same', activation='relu', name='harm')(y4)
    y6 = Conv2D(8, (360, 1),
                padding='same',
                activation='relu',
                name='distribute')(y5)
    y7 = Conv2D(1, (1, 1),
                padding='same',
                activation='sigmoid',
                name='squishy')(y6)
    predictions = Lambda(lambda x: K.squeeze(x, axis=3))(y7)

    model = Model(inputs=inputs, outputs=predictions)

    experiment.experiment(save_key, model)
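A minimal smoke-test sketch for the model above, assuming the standard Keras imports that main() relies on (Input, Conv2D, Lambda, Model, and keras.backend as K); it illustrates why the (None, None, 6) input shape works: the network is fully convolutional, so it accepts any spatial size.

import numpy as np
from keras.layers import Input, Conv2D, Lambda
from keras.models import Model
import keras.backend as K

# a tiny two-layer stand-in with the same input/output contract as the model
inputs = Input(shape=(None, None, 6))
x = Conv2D(8, (3, 3), padding='same', activation='relu')(inputs)
x = Conv2D(1, (1, 1), padding='same', activation='sigmoid')(x)
model = Model(inputs=inputs, outputs=Lambda(lambda t: K.squeeze(t, axis=3))(x))

print(model.predict(np.random.rand(1, 360, 50, 6)).shape)  # (1, 360, 50)
print(model.predict(np.random.rand(1, 72, 10, 6)).shape)   # (1, 72, 10)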
Example No. 2
 def test_prob_experiment(self):
     hat = Hat(blue=3, red=2, green=6)
     probability = experiment(hat=hat,
                              expected_balls={
                                  "blue": 2,
                                  "green": 1
                              },
                              num_balls_drawn=4,
                              num_experiments=1000)
     actual = probability
     expected = 0.272
     self.assertAlmostEqual(
         actual,
         expected,
         delta=0.01,
         msg='Expected experiment method to return a different probability.'
     )
     hat = Hat(yellow=5, red=1, green=3, blue=9, test=1)
     probability = experiment(hat=hat,
                              expected_balls={
                                  "yellow": 2,
                                  "blue": 3,
                                  "test": 1
                              },
                              num_balls_drawn=20,
                              num_experiments=100)
     actual = probability
     expected = 1.0
     self.assertAlmostEqual(
         actual,
         expected,
         delta=0.01,
         msg='Expected experiment method to return a different probability.'
     )
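For context, a minimal sketch of the experiment() function these assertions exercise, assuming a Hat class whose contents attribute holds one color string per ball (an inference from the test, not the project's implementation):

import copy
import random

def experiment(hat, expected_balls, num_balls_drawn, num_experiments):
    successes = 0
    for _ in range(num_experiments):
        trial = copy.deepcopy(hat)
        draws = min(num_balls_drawn, len(trial.contents))
        drawn = [trial.contents.pop(random.randrange(len(trial.contents)))
                 for _ in range(draws)]
        if all(drawn.count(color) >= n for color, n in expected_balls.items()):
            successes += 1
    return successes / num_experiments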
Example No. 3
def test_experiment_mnist_custom(experiment_files_fixture):
  """
  Test of a MIA on the MNIST dataset with custom model for the MNIST
  model, custom mode for the MIA model and custom optimizer options
  """
  experiment(academic_dataset    = 'mnist', 
             target_model_path   = target_path.as_posix(),
             mia_model_path      = mia_path.as_posix(),
             custom_target_model = OrderedDict([
               ('conv1'       , nn.Conv2d(1, 10, 3, 1)),
               ('relu1'       , nn.ReLU()),
               ('maxpool1'    , nn.MaxPool2d(2, 2)),
               ('conv2'       , nn.Conv2d(10, 10, 3, 1)),
               ('relu2'       , nn.ReLU()),
               ('maxpool2'    , nn.MaxPool2d(2, 2)),
               ('to1d'        , Flatten()),
               ('dense1'      , nn.Linear(5*5*10, 500)),
               ('tanh'        , nn.Tanh()),
               ('dense2'      , nn.Linear(500, 10)),
               ('logsoftmax'  , nn.LogSoftmax(dim=1))
             ]),
             custom_target_optim_args = {'lr' : 0.02, 'momentum' : 0.3},
             custom_mia_model         = OrderedDict([
               ('dense1'      , nn.Linear(20, 50)),
               ('tanh'        , nn.Tanh()),
               ('dense2'      , nn.Linear(50, 2)),
               ('logsoftmax'  , nn.LogSoftmax(dim=1))
             ]),
             custom_mia_optim_args = {'lr' : 0.02, 'momentum' : 0.3},
             shadow_number         = 50,
             custom_shadow_model   = OrderedDict([
               ('conv1'       , nn.Conv2d(1, 15, 7, 1)),
               ('relu1'       , nn.ReLU()),
               ('maxpool1'    , nn.MaxPool2d(2, 2)),
               ('conv2'       , nn.Conv2d(15, 25, 7, 1)),
               ('relu2'       , nn.ReLU()),
               ('maxpool2'    , nn.MaxPool2d(2, 2)),
               ('to1d'        , Flatten()),
               ('dense1'      , nn.Linear(2*2*25, 50)),
               ('tanh'        , nn.Tanh()),
               ('dense2'      , nn.Linear(50, 10)),
               ('logsoftmax'  , nn.LogSoftmax(dim=1))
             ]),
             custom_shadow_optim_args = {'lr' : 0.02, 'momentum' : 0.3},
             shadow_model_base_path   = shadow_base_path.as_posix(),
             mia_train_ds_path        = mia_train_ds_path.as_posix(),
             mia_test_ds_path         = mia_test_ds_path.as_posix(),
             class_number             = 10)
  
  assert target_path.exists()
  assert mia_path.exists()
  remove_experiment_files()
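Flatten here is not a torch.nn import (older PyTorch releases had no nn.Flatten), so it is presumably a project-local helper; a minimal sketch of such a module:

import torch.nn as nn

class Flatten(nn.Module):
    # collapse everything after the batch dimension into one axis
    def forward(self, x):
        return x.view(x.size(0), -1)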
Example No. 4
    def check_experiment(self, id):
        """Provide details of an experiment."""
        exp = experiment.experiment(new_experiment=False, ts=id)

        start_time = time.time()
        condition = True

        while exp.metadata is None and condition:
            now = time.time()
            if now - start_time > 3:
                condition = False
                return "Experiment is not found!"
            exp = experiment.experiment(new_experiment=False, ts=id)
            time.sleep(0.01)

        cam_statement = str()
        for i in range(7):
            fname = os.path.join("data/", str(id), "/", str(i) + "/")
            if os.path.exists(fname):
                n = len(self.um.find_images(fname))
            else:
                n = 0
            cam_statement += "Camera {i}: {n} images found! ".format(i=i, n=n)

        date = self.um.timestamp_to_date(id / 1000)
        f = os.path.join(os.path.dirname(os.path.realpath(__file__)), "backup",
                         str(id) + ".zip")
        is_archived = self.um.check_file_exists(f)
        if is_archived:
            img = "true.png"
        else:
            img = "false.png"

        try:
            label = exp.metadata["label"]
        except KeyError:
            label = None

        pd_images = exp.metadata["pose_detection"].values()
        user = {
            "timestamp": id,
            "date": date,
            "camera": exp.metadata["number_of_cameras"],
            "n_images": exp.metadata["number_of_images"],
            "room": exp.metadata["room"],
            "label": label,
            "image": img,
            "exp": exp.metadata,
            "pose_detection_processed_images": pd_images
        }
        return render_template('experiment.html', user=user)
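A hedged sketch of the self.um.timestamp_to_date utility used above (the helper body and its format string are assumptions; id is a millisecond timestamp, hence the division by 1000 at the call site):

import datetime

def timestamp_to_date(ts_seconds):
    return datetime.datetime.fromtimestamp(ts_seconds).strftime('%Y-%m-%d %H:%M:%S')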
Example No. 5
def test_experiment_mnist_basic():
  """
  Test a default MIA experiment on the MNIST dataset
  """
  experiment(academic_dataset       = 'mnist', 
             target_model_path      = target_path.as_posix(),
             mia_model_path         = mia_path.as_posix(),
             shadow_model_base_path = shadow_base_path.as_posix(),
             mia_train_ds_path      = mia_train_ds_path.as_posix(),
             mia_test_ds_path       = mia_test_ds_path.as_posix(),
             class_number           = 10)
  
  assert target_path.exists()
  assert mia_path.exists()
  remove_experiment_files()
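The remove_experiment_files() teardown shared by these tests is not shown; a hedged sketch, assuming the module-level paths are pathlib.Path objects as the .as_posix() calls suggest:

def remove_experiment_files():
    # the real helper presumably also sweeps up the shadow model files
    for p in (target_path, mia_path, mia_train_ds_path, mia_test_ds_path):
        if p.exists():
            p.unlink()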
Example No. 6
    def draw_matchsticks(self, exp_id, camera_id):
        """
        """
        v = visualization.visualization()
        exp = experiment.experiment(new_experiment=False, ts=exp_id)
        room_name = exp.metadata["room"]

        if room_name.lower() == "cears":
            room_id = 1
        elif room_name.lower() == "computer_lab":
            room_id = 0

        devices = self.rooms[room_id]["devices"]
        devices.sort()
        try:
            cam_name = os.path.basename(devices[int(camera_id)])
        except Exception as e:
            print e
            self.app.logger.info(e)
            return "No cam found sorry!"

        fcamera_path = os.path.join("/dev/v4l/by-id", cam_name)
        nframes = exp.metadata["number_of_images"][fcamera_path]
        ret_combined = ""
        for frame_id in range(nframes):
            ret = self.skeletons(exp_id, camera_id, frame_id)
            ret_combined += "<br>" + ret
        return ret_combined
Example No. 7
def default_experiment(toa_strings=(), isotope="Re-187", timestep_size=1e+6):
    """
    Run a single experiment with no fudge_factor
    *toa_strings* - tuple of all relevant strings (folder, name)
    *isotope* - str - name of isotope to be manipulated
    *timestep_size* - float - size of timestep in experiment
    """
    exp_folder = toa_strings[0] #name of experiment folder
    exp_name = toa_strings[1]
    
    fudge_factor = 1.0
    data_filename = exp_folder + "/" + exp_name + "default.npy"

    #instance of experiment-object
    exp_instance = experiment(isotope, fudge_factor, dt=timestep_size, bestfit_namespace=current_bestfit)
    #save data to appropriately named file
    exp_instance.save2file(data_filename, write_index_file=True)
    #write number of timepoints to README
    readmestring = "Data from 'default-experiment' \n"
    readmestring += "timestep-number: %d \n"%len(exp_instance.history.age)
    readmefilename = exp_folder + "/README.md"
    with open(readmefilename, 'a') as readmefile:
        readmefile.write('\n')
        readmefile.write(readmestring)
    #delete object
    del exp_instance #delete instance
Example No. 8
def single_experiment(experiment_index, toa_strings=(),
                      isotope="Re-187", timestep_size=1e+6):
    """
    Run a single experiment with bestfit values, and a fudge-factor stapled 
    to the isotope. 
    *experiment_index* - int - index of experiment in queue
    *toa_strings* - tuple of all relevant strings (folder, name)
    *isotope* - str - name of isotope to be manipulated
    *timestep_size* - float - size of timestep in experiment
    """
    exp_folder = toa_strings[0] #name of experiment folder
    exp_name = toa_strings[1] #name of experiments
    exp_fudge = "fudge_factors.dat"
    #exp_fudge = toa_strings[2] #name of datafile in folder
    
    #bestfit_special_timesteps = 0 #disable log-timesteps
    #bestfit_dt = timestep_size

    fudge_factor = read_fudge_factor(filename=exp_folder+'/'+exp_fudge,
                                     req_index=experiment_index)
    data_filename = exp_folder + "/" + exp_name + str(experiment_index) + ".npy"

    #instance of experiment-object
    exp_instance = experiment(isotope, fudge_factor, dt=timestep_size, bestfit_namespace=current_bestfit)
    #save data to appropriately named file
    exp_instance.save2file(data_filename)
    del exp_instance #delete instance
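read_fudge_factor() is not shown; a hedged sketch assuming fudge_factors.dat stores one factor per line, indexed by experiment number:

def read_fudge_factor(filename, req_index):
    with open(filename, 'r') as infile:
        return float(infile.readlines()[req_index])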
Example No. 9
    def get_matchstick_video(self, exp_id, camera_id):
        """
        """
        exp = experiment.experiment(new_experiment=False, ts=exp_id)
        room_name = exp.metadata["room"]

        if room_name.lower() == "cears":
            room_id = 1
        elif room_name.lower() == "computer_lab":
            room_id = 0

        devices = self.rooms[room_id]["devices"]
        devices.sort()
        try:
            cam_name = os.path.basename(devices[int(camera_id)])
        except Exception as e:
            print e
            self.app.logger.info(e)
            return "No cam found sorry!"

        fcamera_path = os.path.join("/dev/v4l/by-id", cam_name)
        nframes = exp.metadata["number_of_images"][fcamera_path]
        exp_path = self.um.experiment_path(str(exp_id))
        pathout = os.path.join(exp_path, "output/pose/video")
        pathout = os.path.join(pathout, cam_name)

        try:
            return send_from_directory(pathout,
                                       filename="video.avi",
                                       as_attachment=True,
                                       mimetype='video/x-msvideo')
        except Exception as e:
            return str(e)
Example No. 10
 def reload(self):
     pmax = len(self.exp)
     self.bad = []
     QtGui.QApplication.setOverrideCursor(QtGui.QCursor(QtCore.Qt.WaitCursor))
     logString = 'Reloading all the curves\n'
     self.simpleLogger(logString)
     progress = QtGui.QProgressDialog("Reloading curves...", "Cancel reloading", 0, pmax)
     i = 0
     tempExp = deepcopy(self.exp)
     self.exp = None
     self.exp = experiment.experiment()
     for c in tempExp:
         aligned = c[0].direction == 'far'
         self.alignFlags.append(aligned)
         self.badFlags.append(True)
         self.ctPoints[i] = None
         i += 1
         progress.setValue(i)
         self.exp.addFiles([c.filename])
     progress.setValue(pmax)
     QtGui.QApplication.restoreOverrideCursor()
     tempExp = None
     self.fitFlag = False
     self.setStatus(2)
     if np.array(self.alignFlags).all():
         self.setStatus(3)
     self.refillList()
     self.goToCurve(1)
     self.ui.slide1.setValue(0)
     self.ui.slide2.setValue(0)
     logString = 'Curves reloaded\n'
     self.simpleLogger(logString)
Example No. 12
    def label_experiment(self, exp_id):
        """Create/Update the label of an experiment."""
        exp = experiment.experiment(new_experiment=False, ts=str(exp_id))
        label = request.form.get('label')
        exp.update_metadata(change_label=True, label=label)

        return "OK"
Example No. 13
def experiment_with_fudge_factors(isotope, loa_rel_dev):
    filename = "data_5point/ism_%s"%(isotope)
    loa_fudge_factors = [1+rel_dev for rel_dev in loa_rel_dev]
    #Perform experiments in comprehensive list
    loa_experiments = [experiment(isotope, fudge_factor,
                                  input_timesteps=global_special_timestep,
                                  dt=global_constant_timestep,
                                  bestfit_namespace=cbf)
                       for fudge_factor in loa_fudge_factors]
    #save all data
    for i, exp in enumerate(loa_experiments):
        data_filename = filename + "_%dm"%int(1000*loa_fudge_factors[i])
        exp.save2file(filename=data_filename, write_index_file=True)
        
    #list of names of experiments
    loa_names = [r"$\hat{Y}_{%s}=%1.2f\sigma$"%(isotope, sigma)
                 for sigma in loa_fudge_factors]

    #save figure
    title = "%s abundance in proxy-MW by 'Omega'"%(isotope)
    #plot ism-content for isotope in question.
    vis_object = visualize(loa_experiments, loa_names, num_yaxes=2,
                           yields=True)
    #plot spectroscopic abundance
    vis_object.add_time_ism(isotope, index_yaxis=0)
    vis_object.add_yields(nuclide=isotope, index_yaxis=1, time="sum", log_bool=False)
    vis_object.finalize(show=False, save=filename+".png",
                        title=title, linewidth=3)
    return 
Example No. 14
    def pose_img(self, exp_id, camera_id, img_id):
        """Employ pose detection on a single image."""
        pd = pose_detection()
        exp = experiment.experiment(new_experiment=False, ts=exp_id)
        room_name = exp.metadata["room"]
        if room_name.lower() == "cears":
            room_id = 1
        elif room_name.lower() == "computer_lab":
            room_id = 0

        devices = self.rooms[room_id]["devices"]
        camera_name = os.path.basename(devices[camera_id])
        fname = os.path.join("data", exp_id, "raw", camera_name,
                             str(img_id) + ".png")
        self.app.logger.info(fname)
        retval = pd.detect_pose(fname)
        if isinstance(retval, str):
            return retval
        else:
            pose = retval[0]
            retval, buffer = cv2.imencode('.png', pose)
            print buffer.shape
            response = make_response(buffer.tobytes())
            response.headers['Content-Type'] = 'image/png'
        self.app.logger.info(response)
        return response
Example No. 15
    def make_videofrom_matchsticks(self, exp_id, camera_id, fps):
        """
        """
        v = visualization.visualization()
        exp = experiment.experiment(new_experiment=False, ts=exp_id)
        room_name = exp.metadata["room"]

        if room_name.lower() == "cears":
            room_id = 1
        elif room_name.lower() == "computer_lab":
            room_id = 0

        devices = self.rooms[room_id]["devices"]
        devices.sort()
        try:
            cam_name = os.path.basename(devices[int(camera_id)])
        except Exception as e:
            print e
            self.app.logger.info(e)
            return "No cam found sorry!"

        fcamera_path = os.path.join("/dev/v4l/by-id", cam_name)
        nframes = exp.metadata["number_of_images"][fcamera_path]
        exp_path = self.um.experiment_path(str(exp_id))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        pathout = os.path.join(exp_path, "output/pose/video")

        try:
            self.um.create_folder(pathout)
        except:
            pass

        pathout = os.path.join(pathout, cam_name)

        try:
            self.um.create_folder(pathout)
        except:
            pass

        pathout = os.path.join(pathout, "video.avi")
        print pathout
        self.app.logger.info(pathout)
        out = cv2.VideoWriter(pathout, fourcc, fps, (800, 600))

        for frame_id in range(nframes):
            figure = os.path.join(exp_path, "output/pose/img", cam_name,
                                  "matchstick_" + str(frame_id) + ".png")
            self.app.logger.info(figure)
            img_ = cv2.imread(figure)

            if img_ is None:

                return "I think you forgot to draw the matchsticks!"
            # cv2.resize returns the resized image; it does not work in place
            img_ = cv2.resize(img_, (800, 600))
            out.write(img_)

        out.release()

        return "done"
Example No. 16
def main(args):
    seed_everything(args.seed)

    # Set up data_dir, vocab_dir, and model_dir
    args.vocab_dir = path.join(args.data_dir, "vocab/uci")
    # Change data directory to point to UCI directory
    args.data_dir = path.join(args.data_dir, "uci")

    if args.max_epochs == 1000:
        # Changing the default value
        args.max_epochs = 10

    args.save_dir = args.weights_save_path if args.weights_save_path is not None else args.base_model_dir
    args.model_name = get_model_name(args)

    print(f"Model name: {args.model_name}")
    experiment(args)
Example No. 17
def worker_task(i):
    global logger
    if logger is None:
        logging.basicConfig(
            format=
            "%(asctime)s [%(process)-4.4s--%(threadName)-12.12s] [%(levelname)-5.5s]  %(message)s"
        )
        fileHandler = logging.FileHandler('RD_log.log.{}'.format(os.getpid()),
                                          mode='w')
        logger = logging.getLogger()
        logger.addHandler(fileHandler)
        logger.setLevel(logging.DEBUG)
    random.seed(datetime.now())
    start_time = time.time()
    rounds = 10
    NODES = 7115
    min_edges = 75000
    max_edges = 125000
    incr = 0.001
    p = random.uniform(0.0015, 0.0024)  # probability
    seed = 100
    logger.info("# iteration {}".format(i + 1))
    graph = None
    edges, avgc = -1, -1
    while p > 0:
        logger.info("generating graph with N={} p={}".format(NODES, p))
        graph = DirectedGraph.randomDirectedGraph(NODES, p)
        edges = graph.size()[1]
        avgc = graph.toUndirect().average_clustering()
        logger.info("prob={} edges={}".format(p, edges))
        if edges >= min_edges and edges <= max_edges:
            break
            # avgc = graph.average_clustering()
            # logger.info("** avgc={}".format(avgc))
            # if avgc >= 0.1 and avgc <= 0.2:
            # 	break
            # elif avgc > 0.2:
            # 	p += ((max_edges - edges) / min_edges * 10.0) * incr
            # else:
            # 	p += ((min_edges - edges) / min_edges * 10.0) * incr
        elif edges > max_edges:
            p += ((max_edges - edges) / min_edges) * incr
        else:
            p += ((min_edges - edges) / min_edges) * incr
        sys.stdout.flush()

    #graph.setLogger(logger)

    ret = experiment(graph, seed, rounds)
    elapsed = time.time() - start_time
    ret.append((edges, avgc, elapsed))

    logger.info("# iteration %d done in %f" %
                (i + 1, time.time() - start_time))
    logger.info("# {}".format(ret))
    #gc.collect()
    #return [(lin_max_seed, max_lin_influenced), (eigenc_max_seed, max_eigenc_influenced), (bet_max_seed, max_bet_influenced)]
    return ret
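The per-process log file name (RD_log.log.<pid>) suggests worker_task runs inside a process pool; a hedged sketch of such a driver (pool size and iteration count are assumptions):

from multiprocessing import Pool

if __name__ == '__main__':
    with Pool(processes=4) as pool:
        # each worker process opens its own log file, as the handler setup expects
        results = pool.map(worker_task, range(10))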
Example No. 18
 def loadExperiment(self, filename='', expid='', options={}):
     try:
         if self.exp is not None:  ## Kill old experiment if any
             self.exp.shutdown()
         self.exp = experiment(filename, expid, self.ident(), options)
         return 1
     except Exception, self.err:
         self.log.exception(self.err)
         return 0
Example No. 19
 def test_plotExperiment(self):
     numExperiments = 3
     maxEpisodeLengths = 1000
     numEpisodesPerTrial = 50
     eS = experimentSet()
     for i in range(numExperiments):
         exp = experiment('experiment ' +str(i))
         exp.addTrial(np.random.randint(maxEpisodeLengths, size=numEpisodesPerTrial))
         eS.addExperiment(exp)
     eS.plotExperimentSet()
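The experiment and experimentSet classes are not shown; a minimal sketch consistent with how this test uses them (an inference, not the project's real implementation):

import numpy as np
import matplotlib.pyplot as plt

class experiment:
    def __init__(self, name):
        self.name = name
        self.trials = []

    def addTrial(self, episode_lengths):
        self.trials.append(np.asarray(episode_lengths))

class experimentSet:
    def __init__(self):
        self.experiments = []

    def addExperiment(self, exp):
        self.experiments.append(exp)

    def plotExperimentSet(self):
        # plot the per-episode mean across trials for each experiment
        for exp in self.experiments:
            plt.plot(np.mean(exp.trials, axis=0), label=exp.name)
        plt.legend()
        plt.show()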
Example No. 20
	def callback_edit_experiment_window(self):

		if self.experiment_window==None:
			self.experiment_window=experiment()
			self.experiment_window.changed.connect(self.sim_mode_button.update)
			
		help_window().help_set_help(["time.png",_("<big><b>The time mesh editor</b></big><br> To do time domain simulations one must define how the voltage and the light vary as a function of time.  This can be done in this window.  Also use this window to define the simulation length and time step.")])
		if self.experiment_window.isVisible()==True:
			self.experiment_window.hide()
		else:
			self.experiment_window.show()
Example No. 21
	def callback_edit_experiment_window(self):

		if self.experiment_window==None:
			self.experiment_window=experiment()
			self.experiment_window.changed.connect(self.callback_experiments_changed)
			
		help_window().help_set_help(["time.png",_("<big><b>The time mesh editor</b></big><br> To do time domain simulations one must define how the voltage and the light vary as a function of time.  This can be done in this window.  Also use this window to define the simulation length and time step.")])
		if self.experiment_window.isVisible()==True:
			self.experiment_window.hide()
		else:
			self.experiment_window.show()
Example No. 22
	def callback_edit_experiment_window(self, widget, data=None):

		if self.experiment_window==None:
			self.experiment_window=experiment()
			self.experiment_window.init()

		my_help_class.help_set_help(["time.png",_("<big><b>The time mesh editor</b></big>\n To do time domain simulations one must define how the voltage and the light vary as a function of time.  This can be done in this window.  Also use this window to define the simulation length and time step.")])
		if self.experiment_window.get_property("visible")==True:
			self.experiment_window.hide_all()
		else:
			self.experiment_window.show_all()
Example No. 24
def generate_simulation_result(x21, x22, x23, x24, x25, x26,
                               x31, x32, x33, x34, x35, x36,
                               x41, x42, x43, x44, x45, x46,
                               x51, x52, x53, x54, x55, x56,
                               x61, x62, x63, x64, x65, x66,
                               x71, x72, x73, x74, x75, x76):
    b1 = [1, 1, 1, 1, 1, 1]
    b2 = [x21, x22, x23, x24, x25, x26]
    b3 = [x31, x32, x33, x34, x35, x36]
    b4 = [x41, x42, x43, x44, x45, x46]
    b5 = [x51, x52, x53, x54, x55, x56]
    b6 = [x61, x62, x63, x64, x65, x66]
    b7 = [x71, x72, x73, x74, x75, x76]

    model = create_map(routes_per_bus=[b1, b2, b3, b4, b5, b6, b7], name='model')
    return experiment([model], 60*18, 10, output_report=False, printing=False)
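A hedged usage sketch: judging by the b1..b7 construction, the 36 scalars are per-route weights for buses 2 through 7, so passing all ones gives every bus the same routes as bus 1 (an assumption about the model's semantics):

result = generate_simulation_result(*([1] * 36))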
Example No. 25
def grid_search(args):
    start_time = time.time()

    results = []

    # split args to default settings and hyperparameters
    default_settings = {}
    search_settings = {}
    for name, value in vars(args).items():
        if type(value) == list and len(value) == 1:
            value = value[0]
        if type(value) != list:
            default_settings[name] = value
        else:
            search_settings[name] = value

    # Search hyperparameters
    for values in itertools.product(*search_settings.values()):
        # Merge default and search settings
        hyperparameters = dict(zip(search_settings.keys(), values))
        merged_settings = {**default_settings, **hyperparameters}
        settings = argparse.Namespace(**merged_settings)

        # Set directory path
        directory_name = '-'.join(name + str(value) for name, value in hyperparameters.items())
        settings.model_directory = os.path.join(settings.model_directory, directory_name)

        # Run experiment
        metrics, epoch = experiment(settings)
        result = (hyperparameters, metrics, epoch)
        results.append(result)

    # print best experiment result
    # metrics = (precision, recall, f-score, accuracy); pick the best f-score
    hyperparameters, metrics, epoch = max(results, key=lambda x: x[1][2])
    print('best experiment settings in epoch', epoch)
    print(json.dumps(hyperparameters, indent=4))
    print('best experiment metrics: precision: {:.2f} recall: {:.2f} f-score: {:.2f} accuracy: {:.2f}'.format(*metrics))

    # save all results
    results_path = os.path.join(args.model_directory, 'results.json')
    json.dump(results, open(results_path, 'w'), indent=4)

    # Print total time
    total_time = time.time() - start_time
    print('total grid search time:', total_time)
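grid_search() relies on list-valued arguments to mark swept hyperparameters; a hedged sketch of a matching argparse setup (option names and defaults are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--model-directory', default='models')
# nargs='+' turns a swept hyperparameter into a list for grid_search
parser.add_argument('--learning-rate', type=float, nargs='+', default=[1e-3])
parser.add_argument('--batch-size', type=int, nargs='+', default=[32])
grid_search(parser.parse_args())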
Example No. 26
    def pose_exp(self, exp_id):
        """
        Given an experiment id to employ pose_detection on the whole images
        collected from all the cameras.
        """
        pd = pose_detection()
        exp = experiment.experiment(new_experiment=False, ts=exp_id)
        ncamera = exp.metadata["number_of_cameras"]
        room_name = exp.metadata["room"]
        if room_name.lower() == "cears":
            room_id = 1
        elif room_name.lower() == "computer_lab":
            room_id = 0

        devices = self.rooms[room_id]["devices"]
        statement = ""
        for camera_id in range(len(devices)):
            st = self.pose_cam(str(exp_id), int(camera_id)) + "<br />"
            statement += st
        return statement
Example No. 27
    def list_experiments(self):
        """List all the experiments."""
        subfolders = self.um.list_subfolders("data/*/")
        experiment_folders = self.um.list_experiments(subfolders)
        experiments = list()
        for exp in experiment_folders:
            try:
                date = self.um.timestamp_to_date(int(exp) / 1000)
                exp_class = experiment.experiment(new_experiment=False, ts=exp)

                if "label" in exp_class.metadata:
                    label = exp_class.metadata["label"]
                else:
                    label = None

                exp_dict = {"date": date, "ts": exp, "label": label}
                experiments.append(exp_dict)
            except:
                print "Skipped"

        return render_template('experiments.html', user=experiments)
Example No. 28
 def __init__ ( self, parent = None ):
     QtGui.QMainWindow.__init__( self, parent )
     self.setWindowTitle( 'qtView' )
     self.ui = qtView_face.Ui_facewindow()
     self.ui.setupUi( self )
     self.setConnections()
     
     self.cursColors = {0: ['Magenta','m'],1: ['Cyan','c'],2: ['Green','g'],3:['Black','k']}
     self.cursors = []
     self.ui.cursCmpCmbBox.addItem('Select a cursor')
     
     self.fitFlag = False
     self.alignFlags = []
     self.ctPoints = []
     self.bad = []
     self.badFlags = []
     self.ui.setPathBtn.setStyleSheet('background-color: none')
     self.globDir = ''
     self.peaksOnPlot = False
     self.peaksAlreadyPlotted = False
     self.lastOperation = ''
     
     self.exp = experiment.experiment()
     
     self.statusDict = {1:[[self.ui.bAddDir,self.ui.bAddFiles,self.ui.convr9Btn],
                           [self.ui.reloadBtn,self.ui.saveBox,self.ui.removeBox,self.ui.fromFileBox,
                            self.ui.fitNpeakBox,self.ui.alignBox,self.ui.plotModBox,self.ui.cursorsBox,
                            self.ui.peaksTab]],
                        2:[[self.ui.removeBox,self.ui.removeBtn,self.ui.reloadBtn,self.ui.closeExpBtn,
                            self.ui.fromFileBox,self.ui.fitNpeakBox,self.ui.alignBox,self.ui.plotModBox,
                            self.ui.cursorsBox],
                           [self.ui.removeBOBtn,self.ui.saveBox,self.ui.peaksTab,self.ui.chgStatBtn]],
                        3:[[self.ui.removeBOBtn,self.ui.saveBox,self.ui.peaksTab,self.ui.chgStatBtn,self.ui.findPeaksBtn],
                           [self.ui.showPeakBtn,self.ui.peaksCmbBox,self.ui.alignBox,self.ui.savePeaksBox]],
                        4:[[self.ui.showPeakBtn,self.ui.peaksCmbBox,self.ui.savePeaksBox,self.ui.removeBOBtn],
                           []]}
     
     logString = 'Welcome!\n'
     self.simpleLogger(logString)
     self.setStatus(1)
Example No. 29
def worker_task(i):
    global logger
    if logger is None:
        logging.basicConfig(
            format=
            "%(asctime)s [%(process)-4.4s--%(threadName)-12.12s] [%(levelname)-5.5s]  %(message)s"
        )
        fileHandler = logging.FileHandler('PA_log.log.{}'.format(os.getpid()),
                                          mode='w')
        logger = logging.getLogger()
        #fileHandler.setFormatter(logging.Formatter("%(asctime)s [%(process)-4.4s--%(threadName)-12.12s] [%(levelname)-5.5s]  %(message)s"))
        logger.addHandler(fileHandler)
        logger.setLevel(logging.DEBUG)
    random.seed(datetime.now().timestamp() * i)
    start_time = time.time()
    rounds = 10
    NODES = 7115
    min_edges = 75000
    max_edges = 125000
    incr = 0.001
    p = random.uniform(0.35, 0.45)  # probability
    seed = 100
    d = int(random.randint(min_edges, max_edges) / NODES)
    ret = None
    avgc = 0
    edges = 0
    with DirectedGraph.preferentialAttachment(NODES, d, p) as graph:
        edges = graph.size()[1]
        avgc = graph.toUndirect().average_clustering()
        ret = experiment(graph, seed, rounds)


    # print("# iteration %d done in %f" % (i + 1, time.time() - start_time))
    elapsed = time.time() - start_time
    ret.append((edges, avgc, elapsed, p, d))
    logger.info("# iteration %d done in %f" % (i + 1, elapsed))
    logger.info("# {}".format(ret))
    sys.stdout.flush()
    return ret
Example No. 30
    def __init__(self, devices, room_name):
        self.cams = list()
        self.exp = experiment.experiment(new_experiment=True,
                                         camera_names=devices,
                                         room=room_name)
        self.exp_id = str(self.exp.ts)
        self.device_ids = list()
        self.img_id = dict()
        self.devices = devices

        for dev in devices:
            print "Accessing:", dev
            self.device_ids.append(os.path.basename(dev))
            cam = cv2.VideoCapture(dev)
            if cam.isOpened():
                cam.set(cv2.CAP_PROP_FPS, 3)
                cam.set(cv2.CAP_PROP_AUTOFOCUS, 0)
                self.cams.append(cam)
                print dev, "is opened"
            else:
                print "error in", dev
            self.img_id[dev] = 0
Example No. 31
    def skeletons(self, exp_id, cam_id, frame_id):
        """Test skeletons."""
        visualizer = visualization.visualization()
        exp = experiment.experiment(new_experiment=False, ts=exp_id)
        room_name = exp.metadata["room"]
        if room_name.lower() == "cears":
            room_id = 1
        elif room_name.lower() == "computer_lab":
            room_id = 0

        devices = self.rooms[room_id]["devices"]
        devices.sort()
        try:
            cam_name = os.path.basename(devices[int(cam_id)])
        except:
            return "No cam found sorry!"

        exp_path = self.um.experiment_path(str(exp_id))
        pose_detection_result = "output/pose"
        json = "pose"
        img = "img"
        fname = os.path.join(exp_path, pose_detection_result, json, cam_name,
                             str(frame_id) + ".png.json")

        output_fname = os.path.join(exp_path, pose_detection_result, img,
                                    cam_name,
                                    "matchstick_" + str(frame_id) + ".png")
        print fname
        self.app.logger.info(fname)

        if self.um.check_file_exists(fname):
            json_data = self.um.read_json(fname)
            people_in_frame = people(json_data, frame_id)
            visualizer.draw_matchsticks(people_in_frame, output_fname)
            npeople = str(len(people_in_frame.list))
        else:
            npeople = 0

        return "Number of people drawn: {n}".format(n=npeople)
Example No. 32
def worker_task(i):
    global logger
    if logger is None:
        logging.basicConfig(
            format=
            "%(asctime)s [%(process)-4.4s--%(threadName)-12.12s] [%(levelname)-5.5s]  %(message)s"
        )
        fileHandler = logging.FileHandler('WS_log.log.{}'.format(os.getpid()),
                                          mode='w')
        logger = logging.getLogger()
        logger.addHandler(fileHandler)
        logger.setLevel(logging.DEBUG)
    rounds = 10
    random.seed(datetime.now())
    start_time = time.time()
    logger.info("# iteration %d" % (i + 1))
    NODES = 7115
    min_edges = 75000
    max_edges = 125000
    incr = 0.001
    p = 0.001  # probability
    seed = 100
    radius = 2
    weak_ties = [i * 5 for i in range(0, 3)]
    ret = None
    avgc = 0
    edges = 0
    with DirectedGraph.WS2DGraph(NODES, random.randint(min_edges, max_edges),
                                 radius, weak_ties) as graph:
        edges = graph.size()[1]
        avgc = graph.toUndirect().average_clustering()
        ret = experiment(graph, seed, rounds)
    #	print("# iteration %d done in %f" % (i+1, time.time() - start_time))
    elapsed = time.time() - start_time
    ret.append((edges, avgc, elapsed, p, radius))
    logger.info("# iteration %d done in %f" % (i + 1, elapsed))
    logger.info("# {}".format(ret))
    return ret
Example No. 33
def demographics():

    form = Demographics()

    if request.method == 'POST':
        if not form.validate():
            flash('All fields are required')
            return render_template('demographics.html',form=form)

        this_logfile = files.get_logfile()
        # use a context manager so the log file is closed after the append
        with open(this_logfile, 'a') as logfile:
            logfile.write(
                '\nage: '+request.form['age']+
                ' ,gender: '+request.form['gender']+
                ' ,environ: '+request.form['location']+
                ' ,occup: '+request.form['occupation']+
                ' ,advert: '+request.form['advertisement']+
                '\n###################################################################\n'+
                '\ntime\tcumtime\texpress\tident\tbutton\tfilename\tevaluation\tstopRT\tchoiceRT\tmaskNum\tmaskList\n'
            )

        return experiment.experiment()

    elif request.method == 'GET':
        return render_template('demographics.html', form=form)
Example No. 35
 def closeExp(self):
     
     self.ui.grafo.clear()
     self.exp = []
     self.fitFlag = False
     self.alignFlags = []
     self.ctPoints = []
     self.bad = []
     self.badFlags = []
     self.exp = experiment.experiment()
     self.refillList()
     logString = 'Experiment closed\n'
     self.simpleLogger(logString)
     self.globDir = ''
     self.ui.setPathBtn.setStyleSheet('background-color: none')
     self.cursors = []
     # removing items while looping over a live count skips every other entry;
     # clear the boxes instead (keep the 'Select a cursor' item in the second)
     self.ui.cursCmbBox.clear()
     while self.ui.cursCmpCmbBox.count() > 1:
         self.ui.cursCmpCmbBox.removeItem(1)
     self.ui.currCursXvalNumDbl.setValue(0.0)
     self.ui.currCursYvalNumDbl.setValue(0.0)
     self.ui.currCursXdelNumDbl.setValue(0.0)
     self.ui.currCursYdelNumDbl.setValue(0.0)
     self.setStatus(1)
Example No. 36
 def submit():
     new_experiment = experiment(self.experimentName.get(), self.experimentType.get(),
                                 self.experimentObjective.get("1.0", 'end-1c'))
     print("new experiment created")
     print(new_experiment)
     self.experiment_details(experiment=new_experiment)
Example No. 37
 def run(self):
     for i in range(self._experiments):
         e = experiment(self._agent, self._util, self._generator, self._c,
                        self._n, self._alpha)
         e.iterate(self._iterations)
         self._list.append(e)
Example No. 38
import config
import experiment

# initialize experiment:
# - single-class classification problem using the IRIS flower dataset
# - the model should predict the iris species
cfg = config.config()
exp = experiment.experiment(cfg.expdir + "/iris", trials = 10)

exp.set_task(cfg.task_iris())

# loss functions
exp.add_loss("logistic", cfg.loss("s-logistic"))

# trainers
epochs = 100
patience = 100
epsilon = 1e-4

for solver in cfg.batch_solvers():
        exp.add_trainer(solver, cfg.batch_trainer(solver, epochs, patience, epsilon))

# models
output = {"name":"output","type":"affine","omaps":3,"orows":1,"ocols":1}

fc1 = {"name":"fc1","type":"affine","omaps":64,"orows":1,"ocols":1}
fc2 = {"name":"fc2","type":"affine","omaps":32,"orows":1,"ocols":1}
fc3 = {"name":"fc3","type":"affine","omaps":16,"orows":1,"ocols":1}

ac1 = {"name":"ac1","type":"act-snorm"}
ac2 = {"name":"ac2","type":"act-snorm"}
Example No. 39
import config
import experiment

# initialize experiment:
# - single-class classification problem using the WINE dataset
# - the model should predict the wine quality class
cfg = config.config()
exp = experiment.experiment(cfg.expdir + "/wine", trials = 10)

exp.set_task(cfg.task_wine())

# loss functions
exp.add_loss("logistic", cfg.loss("s-logistic"))

# trainers
epochs = 100
patience = 100
epsilon = 1e-4

for solver in cfg.batch_solvers():
        exp.add_trainer(solver, cfg.batch_trainer(solver, epochs, patience, epsilon))

# models
output = {"name":"output","type":"affine","omaps":3,"orows":1,"ocols":1}

fc1 = {"name":"fc1","type":"affine","omaps":64,"orows":1,"ocols":1}
fc2 = {"name":"fc2","type":"affine","omaps":32,"orows":1,"ocols":1}
fc3 = {"name":"fc3","type":"affine","omaps":16,"orows":1,"ocols":1}

ac1 = {"name":"ac1","type":"act-snorm"}
ac2 = {"name":"ac2","type":"act-snorm"}
Example No. 40
scale = {
    'off1': '../off/metadata.tsv',
    'off2': '../off/metadata.tsv',
    'off3': '../off/metadata.tsv',
    'off4': '../off/metadata.tsv',
    'on1': '../pumped1/metadata.tsv',
    'on2': '../pumped1/metadata.tsv',
    'on3': '../pumped1/metadata.tsv',
    'on4': '../pumped1/metadata.tsv',
    'on5': '../pumped1/metadata.tsv',
    'on6': '../pumped1/metadata.tsv',
    'on7': '../pumped1/metadata.tsv',
    'on8': '../pumped1/metadata.tsv',
}

e = experiment.experiment()

c = experiment.crystal()

for k, v in data.items():
    scaledict = {
        i.split()[0]: float(i.split()[1])
        for i in open(scale[k]).readlines()[1:]
    }
    c[k] = experiment.image_series(v).scale(scaledict)
    if k == reference:
        n = c[k].generate_nxdsin()
n.update(nxds_params)

e.append(c)
e.integrate(reference, n)
Example No. 41
def main():
    exp_stats = Statistics()
    exp_stats.new_report_dir()

    params = {
        'academic_dataset':
        'cifar10',
        'target_model_path':
        (models_path / 'cifar10_model_default.pt').as_posix(),
        'mia_model_path':
        (models_path / 'mia_model_cifar10_default').as_posix(),
        'shadow_model_base_path':
        (models_path / 'shadows' / 'shadow_cifar10_default').as_posix(),
        'mia_train_dataset_path':
        (data_path / 'mia_train_dataset_cifar10_default').as_posix(),
        'mia_test_dataset_path':
        (data_path / 'mia_test_dataset_cifar10_default').as_posix(),
        'class_number':
        10,
        'target_train_epochs':
        100,
        'shadow_train_epochs':
        100,
        'mia_train_epochs':
        10,
        'shadow_number':
        10,
        # ~ 'custom_mia_model'       : OrderedDict([
        # ~ ('dense1'      , nn.Linear(10, 128)),
        # ~ ('relu1'       , nn.ReLU()),
        # ~ ('dropout1'    , nn.Dropout(0.3)),
        # ~ ('dense2'      , nn.Linear(128, 64)),
        # ~ ('relu2'       , nn.ReLU()),
        # ~ ('dropout2'    , nn.Dropout(0.2)),
        # ~ ('dense3'      , nn.Linear(64, 2)),
        # ~ ('relu3'       , nn.ReLU()),
        # ~ ('logsoftmax'  , nn.LogSoftmax(dim=1))
        # ~ ]),
        'custom_mia_model':
        OrderedDict([('dense1', nn.Linear(10, 25)), ('sigmo1', nn.Sigmoid()),
                     ('dense2', nn.Linear(25, 2)),
                     ('logsoftmax', nn.LogSoftmax(dim=1))]),
        'custom_target_model':
        OrderedDict([('conv1', nn.Conv2d(3, 32, 3, 1)), ('relu1', nn.ReLU()),
                     ('maxp1', nn.MaxPool2d(2, 2)),
                     ('conv2', nn.Conv2d(32, 64, 3, 1)), ('relu2', nn.ReLU()),
                     ('maxp2', nn.MaxPool2d(2, 2)), ('flatt', Flatten()),
                     ('dens1', nn.Linear(6 * 6 * 64, 512)),
                     ('relu3', nn.ReLU()), ('dens2', nn.Linear(512, 10)),
                     ('lsoft', nn.LogSoftmax(dim=1))]),
        'custom_shadow_model':
        OrderedDict([('conv1', nn.Conv2d(3, 32, 3, 1)), ('relu1', nn.ReLU()),
                     ('maxp1', nn.MaxPool2d(2, 2)),
                     ('conv2', nn.Conv2d(32, 64, 3, 1)), ('relu2', nn.ReLU()),
                     ('maxp2', nn.MaxPool2d(2, 2)), ('flatt', Flatten()),
                     ('dens1', nn.Linear(6 * 6 * 64, 512)),
                     ('relu3', nn.ReLU()), ('dens2', nn.Linear(512, 10)),
                     ('lsoft', nn.LogSoftmax(dim=1))]),
        'use_cuda':
        False,
        'no_cache':
        True,
        'no_mia_train_dataset_cache':
        False,
        'no_mia_test_dataset_cache':
        False,
        'no_mia_models_cache':
        False,
        'no_shadow_cache':
        False
    }

    exp_stats.new_experiment(f"Cifar10 MIA", params)
    experiment(**params, stats=exp_stats)

    # ~ params = { 'academic_dataset'       : 'federal',
    # ~ 'target_model_path'      : (models_path/'federal_model_default.pt').as_posix(),
    # ~ 'mia_model_path'         : (models_path/'mia_model_federal_default').as_posix(),
    # ~ 'shadow_model_base_path' : (models_path/'shadows'/'shadow_federal_default').as_posix(),
    # ~ 'mia_train_dataset_path' : (data_path/'mia_train_dataset_federal_default').as_posix(),
    # ~ 'mia_test_dataset_path'  : (data_path/'mia_test_dataset_federal_default').as_posix(),
    # ~ 'class_number'           : 10,
    # ~ 'target_train_epochs'    : 100,
    # ~ 'shadow_train_epochs'    : 100,
    # ~ 'shadow_number'          : 2,
    # ~ 'custom_mia_model'       : OrderedDict([
    # ~ ('dense1'      , nn.Linear(10, 128)),
    # ~ ('relu1'       , nn.ReLU()),
    # ~ ('dropout1'    , nn.Dropout(0.3)),
    # ~ ('dense2'      , nn.Linear(128, 64)),
    # ~ ('relu2'       , nn.ReLU()),
    # ~ ('dropout2'    , nn.Dropout(0.2)),
    # ~ ('dense3'      , nn.Linear(64, 2)),
    # ~ ('relu3'       , nn.ReLU()),
    # ~ ('logsoftmax'  , nn.LogSoftmax(dim=1))
    # ~ ]),
    # ~ 'no_cache'                   : False,
    # ~ 'no_mia_train_dataset_cache' : True,
    # ~ 'no_mia_test_dataset_cache'  : True,
    # ~ 'no_shadow_cache'            : True }

    # ~ exp_stats.new_experiment(f"Federal MIA: shadow number 50", params)
    # ~ experiment(**params, stats = exp_stats)

    # ~ for i in range(5, 200, 5):
    # ~ params = { 'academic_dataset'       : 'federal',
    # ~ 'target_model_path'      : (models_path/'federal_model_default.pt').as_posix(),
    # ~ 'mia_model_path'         : (models_path/'mia_model_federal_default').as_posix(),
    # ~ 'shadow_model_base_path' : (models_path/'shadows'/'shadow_federal_default').as_posix(),
    # ~ 'mia_train_dataset_path' : (data_path/'mia_train_dataset_federal_default').as_posix(),
    # ~ 'mia_test_dataset_path'  : (data_path/'mia_test_dataset_federal_default').as_posix(),
    # ~ 'class_number'           : 10,
    # ~ 'target_train_epochs'    : 15,
    # ~ 'shadow_train_epochs'    : 15,
    # ~ 'shadow_number'          : i,
    # ~ 'custom_mia_model'       : OrderedDict([
    # ~ ('dense1'      , nn.Linear(10, 128)),
    # ~ ('relu1'       , nn.ReLU()),
    # ~ ('dropout1'    , nn.Dropout(0.3)),
    # ~ ('dense2'      , nn.Linear(128, 64)),
    # ~ ('relu2'       , nn.ReLU()),
    # ~ ('dropout2'    , nn.Dropout(0.2)),
    # ~ ('dense3'      , nn.Linear(64, 2)),
    # ~ ('relu3'       , nn.ReLU()),
    # ~ ('logsoftmax'  , nn.LogSoftmax(dim=1))
    # ~ ]),
    # ~ 'no_mia_train_dataset_cache' : True,
    # ~ 'no_mia_test_dataset_cache'  : True,
    # ~ 'no_shadow_cache'            : True }

    # ~ for j in range(5):
    # ~ exp_stats.new_experiment(f"Federal MIA: shadow number {i}", params)
    # ~ experiment(**params, stats = exp_stats)

    # ~ # run the code on cuda or not for all experiments
    # ~ cuda = False
    # ~ if cuda:
    # ~ import torch.multiprocessing
    # ~ torch.multiprocessing.set_start_method('spawn', force = 'True')

    # ~ exp_stats = Statistics()

    # ~ for i in range(1, 129, 4):
    # ~ params = { 'academic_dataset'       : 'cifar10',
    # ~ 'target_model_path'      : (models_path/'cifar10_model_default.pt').as_posix(),
    # ~ 'mia_model_path'         : (models_path/'mia_model_cifar10_default').as_posix(),
    # ~ 'shadow_model_base_path' : (models_path/'shadows'/'shadow_cifar10_default').as_posix(),
    # ~ 'mia_train_dataset_path' : (data_path/'mia_train_dataset_cifar10_default').as_posix(),
    # ~ 'mia_test_dataset_path'  : (data_path/'mia_test_dataset_cifar10_default').as_posix(),
    # ~ 'class_number'           : 10,
    # ~ 'target_train_epochs'    : 15,
    # ~ 'shadow_train_epochs'    : 15,
    # ~ 'shadow_number'          : 90,
    # ~ 'custom_mia_model'       : OrderedDict([
    # ~ ('dense1'      , nn.Linear(10, 128)),
    # ~ ('relu1'       , nn.ReLU()),
    # ~ ('dropout1'    , nn.Dropout(0.3)),
    # ~ ('dense2'      , nn.Linear(128, 64)),
    # ~ ('relu2'       , nn.ReLU()),
    # ~ ('dropout2'    , nn.Dropout(0.2)),
    # ~ ('dense3'      , nn.Linear(64, 2)),
    # ~ ('relu3'       , nn.ReLU()),
    # ~ ('logsoftmax'  , nn.LogSoftmax(dim=1))
    # ~ ]),
    # ~ 'custom_target_model'     : OrderedDict([
    # ~ ('conv1', nn.Conv2d(3, 32, 3, 1)),
    # ~ ('relu1', nn.ReLU()),
    # ~ ('maxp1', nn.MaxPool2d(2, 2)),
    # ~ ('conv2', nn.Conv2d(32, 64, 3, 1)),
    # ~ ('relu2', nn.ReLU()),
    # ~ ('maxp2', nn.MaxPool2d(2, 2)),
    # ~ ('flatt', Flatten()),
    # ~ ('dens1', nn.Linear(6*6*64, 512)),
    # ~ ('relu3', nn.ReLU()),
    # ~ ('dens2', nn.Linear(512, 10)),
    # ~ ('lsoft', nn.LogSoftmax(dim=1))
    # ~ ]),
    # ~ 'custom_shadow_model'     : OrderedDict([
    # ~ ('conv1', nn.Conv2d(3, i, 3, 1)),
    # ~ ('relu1', nn.ReLU()),
    # ~ ('maxp1', nn.MaxPool2d(2, 2)),
    # ~ ('conv2', nn.Conv2d(i, i, 3, 1)),
    # ~ ('relu2', nn.ReLU()),
    # ~ ('maxp2', nn.MaxPool2d(2, 2)),
    # ~ ('flatt', Flatten()),
    # ~ ('dens1', nn.Linear(6*6*i, 512)),
    # ~ ('relu3', nn.ReLU()),
    # ~ ('dens2', nn.Linear(512, 10)),
    # ~ ('lsoft', nn.LogSoftmax(dim=1))
    # ~ ]),
    # ~ 'use_cuda'                   : cuda,
    # ~ 'no_mia_train_dataset_cache' : True,
    # ~ 'no_mia_models_cache'        : True,
    # ~ 'no_shadow_cache'            : True }

    # ~ for j in range(5):
    # ~ exp_stats.new_experiment(f"Cifar10 MIA: shadow conv filter number {i}", params)
    # ~ experiment(**params, stats = exp_stats)

    # ~ # default regularized purchase model
    # ~ params = { 'academic_dataset'       : 'purchase',
    # ~ 'target_model_path'      : (models_path/'purchase_model_default.pt').as_posix(),
    # ~ 'mia_model_path'         : (models_path/'mia_model_purchase_default').as_posix(),
    # ~ 'shadow_model_base_path' : (models_path/'shadows'/'shadow_purchase_default').as_posix(),
    # ~ 'mia_train_dataset_path' : (data_path/'mia_train_dataset_purchase_default').as_posix(),
    # ~ 'mia_test_dataset_path'  : (data_path/'mia_test_dataset_purchase_default').as_posix(),
    # ~ 'class_number'           : 2,
    # ~ 'use_cuda'               : cuda }

    # ~ exp_stats.new_experiment("MIA on default Purchase model (batch norm + dropout regularization)", params)
    # ~ experiment(**params, stats = exp_stats)

    # ~ # default regularized mnist model
    # ~ params = { 'academic_dataset'       : 'mnist',
    # ~ 'target_model_path'      : (models_path/'mnist_model_default.pt').as_posix(),
    # ~ 'mia_model_path'         : (models_path/'mia_model_default').as_posix(),
    # ~ 'shadow_model_base_path' : (models_path/'shadows'/'shadow_default').as_posix(),
    # ~ 'mia_train_dataset_path' : (data_path/'mia_train_dataset_default').as_posix(),
    # ~ 'mia_test_dataset_path'  : (data_path/'mia_test_dataset_default').as_posix(),
    # ~ 'class_number'           : 10,
    # ~ 'use_cuda'               : cuda }

    # ~ exp_stats.new_experiment("MIA on default Mnist (batch norm regularization)", params)
    # ~ experiment(**params, stats = exp_stats)

    # ~ # without regularization
    # ~ params = { 'academic_dataset'    : 'mnist',
    # ~ 'target_model_path'   : (models_path/'mnist_model_exp1.pt').as_posix(),
    # ~ 'mia_model_path'      : (models_path/'mia_model_exp1').as_posix(),
    # ~ 'custom_target_model' : OrderedDict([
    # ~ ('conv1'       , nn.Conv2d(1, 10, 3, 1)),
    # ~ ('relu1'       , nn.ReLU()),
    # ~ ('maxpool1'    , nn.MaxPool2d(2, 2)),
    # ~ ('conv2'       , nn.Conv2d(10, 10, 3, 1)),
    # ~ ('relu2'       , nn.ReLU()),
    # ~ ('maxpool2'    , nn.MaxPool2d(2, 2)),
    # ~ ('to1d'        , Flatten()),
    # ~ ('dense1'      , nn.Linear(5*5*10, 500)),
    # ~ ('tanh'        , nn.Tanh()),
    # ~ ('dense2'      , nn.Linear(500, 10)),
    # ~ ('logsoftmax'  , nn.LogSoftmax(dim=1))
    # ~ ]),
    # ~ 'shadow_number'            : 50,
    # ~ 'shadow_model_base_path'   : (models_path/'shadows'/'shadow_exp1').as_posix(),
    # ~ 'mia_train_dataset_path'   : (data_path/'mia_train_dataset_exp1').as_posix(),
    # ~ 'mia_test_dataset_path'    : (data_path/'mia_test_dataset_exp1').as_posix(),
    # ~ 'class_number'             : 10,
    # ~ 'use_cuda'                 : cuda }

    # ~ exp_stats.new_experiment("MIA on Mnist with no regularization", params)
    # ~ experiment(**params, stats = exp_stats)

    # ~ # with dropout regularization
    # ~ params = { 'academic_dataset'    : 'mnist',
    # ~ 'target_model_path'   : (models_path/'mnist_model_exp2.pt').as_posix(),
    # ~ 'mia_model_path'      : (models_path/'mia_model_exp2').as_posix(),
    # ~ 'custom_target_model' : OrderedDict([
    # ~ ('conv1'       , nn.Conv2d(1, 10, 3, 1)),
    # ~ ('relu1'       , nn.ReLU()),
    # ~ ('maxpool1'    , nn.MaxPool2d(2, 2)),
    # ~ ('dropout1'    , nn.Dropout(p = 0.5)),
    # ~ ('conv2'       , nn.Conv2d(10, 10, 3, 1)),
    # ~ ('relu2'       , nn.ReLU()),
    # ~ ('maxpool2'    , nn.MaxPool2d(2, 2)),
    # ~ ('dropout2'    , nn.Dropout(p = 0.5)),
    # ~ ('to1d'        , Flatten()),
    # ~ ('dense1'      , nn.Linear(5*5*10, 500)),
    # ~ ('tanh'        , nn.Tanh()),
    # ~ ('dropout3'    , nn.Dropout(p = 0.5)),
    # ~ ('dense2'      , nn.Linear(500, 10)),
    # ~ ('logsoftmax'  , nn.LogSoftmax(dim=1))
    # ~ ]),
    # ~ 'shadow_number'            : 50,
    # ~ 'shadow_model_base_path'   : (models_path/'shadows'/'shadow_exp2').as_posix(),
    # ~ 'mia_train_dataset_path'   : (data_path/'mia_train_dataset_exp2').as_posix(),
    # ~ 'mia_test_dataset_path'    : (data_path/'mia_test_dataset_exp2').as_posix(),
    # ~ 'class_number'             : 10,
    # ~ 'use_cuda'                 : cuda }

    # ~ exp_stats.new_experiment("MIA on Mnist with dropout regulrization", params)
    # ~ experiment(**params, stats = exp_stats)

    exp_stats.print_results()
    exp_stats.save(dir=reports_path)
Example No. 42
def anova(expPath, imageFile="anova.png"):
    exp = e.experiment()
    exp.read(expPath, loadW=True)
        
    ret = exp.getTrainInputs()
    # ret = exp.getTestInputs()

    numLocations = exp.exp['num_locs']
    global pval
    pval = 0.001

    inputs = ret['inputs']
    targets = ret['targets']
    inames = ret['inputNames']

    rnn = exp.rnn

    np.set_printoptions(edgeitems = 10)

    locTrials = [[] for _ in range(numLocations)]
    cue1locTrials = [[] for _ in range(numLocations)]
    cue2locTrials = [[] for _ in range(numLocations)]
    choices = [[] for _ in range(2)]

    conjPairs = []
    conjTrials = []

    trialMeans = []

    # The trial mean firing rates for the delay period alone
    trialMeansDelayOnly = []

    for trial in range(len(inputs)):
        j = np.asarray(inputs[trial])

        try:
            info = ast.literal_eval(ret['inputNames'][trial])
        except (ValueError, SyntaxError):
            # keep the previous trial's info if the name fails to parse
            pass

        # compute activations and select the hidden layer
        hidden = rnn.forward(j, rnn.W)[1]

        # eliminate this extra dimension from the outputs.
        # still not exactly sure what this dimension is
        hidden = hidden[:, 0, :]

        means = []

        # This "meansDelayOnly" is the mean firing rate for the delay period only
        meansDelayOnly = []
        for unit in range(len(hidden[0])):
            means.append(np.mean(hidden[:, unit]))
            meansDelayOnly.append(np.mean(hidden[-10:-5, unit]))

        trialMeansDelayOnly.append(meansDelayOnly)
        trialMeans.append(means)

        loc1 = info['inputs']['cue1']
        loc2 = info['inputs']['cue2']
        choice = info['inputs']['choice'] - 1

        # print loc1
        # print len(locTrials)
        locTrials[loc1].append(trial)
        locTrials[loc2].append(trial)
        choices[choice].append(trial)
        cue1locTrials[loc1].append(trial)
        cue2locTrials[loc2].append(trial)

        i = min(loc1, loc2)
        j = max(loc1, loc2)
        pair = [i, j]
        if pair not in conjPairs:
            conjPairs.append(pair)
            conjTrials.append([])
        conjTrials[conjPairs.index(pair)].append(trial)

    count = 0
    sigNeurons = []
    for neuron in range(len(trialMeans[0])):

        sample1 = []
        sample2 = []

        for trial in locTrials[0]:
            sample1.append(trialMeans[trial][neuron])
        for trial in locTrials[1]:
            sample2.append(trialMeans[trial][neuron])

        ans = f_oneway(sample1, sample2)

        if ans.pvalue < pval:
            count += 1
            sigNeurons.append(neuron)
            # print ans
            # print neuron
    print "# neurons prefer location 0 at cue1 to cue2".format(count, pval)
    print sigNeurons
    print
    exit()  # NOTE: execution stops here; the analyses below never run

    count = 0
    sigNeurons = []
    for neuron in range(len(trialMeans[0])):

        sample1 = []
        sample2 = []

        for trial in choices[0]:
            sample1.append(trialMeans[trial][neuron])
        for trial in choices[1]:
            sample2.append(trialMeans[trial][neuron])

        ans = f_oneway(sample1, sample2)

        if ans.pvalue < pval:
            count += 1
            sigNeurons.append(neuron)
            # print ans
            # print neuron
    print "# neurons prefer choice selection (1st or 2nd cue,): {}, pval < {}".format(count, pval)
    print sigNeurons
    print

    # number of significant neurons prefering that direction
    numSigLoc = np.zeros(numLocations)

    # some relevant neurons, just a list of some significant ones
    sigNeurons = []

    for loc in range(numLocations):
        for neuron in range(len(trialMeans[0])):
            sample1 = []
            sample2 = []

            for trial in locTrials[loc]:
                sample1.append(trialMeans[trial][neuron])
            for loc2 in range(numLocations):
                if loc2 != loc:  # 'is not' compares identity, not value
                    for trial in locTrials[loc2]:
                        sample2.append(trialMeans[trial][neuron])

            ans = f_oneway(sample1, sample2)

            if ans.pvalue < pval:
                numSigLoc[loc] += 1
                sigNeurons.append(neuron)

    message = "# sig neurons per location: ["
    for loc in range(len(numSigLoc)):
        message = message + str(numSigLoc[loc]) + ", "
    message = message[:-2]
    message = message + "], pval < {}".format(pval)

    print message
    print