Example #1
	def Start(self):
		'''{'''
		if not self.__InitialiseConf() is True:
			return None
		'''}'''

		''''''
		self.__LogStart()

		destination = self.configs['destination'] if 'destination' in self.configs else None
		DestDirs = self.configs['DestDirs'] if 'DestDirs' in self.configs else None
		DirsFiles = self.configs['DirsFiles'] if 'DirsFiles' in self.configs else None

		saver = Saver(destination);
		if(saver.CreateDestDirs(DestDirs) is None):
			self.__LogMsg(saver.msg)
		else:
			for destdir in DestDirs:
				if(saver.CopyFiles(destdir,DirsFiles[destdir]) is None):
					self.__LogMsg(saver.msg)
					saver.msg = ""

		''''''
		self.__LogEnd()
		return;
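# A minimal sketch (not part of the original example) of the config shape that
# Start() above appears to expect: 'destination' is the base path handed to
# Saver(), 'DestDirs' lists the directories passed to CreateDestDirs(), and
# 'DirsFiles' maps each directory to the files CopyFiles() should copy.
# The concrete paths and file names below are illustrative assumptions only.
example_configs = {
    'destination': '/tmp/backup_root',
    'DestDirs': ['photos', 'docs'],
    'DirsFiles': {
        'photos': ['a.jpg', 'b.jpg'],
        'docs': ['notes.txt'],
    },
}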
Example #2
def main(argv):
	global PYBICO_VERBOSE

	try:
		opts, args = getopt.getopt(argv, "hvl:s:i:e:u:p:", ["help"])
	except getopt.GetoptError as err:
		print(str(err))
		usage()
		sys.exit(2)

	PYBICO_VERBOSE = False
	load_format = "txt"
	save_format = "xlsx"
	load_filename = ""
	save_filename = ""
	password_path = ""
	user = ""

	for o, a in opts:
		if o == "-v":
			PYBICO_VERBOSE = True
		elif o in ("-h", "--help"):
			usage()
			sys.exit()
		elif o == "-u":
			user = a
		elif o == "-p":
			password_path = a
		elif o == "-l":
			load_filename = a
		elif o == "-s":
			save_filename = a
		elif o == "-i":
			load_format = a
		elif o == "-e":
			save_format = a
		else:
			assert False, "unhandled option"

	f = open(password_path, 'r')
	password = f.read().strip('\n')

	db = DB(user, password)
	if load_filename != "":
		l = Loader()
		data = l.load(load_format, load_filename)
		db.add(data)
	if save_filename != "":
		data = db.get()
		s = Saver()
		s.save(data, save_format, save_filename)
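# A hedged sketch of how a main() like the one above is typically invoked; the
# module guard below is an assumption and is not shown in the original snippet.
if __name__ == "__main__":
    main(sys.argv[1:])  # getopt expects argv without the program name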
Example #3
class Scrapper:
    def __init__(self, start_url, savefile, max_depth=10, max_width=100):
        """
            max_depth: maximum recursion depth to follow for each link
            max_width: maximum number of dict keys, i.e. width of tree
        """
        self.parser = Parser()
        self.start_url = start_url
        self.saver = Saver(savefile, max_width)
        self.max_depth = max_depth
        self.saver.starting_url(self.start_url)

    def start_scrapping(self, depth=0, start_url=None):
        if depth == self.max_depth:
            return
        if start_url is None:
            start_url = self.start_url
        nested_urls = self.get_urls(start_url)
        bool_break = self.save_data(start_url, nested_urls)
        if not bool_break:
            exit()

        if nested_urls is None:
            return
        else:
            for url in nested_urls:
                self.start_scrapping(depth+1, url)

    def get_urls(self, url):
        try:
            response = requests.get(url)
            web_page = response.content
            urls = self.parser.get_links(web_page)
        except requests.exceptions.RequestException as re:
            print(re)
            return
        return urls

    def save_data(self, start_url, nested_urls):
        reply = self.saver.save(start_url, nested_urls)
        return bool(reply)
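# Minimal usage sketch (assumed, not taken from the original project): crawl a
# start page to a fixed depth and let the Saver write the link tree to a file.
if __name__ == "__main__":
    scraper = Scrapper("http://example.com", "links_tree.json", max_depth=3, max_width=50)
    scraper.start_scrapping()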
Example #4
def read(folder):
    log.info('Reading pretrained network from {}'.format(folder))
    saver = Saver(folder)
    ckpt_info = saver.get_ckpt_info()
    model_opt = ckpt_info['model_opt']
    ckpt_fname = ckpt_info['ckpt_fname']
    model_id = ckpt_info['model_id']
    m = model.get_model(model_opt)
    cnn_nlayers = len(model_opt['cnn_filter_size'])
    mlp_nlayers = 1
    timespan = 1
    weights = {}
    sess = tf.Session()
    saver.restore(sess, ckpt_fname)

    output_list = []
    for net, nlayers in zip(['cnn', 'mlp'], [cnn_nlayers, mlp_nlayers]):
        for ii in xrange(nlayers):
            for w in ['w', 'b']:
                key = '{}_{}_{}'.format(net, w, ii)
                log.info(key)
                output_list.append(key)
            if net == 'cnn':
                for tt in xrange(timespan):
                    for w in ['beta', 'gamma']:
                        key = '{}_{}_{}_{}'.format(net, ii, tt, w)
                        log.info(key)
                        output_list.append(key)

    output_var = []
    for key in output_list:
        output_var.append(m[key])

    output_var_value = sess.run(output_var)

    for key, value in zip(output_list, output_var_value):
        weights[key] = value
        log.info(key)
        log.info(value.shape)

    return weights
Example #6
    def __init__(self, session_params, ax_interactive=None):
        # incorporate kwargs
        self.params = session_params
        self.__dict__.update(self.params)
        self.verify_params()

        # sync
        self.sync_flag = multiprocessing.Value('b', False)
        self.sync_to_save = multiprocessing.Queue()

        # saver
        self.saver = Saver(self.subj, self.name, self, sync_flag=self.sync_flag)

        # hardware
        self.cam = PSEye(sync_flag=self.sync_flag, **self.cam_params)
        self.ar = AnalogReader(saver_obj_buffer=self.saver.buf, sync_flag=self.sync_flag, **self.ar_params)
        # communication
        self.ni = NI845x(i2c_on=self.imaging)

        # interactivity
        self.ax_interactive = ax_interactive
        
        # runtime variables
        self.notes = {}
        self.mask_idx = -1 #for reselecting mask
        self.session_on = 0
        self.on = False
        self.session_complete = False
        self.session_kill = False
        self.trial_flag = False
        self.trial_on = 0
        self.trial_off = 0
        self.trial_idx = -1
        self.stim_cycle_idx = 0
        self.paused = False
        self.deliver_override = False
        self.roi_pts = None
        self.eyelid_buffer = np.zeros(self.eyelid_buffer_size)-1
        self.eyelid_buffer_ts = np.zeros(self.eyelid_buffer_size)-1
        self.past_flag = False
        
        # sync
        self.sync_flag.value = True #trigger all processes to get time
        self.sync_val = now() #get this process's time
        procs = dict(saver=self.saver, cam=self.cam.pseye, ar=self.ar)
        sync_vals = {o:procs[o].sync_val.value for o in procs} #collect all process times
        sync_vals['session'] = self.sync_val
        self.sync_to_save.put(sync_vals)
        
        # more runtime, anything that must occur after sync
        _,self.im = self.cam.get()
Example #7
def main():
    args = parse_args()

    saver = Saver(args.model_dir)
    model = SELUNet()
    with warnings.catch_warnings():
        warnings.simplefilter('ignore')
        if args.use_cuda:
            model = model.cuda()
        model, _, params_dict = saver.load_checkpoint(
            model, file_name=args.model_name)

    model.eval()
    filespan = args.filespan

    idr_params, _, _ = get_optimal_params(model,
                                          args.valspeechfolder,
                                          args.valpeaksfolder,
                                          args.window,
                                          args.stride,
                                          filespan,
                                          numfiles=60,
                                          use_cuda=False,
                                          thlist=[
                                              0.15, 0.2, 0.25, 0.3, 0.35, 0.4,
                                              0.45, 0.5, 0.55, 0.6, 0.65, 0.7,
                                              0.75
                                          ],
                                          spblist=[25],
                                          hctlist=[10, 15, 20, 25, 30])
    thr = idr_params['thr']
    spb = idr_params['spb']
    hct = idr_params['hct']

    with open('test_idr.txt', 'w') as f:
        print(
            'Optimal Hyperparameters\nThreshold: {} Samples Per Bin: {} Histogram Count Threshold: {}'
            .format(thr, spb, hct),
            file=f,
            flush=True)

    numfiles = len(glob(os.path.join(args.speechfolder, '*.npy')))
    print('Models and Files Loaded')

    metrics_list = []

    for i in range(0, numfiles, filespan):
        if (i + filespan) > numfiles:
            break
        speech_windowed_data, peak_distance, peak_indicator, indices, actual_gci_locations = create_dataset(
            args.speechfolder, args.peaksfolder, args.window, args.stride,
            slice(i, i + filespan))

        input = to_variable(
            th.from_numpy(
                np.expand_dims(speech_windowed_data, 1).astype(np.float32)),
            args.use_cuda, True)

        with warnings.catch_warnings():
            prediction = model(input)

        predicted_peak_indicator = F.sigmoid(prediction[:, 1]).data.numpy()
        predicted_peak_distance = (prediction[:,
                                              0]).data.numpy().astype(np.int32)

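        # keep only windows whose sigmoid peak score exceeds the detection threshold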
        predicted_peak_indicator_indices = predicted_peak_indicator > args.threshold

        predicted_peak_indicator = predicted_peak_indicator[
            predicted_peak_indicator_indices].ravel()
        predicted_peak_distance = predicted_peak_distance[
            predicted_peak_indicator_indices].ravel()
        indices = indices[predicted_peak_indicator_indices]

        assert (len(indices) == len(predicted_peak_distance))
        assert (len(predicted_peak_distance) == len(predicted_peak_indicator))

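        # keep only predictions whose estimated peak distance lies within the analysis window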
        positive_distance_indices = predicted_peak_distance < args.window

        positive_peak_distances = predicted_peak_distance[
            positive_distance_indices]
        postive_predicted_peak_indicator = predicted_peak_indicator[
            positive_distance_indices]

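        # convert the window-relative peak distances into absolute GCI sample locations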
        gci_locations = [
            indices[i, d] for i, d in enumerate(positive_peak_distances)
        ]

        locations_true = np.nonzero(actual_gci_locations)[0]
        xaxes = np.zeros(len(actual_gci_locations))
        xaxes[locations_true] = 1

        ground_truth = np.row_stack(
            (np.arange(len(actual_gci_locations)), xaxes))
        predicted_truth = np.row_stack(
            (gci_locations, postive_predicted_peak_indicator))

        gx = ground_truth[0, :]
        gy = ground_truth[1, :]

        px = predicted_truth[0, :]
        py = predicted_truth[1, :]

        fs = 16000

        gci = np.array(
            cluster(px,
                    py,
                    threshold=thr,
                    samples_per_bin=spb,
                    histogram_count_threshold=hct))
        predicted_gci_time = gci / fs
        target_gci_time = np.nonzero(gy)[0] / fs

        gci = np.round(gci).astype(np.int64)
        gcilocs = np.zeros_like(gx)
        gcilocs[gci] = 1

        metric = corrected_naylor_metrics(target_gci_time, predicted_gci_time)
        print(metric)
        metrics_list.append(metric)

    idr = np.mean([
        v for m in metrics_list for k, v in m.items()
        if k == 'identification_rate'
    ])
    mr = np.mean(
        [v for m in metrics_list for k, v in m.items() if k == 'miss_rate'])
    far = np.mean([
        v for m in metrics_list for k, v in m.items()
        if k == 'false_alarm_rate'
    ])
    se = np.mean([
        v for m in metrics_list for k, v in m.items()
        if k == 'identification_accuracy'
    ])

    print('IDR: {:.5f} MR: {:.5f} FAR: {:.5f} IDA: {:.5f}'.format(
        idr, mr, far, se))

    with open('test_idr.txt', 'a') as f:
        f.write('IDR: {:.5f} MR: {:.5f} FAR: {:.5f} IDA: {:.5f}\n'.format(
            idr, mr, far, se))
Example #8
class Logger:

    '''
    Class for keeping track of what's going on.
    TODO: methods docstrings
    TODO: fix logs timestamp as int not str

    '''

    def __init__(self, debug=False):
        self.debug = debug
        self.saver = Saver()
        self.last_tweets = self.maybe_load(FileNames.last_tweets)
        self.giphy_keys = self.maybe_load(FileNames.giphy_keys)

    def maybe_load(self, fname):
        try:
            return load_json(fname)
        except Exception:
            return {}

    def save_dict(self, d, fname):
        save_json(d, fname)
        self.saver.sync()

    def save_last_tweets(self):
        self.save_dict(self.last_tweets, FileNames.last_tweets)

    def save_giphy_keys(self):
        self.save_dict(self.giphy_keys, FileNames.giphy_keys)

    def update_last_tweet(self, action, status):
        if action not in self.last_tweets:
            self.last_tweets[action] = {}
        self.last_tweets[action][
            'datetime'] = datetime.now().strftime('%Y%m%d%H%M%S')
        self.last_tweets[action]['id'] = status.id
        self.save_last_tweets()

    def tweeted_gif(self, key):
        if self.giphy_keys == {}:
            self.giphy_keys['keys'] = []
            return False
        else:
            if key in self.giphy_keys['keys']:
                return True
            else:
                self.giphy_keys['keys'].append(key)
                self.save_giphy_keys()
                return False

    def last_action_id(self, action):
        try:
            return self.last_tweets[action]['id']
        except Exception:
            return None

    def last_action_past_seconds(self, action):
        t = datetime.utcfromtimestamp(0)
        try:
            t = datetime.strptime(
                self.last_tweets[action]['datetime'], '%Y%m%d%H%M%S')
        except Exception as e:
            self.log(
                'Reading self.last_tweets[action]["datetime"]' + str(e), error=True)
        return (datetime.now() - t).total_seconds()
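# Illustrative usage sketch (assumed, not from the original bot): skip a GIF that
# was already posted and check how long ago a given action last ran. The action
# name 'gif' is hypothetical; Logger relies on the project's Saver, FileNames and
# load_json helpers being importable.
logger = Logger(debug=True)
if not logger.tweeted_gif('some_giphy_key'):   # first sighting: the key gets recorded
    print('last gif tweet was {:.0f}s ago'.format(logger.last_action_past_seconds('gif')))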
Example #9
class TrainModel:
    def __init__(self):
        # prepare training parameters
        if args.backbone == "vgg16":
            model = torchvision.models.vgg16_bn(True)
            # remove dropout
            model.classifier = nn.Sequential(
                *[model.classifier[i] for i in [0, 1, 3, 4, 6]])
            mean = [0.485, 0.456, 0.406]
            std = [0.229, 0.224, 0.225]
        else:
            raise NotImplementedError()
        criterion_ce = nn.CrossEntropyLoss()
        if args.loss_type == "PC1_sign":
            criterion_corr = PearsonCorrelationLoss1("sign")
        elif args.loss_type == "PC2_sign":
            criterion_corr = PearsonCorrelationLoss2("sign")
        elif args.loss_type == "PC3_sign":
            criterion_corr = PearsonCorrelationLoss3("sign")
        else:
            raise NotImplementedError()
        self.device = torch.device(
            "cuda:0" if torch.cuda.is_available() else "cpu")
        self.model = model.to(self.device)
        self.train_loader, self.validate_loader = make_data_loader(args)
        self.optimizer = torch.optim.Adam(self.model.parameters(), lr=args.lr)
        self.criterion_ce = criterion_ce.to(self.device)
        self.criterion_corr = criterion_corr.to(self.device)
        self.best_pred = 0.0

        # config saver
        self.saver = Saver(args)
        self.saver.save_experiment_config()

        # tensorboard
        self.summary = TensorboardSummary(self.saver.experiment_dir, mean, std)
        self.writer = self.summary.create_summary()

        # resume training
        if args.resume is not None:
            if not os.path.isfile(args.resume):
                raise RuntimeError("=> no checkpoint found at '{}'".format(
                    args.resume))
            checkpoint = torch.load(args.resume)
            args.start_epoch = checkpoint['epoch']
            if args.cuda:
                self.model.module.load_state_dict(checkpoint['state_dict'])
            else:
                self.model.load_state_dict(checkpoint['state_dict'])
            if not args.ft:
                self.optimizer.load_state_dict(checkpoint['optimizer'])
            self.best_pred = checkpoint['best_pred']
            print("=> loaded checkpoint '{}' (epoch {})".format(
                args.resume, checkpoint['epoch']))

    def training(self):
        pass

    def plot(self, logdir, train_loss, valid_loss, **kwargs):
        fig = plt.figure(figsize=(10, 8))
        plt.plot(range(0, len(train_loss)), train_loss, label='Training Loss')
        plt.plot(range(0, len(valid_loss)),
                 valid_loss,
                 label='Validation Loss')
        for key in kwargs:
            plt.plot(range(0, len(kwargs[key])), kwargs[key], label=key)
        # find position of lowest validation loss
        minposs = valid_loss.index(min(valid_loss))
        plt.axvline(minposs,
                    linestyle='--',
                    color='r',
                    label='Early Stopping Checkpoint')
        plt.xlabel('epochs')
        plt.ylabel('loss')
        plt.xlim(0, len(train_loss) + 1)  # consistent scale
        plt.grid(True)
        plt.legend()
        plt.tight_layout()
        fig.savefig(os.path.join(logdir, 'training_curve.png'),
                    bbox_inches='tight')
if 'state_dict' in experiment.optimizer:
    optimizer.load_state_dict(torch.load(experiment.optimizer.state_dict))

# Logger
if len(experiment.session.log.when) > 0:
    logger = SummaryWriter(experiment.session.log.folder)
    logger.add_text(
        'Experiment',
        textwrap.indent(pyaml.dump(experiment, safe=True, sort_dicts=False),
                        '    '), experiment.samples)
else:
    logger = None

# Saver
if len(experiment.session.checkpoint.when) > 0:
    saver = Saver(experiment.session.checkpoint.folder)
    if experiment.epoch == 0:
        saver.save_experiment(experiment, suffix=f'e{experiment.epoch:04d}')
else:
    saver = None
# endregion

# Datasets and dataloaders
dataset = SolubilityDataset(experiment.session.data.path)
dataloader_kwargs = dict(
    num_workers=min(experiment.session.cpus, 1)
    if 'cuda' in experiment.session.device else experiment.session.cpus,
    pin_memory='cuda' in experiment.session.device,
    worker_init_fn=lambda _: np.random.seed(
        int(torch.initial_seed()) % (2**32 - 1)),
    batch_size=experiment.session.batch_size,
Example #11
from saver import Saver


env = gym.make(ENV_NAME)
num_actions = env.action_space.n

online_network = DQModel(256, num_actions, str_name="Online")
target_network = DQModel(256, num_actions, str_name="Target")
online_network.compile(optimizer=keras.optimizers.Adam(), loss='mse')
# make target_network equal to the primary (online) network
for t, e in zip(target_network.trainable_variables, online_network.trainable_variables):
    t.assign(e)

online_network.compile(optimizer=keras.optimizers.Adam(), loss=tf.keras.losses.Huber())

saver = Saver(ckpt_path=CKPT_PATH, parameters_path=PARAMETERS_PATH)

if LOAD and TRAIN:
    try:
        episode, total_steps, eps, session = saver.load_parameters()
        saver.load_models(online_network, target_network)
    except Exception:
        episode = 0  # the weights of both models are saved every time step % SAVE_EACH == 0
        total_steps = 0
        eps = MAX_EPSILON
        session = dt.datetime.now().strftime('%d%m%Y%H%M')
else:
    episode = 0   # the weights of both models are saved every time step % SAVE_EACH == 0
    total_steps = 0
    eps = MAX_EPSILON
    session = dt.datetime.now().strftime('%d%m%Y%H%M')
    [6, 21, 31, 36],
    [7, 27],
    [0, 16, 22, 24, 30],
    [19, 29],
    [34, 35, 37, 38],
]
task_groups = [task_groups[opt.task]]

num_tasks = len(task_groups)
num_classes = sum([len(elt) for elt in task_groups])
classes = task_groups[0]

# Saving settings
model_dir = os.path.join(opt.checkpoint_path, opt.name)
os.mkdir(model_dir) if not os.path.isdir(model_dir) else None
saver = Saver(model_dir, args=opt)

# Define device, model and optimiser
gpu = utils.check_gpu()
device = torch.device(
    "cuda:{}".format(gpu) if torch.cuda.is_available() else "cpu")
model = resnet18(task_groups, trainable_weights=False).to(device)
criterion = nn.BCELoss(reduction='none')
optimizer = optim.Adam(model.parameters(), lr=1e-4)

# Recover weights, if required
if opt.recover:
    ckpt_file = os.path.join(model_dir, opt.reco_type + '_weights.pth')
    ckpt = torch.load(ckpt_file, map_location=device)
    model.load_state_dict(ckpt['model_state_dict'])
    epoch = ckpt['iter_nb'] + 1
Example #13
class Ui_Form(QWidget):
    def setupUi(self):
        self.setObjectName("Form")
        self.resize(1600, 1000)
        self.tabWidget = QtWidgets.QTabWidget(self)
        self.tabWidget.setGeometry(QtCore.QRect(20, 20, 1151, 131))
        self.tabWidget.setObjectName("tabWidget")

        self.img = '003.jpg'
        self.path = './'
        self.photo_shower = PhotoShower()
        # self.photo_shower.data.read(self.img)
        # self.photo_shower.show(0)
        # self.photo_shower.resize(1024, 768)

        self.tab_splite = GrayParams(self.photo_shower)#QtWidgets.QWidget()
        self.tab_splite.setObjectName("tab_splite")
        self.tab_splite.qbt0.clicked.connect(self.changeColorImg)
        self.tab_splite.qbt1.clicked.connect(self.changeColorImg)
        self.tabWidget.addTab(self.tab_splite, "拆分")


        self.tab_open_close = OpenClose(self.photo_shower)#QtWidgets.QWidget()
        self.tab_open_close.setObjectName("tab_open_close")
        #self.tab_open_close.sld_ksize.valueChanged.connect(self.photo_shower.showProcessedImg)
        self.tabWidget.addTab(self.tab_open_close, "开闭")

        self.tab_thresh = Thresh(self.photo_shower)
        self.tab_thresh.setObjectName('tab_thresh')
        self.tabWidget.addTab(self.tab_thresh, "Threshold")

        self.tab_filter = Filter(self.photo_shower)
        self.tab_filter.setObjectName('filter')
        self.tabWidget.addTab(self.tab_filter, '过滤')
        self.photo_shower.cur_index = 3
        #self.tabWidget.setCurrentIndex(3)

        self.tab_beautify = Beautify(self.photo_shower)
        self.tab_beautify.setObjectName('beautify')
        self.tabWidget.addTab(self.tab_beautify, '美化')
        #self.tabWidget.setCurrentIndex(4)

        self.tab_saver = Saver(self.photo_shower)
        self.tab_saver.setObjectName('saver')
        self.tabWidget.addTab(self.tab_saver, '保存')
        self.tabWidget.setCurrentIndex(4)

        self.horizontalLayoutWidget = QtWidgets.QWidget(self)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(20, 160, 1151, 811))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 5, 0, 10)  # left, top, right, bottom margins

        self.horizontalLayout.addWidget(self.photo_shower)
        self.horizontalLayout.setObjectName("horizontalLayout")

        #self.vlayout
        self.img_index = 0#0-3:gray,rgb
        self.is_gray = False

        self.vlayout_widget = QtWidgets.QWidget(self)
        self.vlayout_widget.setGeometry(QtCore.QRect(1100, 0, 351, 900))
        self.vlayout_widget.setObjectName("verticalLayoutWidget")
        # self.vlayout.addStretch(0)

        self.groupBox = QtWidgets.QGroupBox(self.vlayout_widget)
        self.groupBox.resize(420, 720)
        self.groupBox.setTitle("123")
        self.groupBox.setObjectName("groupBox")

        self.qbt0 = QtWidgets.QPushButton(self.groupBox)
        self.qbt0.setGeometry(QtCore.QRect(10, 20, 150, 30))
        self.qbt0.setText('一键处理')
        #self.qbt0 = QPushButton('原始图像-->灰度图像')
        self.qbt0.clicked.connect(self.processImg)
        #self.qbt0.move(0, 0)

        self.qbt1 = QtWidgets.QPushButton(self.groupBox)
        self.qbt1.setGeometry(QtCore.QRect(10, 60, 150, 30))
        self.qbt1.setText('批量处理')
        self.qbt1.clicked.connect(self.processImgsTogether)

        self.qbt2 = QtWidgets.QPushButton(self.groupBox)
        self.qbt2.setGeometry(QtCore.QRect(190, 20, 150, 30))
        self.qbt2.setText('转换图像')
        self.is_process = True
        self.qbt2.clicked.connect(self.restore)

        self.qbt3 = QtWidgets.QPushButton(self.groupBox)
        self.qbt3.setGeometry(QtCore.QRect(190, 60, 150, 30))
        self.qbt3.setText('保存')
        self.qbt3.clicked.connect(self.saveImg)


        # switch the current image
        self.label0 = QtWidgets.QLabel(self.groupBox)
        self.label0.setGeometry(QtCore.QRect(10, 100, 60, 20))
        self.label0.setText('当前图像:')

        self.text_img = QtWidgets.QLineEdit(self.groupBox)
        self.text_img.setGeometry(QtCore.QRect(70, 100, 210, 20))
        self.text_img.setText(self.img)

        self.qbt4 = QtWidgets.QPushButton(self.groupBox)
        self.qbt4.setGeometry(QtCore.QRect(290, 99, 50, 22))
        self.qbt4.setText('...')
        self.qbt4.clicked.connect(self.changeImg)



        self.slider = QtWidgets.QSlider(Qt.Horizontal, self.groupBox)
        self.slider.setGeometry(QtCore.QRect(40, 140, 200, 20))
        self.slider.setMinimum(0)
        self.slider.setMaximum(200)
        self.slider.setValue(100)
        self.slider.valueChanged.connect(self.sliderChange)


        self.label1 = QtWidgets.QLabel(self.groupBox)
        self.label1.setGeometry(QtCore.QRect(10, 140, 40, 20))
        self.label1.setText('比例:')

        self.text = QtWidgets.QLineEdit(self.groupBox)
        self.text.setGeometry(QtCore.QRect(270, 140, 40, 20))
        self.text.setText(str(self.slider.value()))
        self.text.textChanged.connect(self.kChange)

        # output
        self.edit_msg = QtWidgets.QTextEdit(self.groupBox)
        self.edit_msg.resize(330, 420)
        self.edit_msg.move(10, 170)
        self.msg = OutMsg(self.edit_msg)
        self.qbt5 = QPushButton('清理', self.groupBox)
        self.qbt5.setGeometry(QtCore.QRect(280, 595, 60, 20))
        self.qbt5.clicked.connect(self.msg.clear)
        self.tab_filter.addOuter(self.msg)

        self.vlayout = QtWidgets.QVBoxLayout(self.vlayout_widget)#groupBox)
        self.vlayout.setContentsMargins(0, 200, 0, 10)  # left, top, right, bottom margins

        #self.vlayout.addWidget(self.qbt0)
        #self.vlayout.addWidget(self.qbt1)
        self.groupBox.setFixedWidth(350)
        self.groupBox.setFixedHeight(620)
        self.vlayout.addWidget(self.groupBox)

        #self.vlayout.addWidget(self.groupBox)
        self.vlayout.addStretch()

        self.horizontalLayout.addLayout(self.vlayout)

        #self.retranslateUi(Form)
        #QtCore.QMetaObject.connectSlotsByName(Form)
        self.show()

    def retranslateUi(self):
        _translate = QtCore.QCoreApplication.translate
        self.setWindowTitle(_translate("Form", "Form"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab), _translate("Form", "Tab 1"))
        self.tabWidget.setTabText(self.tabWidget.indexOf(self.tab_2), _translate("Form", "Tab 2"))

    def changeColorImg(self):
        index = 0
        if not self.tab_splite.is_rgb:
            if not self.tab_splite.is_color:
                index = 4
        else:
            index = self.tab_splite.img_index + 1
        self.photo_shower.show(index)

    def changeRawGray(self):
        self.is_gray = not self.is_gray
        if self.is_gray:
            self.photo_shower.show(4)
            self.qbt0.setText('灰度图像-->彩色图像')
        else:
            self.photo_shower.show(0)
            self.qbt0.setText('彩色图像-->灰度图像')

    def processImgsTogether(self):
        if not os.path.exists('image.txt'):
            print("找不到待处理图片文件!")
        f = open('image.txt','r')
        for line in f.readlines():
            img_name = line.replace('\n','')
            if self.photo_shower.data.read(img_name):
                self.tab_open_close.initData(self.photo_shower)
                self.tab_thresh.initData(self.photo_shower)
                self.tab_filter.initData(self.photo_shower)
                self.processImg()
                pos = img_name.rfind('.')
                img_color_name = img_name[:pos] + '_color.jpg'
                img_black_name = img_name[:pos] + '_black.jpg'
                print(img_color_name,'\n', img_black_name)
                width = self.photo_shower.data.raw_width
                height = self.photo_shower.data.raw_height
                img_color = cv2.resize(self.photo_shower.data.img_show, (width, height))
                img_color = cv2.cvtColor(img_color, cv2.COLOR_BGR2RGB)
                cv2.imwrite(img_color_name, img_color)
                img_black = cv2.resize(self.photo_shower.data.img_binary, (width, height))
                cv2.imwrite(img_black_name, img_black)
                print('读取成功!')
            else:
                print('读取失败!')

        #self.processImg()
        # color_list = ['灰', '红', '绿', '蓝']
        # self.img_index = (self.img_index + 1) % 4
        # if 0 == self.img_index:
        #     self.photo_shower.show(4)
        # else:
        #     self.photo_shower.show(self.img_index)
        # self.qbt1.setText('图像切换:' + color_list[self.img_index] + '-->' + color_list[(self.img_index + 1) % 4])

    def restore(self):
        if self.is_process:
            self.qbt2.setText('原始图像')
            self.photo_shower.show(5)
        else:
            self.qbt2.setText('处理后图像')
            self.photo_shower.showProcessedImg()
        self.is_process = not self.is_process

    def sliderChange(self):
        self.text.setText(str(self.slider.value()))

    def kChange(self):
        self.slider.setValue(int(self.text.text()))

    def changeImg(self):
        self.img, chose = QtWidgets.QFileDialog.getOpenFileName(self,
                                                    "选择图像", self.path,
                                                    "ALL(*);;JPG (*.jpg);;PNG (*.png);;BMP(*.bmp)")
        #self.path = ''
        #print(self.img)
        if self.photo_shower.data.read(self.img):
            self.photo_shower.show(0)
            self.text_img.setText(self.img)
            self.tab_splite.initData(self.photo_shower)
            self.tab_open_close.initData(self.photo_shower)
            self.tab_thresh.initData(self.photo_shower)
            self.tab_filter.initData(self.photo_shower)
            pos = self.img.rfind('/')
            if pos > 0:
                self.path = self.img[:pos]
            else:
                pos = self.img.rfind('\\')
                if pos > 0:
                    self.path = self.img[:pos]
            print('读取成功!')
        else:
            print('读取失败!')

    def saveImg(self):
        data = self.photo_shower.data
        img_name = data.img_name
        pos = img_name.rfind('.')
        if pos > 0:
            img_type = img_name[pos:len(img_name)]
            img_type = '.jpg'
            img_name = img_name[:pos] + '_after' + img_type
            #img_name = img_name[:pos] + img_type
            print(data.raw_width, data.raw_height)
            img = cv2.resize(data.img_show, (data.raw_width, data.raw_height))
            cv2.imwrite(img_name, img)

    def processImg(self):
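        # One-click pipeline (as suggested by the method names below): channel split /
        # gray conversion, morphological open-close, thresholding, size filtering,
        # hole filling and rectangle fitting.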
        self.tab_splite.changeImg(1)#(self.tab_splite.cur_index)
        self.tab_open_close.processImg()
        self.tab_thresh.processImgByThresh()
        self.tab_filter.filterSize()
        self.tab_filter.fufillImg()
        self.tab_filter.rectangulazie()
Example #14
    def __init__(self, subj, session_params, cam_params=default_cam_params):
        self.params = self.DEFAULT_PARAMS
        self.cam_params = cam_params

        self.subj = subj
        self.name = time.strftime("%Y%m%d_%H%M%S")
        self.saver = Saver(self.subj, self.name)

        # kwargs
        self.params.update(session_params)
        # checks on kwargs
        assert self.params["min_isi"] > self.params["stim_duration"]  # otherwise same-side stims can overlap
        if self.params["lick_rule_side"]:
            assert self.params[
                "lick_rule_any"
            ]  # if must lick correct side in lick phase, then must lick at all in lick phase
        # extensions of kwargs
        if not isinstance(self.params["lam"], list):
            self.params["lam"] = [self.params["lam"]]
        if not isinstance(self.params["n_trials"], list):
            self.params["n_trials"] = [self.params["n_trials"]]
        assert len(self.params["lam"]) == len(self.params["n_trials"])
        self.params["subj_name"] = self.subj.name
        # self.params['phase_durations'][self.PHASE_STIM] = self.params['stim_phase_intro_duration'] + self.params['stim_phase_duration']
        self.cam_params.update(dict(save_name=pjoin(self.subj.subj_dir, self.name)))
        if self.params["hints_on"] is True:
            self.params["hints_on"] = 1e6
        # add them in
        self.__dict__.update(self.params)

        # hardware
        syncobj = multiprocessing.Value("d", 0)
        threading.Thread(target=update_sync, args=(syncobj,)).start()
        self.cam = PSEye(clock_sync_obj=syncobj, **self.cam_params)
        self.cam.start()
        self.lr = AnalogReader(
            saver=self.saver, lick_thresh=5.0, ports=["ai0", "ai1", "ai5", "ai6"], runtime_ports=[0, 1]
        )
        sd = self.stim_duration
        if self.stim_duration_override:
            sd = self.stim_duration_override
        self.stimulator = Valve(saver=self.saver, ports=["port0/line0", "port0/line1"], name="stimulator", duration=sd)
        self.spout = Valve(
            saver=self.saver, ports=["port0/line2", "port0/line3"], name="spout", duration=self.reward_duration
        )
        self.light = Light(saver=self.saver, port="ao0")
        self.speaker = Speaker(saver=self.saver)

        # trials init
        self.trials = self.generate_trials()

        # save session info
        self.saver.save_session(self)

        # runtime variables
        self.session_on = 0
        self.session_complete = False
        self.session_kill = False
        self.trial_on = 0
        self.trial_idx = -1
        self.valid_trial_idx = 0
        self.saving_trial_idx = 0
        self.session_runtime = -1
        self.trial_runtime = -1
        self.trial_outcomes = []
        self.trial_corrects = []  # for use in use_trials==False
        self.rewards_given = 0
        self.paused = 0
        self.holding = False
        self.percentages = [0.0, 0.0]  # L/R
        self.side_ns = [0, 0]  # L/R
        self.bias_correction_percentages = [0.0, 0.0]  # L/R
        self.perc_valid = 0.0
        self.iter_write_begin = now()
Example #15
def main():
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    load_weights_folder = os.path.expanduser(settings.load_weights_dir)
    model_path = os.path.join(load_weights_folder, "model.pth")
    model_dict = torch.load(model_path)

    # data
    datasets_dict = {
        "3d60": datasets.ThreeD60,
        "panosuncg": datasets.PanoSunCG,
        "stanford2d3d": datasets.Stanford2D3D,
        "matterport3d": datasets.Matterport3D
    }
    dataset = datasets_dict[settings.dataset]

    fpath = os.path.join(os.path.dirname(__file__), "datasets", "{}_{}.txt")

    test_file_list = fpath.format(settings.dataset, "test")

    test_dataset = dataset(settings.data_path,
                           test_file_list,
                           model_dict['height'],
                           model_dict['width'],
                           is_training=False)
    test_loader = DataLoader(test_dataset,
                             settings.batch_size,
                             False,
                             num_workers=settings.num_workers,
                             pin_memory=True,
                             drop_last=False)
    num_test_samples = len(test_dataset)
    num_steps = num_test_samples // settings.batch_size
    print("Num. of test samples:", num_test_samples, "Num. of steps:",
          num_steps, "\n")

    # network
    Net_dict = {"UniFuse": UniFuse, "Equi": Equi}
    Net = Net_dict[model_dict['net']]

    model = Net(model_dict['layers'],
                model_dict['height'],
                model_dict['width'],
                max_depth=test_dataset.max_depth_meters,
                fusion_type=model_dict['fusion'],
                se_in_fusion=model_dict['se_in_fusion'])

    model.to(device)
    model_state_dict = model.state_dict()
    model.load_state_dict(
        {k: v
         for k, v in model_dict.items() if k in model_state_dict})
    model.eval()

    evaluator = Evaluator(settings.median_align)
    evaluator.reset_eval_metrics()
    saver = Saver(load_weights_folder)
    pbar = tqdm.tqdm(test_loader)
    pbar.set_description("Testing")

    with torch.no_grad():
        for batch_idx, inputs in enumerate(pbar):

            equi_inputs = inputs["normalized_rgb"].to(device)

            cube_inputs = inputs["normalized_cube_rgb"].to(device)

            outputs = model(equi_inputs, cube_inputs)

            pred_depth = outputs["pred_depth"].detach().cpu()

            gt_depth = inputs["gt_depth"]
            mask = inputs["val_mask"]
            for i in range(gt_depth.shape[0]):
                evaluator.compute_eval_metrics(gt_depth[i:i + 1],
                                               pred_depth[i:i + 1],
                                               mask[i:i + 1])

            if settings.save_samples:
                saver.save_samples(inputs["rgb"], gt_depth, pred_depth, mask)

    evaluator.print(load_weights_folder)
Example #16
class Session(object):

    def __init__(self, session_params, ax_interactive=None):
        # incorporate kwargs
        self.params = session_params
        self.__dict__.update(self.params)
        self.verify_params()

        # sync
        self.sync_flag = multiprocessing.Value('b', False)
        self.sync_to_save = multiprocessing.Queue()

        # saver
        self.saver = Saver(self.subj, self.name, self, sync_flag=self.sync_flag)

        # hardware
        self.cam = PSEye(sync_flag=self.sync_flag, **self.cam_params)
        self.ar = AnalogReader(saver_obj_buffer=self.saver.buf, sync_flag=self.sync_flag, **self.ar_params)
        # communication
        self.ni = NI845x(i2c_on=self.imaging)

        # interactivity
        self.ax_interactive = ax_interactive
        
        # runtime variables
        self.notes = {}
        self.mask_idx = -1 #for reselecting mask
        self.session_on = 0
        self.on = False
        self.session_complete = False
        self.session_kill = False
        self.trial_flag = False
        self.trial_on = 0
        self.trial_off = 0
        self.trial_idx = -1
        self.stim_cycle_idx = 0
        self.paused = False
        self.deliver_override = False
        self.roi_pts = None
        self.eyelid_buffer = np.zeros(self.eyelid_buffer_size)-1
        self.eyelid_buffer_ts = np.zeros(self.eyelid_buffer_size)-1
        self.past_flag = False
        
        # sync
        self.sync_flag.value = True #trigger all processes to get time
        self.sync_val = now() #get this process's time
        procs = dict(saver=self.saver, cam=self.cam.pseye, ar=self.ar)
        sync_vals = {o:procs[o].sync_val.value for o in procs} #collect all process times
        sync_vals['session'] = self.sync_val
        self.sync_to_save.put(sync_vals)
        
        # more runtime, anything that must occur after sync
        _,self.im = self.cam.get()
        

    @property
    def session_runtime(self):
        if self.session_on != 0:
            return now()-self.session_on
        else:
            return -1
    @property
    def trial_runtime(self):
        if self.trial_on != False:
            return now()-self.trial_on
        else:
            return -1
    def name_as_str(self):
        return self.name.strftime('%Y%m%d%H%M%S')

    def verify_params(self):
        if self.name is None:
            self.name = pd.datetime.now()
        self.cam_params.update(dict(save_name=pjoin(self.subj.subj_dir, self.name_as_str()+'_cams.h5')))

    def pause(self, val):
        self.paused = val
        if self.imaging:
            if val == True:
                self.stop_acq()
            elif val == False:
                self.start_acq()

    def update_licked(self):
        l = self.ar.licked

    def start_acq(self):
        if self.imaging:
            self.ni.write_dio(LINE_SI_ON, 1)
            self.ni.write_dio(LINE_SI_ON, 0)
    def stop_acq(self):
        if self.imaging:
            self.ni.write_dio(LINE_SI_OFF, 1)
            self.ni.write_dio(LINE_SI_OFF, 0)

    def wait(self, dur, t0=None):
        if t0 is None:
            t0 = now()
        while now()-t0 < dur:
            pass

    def next_stim_type(self, inc=True):
        st = self.cycle[self.stim_cycle_idx]
        if inc:
            self.stim_cycle_idx += 1
            if self.stim_cycle_idx == len(self.cycle):
                self.stim_cycle_idx = 0
        return st
        
    @property
    def current_stim_state(self):
        return STIM_TYPES[self.cycle[self.stim_cycle_idx]]
        
    def deliver_trial(self):
        while self.on:
            if self.trial_flag:

                # prepare trial
                self.trial_idx += 1
                self.trial_on = now()
                self.cam.set_flush(False)
                kind = self.next_stim_type()
           
                # deliver trial
                self.wait(self.intro)
                cs_time,us_time = self.send_stim(kind)
                
                # replay
                self.wait(self.display_lag)
                self.past_flag = [cs_time[1], us_time[1]]
                
                # finish trial
                self.wait(self.trial_duration, t0=self.trial_on)
                self.trial_off = now()

                # save trial info
                self.cam.set_flush(True)
                
                trial_dict = dict(
                    start=self.trial_on,
                    end=self.trial_off,
                    cs_ts0=cs_time[0],
                    cs_ts1=cs_time[1],
                    us_ts0=us_time[0],
                    us_ts1=us_time[1],
                    kind=kind,
                    idx=self.trial_idx,
                )
                self.saver.write('trials',trial_dict)
                
                self.trial_flag = False
                self.trial_on = False
    
    def dummy_puff(self):
        self.ni.write_dio(LINE_US, 1)
        self.wait(self.us_dur)
        self.ni.write_dio(LINE_US, 0)
    def dummy_light(self, state):
        self.ni.write_dio(LINE_CS, state)
    def send_stim(self, kind):
        if kind == CS:
            t = (now(), now2())
            self.ni.write_i2c('CS_ON')
            self.ni.write_dio(LINE_CS, 1)
            self.wait(self.cs_dur)
            self.ni.write_i2c('CS_OFF')
            self.ni.write_dio(LINE_CS, 0)
            stim_time = [t,(-1,-1)]

        elif kind == US:
            self.wait(self.cs_dur) # for trial continuity
            t = (now(), now2())
            self.ni.write_i2c('US_ON')
            self.ni.write_dio(LINE_US, 1)
            self.wait(self.us_dur)
            self.ni.write_i2c('US_OFF')
            self.ni.write_dio(LINE_US, 0)
            stim_time = [(-1,-1),t]

        elif kind == CSUS:
            t_cs = (now(), now2())
            self.ni.write_i2c('CS_ON')
            self.ni.write_dio(LINE_CS, 1)
            self.wait(self.csus_gap)
            t_us = (now(), now2())
            self.ni.write_i2c('US_ON')
            self.ni.write_dio(LINE_US, 1)
            self.wait(self.us_dur) # assumes US ends before CS does
            self.ni.write_i2c('US_OFF')
            self.ni.write_dio(LINE_US, 0)
            self.wait(self.cs_dur, t0=t_cs[0])
            self.ni.write_i2c('CS_OFF')
            self.ni.write_dio(LINE_CS, 0)
            stim_time = [t_cs,t_us]

        return stim_time

    def acquire_mask(self):
        x,y = self.cam.resolution[0]
        if self.roi_pts is None:
            self.roi_pts = [[0,0],[x,0],[x,y],[0,y]]
            logging.warning('No ROI found, using default')
        self.mask_idx += 1
        pts_eye = np.array(self.roi_pts, dtype=np.int32)
        mask_eye = np.zeros([y,x], dtype=np.int32)
        cv2.fillConvexPoly(mask_eye, pts_eye, (1,1,1), lineType=cv2.LINE_AA)
        self.mask = mask_eye
        self.mask_flat = self.mask.reshape((1,-1))
        self.saver.write('mask{}'.format(self.mask_idx), self.mask)
        logging.info('New mask set.')
        
    def run(self):
        try:
            self.acquire_mask()
            self.session_on = now()
            self.on = True
            self.ar.begin_saving()
            self.cam.begin_saving()
            self.cam.set_flush(True)
            self.start_acq()
        
            # main loop
            threading.Thread(target=self.deliver_trial).start()
            threading.Thread(target=self.update_eyelid).start()
            while True:

                if self.trial_on or self.paused:
                    continue

                if self.session_kill:
                    break
                
                moving = self.determine_motion()
                eyelid = self.determine_eyelid()
                
                if self.deliver_override or ((now()-self.trial_off>self.min_iti) and (not moving) and (eyelid)):
                    self.trial_flag = True
                    self.deliver_override = False

            self.end()

        except:
            logging.error('Session has encountered an error!')
            raise
    def determine_eyelid(self):
        return np.mean(self.eyelid_buffer[-self.eyelid_window:]) < self.eyelid_thresh
    def update_eyelid(self):
        while self.on:
            imts,im = self.cam.get()
            if im is None:
                continue
            self.im = im
            roi_data = self.extract(self.im)
            self.eyelid_buffer = np.roll(self.eyelid_buffer, -1)
            self.eyelid_buffer_ts = np.roll(self.eyelid_buffer_ts, -1)
            self.eyelid_buffer[-1] = roi_data
            self.eyelid_buffer_ts[-1] = imts
    def extract(self, fr):
        if fr is None:
            return 0
        flat = fr.reshape((1,-1)).T
        dp = (self.mask_flat.dot(flat)).T
        return np.squeeze(dp/self.mask_flat.sum(axis=-1))
    def determine_motion(self):
        return self.ar.moving
   
    def end(self):
        self.on = False
        self.stop_acq()
        to_end = [self.ar, self.cam]
        if self.imaging:
            to_end.append(self.ni)
        for te in to_end:
            te.end()
            time.sleep(0.100)
        self.saver.end(notes=self.notes)
        self.session_on = False
            
    def get_code(self):
        py_files = [pjoin(d,f) for d,_,fs in os.walk(os.getcwd()) for f in fs if f.endswith('.py') and not f.startswith('__')]
        code = {}
        for pf in py_files:
            with open(pf, 'r') as f:
                code[pf] = f.read()
        return json.dumps(code)
Example #17
def main(_=None):
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        d = os.path.dirname(args.log_file)
        if not os.path.exists(d):
            os.makedirs(d)
        if not args.continue_training:
            with open(args.log_file, 'w') as f:
                f.write('')
        logging.basicConfig(filename=args.log_file, level=logging.DEBUG)
        logging.getLogger().addHandler(logging.StreamHandler())

        game_handler = GameStateHandler(
                args.rom_directory + args.rom_name,
                random_seed=args.random_seed,
                frame_skip=args.frame_skip,
                use_sdl=args.use_sdl,
                repeat_action_probability=args.repeat_action_probability,
                minimum_actions=args.minimum_action_set,
                test_mode=args.test_mode,
                image_processing=lambda x: crop_and_resize(x, args.image_height, args.image_width, args.cut_top))
        num_actions = game_handler.num_actions

        if args.optimizer == 'rmsprop':
            optimizer = tf.train.RMSPropOptimizer(
                    learning_rate=args.learning_rate,
                    decay=args.decay,
                    momentum=0.0,
                    epsilon=args.rmsprop_epsilon)

        if not args.multi_gpu:
            if args.double_dqn:
                net = qnetwork.DualDeepQNetwork(args.image_height, args.image_width, sess, num_actions,
                                                args.state_frames, args.discount_factor, args.target_net_refresh_rate,
                                                net_type=args.net_type, optimizer=optimizer)
            else:
                net = qnetwork.DeepQNetwork(args.image_height, args.image_width, sess, num_actions, args.state_frames,
                                            args.discount_factor, net_type=args.net_type, optimizer=optimizer)
        else:
            net = multi_gpu_qnetwork.MultiGPUDualDeepQNetwork(args.image_height, args.image_width, sess, num_actions,
                                                              args.state_frames, args.discount_factor,
                                                              optimizer=optimizer, gpus=[0, 1, 2, 3])

        saver = Saver(sess, args.data_dir, args.continue_training)
        if saver.replay_memory_found():
            replay_memory = saver.get_replay_memory()
        else:
            if args.test_mode:
                logging.error('NO SAVED NETWORKS IN TEST MODE!!!')
            replay_memory = ReplayMemoryManager(args.image_height, args.image_width, args.state_frames,
                                                args.replay_memory_size, reward_clip_min=args.reward_clip_min,
                                                reward_clip_max=args.reward_clip_max)

        # todo: add parameters to handle monitor
        monitor = Monitoring(log_train_step_every=100, smooth_episode_scores_over=50)

        agent = Agent(
                game_handler=game_handler,
                qnetwork=net,
                replay_memory=replay_memory,
                saver=saver,
                monitor=monitor,
                train_freq=args.train_freq,
                test_mode=args.test_mode,
                batch_size=args.batch_size,
                save_every_x_episodes=args.saving_freq)

        sess.run(tf.initialize_all_variables())
        saver.restore(args.data_dir)
        start_epsilon = max(args.final_epsilon,
                            args.start_epsilon - saver.get_start_frame() * (args.start_epsilon - args.final_epsilon) / args.exploration_duration)
        exploring_duration = max(args.exploration_duration - saver.get_start_frame(), 1)

        if args.test_mode:
            agent.populate_replay_memory(args.state_frames, force_early_stop=True)
            agent.play_in_test_mode(args.epsilon_in_test_mode)
        else:
            agent.populate_replay_memory(args.min_replay_memory)
            agent.play(train_steps_limit=args.number_of_train_steps, start_eps=start_epsilon,
                       final_eps=args.final_epsilon, exploring_duration=exploring_duration)
Example #18
                if trash:
                    [trh.extract() for trh in trash]
            except IndexError:
                trash = body.findAll(tag)
                if trash:
                    [trh.extract() for trh in trash]
        comments = body.findAll(text=lambda text:isinstance(text, Comment))
        [comment.extract() for comment in comments]
        return body

    # get the settings
    def getSetings(self):
        if 'http://' not in self.url: self.url = 'http://' + self.url
        n = self.url.replace('www.', '').split('/')[2]
        try:
            self.settings = SITES[n]
        except KeyError:
            self.settings = SITES['default']

    # split id/class:name on the colon
    def indent(self, txt):
        return txt.split(':')

# test on 4 sites, 10 URLs from each
if __name__ == '__main__':
    from saver import Saver
    for line in open('testLink.txt'):
        line = line.rstrip()
        test = Parser(line)
        f = Saver(line)
        f.saveFile(test.result())
Example #19
# -*- coding: utf-8 -*-
from parser import Parser
from saver import Saver
import sys

try:
    url = sys.argv[1]
except IndexError:
    print('URL not provided')
    sys.exit()

obj = Parser(url)
text = obj.result()

f = Saver(url)
f.saveFile(text)
Example #20
 def __save__(self):
     saver = Saver()
     for obj in self.mp:
         saver.save_in_file(obj, self.mp[obj])
Example #21
    def __init__(self, json_data, dir):
        Saver.__init__(self, json_data, dir, QzonePath.BLOG)

        self._filename = "category_info.json"

        self._cate_info = self.json_data["data"]["cateInfo"]
Example #22
def main():
    # parse options
    parser = TrainOptions()
    opts = parser.parse()

    # data loader
    print('\n--- load dataset ---')
    dataset = dataset_multi(opts)
    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=opts.batch_size,
                                               shuffle=True,
                                               num_workers=opts.nThreads)

    # model
    print('\n--- load model ---')
    model = SAVI2I(opts)
    model.setgpu(opts.gpu)
    if opts.resume is None:
        model.initialize()
        ep0 = -1
        total_it = 0
    else:
        ep0, total_it = model.resume(opts.resume)
    model.set_scheduler(opts, last_ep=ep0)
    ep0 += 1
    print('start the training at epoch %d' % (ep0))

    # saver for display and output
    saver = Saver(opts)

    # train
    print('\n--- train ---')
    max_it = 1000000
    for ep in range(ep0, opts.n_ep):
        for it, (images, c_org, c_org_mask,
                 c_org_id) in enumerate(train_loader):
            # input data
            images = torch.cat(images, dim=0)
            images = images.cuda(opts.gpu).detach()
            c_org = torch.cat(c_org, dim=0)
            c_org = c_org.cuda(opts.gpu).detach()
            c_org_mask = torch.cat(c_org_mask, dim=0)
            c_org_mask = c_org_mask.cuda(opts.gpu).detach()
            c_org_id = torch.cat(c_org_id, dim=0)
            c_org_id = c_org_id.cuda(opts.gpu).detach()

            # update model
            if (it + 1) % opts.d_iter != 0 and it < len(train_loader) - 2:
                model.update_D_content(images, c_org)
                continue
            else:
                model.update_D(images, c_org, c_org_mask, c_org_id)
                model.update_EFG()

            print('total_it: %d (ep %d, it %d), lr %08f' %
                  (total_it, ep, it, model.gen_opt.param_groups[0]['lr']))
            total_it += 1
            if total_it >= max_it:
                saver.write_img(-1, model)
                saver.write_model(-1, max_it, model)
                break

        # decay learning rate
        if opts.n_ep_decay > -1:
            model.update_lr()

        # save result image
        saver.write_img(ep, model)

        # Save network weights
        saver.write_model(ep, total_it, model)

    return
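# Side note (a hedged illustration, not part of the original script): with the
# update rule above, only every opts.d_iter-th iteration (plus the last couple of
# iterations per epoch) runs the full update_D + update_EFG step; all other
# iterations only update the content discriminator. For d_iter = 3:
for it in range(8):
    if (it + 1) % 3 != 0:
        print(it, '-> update_D_content only')
    else:
        print(it, '-> update_D + update_EFG')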
Example #23
    print("""
    Running with arguments:
        Input file: %s
        Use proxy: %s
        Requests per minute: %0.2f (per proxy, if a proxy is in use)
        Max. proxy usage: %d (ignored if fewer than 2 proxies are in use)
        Download files: %s
    """ % (INPUT_FILENAME,  USE_PROXY, REQUESTS_PER_MINUTE,
                                        MAX_PROXY_USAGE_NUMBER, DO_DOWNLOAD))


    if USE_PROXY:
        proxy_list.load_from_file(PROXIES_FILENAME)

    saver = Saver(autoflush=False)


    # mapping sites to their respective classes
    site_mapping = [
            #((types,), processing function)
            [("GBA", "GBP"),
                SiteGBProcessor(saver, REQUESTS_PER_MINUTE, MAX_PROXY_USAGE_NUMBER,
                    do_download=DO_DOWNLOAD)],
            [("FIA", "FIP"),
                SiteFIProcessor(saver, REQUESTS_PER_MINUTE, MAX_PROXY_USAGE_NUMBER,
                    do_download=DO_DOWNLOAD, proxy_list=[])],
            [("WO"),
                SiteWOProcessor(saver, REQUESTS_PER_MINUTE, MAX_PROXY_USAGE_NUMBER,
                    do_download=DO_DOWNLOAD)],
            [("EPA"),
Example #24
from preprocessor import Preprocessor
from posprocessor import Posprocessor
from analyser import Analyser
from listener import Listener
from saver import Saver
import pandas as pd
import json

newSaver = Saver()

def start_to_listen():
    with open('credentials/twitter-api.json') as json_file:
        #Open credentials and insert api keys
        twitter_apiKeys = json.load(json_file)

    print("\n ===== Hey there, I'll start to listen to tweets about Joker ===== \n")
    print(" Press ctrl+C when you want me to stop!! ")
    mylistener = Listener(twitter_apiKeys)  
    mylistener.set_authentication()
    mylistener.start_listening()


def start_to_preprocess():
    pp = Preprocessor()
    data = pd.DataFrame(list(newSaver.get_collection('realTimeTweets')))
    pp.clean_frame(data)

    ##use this if you want to create a csv file to visualize
    # with open('tmpcsv.csv', 'w') as tmpcsv:
    # tmpcsv.writelines(data.to_csv())
    plt.tight_layout(pad=2.0, w_pad=0.0, h_pad=0.0)
    plt.savefig(fname, dpi=150)
    plt.close('all')


Beispiel #25
0
def preprocess(x1, x2, y):
    """Preprocess training data."""
    return (x1.astype('float32') / 255,
            x2.astype('float32') / 255,
            y.astype('float32'))


if __name__ == '__main__':
    restore_folder = sys.argv[1]
    saver = Saver(restore_folder)
    ckpt_info = saver.get_ckpt_info()
    model_opt = ckpt_info['model_opt']
    data_opt = ckpt_info['data_opt']
    ckpt_fname = ckpt_info['ckpt_fname']
    step = ckpt_info['step']
    model_id = ckpt_info['model_id']

    log.info('Building model')
    m = get_model(model_opt)

    log.info('Loading dataset')
    dataset = get_dataset(data_opt)

    sess = tf.Session()
    saver.restore(sess, ckpt_fname)
Beispiel #26
0
 def __init__(self, json_data, directory):
     Saver.__init__(self, json_data, directory, QzonePath.PHOTO)
Beispiel #27
0
    def setupUi(self):
        self.setObjectName("Form")
        self.resize(1600, 1000)
        self.tabWidget = QtWidgets.QTabWidget(self)
        self.tabWidget.setGeometry(QtCore.QRect(20, 20, 1151, 131))
        self.tabWidget.setObjectName("tabWidget")

        self.img = '003.jpg'
        self.path = './'
        self.photo_shower = PhotoShower()
        # self.photo_shower.data.read(self.img)
        # self.photo_shower.show(0)
        # self.photo_shower.resize(1024, 768)

        self.tab_splite = GrayParams(self.photo_shower)#QtWidgets.QWidget()
        self.tab_splite.setObjectName("tab_splite")
        self.tab_splite.qbt0.clicked.connect(self.changeColorImg)
        self.tab_splite.qbt1.clicked.connect(self.changeColorImg)
        self.tabWidget.addTab(self.tab_splite, "拆分")


        self.tab_open_close = OpenClose(self.photo_shower)#QtWidgets.QWidget()
        self.tab_open_close.setObjectName("tab_open_close")
        #self.tab_open_close.sld_ksize.valueChanged.connect(self.photo_shower.showProcessedImg)
        self.tabWidget.addTab(self.tab_open_close, "开闭")

        self.tab_thresh = Thresh(self.photo_shower)
        self.tab_thresh.setObjectName('tab_thresh')
        self.tabWidget.addTab(self.tab_thresh, "Threshold")

        self.tab_filter = Filter(self.photo_shower)
        self.tab_filter.setObjectName('filter')
        self.tabWidget.addTab(self.tab_filter, '过滤')
        self.photo_shower.cur_index = 3
        #self.tabWidget.setCurrentIndex(3)

        self.tab_beautify = Beautify(self.photo_shower)
        self.tab_beautify.setObjectName('beautify')
        self.tabWidget.addTab(self.tab_beautify, '美化')
        #self.tabWidget.setCurrentIndex(4)

        self.tab_saver = Saver(self.photo_shower)
        self.tab_saver.setObjectName('saver')
        self.tabWidget.addTab(self.tab_saver, '保存')
        self.tabWidget.setCurrentIndex(4)

        self.horizontalLayoutWidget = QtWidgets.QWidget(self)
        self.horizontalLayoutWidget.setGeometry(QtCore.QRect(20, 160, 1151, 811))
        self.horizontalLayoutWidget.setObjectName("horizontalLayoutWidget")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.horizontalLayoutWidget)
        self.horizontalLayout.setContentsMargins(0, 5, 0, 10)  # left, top, right, bottom margins

        self.horizontalLayout.addWidget(self.photo_shower)
        self.horizontalLayout.setObjectName("horizontalLayout")

        #self.vlayout
        self.img_index = 0#0-3:gray,rgb
        self.is_gray = False

        self.vlayout_widget = QtWidgets.QWidget(self)
        self.vlayout_widget.setGeometry(QtCore.QRect(1100, 0, 351, 900))
        self.vlayout_widget.setObjectName("verticalLayoutWidget")
        # self.vlayout.addStretch(0)

        self.groupBox = QtWidgets.QGroupBox(self.vlayout_widget)
        self.groupBox.resize(420, 720)
        self.groupBox.setTitle("123")
        self.groupBox.setObjectName("groupBox")

        self.qbt0 = QtWidgets.QPushButton(self.groupBox)
        self.qbt0.setGeometry(QtCore.QRect(10, 20, 150, 30))
        self.qbt0.setText('一键处理')
        #self.qbt0 = QPushButton('原始图像-->灰度图像')
        self.qbt0.clicked.connect(self.processImg)
        #self.qbt0.move(0, 0)

        self.qbt1 = QtWidgets.QPushButton(self.groupBox)
        self.qbt1.setGeometry(QtCore.QRect(10, 60, 150, 30))
        self.qbt1.setText('批量处理')
        self.qbt1.clicked.connect(self.processImgsTogether)

        self.qbt2 = QtWidgets.QPushButton(self.groupBox)
        self.qbt2.setGeometry(QtCore.QRect(190, 20, 150, 30))
        self.qbt2.setText('转换图像')
        self.is_process = True
        self.qbt2.clicked.connect(self.restore)

        self.qbt3 = QtWidgets.QPushButton(self.groupBox)
        self.qbt3.setGeometry(QtCore.QRect(190, 60, 150, 30))
        self.qbt3.setText('保存')
        self.qbt3.clicked.connect(self.saveImg)


        # switch the current image
        self.label0 = QtWidgets.QLabel(self.groupBox)
        self.label0.setGeometry(QtCore.QRect(10, 100, 60, 20))
        self.label0.setText('当前图像:')

        self.text_img = QtWidgets.QLineEdit(self.groupBox)
        self.text_img.setGeometry(QtCore.QRect(70, 100, 210, 20))
        self.text_img.setText(self.img)

        self.qbt4 = QtWidgets.QPushButton(self.groupBox)
        self.qbt4.setGeometry(QtCore.QRect(290, 99, 50, 22))
        self.qbt4.setText('...')
        self.qbt4.clicked.connect(self.changeImg)



        self.slider = QtWidgets.QSlider(Qt.Horizontal, self.groupBox)
        self.slider.setGeometry(QtCore.QRect(40, 140, 200, 20))
        self.slider.setMinimum(0)
        self.slider.setMaximum(200)
        self.slider.setValue(100)
        self.slider.valueChanged.connect(self.sliderChange)


        self.label1 = QtWidgets.QLabel(self.groupBox)
        self.label1.setGeometry(QtCore.QRect(10, 140, 40, 20))
        self.label1.setText('比例:')

        self.text = QtWidgets.QLineEdit(self.groupBox)
        self.text.setGeometry(QtCore.QRect(270, 140, 40, 20))
        self.text.setText(str(self.slider.value()))
        self.text.textChanged.connect(self.kChange)

        # output
        self.edit_msg = QtWidgets.QTextEdit(self.groupBox)
        self.edit_msg.resize(330, 420)
        self.edit_msg.move(10, 170)
        self.msg = OutMsg(self.edit_msg)
        self.qbt5 = QPushButton('清理', self.groupBox)
        self.qbt5.setGeometry(QtCore.QRect(280, 595, 60, 20))
        self.qbt5.clicked.connect(self.msg.clear)
        self.tab_filter.addOuter(self.msg)

        self.vlayout = QtWidgets.QVBoxLayout(self.vlayout_widget)#groupBox)
        self.vlayout.setContentsMargins(0, 200, 0, 10)  # left, top, right, bottom margins

        #self.vlayout.addWidget(self.qbt0)
        #self.vlayout.addWidget(self.qbt1)
        self.groupBox.setFixedWidth(350)
        self.groupBox.setFixedHeight(620)
        self.vlayout.addWidget(self.groupBox)

        #self.vlayout.addWidget(self.groupBox)
        self.vlayout.addStretch()

        self.horizontalLayout.addLayout(self.vlayout)

        #self.retranslateUi(Form)
        #QtCore.QMetaObject.connectSlotsByName(Form)
        self.show()
Beispiel #28
0
import os
import sys

from validators import DateValidator, resolution_validation
from saver import Saver

if __name__ == "__main__":

    WARNING = ('Input parameters should be like\n\'python3 '
               'main.py 05 2017 1920x1080\'.')

    if len(sys.argv) == 4:
        month, year, resolution = sys.argv[1:4]
        validator = DateValidator()
        if (validator.date_validation(month, year)
                and resolution_validation(resolution)):
            page_getter = PageGetter()
            page = page_getter.get_page(month, year)
            page_parser = PageParser(page)
            name_link_dict = page_parser.find_all_wallpapers(resolution)
            if len(name_link_dict) < 1:
                print('You picked a wrong resolution.')
            base_dir = os.path.dirname(__file__)
            wall_dir = os.path.dirname(base_dir)
            path = os.path.join(wall_dir, 'Wallpapers')
            if not os.path.isdir(path):
                os.mkdir(path)
            saver = Saver(path)
            saver.save_wallpapers(name_link_dict)
        else:
            print(WARNING)
    else:
        print(WARNING)
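Only Saver(path) and save_wallpapers(name_link_dict) are exercised above. One possible sketch, assuming name_link_dict maps an output file name to a direct image URL (the real class may differ):

import os
import requests

class Saver(object):
    def __init__(self, path):
        self.path = path

    def save_wallpapers(self, name_link_dict):
        for name, link in name_link_dict.items():
            try:
                response = requests.get(link, timeout=30)
                response.raise_for_status()
            except requests.exceptions.RequestException as err:
                print('Skipping {}: {}'.format(name, err))
                continue
            # write the raw image bytes into the wallpaper directory
            with open(os.path.join(self.path, name), 'wb') as image_file:
                image_file.write(response.content)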
Beispiel #29
0
    if load_process:
        if user != '' and password != '':
            pass
        else:
            print(
                'The user (-u) and the password (-p) options should be specified for proper loading (-l)'
            )

    pred = predict(input_list)
    results = []
    for predict, input_string in zip(pred, input_list):
        if validate(predict):
            result = list(zip(predict, input_string.split()))
            results.append(result)
        else:
            print('Invalid string:')
            print(input_string)
    if load_process or export_process:
        db = DBWrapper(user, password)
        if load_process:
            publications = [compose_publication(result) for result in results]
            db.add(publications)
        if export_process:
            data = db.get()
            exporter = Saver()
            exporter.save(data, "xlsx", export_file_name)
    else:
        for result in results:
            print(group_publication(result))
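The export branch above only needs Saver().save(data, "xlsx", export_file_name). A minimal format-dispatching sketch built on pandas could look like this; the layout of data and the set of supported formats are assumptions:

import pandas as pd

class Saver(object):
    def save(self, data, save_format, filename):
        frame = pd.DataFrame(data)
        if save_format == 'xlsx':
            # to_excel needs an Excel writer backend such as openpyxl
            frame.to_excel(filename, index=False)
        elif save_format == 'csv':
            frame.to_csv(filename, index=False)
        else:
            raise ValueError('unsupported save format: %s' % save_format)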
Beispiel #30
0
def train(autoencoder_config_path, probclass_config_path,
          restore_manager: RestoreManager, log_dir_root, datasets: Datasets,
          train_flags: TrainFlags, ckpt_interval_hours: float,
          description: str):
    ae_config, ae_config_rel_path = config_parser.parse(
        autoencoder_config_path)
    pc_config, pc_config_rel_path = config_parser.parse(probclass_config_path)
    print_configs(('ae_config', ae_config), ('pc_config', pc_config))

    continue_in_ckpt_dir = restore_manager and restore_manager.continue_in_ckpt_dir
    if continue_in_ckpt_dir:
        logdir = restore_manager.log_dir
    else:
        logdir = logdir_helpers.create_unique_log_dir(
            [ae_config_rel_path, pc_config_rel_path],
            log_dir_root,
            restore_dir=restore_manager.ckpt_dir if restore_manager else None)
    print(_LOG_DIR_FORMAT.format(logdir))

    if description:
        _write_to_sheets(logdir_helpers.log_date_from_log_dir(logdir),
                         ae_config_rel_path,
                         pc_config_rel_path,
                         description,
                         git_ref=_get_git_ref(),
                         log_dir_root=log_dir_root,
                         is_continue=continue_in_ckpt_dir)

    ae_cls = autoencoder.get_network_cls(ae_config)
    pc_cls = probclass.get_network_cls(pc_config)

    # Instantiate autoencoder and probability classifier
    ae = ae_cls(ae_config)
    pc = pc_cls(pc_config, num_centers=ae_config.num_centers)

    # train ---
    ip_train = inputpipeline.InputPipeline(
        inputpipeline.get_dataset(datasets.train),
        ae_config.crop_size,
        batch_size=ae_config.batch_size,
        shuffle=False,
        num_preprocess_threads=NUM_PREPROCESS_THREADS,
        num_crops_per_img=NUM_CROPS_PER_IMG)
    x_train = ip_train.get_batch()

    enc_out_train = ae.encode(
        x_train, is_training=True)  # qbar is masked by the heatmap
    x_out_train = ae.decode(enc_out_train.qbar, is_training=True)
    # stop_gradient is beneficial for training. it prevents multiple gradients flowing into the heatmap.
    pc_in = tf.stop_gradient(enc_out_train.qbar)
    bc_train = pc.bitcost(pc_in,
                          enc_out_train.symbols,
                          is_training=True,
                          pad_value=pc.auto_pad_value(ae))
    bpp_train = bits.bitcost_to_bpp(bc_train, x_train)
    d_train = Distortions(ae_config, x_train, x_out_train, is_training=True)
    # summing over channel dimension gives 2D heatmap
    heatmap2D = (tf.reduce_sum(enc_out_train.heatmap, 1)
                 if enc_out_train.heatmap is not None else None)

    # loss ---
    total_loss, H_real, pc_comps, ae_comps = get_loss(ae_config, ae, pc,
                                                      d_train.d_loss_scaled,
                                                      bc_train,
                                                      enc_out_train.heatmap)
    train_op = get_train_op(ae_config, pc_config, ip_train, pc.variables(),
                            total_loss)

    # test ---
    with tf.name_scope('test'):
        ip_test = inputpipeline.InputPipeline(
            inputpipeline.get_dataset(datasets.test),
            ae_config.crop_size,
            batch_size=ae_config.batch_size,
            num_preprocess_threads=NUM_PREPROCESS_THREADS,
            num_crops_per_img=1,
            big_queues=False,
            shuffle=False)
        x_test = ip_test.get_batch()
        enc_out_test = ae.encode(x_test, is_training=False)
        x_out_test = ae.decode(enc_out_test.qhard, is_training=False)
        bc_test = pc.bitcost(enc_out_test.qhard,
                             enc_out_test.symbols,
                             is_training=False,
                             pad_value=pc.auto_pad_value(ae))
        bpp_test = bits.bitcost_to_bpp(bc_test, x_test)
        d_test = Distortions(ae_config, x_test, x_out_test, is_training=False)

    try:  # Try to get codec distance for current dataset
        codec_distance_ms_ssim = CodecDistance(datasets.codec_distance,
                                               codec='bpg',
                                               metric='ms-ssim')
        get_distance = functools_ext.catcher(ValueError,
                                             handler=functools_ext.const(
                                                 np.nan),
                                             f=codec_distance_ms_ssim.distance)
        get_distance = functools_ext.compose(np.float32,
                                             get_distance)  # cast to float32
        d_BPG_test = tf.py_func(get_distance, [bpp_test, d_test.ms_ssim],
                                tf.float32,
                                stateful=False,
                                name='d_BPG')
        d_BPG_test.set_shape(())
    except CodecDistanceReadException as e:
        print('Cannot compute CodecDistance: {}'.format(e))
        d_BPG_test = tf.constant(np.nan, shape=(), name='ConstNaN')

    # ---

    train_logger = Logger()
    test_logger = Logger()
    distortion_name = ae_config.distortion_to_minimize

    train_logger.add_summaries(d_train.summaries_with_prefix('train'))
    # Visualize components of losses
    train_logger.add_summaries([
        tf.summary.scalar('train/PC_loss/{}'.format(name), comp)
        for name, comp in pc_comps
    ])
    train_logger.add_summaries([
        tf.summary.scalar('train/AE_loss/{}'.format(name), comp)
        for name, comp in ae_comps
    ])
    train_logger.add_summaries([tf.summary.scalar('train/bpp', bpp_train)])
    train_logger.add_console_tensor('loss={:.3f}', total_loss)
    train_logger.add_console_tensor('ms_ssim={:.3f}', d_train.ms_ssim)
    train_logger.add_console_tensor('bpp={:.3f}', bpp_train)
    train_logger.add_console_tensor('H_real={:.3f}', H_real)

    test_logger.add_summaries(d_test.summaries_with_prefix('test'))
    test_logger.add_summaries([
        tf.summary.scalar('test/bpp', bpp_test),
        tf.summary.scalar('test/distance_BPG_MS-SSIM', d_BPG_test),
        tf.summary.image('test/x_in',
                         prep_for_image_summary(x_test, n=3, name='x_in')),
        tf.summary.image('test/x_out',
                         prep_for_image_summary(x_out_test, n=3, name='x_out'))
    ])
    if heatmap2D is not None:
        test_logger.add_summaries([
            tf.summary.image(
                'test/hm',
                prep_for_grayscale_image_summary(heatmap2D,
                                                 n=3,
                                                 autoscale=True,
                                                 name='hm'))
        ])

    test_logger.add_console_tensor('ms_ssim={:.3f}', d_test.ms_ssim)
    test_logger.add_console_tensor('bpp={:.3f}', bpp_test)
    test_logger.add_summaries([
        tf.summary.histogram('centers', ae.get_centers_variable()),
        tf.summary.histogram(
            'test/qbar', enc_out_test.qbar[:ae_config.batch_size // 2, ...])
    ])
    test_logger.add_console_tensor('d_BPG={:.6f}', d_BPG_test)
    test_logger.add_console_tensor(Logger.Numpy1DFormatter('centers={}'),
                                   ae.get_centers_variable())

    print('Starting session and queues...')
    with tf_helpers.start_queues_in_sess(
            init_vars=restore_manager is None) as (sess, coord):
        train_logger.finalize_with_sess(sess)
        test_logger.finalize_with_sess(sess)

        if restore_manager:
            restore_manager.restore(sess)

        saver = Saver(Saver.ckpt_dir_for_log_dir(logdir),
                      max_to_keep=1,
                      keep_checkpoint_every_n_hours=ckpt_interval_hours)

        train_loop(ae_config,
                   sess,
                   coord,
                   train_op,
                   train_logger,
                   test_logger,
                   train_flags,
                   logdir,
                   saver,
                   is_restored=restore_manager is not None)
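Here the Saver wraps TensorFlow checkpointing; only the constructor, the classmethod ckpt_dir_for_log_dir, and saving/restoring are visible from the calls in this and the later validation snippet. A rough sketch under those assumptions, delegating to tf.train.Saver:

import os
import tensorflow as tf

class Saver(object):
    def __init__(self, ckpt_dir, var_list=None, **tf_saver_kwargs):
        # tf_saver_kwargs forwards e.g. max_to_keep and keep_checkpoint_every_n_hours
        self.ckpt_dir = ckpt_dir
        if not os.path.exists(ckpt_dir):
            os.makedirs(ckpt_dir)
        self._saver = tf.train.Saver(var_list=var_list, **tf_saver_kwargs)

    @staticmethod
    def ckpt_dir_for_log_dir(log_dir):
        return os.path.join(log_dir, 'ckpts')

    def save(self, sess, global_step):
        self._saver.save(sess, os.path.join(self.ckpt_dir, 'ckpt'),
                         global_step=global_step)

    def restore(self, sess, ckpt_path):
        self._saver.restore(sess, ckpt_path)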
Beispiel #31
0
 def __init__(self, debug=False):
     self.debug = debug
     self.saver = Saver()
     self.last_tweets = self.maybe_load(FileNames.last_tweets)
     self.giphy_keys = self.maybe_load(FileNames.giphy_keys)
Beispiel #32
0
    def __init__(self, json_data, begin, end, directory):
        Saver.__init__(self, json_data, directory, QzonePath.MSG_BOARD)

        self._filename = "msg_board_%05d-%05d.json" % (begin, end - 1)
Beispiel #33
0
def main():
    # parse options
    parser = TrainOptions()
    opts = parser.parse()

    # data loader
    print('\n--- load dataset ---')

    if opts.multi_modal:
        dataset = dataset_unpair_multi(opts)
    else:
        dataset = dataset_unpair(opts)

    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=opts.batch_size,
                                               shuffle=True,
                                               num_workers=opts.nThreads)

    # model
    print('\n--- load model ---')
    model = DRIT(opts)
    model.setgpu(opts.gpu)
    if opts.resume is None:
        model.initialize()
        ep0 = -1
        total_it = 0
    else:
        ep0, total_it = model.resume(opts.resume)
    model.set_scheduler(opts, last_ep=ep0)
    ep0 += 1
    print('start the training at epoch %d' % (ep0))

    # saver for display and output
    saver = Saver(opts)

    # train
    print('\n--- train ---')
    max_it = 500000
    for ep in range(ep0, opts.n_ep):
        for it, (images_a, images_b) in enumerate(train_loader):
            if images_a.size(0) != opts.batch_size or images_b.size(
                    0) != opts.batch_size:
                continue

            # input data
            images_a = images_a.cuda(opts.gpu).detach()
            images_b = images_b.cuda(opts.gpu).detach()

            # update model
            if (it + 1) % opts.d_iter != 0 and it < len(train_loader) - 2:
                model.update_D_content(images_a, images_b)
                continue
            else:
                model.update_D(images_a, images_b)
                model.update_EG()

            # save to display file

            if not opts.no_display_img and not opts.multi_modal:
                saver.write_display(total_it, model)

            print('total_it: %d (ep %d, it %d), lr %08f' %
                  (total_it, ep, it, model.gen_opt.param_groups[0]['lr']))
            total_it += 1
            if total_it >= max_it:
                # saver.write_img(-1, model)
                saver.write_model(-1, total_it, model)
                break

        # decay learning rate
        if opts.n_ep_decay > -1:
            model.update_lr()

        # save result image
        if not opts.multi_modal:
            saver.write_img(ep, model)

        # Save network weights
        saver.write_model(ep, total_it, model)

    return
Beispiel #34
0
def main():
    args = parse_args()
    speech_windowed_data, peak_distance, peak_indicator, indices, actual_gci_locations = create_dataset(
        args.speechfolder, args.peaksfolder, args.window, args.stride, 10)
    saver = Saver(args.model_dir)
    model = SELUWeightNet
    model, _, params_dict = saver.load_checkpoint(
        model, file_name=args.model_name)
    model.eval()

    input = to_variable(
        th.from_numpy(
            np.expand_dims(speech_windowed_data, 1).astype(np.float32)),
        args.use_cuda, True)

    with warnings.catch_warnings():
        if args.use_cuda:
            model = model.cuda()
        warnings.simplefilter('ignore')
        prediction = model(input)

    predicted_peak_indicator = F.sigmoid(prediction[:, 1]).data.numpy()
    predicted_peak_distance = (prediction[:, 0]).data.numpy().astype(np.int32)

    predicted_peak_indicator_indices = predicted_peak_indicator > args.threshold

    predicted_peak_indicator = predicted_peak_indicator[
        predicted_peak_indicator_indices].ravel()
    predicted_peak_distance = predicted_peak_distance[
        predicted_peak_indicator_indices].ravel()
    indices = indices[predicted_peak_indicator_indices]

    assert (len(indices) == len(predicted_peak_distance))
    assert (len(predicted_peak_distance) == len(predicted_peak_indicator))

    positive_distance_indices = predicted_peak_distance < args.window

    positive_peak_distances = predicted_peak_distance[
        positive_distance_indices]
    postive_predicted_peak_indicator = predicted_peak_indicator[
        positive_distance_indices]

    print('Neg Peaks: {} Pos Peaks: {}'.format(
        len(predicted_peak_distance) - len(positive_peak_distances),
        len(positive_peak_distances)))

    gci_locations = [
        indices[i, d] for i, d in enumerate(positive_peak_distances)
    ]

    locations_true = np.nonzero(actual_gci_locations)[0]
    xaxes = np.zeros(len(actual_gci_locations))
    xaxes[locations_true] = 1

    if __debug__:
        ground_truth = np.row_stack((np.arange(len(actual_gci_locations)),
                                     xaxes))
        predicted_truth = np.row_stack((gci_locations,
                                        postive_predicted_peak_indicator))
        os.makedirs(args.prediction_dir, exist_ok=True)

        np.save(
            os.path.join(args.prediction_dir, 'ground_truth'), ground_truth)
        np.save(
            os.path.join(args.prediction_dir, 'predicted'), predicted_truth)

    plt.scatter(
        gci_locations,
        postive_predicted_peak_indicator,
        color='b',
        label='Predicted GCI')
    plt.plot(
        np.arange(len(actual_gci_locations)),
        xaxes,
        color='r',
        label='Actual GCI')
    plt.legend()
    plt.show()
Beispiel #35
0
 def __init__(self):
     self.saver = Saver()
Beispiel #36
0
def main():
    # parse options
    parser = TrainOptions()
    opts = parser.parse()

    # data loader
    print('\n--- load dataset ---')
    dataset = dataset_unpair(opts)
    train_loader = torch.utils.data.DataLoader(dataset,
                                               batch_size=opts.batch_size,
                                               shuffle=True,
                                               num_workers=opts.nThreads)

    # model
    print('\n--- load model ---')
    model = UID(opts)
    model.setgpu(opts.gpu)
    if opts.resume is None:
        model.initialize()
        ep0 = -1
        total_it = 0
    else:
        ep0, total_it = model.resume(opts.resume)
    model.set_scheduler(opts, last_ep=ep0)
    ep0 += 1
    print('start the training at epoch %d' % (ep0))

    # saver for display and output
    saver = Saver(opts)

    # train
    print('\n--- train ---')
    max_it = 500000
    for ep in range(ep0, opts.n_ep):
        for it, (images_a, images_b) in enumerate(train_loader):
            if images_a.size(0) != opts.batch_size or images_b.size(
                    0) != opts.batch_size:
                continue
            images_a = images_a.cuda(opts.gpu).detach()
            images_b = images_b.cuda(opts.gpu).detach()

            # update model
            model.update_D(images_a, images_b)
            if (it + 1) % 2 != 0 and it != len(train_loader) - 1:
                continue
            model.update_EG()

            # save to display file
            if (it + 1) % 48 == 0:
                print('total_it: %d (ep %d, it %d), lr %08f' %
                      (total_it + 1, ep, it + 1,
                       model.gen_opt.param_groups[0]['lr']))
                print(
                    'Dis_I_loss: %04f, Dis_B_loss %04f, GAN_loss_I %04f, GAN_loss_B %04f'
                    % (model.disA_loss, model.disB_loss, model.gan_loss_i,
                       model.gan_loss_b))
                print('B_percp_loss %04f, Recon_II_loss %04f' %
                      (model.B_percp_loss, model.l1_recon_II_loss))
            if (it + 1) % 200 == 0:
                saver.write_img(ep * len(train_loader) + (it + 1), model)

            total_it += 1
            if total_it >= max_it:
                saver.write_img(-1, model)
                saver.write_model(-1, total_it, model)
                break

        # decay learning rate
        if opts.n_ep_decay > -1:
            model.update_lr()

        # Save network weights
        saver.write_model(ep, total_it + 1, model)

    return
Beispiel #37
0
def main():

    # parse options
    parser = TestOptions()
    opts = parser.parse()
    orig_dir = opts.orig_dir
    blur_dir = opts.dataroot

    saver = Saver(opts)

    # data loader
    print('\n--- load dataset ---')
    dataset_domain = 'A' if opts.a2b else 'B'
    #     dataset = dataset_single(opts, 'A', opts.input_dim_a)
    # else:
    #     dataset = dataset_single(opts, 'B', opts.input_dim_b)
    # loader = torch.utils.data.DataLoader(dataset, batch_size=1, num_workers=opts.nThreads)
    loader = CreateDataLoader(opts)

    # model
    print('\n--- load model ---')
    model = UID(opts)
    model.setgpu(opts.gpu)  ## comment for cpu mode
    model.resume(opts.resume, train=False)
    model.eval()

    # test
    print('\n--- testing ---')
    for idx1, data in enumerate(loader):
        # img1, img_name_list = data[dataset_domain], data[dataset_domain+'_paths']
        # img1 = img1.cuda(opts.gpu).detach()
        images_b = data['B']
        images_a = images_b  # should be in the same shape (This is only for the case `resize_or_crop="none"`)
        img_name_list = data['B_paths']  # B is the fluorescence image
        center_crop_shape = data[
            'B_size_WH'][::-1]  # B is the fluorescence image
        if len(img_name_list) > 1:
            print("Warning, there are more than 1 sample in the test batch.")
        images_a = images_a.cuda(opts.gpu).detach()  ## comment for cpu mode
        images_b = images_b.cuda(opts.gpu).detach()  ## comment for cpu mode
        images_a = torch.cat(
            [images_a] * 2,
            dim=0)  # because half of the batch is used as real_A_random
        images_b = torch.cat(
            [images_b] * 2,
            dim=0)  # because half of the batch is used as real_B_random
        print('{}/{}'.format(idx1, len(loader)))
        with torch.no_grad():
            model.inference(images_a, images_b)
            # img = model.test_forward(img1, a2b=opts.a2b)
        img_name = img_name_list[0].split('/')[-1]
        saver.write_img(idx1,
                        model,
                        img_name=img_name,
                        inference_mode=True,
                        mask_path='../input/testB_mask/' + img_name)  # True
        # saver.save_img(img=model.fake_I_encoded[[np.s_[:]]*2 + return_center_crop_slices(input_shapes=images_b.shape[-2:],
        #                                                                                  output_shapes=center_crop_shape,
        #                                                                                  input_scale=1.0,
        #                                                                                  output_scale=opts.fineSize*1.0/opts.loadSize)],
        #                img_name=img_name,
        #                subfolder_name="fake_A") #'gen_%05d.png' % (idx1),

    return
Beispiel #38
0
class Session(object):

    L, R, X = 0, 1, 2
    COR, INCOR, EARLY, BOTH, NULL, KILLED, SKIPPED = 0, 1, 2, 3, 4, 5, 6
    PHASE_INTRO, PHASE_STIM, PHASE_DELAY, PHASE_LICK, PHASE_REWARD, PHASE_ITI, PHASE_END = [0, 1, 2, 3, 4, 5, 6]
    DEFAULT_PARAMS = dict(
        n_trials=[100],  # if a list, corresponds to list of lams
        lam=[
            0.5
        ],  # poisson lambda for one side, as fraction of n_total_stims. ex 0.9 means that on any given trial, the expected number of stims on one side will be 0.9*n_total_stims and the expected number of stims on the other side will be 0.1*n_total_stims. If a list, will be sequential trials with those params, numbers of trials specified by n_trials parameter. If any item in this list is a list, it means multiple shuffled trials equally distributed across those lams.
        # n_total_stims               = 10.,
        rate_sum=5.0,
        max_rate_frac=2.0,
        # max_n_stims                 = 20, #on one side
        stim_duration=0.050,
        stim_duration_override=False,
        stim_phase_duration=[5.0, 1.0],  # mean, std
        enforce_stim_phase_duration=True,
        stim_phase_intro_duration=0.200,
        stim_phase_end_duration=0.050,
        min_isi=0.100,
        reward_duration=[0.010, 0.010],
        penalty_iti_frac=1.0,  # fraction of ITI to add to normal ITI when trial was incorrect
        distribution_mode="poisson",
        phase_durations={
            PHASE_INTRO: 1.0,
            PHASE_STIM: None,
            PHASE_DELAY: 1.0,
            PHASE_LICK: 4.0,
            PHASE_REWARD: 4.0,
            PHASE_ITI: 3.0,
            PHASE_END: 0.0,
        },  # PHASE_END must always have 0.0 duration
        iter_resolution=0.030,
        hold_rule=True,
        puffs_on=True,
        rewards_on=True,
        hints_on=False,  # False, or n first trials to give hints, or True for all trials
        hint_interval=0.500,
        bias_correction=True,
        bias_correction_window=6,
        max_bias_correction=0.1,
        lick_rule_phase=True,  # must not lick before the lick phase
        lick_rule_side=True,  # must not lick incorrect side during the lick phase
        lick_rule_any=True,  # must lick some port in lick phase to get reward
        multiple_decisions=False,  # can make multiple attempts at correct side
        use_trials=True,  # if false, trials are ignored. used for training
        trial_vid_freq=2,  # movie for every n trials
        extra_bigdiff_trials=False,
        condition=-1,
    )

    def __init__(self, subj, session_params, cam_params=default_cam_params):
        self.params = self.DEFAULT_PARAMS
        self.cam_params = cam_params

        self.subj = subj
        self.name = time.strftime("%Y%m%d_%H%M%S")
        self.saver = Saver(self.subj, self.name)

        # kwargs
        self.params.update(session_params)
        # checks on kwargs
        assert self.params["min_isi"] > self.params["stim_duration"]  # otherwise same-side stims can overlap
        if self.params["lick_rule_side"]:
            assert self.params[
                "lick_rule_any"
            ]  # if must lick correct side in lick phase, then must lick at all in lick phase
        # extensions of kwargs
        if not isinstance(self.params["lam"], list):
            self.params["lam"] = [self.params["lam"]]
        if not isinstance(self.params["n_trials"], list):
            self.params["n_trials"] = [self.params["n_trials"]]
        assert len(self.params["lam"]) == len(self.params["n_trials"])
        self.params["subj_name"] = self.subj.name
        # self.params['phase_durations'][self.PHASE_STIM] = self.params['stim_phase_intro_duration'] + self.params['stim_phase_duration']
        self.cam_params.update(dict(save_name=pjoin(self.subj.subj_dir, self.name)))
        if self.params["hints_on"] is True:
            self.params["hints_on"] = 1e6
        # add them in
        self.__dict__.update(self.params)

        # hardware
        syncobj = multiprocessing.Value("d", 0)
        threading.Thread(target=update_sync, args=(syncobj,)).start()
        self.cam = PSEye(clock_sync_obj=syncobj, **self.cam_params)
        self.cam.start()
        self.lr = AnalogReader(
            saver=self.saver, lick_thresh=5.0, ports=["ai0", "ai1", "ai5", "ai6"], runtime_ports=[0, 1]
        )
        sd = self.stim_duration
        if self.stim_duration_override:
            sd = self.stim_duration_override
        self.stimulator = Valve(saver=self.saver, ports=["port0/line0", "port0/line1"], name="stimulator", duration=sd)
        self.spout = Valve(
            saver=self.saver, ports=["port0/line2", "port0/line3"], name="spout", duration=self.reward_duration
        )
        self.light = Light(saver=self.saver, port="ao0")
        self.speaker = Speaker(saver=self.saver)

        # trials init
        self.trials = self.generate_trials()

        # save session info
        self.saver.save_session(self)

        # runtime variables
        self.session_on = 0
        self.session_complete = False
        self.session_kill = False
        self.trial_on = 0
        self.trial_idx = -1
        self.valid_trial_idx = 0
        self.saving_trial_idx = 0
        self.session_runtime = -1
        self.trial_runtime = -1
        self.trial_outcomes = []
        self.trial_corrects = []  # for use in use_trials==False
        self.rewards_given = 0
        self.paused = 0
        self.holding = False
        self.percentages = [0.0, 0.0]  # L/R
        self.side_ns = [0, 0]  # L/R
        self.bias_correction_percentages = [0.0, 0.0]  # L/R
        self.perc_valid = 0.0
        self.iter_write_begin = now()

    def generate_trials(self):
        timess = []
        lamss = []
        durs = []
        for lam, n_trials in zip(self.lam, self.n_trials):
            for _ in xrange(n_trials):
                if isinstance(lam, list):
                    lami = np.random.choice(lam)
                else:
                    lami = lam
                lams = [lami, 1.0 - lami]
                lamss.append(lams)
                dur = np.random.normal(*self.stim_phase_duration)
                durs.append(dur)
                tt = self.generate_stim_times(
                    lams=lams, dur=dur, intro_dur=self.stim_phase_intro_duration, min_isi=self.min_isi
                )
                if self.extra_bigdiff_trials and 1.0 in lams and np.random.random() < 0.7:
                    enforce_n = np.random.choice([11, 12, 13, 14, 15])
                    while max([len(i) for i in tt]) < enforce_n:
                        tt = self.generate_stim_times(
                            lams=lams, dur=dur, intro_dur=self.stim_phase_intro_duration, min_isi=self.min_isi
                        )
                timess.append(tt)

        trials = np.zeros(
            (len(timess)), dtype=[("times", vlen_array_dtype), ("correct", int), ("lams", float, 2), ("dur", float)]
        )
        lr_idxer = [0, 1]
        for tidx, times, lams, d in zip(range(len(timess)), timess, lamss, durs):
            np.random.shuffle(lr_idxer)
            times = [times[lr_idxer[0]], times[lr_idxer[1]]]  # left or right equally likely to correspond to lam[0]
            lams = [lams[lr_idxer[0]], lams[lr_idxer[1]]]  # maintain alignment with lams
            lenL, lenR = [len(times[i]) for i in [self.L, self.R]]
            if lenL > lenR:
                correct = self.L
            elif lenR > lenL:
                correct = self.R
            elif lenR == lenL:
                raise Exception("Indeterminate trial included. This should not be possible!")

            trials[tidx] = (times, correct, lams, d)

        return np.array(trials)

    def generate_stim_times(self, lams, dur, intro_dur, min_isi, time_resolution=0.001):
        n_stim = [0, 0]
        while n_stim[0] == n_stim[1] or np.max(n_stim) / dur > self.rate_sum * self.max_rate_frac:
            if self.distribution_mode == "poisson":
                n_stim = np.random.poisson(self.rate_sum * dur * np.asarray(lams))
            elif self.distribution_mode == "abs":
                n_stim = np.round(self.n_total_stims * np.asarray(lams))

        t = np.arange(intro_dur, intro_dur + dur + time_resolution, time_resolution)

        def make(sz):
            times = np.append(0, np.sort(np.random.choice(t, size=sz, replace=False)))
            while len(times) > 1 and np.diff(times).min() < min_isi:
                times = np.append(0, np.sort(np.random.choice(t, size=sz, replace=False)))
            return times

        stim_times = map(make, n_stim)

        return np.array(stim_times)

    def get_cum_performance(self):
        cum = np.asarray(self.trial_outcomes)
        markers = cum.copy()
        if self.lick_rule_phase:
            ignore = [self.SKIPPED, self.EARLY, self.NULL, self.KILLED]
        else:
            ignore = [self.SKIPPED, self.NULL, self.KILLED]
        valid = np.array([c not in ignore for c in cum]).astype(bool)
        cum = cum == self.COR
        cum = [
            np.mean([c for c, v in zip(cum[:i], valid[:i]) if v]) if np.any(valid[:i]) else 0.0
            for i in xrange(1, len(cum) + 1)
        ]  # cumulative
        return cum, markers, np.asarray(self.trial_corrects)

    def stimulate(self, trial):
        sides = [self.L, self.R]
        np.random.shuffle(sides)
        for side in sides:
            tr = trial["times"][side]
            si = self.stim_idx[side]

            if si >= len(tr):
                return

            dt_phase = now() - self.phase_start
            if dt_phase >= tr[si]:
                self.stimulator.go(side)
                self.stim_idx[side] += 1

        if np.all(np.asarray(self.stim_idx) == np.asarray(map(len, trial["times"]))):
            self.stim_complete = True

    def to_phase(self, ph):
        self.phase_times[self.current_phase][1] = now()
        self.phase_times[ph][0] = now()

        self.current_phase = ph
        self.phase_start = now()
        self.hinted = False
        if ph == self.PHASE_END:
            # sanity check. should have been rewarded only if solely licked on correct side
            if (
                self.lick_rule_side
                and self.lick_rule_phase
                and (not self.licked_early)
                and (not self.multiple_decisions)
                and self.use_trials
            ):
                assert bool(self.rewarded) == (
                    any(self.lick_phase_licks[self.trial["correct"]])
                    and (not any(self.lick_phase_licks[-self.trial["correct"] + 1]))
                )

            if not self.rewarded:
                if self.use_trials:
                    self.trial_corrects.append(self.trial["correct"])
                else:
                    self.trial_corrects.append(self.X)

            # determine trial outcome
            if not self.use_trials:
                if any(self.lick_phase_licks):
                    outcome = self.COR
                else:
                    outcome = self.INCOR
            elif self.use_trials:
                if self.rewarded and self.lick_rule_side and not self.multiple_decisions:
                    outcome = self.COR
                elif self.rewarded and ((not self.lick_rule_side) or self.multiple_decisions):
                    lpl_min = np.array([min(i) if len(i) else -1 for i in self.lickph_andon_licks])
                    if np.all(lpl_min == -1):
                        outcome = self.NULL
                    else:
                        lpl_min[lpl_min == -1] = now()
                        if np.argmin(lpl_min) == self.trial["correct"]:
                            outcome = self.COR
                        else:
                            outcome = self.INCOR
                elif self.trial_kill:
                    outcome = self.KILLED
                elif self.licked_early:
                    outcome = self.EARLY
                # this BOTH logic no longer works because reward-phase licks are included in lick_phase_licks; BOTH will never show up, though it can still *rarely* be a cause for trial failure
                # elif (any(self.lick_phase_licks[self.L]) and any(self.lick_phase_licks[self.R])):
                #    outcome = self.BOTH
                elif any(self.lick_phase_licks[-self.trial["correct"] + 1]):
                    outcome = self.INCOR
                elif not any(self.lick_phase_licks):
                    outcome = self.NULL
            self.trial_outcomes.append(outcome)

    def update_licked(self):
        l = self.lr.licked()
        tst = now()
        for idx, li in enumerate(l):
            self.licks[idx] += [
                tst
            ] * li  # represent not the number of licks but the number of times the LR class queried daq and found a positive licking signal
            if self.current_phase in [self.PHASE_LICK, self.PHASE_REWARD]:
                self.lickph_andon_licks[idx] += [tst] * li
            if self.current_phase == self.PHASE_LICK:
                self.lick_phase_licks[idx] += [tst] * li

        if self.hold_rule:
            if (not self.holding) and np.any(self.lr.holding):
                self.holding = True
                self.paused += 1
            elif self.holding and not np.any(self.lr.holding):
                self.paused = max(self.paused - 1, 0)
                self.holding = False
            if self.holding:
                self.speaker.pop()

    def run_phase(self, ph):
        ph_dur = self.phase_durations[ph]  # intended phase duration
        if ph == self.PHASE_STIM:
            ph_dur = (
                self.stim_phase_intro_duration + self.trial["dur"] + self.stim_phase_end_duration
            )  # before dur was introduced: np.max(np.concatenate(self.trial['times']))+self.stim_phase_end_duration
        dt_phase = now() - self.phase_start
        self.session_runtime = now() - self.session_on
        self.trial_runtime = now() - self.trial_on
        self.update_licked()

        # special cases
        if ph == self.PHASE_ITI and not self.rewarded:
            ph_dur *= 1 + self.penalty_iti_frac

        if now() - self.iter_write_begin >= self.iter_resolution:
            iter_info = dict(
                trial=self.trial_idx,
                phase=ph,
                licks=np.array([len(i) for i in self.licks]),
                licked_early=self.licked_early,
                dt_phase=dt_phase,
                paused=self.paused,
            )
            self.saver.write("iterations", iter_info)
            self.iter_write_begin = now()

        if self.paused and self.current_phase in [self.PHASE_INTRO, self.PHASE_STIM, self.PHASE_DELAY, self.PHASE_LICK]:
            self.trial_kill = True
            return

        if self.trial_kill and not self.current_phase == self.PHASE_ITI:
            self.to_phase(self.PHASE_ITI)
            return

        # Intro
        if ph == self.PHASE_INTRO:
            self.light.off()
            if not self.intro_signaled:
                self.speaker.intro()
                self.intro_signaled = True

            # comment out to give intro time with licks allowed? doesn't work, because the lick variable accumulates over the trial
            if any(self.licks) and self.lick_rule_phase:
                self.licked_early = min(self.licks[0] + self.licks[1])
                self.to_phase(self.PHASE_ITI)
                return

            if dt_phase >= ph_dur:
                self.to_phase(self.PHASE_STIM)
                return

        # Stim
        elif ph == self.PHASE_STIM:
            if any(self.licks) and self.lick_rule_phase:
                self.licked_early = min(self.licks[0] + self.licks[1])
                self.to_phase(self.PHASE_ITI)
                return

            if dt_phase >= ph_dur:
                self.to_phase(self.PHASE_DELAY)
                return

            if self.puffs_on:
                self.stimulate(self.trial)

            if (not self.enforce_stim_phase_duration) and self.stim_complete:
                self.to_phase(self.PHASE_DELAY)
                return

        # Delay
        elif ph == self.PHASE_DELAY:
            if any(self.licks) and self.lick_rule_phase:
                self.licked_early = min(self.licks[0] + self.licks[1])
                self.to_phase(self.PHASE_ITI)
                return

            if dt_phase >= ph_dur:
                self.to_phase(self.PHASE_LICK)
                return

        # Lick
        elif ph == self.PHASE_LICK:
            self.light.on()

            if (
                self.hints_on
                and self.hints_on > self.trial_idx
                and (not self.hinted or (now() - self.hinted) > self.hint_interval)
                and self.use_trials
            ):
                self.stimulator.go(self.trial["correct"])
                self.hinted = now()

            if not self.laser_signaled:
                self.speaker.laser()
                self.laser_signaled = True

            if not self.lick_rule_any:
                self.to_phase(self.PHASE_REWARD)
                return

            if any(self.lick_phase_licks) and not self.multiple_decisions:
                self.to_phase(self.PHASE_REWARD)
                return
            elif (
                any(self.lick_phase_licks)
                and self.multiple_decisions
                and any(self.lick_phase_licks[self.trial["correct"]])
            ):
                self.to_phase(self.PHASE_REWARD)
                return

            # if time is up, to reward phase
            if dt_phase >= ph_dur:
                self.to_phase(self.PHASE_REWARD)
                return

        # Reward
        elif ph == self.PHASE_REWARD:
            self.light.on()  # probably redundant

            if (
                self.lick_rule_side
                and any(self.lick_phase_licks[-self.trial["correct"] + 1])
                and not self.rewarded
                and not self.multiple_decisions
            ):
                self.speaker.wrong()
                self.to_phase(self.PHASE_ITI)
                return

            # sanity check. cannot reach here if any incorrect licks, ensure that:
            if self.lick_rule_side and not self.multiple_decisions:
                assert not any(self.lick_phase_licks[-self.trial["correct"] + 1])

            # if no licks at all, go straight to ITI
            if self.lick_rule_any and not any(self.lick_phase_licks):
                self.to_phase(self.PHASE_ITI)
                return

            # if allowed multiple choices but only licked wrong side
            if self.multiple_decisions and not any(self.lick_phase_licks[self.trial["correct"]]):
                self.to_phase(self.PHASE_ITI)
                return

            # sanity check. can only reach here if licked correct side only
            if self.lick_rule_any and self.lick_rule_side:
                assert any(self.lick_phase_licks[self.trial["correct"]])

            # from this point on, it is assumed that rewarding should occur.
            if self.use_trials:
                rside = self.trial["correct"]
            else:
                rside = np.argmin([min(i) if len(i) else now() for i in self.lick_phase_licks])
            if not self.corside_added:
                self.trial_corrects.append(rside)
                self.corside_added = True

            if (
                self.hints_on
                and self.hints_on > self.trial_idx
                and (not self.hinted or (now() - self.hinted) > self.hint_interval)
            ):
                self.stimulator.go(rside)
                self.hinted = now()

            if self.rewards_on and not self.rewarded:
                self.spout.go(side=rside)
                self.rewarded = now()
                self.rewards_given += 1

            if dt_phase >= ph_dur:
                self.to_phase(self.PHASE_ITI)
        # ITI
        elif ph == self.PHASE_ITI:
            self.light.off()

            if self.licked_early and self.lick_rule_phase and not self.error_signaled:
                self.speaker.error()
                self.error_signaled = True

            if dt_phase >= ph_dur:
                self.to_phase(self.PHASE_END)
                return

    def determine_skip(self):
        to = np.asarray(self.trial_outcomes)
        if len(to) == 0:
            return False
        corincor_idxs = np.where(np.logical_or(to == self.COR, to == self.INCOR))[0]
        all_val = np.sum([i in [self.COR, self.INCOR, self.EARLY, self.NULL, self.KILLED] for i in to])
        if all_val != 0:
            self.perc_valid = float(len(corincor_idxs)) / all_val
        corincor_trials = self.trials[corincor_idxs]
        trcor = corincor_trials["correct"]
        subcor = to[corincor_idxs] == self.COR

        if np.sum(trcor == self.L) > 0:
            perc_l = np.mean(subcor[trcor == self.L])
            self.percentages[self.L] = perc_l
            self.side_ns[self.L] = np.sum(trcor == self.L)
        if np.sum(trcor == self.R) > 0:
            perc_r = np.mean(subcor[trcor == self.R])
            self.percentages[self.R] = perc_r
            self.side_ns[self.R] = np.sum(trcor == self.R)

        if (
            np.sum(trcor == self.L) < self.bias_correction_window
            or np.sum(trcor == self.R) < self.bias_correction_window
        ):
            return False

        perc_l = np.mean(subcor[trcor == self.L][-self.bias_correction_window :])
        perc_r = np.mean(subcor[trcor == self.R][-self.bias_correction_window :])
        self.bias_correction_percentages = [perc_l, perc_r]

        if perc_l == perc_r:
            return False

        this_cor = self.trials[self.trial_idx]["correct"]
        if self.bias_correction_percentages[this_cor] < self.bias_correction_percentages[-1 + this_cor]:
            return False

        if min(self.bias_correction_percentages) == 0:
            pthresh = self.max_bias_correction
        else:
            pthresh = float(min(self.bias_correction_percentages)) / max(self.bias_correction_percentages)

        return np.random.random() > pthresh

    def next_trial(self):
        # init the trial
        self.skip_trial = False
        self.trial_idx += 1
        if self.trial_idx > 1 and self.trial_outcomes[-1] in [self.COR, self.INCOR]:
            self.valid_trial_idx += 1
        if self.trial_idx >= len(self.trials):
            self.session_complete = True
            return

        self.skip_trial = self.determine_skip()

        self.trial_on = now()

        self.trial = self.trials[self.trial_idx]
        self.current_phase = self.PHASE_INTRO
        self.stim_idx = [0, 0]  # index of next stim, for [L,R]
        self.trial_start = now()
        self.phase_start = self.trial_start
        self.phase_times = [[-1, -1] for _ in self.phase_durations]

        _ = self.lr.licked()  # to clear any residual signal
        self.licks = [[], []]
        self.lick_phase_licks = [[], []]
        self.lickph_andon_licks = [[], []]
        self.licked_early = False
        self.rewarded = False
        self.error_signaled = False
        self.laser_signaled = False
        self.intro_signaled = False
        self.stim_complete = False
        self.trial_kill = False
        self.hinted = False
        self.corside_added = False

        if self.skip_trial:
            self.cam.SAVING.value = 0
        else:
            if self.saving_trial_idx % self.trial_vid_freq == 0:
                self.cam.SAVING.value = 1
            else:
                self.cam.SAVING.value = 0
            self.saving_trial_idx += 1

        # logging.info('Starting trial %i.' %self.trial_idx)

        # run the trial loop
        if self.skip_trial:
            self.trial_outcomes.append(self.SKIPPED)
            self.trial_corrects.append(self.trial["correct"])
        else:
            while self.current_phase != self.PHASE_END:
                self.run_phase(self.current_phase)

        # save trial info
        trial_info = dict(
            idx=self.trial_idx,
            ns=[len(tt) for tt in self.trial["times"]],
            start_time=self.trial_start,
            licksL=self.licks[self.L],
            licksR=self.licks[self.R],
            licked_early=self.licked_early,
            phase_times=self.phase_times,
            rewarded=self.rewarded,
            outcome=self.trial_outcomes[-1],
            hints=(self.hints_on and self.hints_on > self.trial_idx),
            end_time=now(),
            condition=self.condition,
        )
        self.saver.write("trials", trial_info)

    def run(self):
        self.session_on = now()
        self.lr.begin_saving()

        while True:
            self.next_trial()
            if self.session_kill:
                logging.info("Session killed manually")
                self.paused = False
                break
            if self.session_complete:
                logging.info("Session complete")
                break

        self.session_on = False
        self.end()

    def end(self):
        to_end = [self.lr, self.stimulator, self.spout, self.light, self.cam, self.saver]
        for te in to_end:
            te.end()
            time.sleep(0.050)

    def get_code(self):
        py_files = [
            pjoin(d, f) for d, _, fs in os.walk(os.getcwd()) for f in fs if f.endswith(".py") and not f.startswith("__")
        ]
        code = {}
        for pf in py_files:
            with open(pf, "r") as f:
                code[pf] = f.read()
        return json.dumps(code)
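A hypothetical way to launch a Session like the one above; the stub subject and the parameter values are placeholders, not from the original code (the real subject object presumably exposes at least name and subj_dir, and the hardware classes require the actual rig):

class _StubSubject(object):
    # stand-in for the real subject class; only the attributes Session reads
    name = 'test_mouse'
    subj_dir = '/tmp/test_mouse'

if __name__ == '__main__':
    sess = Session(_StubSubject(), session_params=dict(n_trials=[20], lam=[0.8]))
    sess.run()  # runs trials until the session completes or is killed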
Beispiel #39
0
 def __init__(self, master=None):
     Frame.__init__(self, master)
     self.master.title("Diary Prompter")
     self.init_elements()
     self.saver = Saver()
Beispiel #40
0
sweep.attox = attox
sweep.attoy = attoy
sweep.attoz = attoz
sweep.daq = daq

i = it.Foo()

print(daq.__class__)
print(daq.__class__.__name__)
print(i.__class__)
print(i.__class__.__name__)

print('\n[dict2, objlist2] = attox.todict([])')
[dict2, objlist2] = attox.todict([])
print('dict2: \n' + str(Saver.tocommentjson(dict2)))
print('objlist2: ' + str(objlist2))

print('\n[dict3, objlist3] = attoy.todict([])')
[dict3, objlist3] = attoy.todict([])
print('dict3: \n' + str(Saver.tocommentjson(dict3)))
print('objlist3: ' + str(objlist3))

print('\n[dict4, objlist4] = attoz.todict([])')
[dict4, objlist4] = attoz.todict([])
print('dict4: \n' + str(Saver.tocommentjson(dict4)))
print('objlist4: ' + str(objlist4))

print('\n[dict1, objlist1] = daq.todict([])')
[dict1, objlist1] = daq.todict([])
print('dict1: \n' + str(Saver.tocommentjson(dict1)))
Beispiel #41
0
def validate(val_dirs: ValidationDirs, images_iterator: ImagesIterator,
             flags: OutputFlags):
    """
    Saves in val_dirs.log_dir/val/dataset_name/measures.csv:
        - `img_name,bpp,psnr,ms-ssim forall img_name`
    """
    print(_VALIDATION_INFO_STR)

    validated_checkpoints = val_dirs.get_validated_checkpoints(
    )  # :: [10000, 18000, ..., 256000], ie, [int]
    all_ckpts = Saver.all_ckpts_with_iterations(val_dirs.ckpt_dir)
    if len(all_ckpts) == 0:
        print('No checkpoints found in {}'.format(val_dirs.ckpt_dir))
        return
    # if ckpt_step is -1, then all_ckpts[:-1:flags.ckpt_step] == [] because of how slice strides work
    ckpt_to_check = all_ckpts[:-1:flags.ckpt_step] + [
        all_ckpts[-1]
    ]  # every ckpt_step-th checkpoint plus the last one
    if flags.ckpt_step == -1:
        assert len(ckpt_to_check) == 1
    print('Validating {}/{} checkpoints (--ckpt_step {})...'.format(
        len(ckpt_to_check), len(all_ckpts), flags.ckpt_step))

    missing_checkpoints = [(ckpt_itr, ckpt_path)
                           for ckpt_itr, ckpt_path in ckpt_to_check
                           if ckpt_itr not in validated_checkpoints]
    if len(missing_checkpoints) == 0:
        print('All checkpoints validated, stopping...')
        return

    # ---

    # create networks
    autoencoder_config_path, probclass_config_path = logdir_helpers.config_paths_from_log_dir(
        val_dirs.log_dir,
        base_dirs=[constants.CONFIG_BASE_AE, constants.CONFIG_BASE_PC])
    ae_config, ae_config_rel_path = config_parser.parse(
        autoencoder_config_path)
    pc_config, pc_config_rel_path = config_parser.parse(probclass_config_path)

    ae_cls = autoencoder.get_network_cls(ae_config)
    pc_cls = probclass.get_network_cls(pc_config)

    # Instantiate autoencoder and probability classifier
    ae = ae_cls(ae_config)
    pc = pc_cls(pc_config, num_centers=ae_config.num_centers)

    x_val_ph = tf.placeholder(tf.uint8, (3, None, None), name='x_val_ph')
    x_val_uint8 = tf.expand_dims(x_val_ph, 0, name='batch')
    x_val = tf.to_float(x_val_uint8, name='x_val')

    enc_out_val = ae.encode(x_val, is_training=False)
    x_out_val = ae.decode(enc_out_val.qhard, is_training=False)

    bc_val = pc.bitcost(enc_out_val.qbar,
                        enc_out_val.symbols,
                        is_training=False,
                        pad_value=pc.auto_pad_value(ae))
    bpp_val = bits.bitcost_to_bpp(bc_val, x_val)
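    # bitcost_to_bpp presumably normalises the total bit cost of the symbol grid by the
    # number of pixels in x_val, i.e. bpp ~= sum(bit cost) / (H * W).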

    x_out_val_uint8 = tf.cast(x_out_val, tf.uint8, name='x_out_val_uint8')
    # Using numpy implementation due to dynamic shapes
    msssim_val = ms_ssim_np.tf_msssim_np(x_val_uint8,
                                         x_out_val_uint8,
                                         data_format='NCHW')
    psnr_val = psnr_np(x_val_uint8, x_out_val_uint8)
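    # For 8-bit images, PSNR is typically 10 * log10(255**2 / MSE) in dB; psnr_np is
    # presumably a numpy implementation of this, used here for the same dynamic-shape reason.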

    restorer = Saver(val_dirs.ckpt_dir,
                     var_list=Saver.get_var_list_of_ckpt_dir(
                         val_dirs.ckpt_dir))

    # create fetch_dict
    fetch_dict = {
        'bpp': bpp_val,
        'ms-ssim': msssim_val,
        'psnr': psnr_val,
    }

    if flags.real_bpp:
        fetch_dict['sym'] = enc_out_val.symbols  # NCHW

    if flags.save_ours:
        fetch_dict['img_out'] = x_out_val_uint8

    # ---
    fw = tf.summary.FileWriter(val_dirs.out_dir, graph=tf.get_default_graph())

    def full_summary_tag(summary_name):
        return '/'.join(['val', images_iterator.dataset_name, summary_name])

    # Distance
    try:
        codec_distance_ms_ssim = CodecDistance(images_iterator.dataset_name,
                                               codec='bpg',
                                               metric='ms-ssim')
        codec_distance_psnr = CodecDistance(images_iterator.dataset_name,
                                            codec='bpg',
                                            metric='psnr')
    except CodecDistanceReadException as e:  # no codec distance values stored for the current setup
        print('*** Distance to BPG not available for {}:\n{}'.format(
            images_iterator.dataset_name, str(e)))
        codec_distance_ms_ssim = None
        codec_distance_psnr = None

    # Note that for each checkpoint, the structure of the network will be the same. Thus the
    # padding-dependent image loading can be cached.

    # create session
    with tf_helpers.create_session() as sess:
        if flags.real_bpp:
            pred = probclass.PredictionNetwork(pc, pc_config,
                                               ae.get_centers_variable(), sess)
            checker = probclass.ProbclassNetworkTesting(pc, ae, sess)
            bpp_fetcher = bpp_helpers.BppFetcher(pred, checker)

        fetcher = sess.make_callable(fetch_dict, feed_list=[x_val_ph])

        last_ckpt_itr = missing_checkpoints[-1][0]
        for ckpt_itr, ckpt_path in missing_checkpoints:
            if not ckpt_still_exists(ckpt_path):
                # May happen if job is still training
                print('Checkpoint disappeared: {}'.format(ckpt_path))
                continue

            print(_CKPT_ITR_INFO_STR.format(ckpt_itr))

            restorer.restore_ckpt(sess, ckpt_path)

            values_aggregator = ValuesAggregator('bpp', 'ms-ssim', 'psnr')

            # truncates the previous measures.csv file! This way, only the last valid checkpoint is saved.
            measures_writer = MeasuresWriter(val_dirs.out_dir)

            # ----------------------------------------
            # iterate over images
            # images are padded to work with current auto encoder
            for img_i, (img_name, img_content) in enumerate(
                    images_iterator.iter_imgs(
                        pad=ae.get_subsampling_factor())):
                otp = fetcher(img_content)
                measures_writer.append(img_name, otp)

                if flags.real_bpp:
                    # Calculate
                    bpp_real, bpp_theory = bpp_fetcher.get_bpp(
                        otp['sym'],
                        bpp_helpers.num_pixels_in_image(img_content))

                    # Logging
                    bpp_loss = otp['bpp']
                    diff_percent_tr = (bpp_theory / bpp_real) * 100
                    diff_percent_lt = (bpp_loss / bpp_theory) * 100
                    print('BPP: Real         {:.5f}\n'
                          '     Theoretical: {:.5f} [{:5.1f}% of real]\n'
                          '     Loss:        {:.5f} [{:5.1f}% of theoretical]'.format(
                              bpp_real, bpp_theory, diff_percent_tr, bpp_loss,
                              diff_percent_lt))
                    assert abs(bpp_theory - bpp_loss) < 1e-3, \
                        'Expected bpp_theory to match loss! Got {} and {}'.format(bpp_theory, bpp_loss)
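                    # Illustrative numbers (assumed, not from the source): bpp_real=0.42130,
                    # bpp_theory=0.42101 (99.9% of real), bpp_loss=0.42095 (~100.0% of theoretical);
                    # the assert above then checks |0.42101 - 0.42095| < 1e-3.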

                if flags.save_ours and ckpt_itr == last_ckpt_itr:
                    save_img(img_name, otp['img_out'], val_dirs)

                values_aggregator.update(otp)

                print('{: 10d} {img_name} | Mean: {avgs}'.format(
                    img_i,
                    img_name=img_name,
                    avgs=values_aggregator.averages_str()),
                      end=('\r' if not flags.real_bpp else '\n'),
                      flush=True)

            measures_writer.close()

            print()  # add newline
            avgs = values_aggregator.averages()
            avg_bpp, avg_ms_ssim, avg_psnr = avgs['bpp'], avgs['ms-ssim'], avgs['psnr']

            tf_helpers.log_values(
                fw, [(full_summary_tag('avg_bpp'), avg_bpp),
                     (full_summary_tag('avg_ms_ssim'), avg_ms_ssim),
                     (full_summary_tag('avg_psnr'), avg_psnr)],
                iteration=ckpt_itr)

            if codec_distance_ms_ssim and codec_distance_psnr:
                try:
                    d_ms_ssim = codec_distance_ms_ssim.distance(
                        avg_bpp, avg_ms_ssim)
                    d_psnr = codec_distance_psnr.distance(avg_bpp, avg_psnr)
                    print('Distance to BPG: {:.3f} ms-ssim // {:.3f} psnr'.
                          format(d_ms_ssim, d_psnr))
                    tf_helpers.log_values(
                        fw,
                        [(full_summary_tag('distance_BPG_MS-SSIM'), d_ms_ssim),
                         (full_summary_tag('distance_BPG_PSNR'), d_psnr)],
                        iteration=ckpt_itr)
                except ValueError as e:  # out of range errors from distance calls
                    print(e)

            val_dirs.add_validated_checkpoint(ckpt_itr)

    print('Validation completed {}'.format(val_dirs))
Beispiel #42
0
from bone import Bone
from game_over import GameOver
from get_key import ClearConsole, GetKey, Wait, WindowSize
from laser import Laser
from map import Map
from paco import Paco
from saver import Saver
from start import Start

if __name__ == '__main__':
    map = Map(133, 33, '#')
    start = Start(133, 33)
    finish = GameOver(133, 33)
    paconator = Paco(1, 15, 133, 33)
    getKey = GetKey()
    save = Saver()
    pigeons = []
    lasers = []
    bones = []
    play_game = False
    game_over = False
    ammo = 20
    points = 0
    x = save.load()
    record = int(x)
    WindowSize()

    # ====== START SCREEN ======
    while True:
        key = getKey()
        for y in range(11):
Beispiel #43
0
    def __init__(self, json_data, begin, end, directory):
        Saver.__init__(self, json_data, directory, QzonePath.BLOG)

        self._filename = "blogs_%05d-%05d.json" % (begin, end)
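        # e.g. begin=0, end=24 (illustrative values) -> "blogs_00000-00024.json"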
Beispiel #44
0
    def train(self):

        # saver
        self.logger.info('Initialize saver ...')
        train_saver = Saver(self.sess, tf.global_variables(),
                            self.cfg.model_dir)
        merged = tf.summary.merge_all()
        writer = tf.summary.FileWriter(self.cfg.log_dir, self.sess.graph)
        # initialize weights
        self.logger.info('Initialize all variables ...')
        self.sess.run(
            tf.variables_initializer(tf.global_variables(), name='init'))
        self.load_weights(self.cfg.init_model)

        self.logger.info('Start training ...')
        start_itr = self.cur_epoch * self.itr_per_epoch + 1
        end_itr = self.itr_per_epoch * self.cfg.end_epoch + 1
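        # Illustrative example (values assumed): cur_epoch=2, itr_per_epoch=1000, end_epoch=5
        # -> start_itr=2001, end_itr=5001, so itr runs from 2001 to 5000 inclusive.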
        for itr in range(start_itr, end_itr):
            self.tot_timer.tic()

            self.cur_epoch = itr // self.itr_per_epoch
            setproctitle.setproctitle('train epoch:' + str(self.cur_epoch))

            # Push the scheduled learning rate into the graph only when it actually changes
            cur_lr = get_lr(self.cur_epoch)
            if not approx_equal(cur_lr, self.lr_eval):
                print(self.lr_eval, cur_lr)
                self.sess.run(tf.assign(self.lr, cur_lr))

            # input data
            self.read_timer.tic()
            feed_dict = self.next_feed()
            self.read_timer.toc()

            # train one step
            self.gpu_timer.tic()
            _, self.lr_eval, *summary_res = self.sess.run(
                [self.graph_ops[0], self.lr, *self.summary_dict.values()],
                feed_dict=feed_dict)
            self.gpu_timer.toc()

            # itr % 1 == 0 is always true, so the merged summaries are written every iteration
            if itr % 1 == 0:
                result = self.sess.run(merged, feed_dict=feed_dict)
                writer.add_summary(result, itr)

            itr_summary = dict()
            for i, k in enumerate(self.summary_dict.keys()):
                itr_summary[k] = summary_res[i]

            screen = [
                'Epoch %d itr %d/%d:' %
                (self.cur_epoch, itr, self.itr_per_epoch),
                'lr: %g' % (self.lr_eval),
                'speed: %.2f(%.2fs r%.2f)s/itr' %
                (self.tot_timer.average_time, self.gpu_timer.average_time,
                 self.read_timer.average_time),
                '%.2fh/epoch' %
                (self.tot_timer.average_time / 3600. * self.itr_per_epoch),
                ' '.join(
                    map(lambda x: '%s: %.4f' % (x[0], x[1]),
                        itr_summary.items())),
            ]

            if itr % self.cfg.display == 0:
                self.logger.info(' '.join(screen))

            if itr % self.itr_per_epoch == 0:
                train_saver.save_model(self.cur_epoch)

            self.tot_timer.toc()
Beispiel #45
0
        rel_err = torch.abs(x_pred_true - x_output_true) / x_output_true
        return torch.sum(abs_err) / torch.nonzero(binary_mask).size(0), torch.sum(rel_err) / torch.nonzero(binary_mask).size(0)


##############################################################################
##############################################################################

################################     MAIN     ################################

##############################################################################
##############################################################################

# Saving settings
model_dir = os.path.join(opt.checkpoint_path, opt.name)
if not os.path.isdir(model_dir):
    os.mkdir(model_dir)
saver = Saver(model_dir, args=opt)

# Define model and optimiser
gpu = utils.check_gpu()
device = torch.device("cuda:{}".format(gpu) if torch.cuda.is_available() else "cpu")
model = SegNet().to(device)
optimizer = optim.Adam(model.parameters(), lr=1e-4)


# Recover weights, if required
if opt.recover:
    ckpt_file = os.path.join(model_dir, opt.reco_type+'_weights.pth')
    ckpt = torch.load(ckpt_file, map_location=device)
    model.load_state_dict(ckpt['model_state_dict'])
    epoch = ckpt['iter_nb'] + 1
    print('Model recovered from {}.'.format(ckpt_file))
Beispiel #46
0
def saver_work(args, queues):
    # with tf.device('/gpu:0'):
    saver = Saver(args, queues)
    saver.run()