Example #1
def downloader(url, filename, file_type='.mp4'):
    """Функция осуществляет загрузку видео-файла по url, в файл filename"""
    filename = Path('{}{}'.format(filename.resolve(), file_type))
    r = requests.get(url, stream=True)
    filename_str = str(filename.resolve())
    if not filename.exists():
        total_length = int(r.headers.get('content-length') or 0)
        dl = 0
        if len(filename_str) > 260:
            # Windows limits file paths to 260 characters (MAX_PATH)
            print(
                "{}: the file name is too long for the target folder; the lecture will be saved under a name of the form 'Лекция №'"
                .format(filename))
            new_filename = re.findall(r'.*(Лекция \d*)',
                                      filename.name)[0] + file_type
            filename = filename.parent / new_filename
        temp_filename = filename.with_suffix('.dl')
        with temp_filename.open('wb') as f:
            for chunk in r.iter_content(chunk_size=CHUNK_SIZE):
                dl += len(chunk)
                if chunk:
                    f.write(chunk)
                if total_length:
                    progress(dl, total_length)
        temp_filename.rename(filename)

    else:
        print('File "{}" already exists'.format(filename))
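A hypothetical call for context; besides requests, the function relies on re, pathlib.Path and the module-level CHUNK_SIZE and progress() helpers, all of which are assumed here:

# Hypothetical usage sketch; CHUNK_SIZE and progress() are assumed to be
# defined at module level in the original project.
from pathlib import Path

CHUNK_SIZE = 1024 * 1024  # assumed chunk size: 1 MiB

downloader('https://example.com/lecture.mp4', Path('downloads/Лекция 1'))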
Example #2
async def on_message(message):
    if message.content.startswith('!wordcloud'):
        words = to_words(message.content)
        args = []
        for word in words:
            if word != '!wordcloud':
                args.append(word)
        if 'help' in args:
            with open('documentation.txt', 'r') as doc:
                await client.send_message(message.author, doc.read() % (format_colors(), format_fonts()))
            await client.send_message(message.channel, 'Documentation has been sent via DM.')
        else:
            generating = await client.send_message(message.channel, 'Generating word cloud...')
            font = ''
            hues = []
            unknownargs = []
            for arg in args:
                if colors.is_color(arg):
                    hues.append(colors.from_name(arg).hue)
                elif arg in os.listdir('fonts/'):
                    font = arg
                else:
                    unknownargs.append(arg)

            if unknownargs:
                formattedargs = ', '.join('`' + arg + '`' for arg in unknownargs)
                await client.edit_message(generating, 'Unknown argument(s): ' + formattedargs +
                                          '\n\nAvailable colors: ' + format_colors() +
                                          '\nAvailable fonts: ' + format_fonts() +
                                          '\n\nUse `!wordcloud help` to receive full documentation.')
            else:
                if not font or font == 'random':
                    font = random.choice(os.listdir('fonts/'))

                if len(hues) < 1:
                    hues.append(random.choice(colors.all).hue)

                generatefrom = ''
                print("Reading messages...")
                message_counter = 0
                async for log in client.logs_from(message.channel, limit=max_messages):
                    message_counter += 1
                    progressbar.progress(message_counter, max_messages)
                    generatefrom = generatefrom + ' ' + log.content
                print("")
                print("Generating cloud...")
                make_cloud(generatefrom, font, hues)
                await client.send_file(message.channel, output)
                await client.delete_message(generating)

                os.remove(output)

                print("All done, standing by...")
Example #3
def DFO_extract_by_watershed(vtk_file, aqid_list, gen_plot=False):
    """extract and summary"""

    watersheds = watersheds_gdb_reader()
    if len(aqid_list) == 0:
        # take the list from watersheds gdb
        #aqid is the index column
        aqid_list = watersheds.index.tolist()

    # setup output file
    # get header from vtk file
    # Flood_2-Day_250m.vrt
    headerprefix = os.path.basename(vtk_file).split("_")[1]
    if ("_CS_" in vtk_file):
        headerprefix = "1-Day_CS"

    headers_list = [
        "pfaf_id", headerprefix + "_TotalArea_km2", headerprefix + "_perc_Area"
    ]
    summary_file = os.path.basename(vtk_file)[:-4] + ".csv"
    if not os.path.exists(summary_file):
        with open(summary_file, 'w') as f:
            writer = csv.writer(f)
            writer.writerow(headers_list)
    else:
        # already processed,
        return

    count = 0
    with open(summary_file, 'a') as f:
        writer = csv.writer(f)

        for the_aqid in aqid_list:
            count += 1
            #print(the_aqid, count, " out of ", len(aqid_list))
            progress(count, len(aqid_list), status='aqid')
            # extract mask
            test_json = json.loads(
                geopandas.GeoSeries([watersheds.loc[the_aqid,
                                                    'geometry']]).to_json())
            # plot check
            dfoarea = DFO_extract_by_mask(vtk_file, test_json)

            DFO_TotalArea = dfoarea
            DFO_Area_percent = DFO_TotalArea / watersheds.loc[the_aqid][
                'areakm2'] * 100

            results_list = [
                the_aqid, "{:.3f}".format(DFO_TotalArea),
                "{:.3f}".format(DFO_Area_percent)
            ]
            writer.writerow(results_list)

    return summary_file
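A hypothetical invocation, reusing the VRT name from the comment above; an empty aqid list makes the function summarize every watershed in the geodatabase:

# Hypothetical call; an empty list means "process all watersheds".
csv_path = DFO_extract_by_watershed('Flood_2-Day_250m.vrt', [])
print('summary written to', csv_path)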
Example #4
def main():
    '''Convert cine file to frames and save them in a .npz file.'''
    for dataset in DATASET:
        if dataset == 'Globular':
            video_name = '11-12-06_GMAW CV High V Globular - 175 ipm WFS, 33V, 85-15CO2, 20 ipm travel'
        elif dataset == 'Spray':
            video_name = '11-12-06_GMAW CV Spray - 400 ipm WFS, 35V, 85-15CO2, 20 ipm travel'

        video_path = os.path.join('Data', 'Video', 'CINE',
                                  video_name + '.cine')
        total_frames = read_header(video_path)["cinefileheader"].ImageCount
        save_folder = os.path.join('Data', 'Image', 'Input', dataset.lower())
        n_frames = total_frames

        data_rgb = []
        data_gray = []
        for i in progress(range(n_frames)):
            # read each frame from .cine file as an ndarray
            rgb_image, _ = display_frames(video_path, start_frame=i + 1)
            rgb_image[np.isnan(rgb_image)] = 0
            gray_image = rgb2gray(rgb_image)

            # Convert from float to uint8
            rgb_image = normalizeuint8(rgb_image)
            gray_image = normalizeuint8(gray_image)
            data_rgb.append(rgb_image)
            data_gray.append(gray_image)

        data_rgb = np.array(data_rgb)
        data_gray = np.array(data_gray)

        np.savez_compressed(save_folder + '_rgb', images=data_rgb)
        np.savez_compressed(save_folder + '_gray', images=data_gray)
Example #5
def load_and_save_json():
    '''
    Loads exported json file from LabelBox containing urls to each image and segmentation map.
    '''
    for dataset in DATASET:
        json_file = pd.read_json(
            os.path.join('Data', 'json',
                         dataset.lower() + '_masks.json'))
        _ids = []
        masks = []
        for i in progress(range(len(json_file))):
            _ids.append(int(json_file['External ID'][i][:-4]))
            annot = json_file['Label'][i]['objects'][0]['instanceURI']

            with urllib.request.urlopen(annot) as url:
                image_bytes_url = io.BytesIO(url.read())
            image = Image.open(image_bytes_url).convert('L')
            masks.append(np.asarray(image))
        images = np.load(
            os.path.join('Data', 'Image', 'Input',
                         dataset.lower() + '_gray.npz'))['images'][_ids, ...]
        np.savez_compressed(os.path.join('Data', 'Image', 'Labelbox',
                                         dataset.lower() + '_segmented'),
                            images=images,
                            masks=masks)
Example #6
def main():
    '''
    Load original images and masks, augment them and save as images.
    '''
    for dataset in DATASET:
        data = np.load(os.path.join('Data', 'Image', 'Labelbox',
                                    dataset.lower() + '_segmented.npz'))
        images = data['images']
        masks = data['masks'].astype(bool)
        image_shape = images.shape[1:3]

        images_augmented, masks_augmented = ([], [])
        for image, mask in progress(zip(images, masks)):
            segmap = SegmentationMapsOnImage(mask, shape=image_shape)
            image_aug, segmap_aug = augment_data(SEQ, image, segmap, N_AUGMENT)
            for img_aug, sg_aug in zip(image_aug, segmap_aug):
                sg_map = sg_aug.draw(size=image_shape)[0]
                sg_map = ((sg_map[..., 0] != 0)*255).astype(np.uint8)
                images_augmented.append(img_aug)
                masks_augmented.append(sg_map)

        images_augmented = np.array(images_augmented)
        masks_augmented = np.array(masks_augmented)
        np.savez_compressed(os.path.join('Data', 'Image', 'Augmented', dataset.lower() +
                                         '_augmented'), images=images_augmented, masks=masks_augmented)
Example #7
def save_properties():
    '''
    Computes properties for every image in a dataset, then saves the lists
    of properties to a pickle file.
    '''
    cents_float_arr = []
    cents_arr = []
    area_arr = []
    perim_arr = []
    vol_arr = []
    vol_corrected_arr = []
    time_list = []
    time_cycle = time_seq()

    data_img = np.load(
        os.path.join('Data', 'Image', 'Input', f'{DATASET.lower()}_rgb.npz'))
    data_preds = np.load(PREDS_DIR)
    _ = data_img['images']
    preds = data_preds['preds']

    for pred in progress(preds):
        try:
            last_time = time_list[-1]
            time_list.append(last_time + next(time_cycle))
        except IndexError:
            time_list.append(0)
        cents_float, cents, area, perimeter, volume, volume_corrected = compute_properties(
            pred)
        cents_float_arr.append(cents_float)
        cents_arr.append(cents)
        area_arr.append(area)
        perim_arr.append(perimeter)
        vol_arr.append(volume)
        vol_corrected_arr.append(volume_corrected)
    geometry = {
        'centroids_float': cents_float_arr,
        'centroids': cents_arr,
        'areas': area_arr,
        'perimeters': perim_arr,
        'volumes': vol_arr,
        'volumes_corrected': vol_corrected_arr,
        'time': time_list
    }
    with open(
            os.path.join(
                'Output', 'Geometry',
                f'{ARCHITECTURE_NAME.lower()}_{DATASET.lower()}_{N_FILTERS}_{BATCH_SIZE_TRAIN}_{EPOCHS}_geometry.pickle'
            ), 'wb') as data_file:
        pickle.dump(geometry, data_file)
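Reading the pickled dictionary back is symmetric; a minimal sketch using the same path pattern (the upper-case constants are assumed to be set as in the example):

# Load the geometry dictionary written by save_properties(); the path
# constants are assumed to match those used above.
import os
import pickle

with open(
        os.path.join(
            'Output', 'Geometry',
            f'{ARCHITECTURE_NAME.lower()}_{DATASET.lower()}_{N_FILTERS}_{BATCH_SIZE_TRAIN}_{EPOCHS}_geometry.pickle'
        ), 'rb') as data_file:
    geometry = pickle.load(data_file)
print(len(geometry['areas']), 'frames of properties loaded')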
Example #8
	def run(self):
		import mvalve, mconfig, msensor, mtools  # mtools provides Stopwatch/Timer
		from progressbar import progress
	
		mconfig.GPIO_init()
		
		#Setup timers and stopwatches
		self.since_start = mtools.Stopwatch()
		self.since_stim = mtools.Stopwatch()
		self.stim_timer = mtools.Timer(self.stim_length)
		self.main_timer = mtools.Timer()
		
		self.water_valve = mvalve.Valve(pin = mconfig.drink_pin, pulse_length = mconfig.drink_time)
		self.sensor = msensor.Sensor(mconfig.sensor_pin)
		self.log = []
		
		import Adafruit_CharLCD as LCD
		#Initialize the LCD using the pins 
		lcd = LCD.Adafruit_CharLCDPlate()

		#Display project name & empty progressbar to LCD	
		lcd.home()
		lcd.message(self.name)
		progress(0)
Example #9
        # show frame with object detection
        """
        fig, ax = plt.subplots()
        ax.set_title(f"Annotated Frame {counter}")
        ax.imshow(image_np)
        ax.scatter([xmin,xmin,xmax,xmax],[ymin,ymax,ymin,ymax],s=4)
        ax.scatter([x_avg],[y_avg],c="red",marker = '+',s=12)
        plt.show()
        """

        # add annotated image to the video writer
        writer.append_data(image_np)

        # update progressbar
        progressbar.progress(counter, frames_num)

    # end line after progressbar
    sys.stdout.write("\n")

    # close video writer
    writer.close()
    """
    out_image = (1/(len(all_images)*255))*np.sum(all_images,axis=0)
    print("Number of frames to merge:",len(all_images))
    """

    # make regression
    (a, b, c) = np.polyfit(x_points, y_points, 2)
    f = np.poly1d([a, b, c])
Example #10
#!/usr/bin/python3
import time
import progressbar

max_value = 33
for i in range(max_value + 1):
    time.sleep(0.1)
    progressbar.progress(i, max_value, "This is a demo...")
print("")
Example #11
	def run(self):
		super(Gng,self).run()
		
		import time
		import mtools  # used below for toString() and CRC32_from_file()
		from random import shuffle
		from progressbar import progress
		from mtools import Timer
		
		active_timer = Timer(self.active_period)
		grace_timer = Timer(self.grace_period)
		idle_timer = Timer(self.idle_period)
		
		punish_timer = Timer(self.extra_time)
		
		#Todo list
		tasks = []
		
		#Put tasks with positive stimulus onto the todo list
		for i in range(self.positive_count):
			tasks.append(True)
			
		#Put tasks with negative stimulus onto the todo list
		for i in range(self.negative_count):
			tasks.append(False)

		#Execute tasks in random order
		shuffle(tasks)
			
		#Reset since start stopwatch
		self.since_start.reset()
		
		#Log start
		self.log.append((0,0,"Start"))
		
		#Start doing the tasks
		current_task_count = 1
		for current_task in tasks:
			
			#Update progressbar
			progress(current_task_count*100/len(tasks))
			
			#Reset since stim start stopper
			self.since_stim.reset()
			
			#Timer that runs until the end of the stimulus. Reset here.
			self.stim_timer.reset()
			
			#Give Stimulus
			if current_task:
				print "Positive stimulus"
				self.log.append((self.since_start.value(), self.since_stim.value(),"Positive stimulus"))
				self.positive_stimulus()
			else: 
				print "Negative stimulus"
				self.log.append((self.since_start.value(), self.since_stim.value(),"Negative stimulus"))
				self.negative_stimulus()
					
			
			#Wait until the stim is over. Detect premature licks.
			while self.stim_timer.is_running():
				time.sleep(0.001)
				
				if self.sensor.detected():
					print "Lick at	" + str(self.since_start) +"	Since stim.	" +str(self.since_stim)+ " //During stimulus"
					self.log.append((self.since_start.value(), self.since_stim.value(), "Lick (during stimulus)"))
			
			#Grace period
			grace_timer.reset()
			while grace_timer.is_running():
				time.sleep(0.001)
				if self.sensor.detected():
					print "Lick at	" + str(self.since_start) +"	Since stim.	" +str(self.since_stim)+ " //In grace period"
					self.log.append((self.since_start.value(), self.since_stim.value(), "Lick (in grace period)"))				
						
				
			
			#Task is considered done if we already gave water
			done = False
			
			#Active period
			active_timer.reset()
			while active_timer.is_running():
				time.sleep(0.001)

				if self.sensor.detected():
					
					print "Lick at	" + str(self.since_start) +"	Since stim.	" +str(self.since_stim)
					self.log.append((self.since_start.value(), self.since_stim.value(), "Lick"))
					
					
					if not done:
						done = True
						
						if current_task:
							print "Giving water"
							self.log.append((self.since_start.value(), self.since_stim.value(), "Water"))
							
							#Give water
							self.water_valve.pulse()
		
						if not current_task:
							print "Punishment start " + str(self.extra_time) +" sec"
							self.log.append((self.since_start.value(), self.since_stim.value(), "Punishment delay started"))
							
							
							#Punishment period
							punish_timer.reset()
							while punish_timer.is_running():
								time.sleep(0.001)
								if self.sensor.detected():
									print "Lick at	" + str(self.since_start) +"	Since stim.	" +str(self.since_stim)+ " //During punishment"
									self.log.append((self.since_start.value(), self.since_stim.value(), "Lick (during punishment)"))				
						
							
							print "Punishment over"
							self.log.append((self.since_start.value(), self.since_stim.value(), "Punishment delay over"))


						
			#Idle period
			idle_timer.reset()
			while idle_timer.is_running():
				time.sleep(0.001)
				if self.sensor.detected():
					print "Lick at	" + str(self.since_start) +"	Since stim.	" +str(self.since_stim)+ " //In idle phase"
					self.log.append((self.since_start.value(), self.since_stim.value(), "Lick (in idle phase)"))
					
					if self.reset_on_random == "on":
						print "Idle phase starts over"
						idle_timer.reset()

			current_task_count += 1
		
		
		self.log.append((self.since_start.value(), self.since_stim.value(), "End"))	
	
		
		#Create log file
		main_logfile = open("/MouseCandy/run/log.txt", "w")
		
		#Write logfile header
		main_logfile.write(str(self)+"\n\n")
		#main_logfile.write("Since start	Since stim.	Log entry\n\n")
		
		#Write records into log files
		for record in self.log:
			main_logfile.write(mtools.toString(record[0])+"	"+mtools.toString(record[1])+"	"+record[2]+"\n")
			
		#Close logfile
		main_logfile.close()
	
	
		#Create zip file
		import os
		zipname = mtools.CRC32_from_file("/MouseCandy/run/log.txt")
		os.system("zip -j /MouseCandy/logs/"+zipname+".zip /MouseCandy/run/log.txt")
		os.remove("/MouseCandy/run/log.txt")
		
		os.system("zipnote /MouseCandy/logs/"+zipname+".zip > /MouseCandy/logs/temp_notes.txt")
		
		notesfile = open("/MouseCandy/logs/temp_notes.txt", "a")
		tags = str(self).split("\n")
		tags = tags[0]
		notesfile.write(tags+"\n")
		notesfile.write(self.name)
		notesfile.close()
		
		os.system("zipnote -w /MouseCandy/logs/"+zipname+".zip < /MouseCandy/logs/temp_notes.txt")
		os.remove("/MouseCandy/logs/temp_notes.txt")
Example #12
	def run(self):
		super(Pav,self).run()
		
		import time
		import mtools  # used below for toString(), avg(), dev(), CRC32_from_file()
		from progressbar import progress
		from random import uniform as random_float

		#Reset since start stopwatch
		self.since_start.reset()
		
		#Log start
		self.log.append((0,0,"Start"))
		
		#Start doing the tasks
		current_task = 0
		for current_task in range(self.action_count):
			
			#Update progressbar
			progress(current_task*100/self.action_count)
			
			#Execute stimulus command
			self.stimulus()
			
			#Reset since stim stopwatch
			self.since_stim.reset()
			
			#Log the stimulus
			self.log.append((self.since_start.value(), 0, "Stimulus"))
			print "Stimulus at	"+ str(self.since_start)

			#Wait until the stim is over. Detect premature licks.
			self.stim_timer.reset()
			while self.stim_timer.is_running():
				time.sleep(0.001)
				
				if self.sensor.detected():
					self.log.append((self.since_start.value(), self.since_stim.value(), "Lick (premature)"))
					print "Lick at	" + str(self.since_start) +"	Since stim.	" +str(self.since_stim)+ " //Premature"
					
				
				
			#Give water
			self.water_valve.pulse()	
			
			#Log the water
			self.log.append((self.since_start.value(), self.since_stim.value(), "Water"))
			print "Water at	" + str(self.since_start) +"	Since stim.	" +str(self.since_stim)
			
			#Generate random wait time
			wait_time = random_float(self.wait_time_min,self.wait_time_max)
			
			#Log wait time
			self.wait_times.append(wait_time)
			print "Wait time	"+mtools.toString(wait_time)+" sec"
			
			#Set new time and reset the main timer
			self.main_timer.set(wait_time)
			self.main_timer.reset()
			
			#Wait until next stim. Log licks
			while self.main_timer.is_running():
				time.sleep(0.001)
				
				if self.sensor.detected():
					self.log.append((self.since_start.value(), self.since_stim.value(), "Lick"))
					print "Lick at	" + str(self.since_start) +"	Since stim.	" +str(self.since_stim)
				
		
		
		#Update progressbar
		progress(100)
		
		#Log end of project
		self.log.append((self.since_start.value(), self.since_stim.value(), "End"))
		
		#Calculate wait time statistics
		average_wait_time = mtools.avg(self.wait_times)
		wait_time_deviation = mtools.dev(self.wait_times)
		
		#Create specific logs
		# list() so the records survive repeated iteration (Python 3 filter/map are single-use)
		licks_log = list(filter(lambda record: record[2] == "Lick" or record[2] == "Lick (premature)", self.log))
		reaction_times = list(map(lambda record: record[1], licks_log))
		
		#Calculate lick statistics
		average_reaction_time = mtools.avg(reaction_times)
		reaction_time_deviation = mtools.dev(reaction_times)
		
		
		#Create log files
		main_logfile = open("/MouseCandy/run/log.txt", "w")
		licks_logfile = open("/MouseCandy/run/licks.csv", "w")
		
		#Write logfile headers
		main_logfile.write(str(self)+"\n")
		main_logfile.write("Average wait time: " + mtools.toString(average_wait_time, padded=False) +" sec\n")
		main_logfile.write("Wait time deviation: " + mtools.toString(wait_time_deviation, padded=False) +" sec\n")
		main_logfile.write("Average reaction time: " + mtools.toString(average_reaction_time, padded=False) +" sec\n")
		main_logfile.write("Reaction time deviation: " + mtools.toString(reaction_time_deviation, padded=False) +" sec\n\n")
				
		licks_logfile.write("time[s],since_last_stim[s]\n")
		
		#Write records into log files
		for record in self.log:
			main_logfile.write(mtools.toString(record[0])+"	"+mtools.toString(record[1])+"	"+record[2]+"\n")
		
		for record in licks_log:
			licks_logfile.write(str(record[0])+","+str(record[1])+"\n")
		
		main_logfile.close()
		licks_logfile.close()
		
		#Create zipfiles
		import os
		zipname = mtools.CRC32_from_file("/MouseCandy/run/log.txt")
		os.system("zip -j /MouseCandy/logs/"+zipname+".zip /MouseCandy/run/log.txt /MouseCandy/run/licks.csv")
		os.remove("/MouseCandy/run/log.txt")
		os.remove("/MouseCandy/run/licks.csv")
		
		os.system("zipnote /MouseCandy/logs/"+zipname+".zip > /MouseCandy/logs/temp_notes.txt")
		
		notesfile = open("/MouseCandy/logs/temp_notes.txt", "a")
		tags = str(self).split("\n")
		tags = tags[0]
		notesfile.write(tags+"\n")
		notesfile.write(self.name)
		notesfile.close()
		
		os.system("zipnote -w /MouseCandy/logs/"+zipname+".zip < /MouseCandy/logs/temp_notes.txt")
		os.remove("/MouseCandy/logs/temp_notes.txt")
Example #13
def test_model():
    '''
    Tests trained model and returns masks and loss.
    '''
    try:
        with open(os.path.join(MODEL_DIR, 'trainHistoryDict'),
                  'rb') as file_pi:
            params = pickle.load(file_pi)['params']
    except FileNotFoundError:
        print(
            f"Oops! It seems the model folder '~/{MODEL_DIR}' does not exist.")
        return
    model = tf.keras.models.load_model(MODEL_DIR, compile=False)

    if params['optimizer_name'] == 'adam':
        opt = tf.optimizers.Adam(params['learning_rate'])

    if params['loss_name'] == 'iou':
        loss_fn = losses.jaccard_distance_loss

    model.compile(optimizer=opt, loss=loss_fn)

    data = np.load(
        os.path.join('Data', 'Image', 'Input',
                     f"{params['dataset'].lower()}_gray.npz"))
    test_size = len(data['images'])
    id_batches = chunks([i for i in range(test_size)], BATCH_SIZE)
    results = []

    images = data['images'].astype('float32')
    #masks = data['masks'].astype('float32')

    images = images / 255
    #masks = masks/255
    predictions = []
    for batch in progress(id_batches):
        print(f'Testing batch [{batch[0]} - {batch[-1]}]')
        x_batch = images[batch]
        #mask_batch = masks[batch]
        y_batch = model.predict(x_batch, verbose=0)
        loss = model.evaluate(x_batch, y_batch, verbose=0)

        for pred in y_batch:
            predictions.append(normalizeuint8(pred[..., 0]))
        # fig, axes = plt.subplots(1, 3, sharex=True, sharey=True)
        # axes[0].imshow(normalizeuint8(x_batch[0]))
        # axes[1].imshow(normalizeuint8(mask_batch[0]))
        # axes[2].imshow(predictions[-1])

        # fig.tight_layout()
        # fig.savefig(os.path.join('Output', 'Plots', 'train_preds',
        #                          f'{DATASET}_{batch[0]}_train_pred.png'), transparent=True, dpi=300)

        results.append(loss)
    test_loss = {'test_loss': results}
    with open(os.path.join(MODEL_DIR, 'test_loss_dict'), 'wb') as file_pi:
        pickle.dump(test_loss, file_pi)

    predictions = np.array(predictions, dtype=np.uint8)
    np.savez(os.path.join(
        'Output', 'Predictions',
        f'{ARCHITECTURE_NAME.lower()}_{DATASET.lower()}_{N_FILTERS}_{BATCH_SIZE_TRAIN}_{EPOCHS}_preds'
    ),
             preds=predictions)
    print(f'Mean loss - {sum(results)/len(results):.2f}')
Example #14
def train_model(params, save=False, verbose=2, gridsearch=False, folds=5):
    '''
    Trains new UNET model and saves results.
    '''
    images, masks, shape = load_dataset(params['dataset'])

    early_stop = tf.keras.callbacks.EarlyStopping(monitor='val_loss',
                                                  patience=20)
    checkpoint = tf.keras.callbacks.ModelCheckpoint(params['checkpoint_dir'],
                                                    monitor='val_loss',
                                                    save_best_only=True,
                                                    save_weights_only=True)
    if gridsearch:
        kfold = KFold(folds, shuffle=True)
        cross_val = {'epoch': [], 'loss': [], 'val_loss': []}
        model_arch = choose_architecture(params['architecture_name'])
        for train_idx, val_idx in progress(kfold.split(images)):
            model = model_arch(n_filters=params['n_filters'],
                               input_shape=shape,
                               optimizer_name=params['optimizer_name'],
                               learning_rate=params['learning_rate'],
                               loss_name=params['loss_name']).create_model()
            x_train = images[train_idx]
            y_train = masks[train_idx]

            x_val = images[val_idx]
            y_val = masks[val_idx]

            train_ds = tf.data.Dataset.from_tensor_slices(
                (x_train, y_train)).repeat().batch(params['batch_size'])
            val_ds = tf.data.Dataset.from_tensor_slices(
                (x_val, y_val)).repeat().batch(params['batch_size'])
            hist = model.fit(
                train_ds,
                epochs=params['epochs'],
                steps_per_epoch=x_train.shape[0] // params['batch_size'],
                validation_data=val_ds,
                validation_steps=x_val.shape[0] // params['batch_size'],
                verbose=verbose,
                callbacks=[early_stop])
            history = hist.history
            cross_val['loss'].append(history['loss'][-1])
            cross_val['val_loss'].append(history['val_loss'][-1])
            cross_val['epoch'].append(len(history['loss']))
        return cross_val
    else:
        model_arch = choose_architecture(params['architecture_name'])
        model = model_arch(n_filters=params['n_filters'],
                           input_shape=shape,
                           optimizer_name=params['optimizer_name'],
                           learning_rate=params['learning_rate'],
                           loss_name=params['loss_name']).create_model()
        x_train, x_val, y_train, y_val = train_test_split(images,
                                                          masks,
                                                          test_size=0.2)
        train_ds = tf.data.Dataset.from_tensor_slices(
            (x_train, y_train)).repeat().batch(params['batch_size'])
        val_ds = tf.data.Dataset.from_tensor_slices(
            (x_val, y_val)).repeat().batch(params['batch_size'])
        hist = model.fit(
            train_ds,
            epochs=params['epochs'],
            steps_per_epoch=x_train.shape[0] // params['batch_size'],
            validation_data=val_ds,
            validation_steps=x_val.shape[0] // params['batch_size'],
            verbose=verbose,
            callbacks=[early_stop, checkpoint])
        history = hist.history
        history['params'] = params
        if save:
            model.load_weights(
                tf.train.latest_checkpoint(
                    os.path.dirname(params['checkpoint_dir'])))
            model.save(params['save_dir'])
            with open(os.path.join(params['save_dir'], 'trainHistoryDict'),
                      'wb') as file_pi:
                pickle.dump(history, file_pi)

            plot_train_loss(params)
    return history
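The params dict drives the whole run; a hypothetical configuration covering every key train_model() reads (the values are illustrative placeholders only):

# Hypothetical params; the keys match those read by train_model(), the
# values are placeholders.
params = {
    'dataset': 'globular',
    'architecture_name': 'unet',
    'n_filters': 16,
    'optimizer_name': 'adam',
    'learning_rate': 1e-4,
    'loss_name': 'iou',
    'batch_size': 8,
    'epochs': 100,
    'checkpoint_dir': os.path.join('Model', 'checkpoints', 'ckpt'),
    'save_dir': os.path.join('Model', 'unet_globular'),
}
history = train_model(params, save=True)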
Example #15
def GFMS_extract_by_watershed(vtk_file, aqid_list, gen_plot=False):
    """extract and summary"""

    watersheds = watersheds_gdb_reader()
    if len(aqid_list) == 0:
        # take the list from watersheds gdb
        #aqid is the index column
        aqid_list = watersheds.index.tolist()

    # setup output file
    headers_list = [
        "pfaf_id", "GFMS_TotalArea_km", "GFMS_perc_Area", "GFMS_MeanDepth",
        "GFMS_MaxDepth", "GFMS_Duration"
    ]
    summary_file = gfmsdata + os.path.basename(vtk_file)[:-4] + ".csv"
    if not os.path.exists(summary_file):
        with open(summary_file, 'w') as f:
            writer = csv.writer(f)
            writer.writerow(headers_list)
    else:
        # already processed,
        return

    count = 0
    with open(summary_file, 'a') as f:
        writer = csv.writer(f)

        for the_aqid in aqid_list:
            count += 1
            #print(the_aqid, count, " out of ", len(aqid_list))
            progress(count, len(aqid_list), status='aqid')
            # extract mask
            test_json = json.loads(
                geopandas.GeoSeries([watersheds.loc[the_aqid,
                                                    'geometry']]).to_json())
            # plot check
            data_points = GFMS_extract_by_mask(vtk_file, test_json)
            if gen_plot:
                GFMS_watershed_plot(watersheds, the_aqid, vtk_file,
                                    data_points)

            # generate summary
            #Summary part
            #GFMS_TotalArea_km	GFMS_perc_Area	GFMS_MeanDepth	GFMS_MaxDepth	GFMS_Duration
            # print('Summary')
            # print('Watershed: ', the_aqid)
            # print("GFMS data: ", vtk_file)
            # print("Number of data point: ", len(data_points))
            # print("GFMS_TotalArea_km2: ",data_points['area'].sum())
            # print("GFMS_perc_Area (%): ",data_points['area'].sum()/watersheds.loc[the_aqid]['SUM_area_km2']*100)
            # print("GFMS_MeanDepth (mm): ",data_points['intensity'].mean())
            # print("GFMS_MaxDepth (mm): ",data_points['intensity'].max())
            # print("GFMS_Duration (hour): ", 3)

            # write summary to a csv file
            GFMS_Duration = 0
            if (not data_points.empty):
                GFMS_TotalArea = data_points['area'].sum()
                if GFMS_TotalArea > 100.0:
                    GFMS_Duration = 3
                GFMS_Area_percent = GFMS_TotalArea / watersheds.loc[the_aqid][
                    'areakm2'] * 100
                GFMS_MeanDepth = data_points['intensity'].mean()
                GFMS_MaxDepth = data_points['intensity'].max()
            else:
                GFMS_TotalArea = 0.0
                GFMS_Area_percent = 0.0
                GFMS_MeanDepth = 0.0
                GFMS_MaxDepth = 0.0
                GFMS_Duration = 0.0

            results_list = [
                the_aqid, GFMS_TotalArea, GFMS_Area_percent, GFMS_MeanDepth,
                GFMS_MaxDepth, GFMS_Duration
            ]
            writer.writerow(results_list)

    print(summary_file)
    logging.info("GFMS: " + summary_file)
    # write summary file as excel
    # temp_data = pd.read_csv(summary_file)
    # xlsx_name = summary_file.replace(".csv",".xlsx")
    # sheet_name = os.path.basename(summary_file)[:-4]
    # temp_data.to_excel(xlsx_name, sheet_name=sheet_name, index=False)

    return
Example #16
def evolve(msnake,
           levelset=None,
           num_iters=20,
           animate=False,
           background=None):
    """
    Visual evolution of a morphological snake.

    Parameters
    ----------
    msnake : MorphGAC or MorphACWE instance
        The morphological snake solver.
    levelset : array-like, optional
        If given, the levelset of the solver is initialized to this. If not
        given, the evolution will use the levelset already set in msnake.
    num_iters : int, optional
        The number of iterations.
    animate : bool, optional
        If True, display each step of the evolution in an OpenCV window.
    background : array-like, optional
        If given, the evolving contour is overlaid on this image while
        animating.
    """

    import time

    if animate:
        import cv2

    if levelset is not None:
        msnake.levelset = levelset

    # Iterate.
    print(bcolors.WARNING + 'Evolving, iterations: ' + str(num_iters) +
          bcolors.ENDC)
    last = None
    count = 0
    for i in range(num_iters):
        # Evolve.
        msnake.step()
        progressbar.progress(i, num_iters - 1, "")

        if animate:
            if (i + 1) % 1 == 0:

                if background is not None:
                    snek = msnake.levelset

                    kernel = np.ones((3, 3))
                    erosion = cv2.erode(snek, kernel)

                    edges = (snek - erosion).astype(np.uint8) * 255
                    cv2.putText(edges, 'Evolving, iteration: ' + str(i + 1),
                                (20, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.5, 255,
                                1)
                    cv2.imshow(
                        "window",
                        cv2.addWeighted(background, 0.7, edges, 0.3, 0.0))

                    if last is not None:
                        if np.count_nonzero(
                                last) > np.count_nonzero(snek) - 15:
                            count = count + 1
                            if count == 3:
                                edges = (snek - erosion).astype(np.uint8) * 255
                                print "Stopped at iteration " + str(i)
                                return (msnake.levelset).astype(
                                    np.uint8) * 255, edges
                        else:
                            count = 0

                    last = snek

                else:

                    cv2.imshow("window", msnake.levelset)
                cv2.waitKey(1)

    # Return the last levelset.
    print(bcolors.OKGREEN + "Evolution completed" + bcolors.ENDC)
    snek = msnake.levelset
    kernel = np.ones((3, 3))
    erosion = cv2.erode(snek, kernel)

    edges = (snek - erosion).astype(np.uint8) * 255
    return (msnake.levelset).astype(np.uint8) * 255, edges