Example #1
def save_video(url, directory, filename, extension):
	path = os.path.join(directory, "%s.%s" % (filename, extension))
	temp = "%s.tmp" % path

	# A leftover temp file means a previous download was interrupted,
	# so bin any partial output and try again
	if os.path.exists(temp):
		os.remove(temp)
		if os.path.exists(path):
			os.remove(path)

	if not os.path.exists(path):
		response = session.get(url, stream=True)
		# Drop a marker file so an interrupted download can be detected on the next run
		with open(temp, 'w') as f:
			f.write('\n')

		with open(path, 'wb') as f:
			total_length = int(response.headers.get('Content-Length'))
			widgets = ['Saving %s: ' % path, Percentage(), ' ', Bar(), ' ', ETA(), ' ', FileTransferSpeed()]
			pbar = ProgressBar(widgets=widgets, maxval=total_length).start()
			bytes_downloaded = 0
			for chunk in response.iter_content(chunk_size=1024):
				if chunk: # filter out keep-alive new chunks
					f.write(chunk)
					f.flush()
					bytes_downloaded += len(chunk)
					pbar.update(bytes_downloaded)
			pbar.finish()

		# Delete temp file
		os.remove(temp)
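
Example #1 relies on a module-level requests.Session named `session` and the classic progressbar widgets, neither of which appears in the snippet. A minimal sketch of that assumed setup and a hypothetical call (URL and paths are placeholders) might look like:

import os

import requests
from progressbar import ProgressBar, Percentage, Bar, ETA, FileTransferSpeed

# assumed module-level session used by save_video above
session = requests.Session()

# hypothetical call: downloads to downloads/clip.mp4 with a progress bar
save_video("https://example.com/clip.mp4", "downloads", "clip", "mp4")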
Example #2
def sentence_similarity(idx, ob, mode):

    s_list = list()
    pbar = ProgressBar(widgets=['%s: image ' % mode, SimpleProgress()],
                       maxval=len(sentences)).start()

    for im_idx, sentence_group in enumerate(np.array(sentences)[idx, :]):

        pbar.update(im_idx + 1)
        for sent in sentence_group:

            words = analyze(sent)

            sim = list()
            for w in words:

                syn1 = wn.synsets(w)
                syn2 = wn.synsets(ob)

                if syn1 and syn2:
                    # best path similarity over all synset pairs; path_similarity
                    # can return None for unrelated synsets, so treat that as 0
                    sim.append(max(s1.path_similarity(s2) or 0.0
                                   for (s1, s2) in product(syn1, syn2)))
                else:
                    # ignore word if no synset combination was found on wordnet
                    sim.append(None)

            scores = [s for s in sim if s]
            if scores:
                s_list.append(max(scores))
            else:
                # ignore sentence if no word was similar enough
                s_list.append(float('nan'))

    pbar.finish()
    return s_list
Example #3
def formalize(sarc):
    all_words = defaultdict(lambda: 0)
    for line in range(0, len(CORPUS)):
        for word in range(0, len(CORPUS[line])):
            all_words[CORPUS[line][word]] += 1
    all_words_sorted = sorted(all_words.items(), key=operator.itemgetter(1), reverse=True)
    # print all_words_sorted
    with open("contexts.cols", "w") as raw:
        for word in range(0, len(all_words_sorted)):
            # print all_words_sorted[word][0]
            raw.write(all_words_sorted[word][0] + "\n")

    with open("targets.rows", "w") as raw:
        for word in range(0, 1):
            raw.write(all_words_sorted[word][0] + "\n")

    with open("target_context_count.sm", "a") as raw:
        # raw.write('target_word\tcontext_word\tcount\n')
        for target in range(0, 1):
            pbar = ProgressBar(maxval=len(all_words_sorted)).start()
            for context in range(0, len(all_words_sorted)):
                # print all_words_sorted[target]
                pbar.update(context + 1)
                context_ = getContext(all_words_sorted[target][0], CORPUS, WINDOW_SIZE)[all_words_sorted[context][0]]
                if context_ > 2:
                    # print all_words_sorted[context]
                    # raw.write('{}\t{}\t{}\t{}\n'.format(all_words_sorted[target][0],all_words_sorted[context][0],context_, ppmi(getContext(all_words_sorted[context][0], CORPUS, WINDOW_SIZE), getContext(all_words_sorted[target][0], CORPUS, WINDOW_SIZE), all_words_sorted[context][0], all_words_sorted[target][0])))
                    raw.write(
                        "{}\t{}\t{}\t{}\n".format(
                            all_words_sorted[target][0], all_words_sorted[context][0], context_, sarc
                        )
                    )

                    # print('\n')
            pbar.finish()
Example #4
def main(url,User_agent,thread = 5):
    regex = re.compile(r'^(?:http|ftp)s?://'r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'r'localhost|'r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'r'(?::\d+)?'r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    if not regex.match(url):
        print "%s is not a effective url"%url
        return
    filename = urllib2.unquote(url).decode('utf8').split('/')[-1]
    req = urllib2.Request(url)
    req.headers['User-Agent'] =User_agent
    f=urllib2.urlopen(req)
    size = int(f.info().getheaders('Content-Length')[0])
    widgets = ['Test: ', Percentage(), ' ',Bar(marker='>',left='[',right=']'), ' ', ETA(), ' ', FileTransferSpeed()]
    pbar = ProgressBar(widgets=widgets, maxval=size)
    fb = open(filename,"wb+")
    block_size = size // thread + 1  # bytes handled by each thread
    thread_list = []
    for i in range(thread):
        start_file_seek = i*block_size
        end_file_seek = start_file_seek + block_size
        if i == thread - 1:
            end_file_seek = ''
        t = downFileMul(url,User_agent,pbar,start_file_seek,end_file_seek,fb)
        thread_list.append(t)
    pbar.start()
    for i in thread_list:
        i.start()
    for i in thread_list:
        i.join()
    fb.close()
    pbar.finish()
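
Example #4 hands the actual work to a `downFileMul` thread class that is not shown. Purely as a sketch of what main() appears to expect (the real class is not in the snippet), a ranged-download worker in the same Python 2 style could look roughly like this; the constructor arguments mirror the call above, and `end_file_seek == ''` is taken to mean "to the end of the file":

import threading
import urllib2

class downFileMul(threading.Thread):
    # Hypothetical worker: fetches one byte range and writes it into the shared file.
    downloaded = 0
    lock = threading.Lock()

    def __init__(self, url, user_agent, pbar, start_seek, end_seek, fb):
        threading.Thread.__init__(self)
        self.url = url
        self.user_agent = user_agent
        self.pbar = pbar
        self.start_seek = start_seek
        self.end_seek = end_seek
        self.fb = fb

    def run(self):
        req = urllib2.Request(self.url)
        req.headers['User-Agent'] = self.user_agent
        if self.end_seek == '':
            # last block: read to end of file
            req.headers['Range'] = 'bytes=%d-' % self.start_seek
        else:
            # HTTP ranges are inclusive, hence the -1
            req.headers['Range'] = 'bytes=%d-%d' % (self.start_seek, self.end_seek - 1)
        data = urllib2.urlopen(req).read()
        with downFileMul.lock:
            self.fb.seek(self.start_seek)
            self.fb.write(data)
            downFileMul.downloaded += len(data)
            self.pbar.update(min(downFileMul.downloaded, self.pbar.maxval))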
Example #5
    def __iter__(self):
        #size = float()
        p = ProgressBar(max_value = len(self.text_list), min_value = 1)
        for i, line in enumerate(self.text_list):
            #if i%100 == 0:
            p.update((i+1))
            sentence = line.rstrip()

            tagger = MeCab.Tagger('')  # a different Tagger could be used here
            #print sentence
            node = tagger.parseToNode(sentence)
            words = []
            while node:
                # with MeCab, the final node's surface is '' and the node before it is 'EOS'
                info = node.feature.split(",")

                word = ''
                try:
                    word = node.surface
                except Exception:
                    pass
                if word != 'EOS' and word != '' and info[0] != "記号":  # skip symbol/punctuation POS
                    if info[6] != '*':
                        words.append(info[6])
                    else:
                        words.append(word)

                node = node.next
            #print(words)
            yield words
        p.finish()
Example #6
def get_data(limit=None):
    """Read csv file of training data and put it in the database"""

    if limit is not None:
        limit = min(ROW_COUNT, int(limit))
    else:
        limit = ROW_COUNT

    pbar = ProgressBar(maxval=limit).start()

    with open(PATH, 'rb') as csvfile:
        csvreader = csv.reader(csvfile)
        next(csvreader) # to skip the headings

        final_data = []
        for counter, row in enumerate(csvreader):
            if counter == limit - 1: break

            datapoint = Row()
            datapoint.id_ = int(row[0])
            datapoint.target = int(row[-1])

            for x in range(1, 1934 + 1):
                if x in ABSENT_ATTRIBUTES: continue
                x_temp = x
                if 218 < x < 240: x_temp -= 1
                if x > 240: x_temp -= 2
                setattr(datapoint, 'VAR_' + _make_equal_length(x), row[x_temp])
            yield datapoint
            pbar.update(counter)
        
        pbar.finish()
Example #7
def vectorizePass(didout_fname, vecout_fname, docset):
    print "processing to output file %s" % vecout_fname

    did_out = open(didout_fname, "w")
    vector_out = open(vecout_fname, "w")

    pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=len(docset)).start()
    for row, doc in enumerate(docset):
        did, vec = doc

        # vec is [word1, word2, word3, ...]
        # want to convert to {word1: cnt, word2: cnt, ...}
        # after the conversion, we replace word with its index
        # index is added one because matlab index starts from one.
        index_cnt = {}
        for word, cnt in Counter(vec).iteritems():
            try:
                index_cnt[words_index[word]] = cnt
            except KeyError:  # thrown when meeting non-popular words
                continue

        # we normalize the vector, and output
        cnt_sum = float(sum(index_cnt.values()))

        # output to file
        did_out.write("%s\n" % did)

        for col, value in index_cnt.iteritems():
            vector_out.write("%d\t%d\t%f\n" % (row + 1, col + 1, value / cnt_sum))

        pbar.update(row)
    pbar.finish()

    did_out.close()
    vector_out.close()
Example #8
    def train_imdb(self):
        f_train_loss=open('./train_loss.txt','w')
        f_valid_acc=open('./valid_acc.txt','w')
        f_train_loss.close()
        f_valid_acc.close()

        self.session = tf.Session()
        init_op = tf.initialize_all_variables()
        self.session.run(init_op)

        print("Training...")
        for epoch in range(self.num_epoch):
            pbar = ProgressBar(widgets=[Percentage(), Bar(), ETA()], maxval=len(self.X_batch)).start()
            loss=0.0
            for i in range(len(self.X_batch)):
                results = self.session.run([self.cross_entropy,self.optimizer], feed_dict={self.input: self.X_batch[i], self.target: self.y_batch[i], self.keep_prob: 0.5})
                loss+=results[0]
                pbar.update(i+1)
            print('\nloss: {}\n'.format(loss/len(self.X_batch)))
            f_train_loss=open('./train_loss.txt','a')
            f_train_loss.write(str(loss/len(self.X_batch))+'\n')
            f_train_loss.close()

            # accumulate accuracy over all validation batches before averaging
            acc = 0.0
            for i in range(len(self.X_valid_batch)):
                acc += self.session.run(self.accuracy, feed_dict={self.input:self.X_valid_batch[i], self.target: self.y_valid_batch[i], self.keep_prob: 1.0})
            f_valid_acc=open('./valid_acc.txt','a')
            f_valid_acc.write(str(acc/len(self.X_valid_batch))+'\n')
            f_valid_acc.close()
            print('\naccuracy: {}\n'.format(acc/len(self.X_valid_batch)))

        print('Training completed!')
Example #9
def main(parser, options, args):
    app_globs = app_globals._current_obj()
    app_id = app_globals.settings['facebook_appid']
    if not app_id:
        print 'No Facebook app_id configured, exiting'
        sys.exit(3)
    
    app_secret = options.app_secret
    fb = FacebookAPI(app_id, app_secret)
    
    from mediadrop.model import DBSession, Media
    # eager loading of 'meta' to speed up later check.
    all_media = Media.query.options(joinedload('_meta')).all()
    
    print 'Checking all media for existing Facebook comments'
    progress = ProgressBar(maxval=len(all_media)).start()
    for i, media in enumerate(all_media):
        progress.update(i+1)
        if 'facebook-comment-xid' not in media.meta:
            continue
        if not fb.has_xid_comments(media):
            continue
        media.meta[u'facebook-comment-xid'] = unicode(media.id)
        DBSession.add(media)
        DBSession.commit()

    progress.finish()
Example #10
def example3():
    widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
    pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
    for i in range(1000000):
        # do something
        pbar.update(10*i+1)
    pbar.finish()
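
Example #10 is the classic start/update/finish pattern from the old progressbar API. Several later examples (e.g. #34 and #38) instead wrap an iterable and let the bar update itself; a minimal sketch of that style, assuming the same `progressbar` package, is:

from progressbar import ProgressBar

pbar = ProgressBar()
for i in pbar(range(1000000)):
    pass  # do something with i; the bar advances automatically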
Example #11
def train(train_dl, N, model, optimizer, trans, args, input_q, data_q):
    pbar = ProgressBar(N)
    perm = np.random.permutation(N)
    sum_loss = 0

    # putting all data
    for i in range(0, N, args.batchsize):
        x_batch = train_dl[perm[i:i + args.batchsize]]
        input_q.put(x_batch)

    # training
    for i in range(0, N, args.batchsize):
        input_data, label = data_q.get()

        if args.gpu >= 0:
            input_data = cuda.to_gpu(input_data.astype(np.float32))
            label = cuda.to_gpu(label.astype(np.float32))

        optimizer.zero_grads()
        loss, pred = model.forward(input_data, label, train=True)
        loss.backward()
        optimizer.update()

        sum_loss += float(cuda.to_cpu(loss.data)) * args.batchsize
        pbar.update(i + args.batchsize if (i + args.batchsize) < N else N)

    return sum_loss
Example #12
def train_epoch(train_data, train_labels, model, optimizer, batchsize, transformations, silent, gpu=0, finetune=False):

    N = train_data.shape[0]
    pbar = ProgressBar(0, N)
    perm = np.random.permutation(N)
    sum_accuracy = 0
    sum_loss = 0

    for i in range(0, N, batchsize):
        x_batch = train_data[perm[i : i + batchsize]]
        y_batch = train_labels[perm[i : i + batchsize]]

        if transformations is not None:
            if "rotation" == transformations:
                x_batch = rotate_transform_batch(x_batch, rotation=2 * np.pi)

        if gpu >= 0:
            x_batch = cuda.to_gpu(x_batch.astype(np.float32))
            y_batch = cuda.to_gpu(y_batch.astype(np.int32))

        optimizer.zero_grads()
        x = Variable(x_batch)
        t = Variable(y_batch)

        loss, acc = model(x, t, train=True, finetune=finetune)
        if not finetune:
            loss.backward()
            optimizer.update()

        sum_loss += float(cuda.to_cpu(loss.data)) * y_batch.size
        sum_accuracy += float(cuda.to_cpu(acc.data)) * y_batch.size
        if not silent:
            pbar.update(i + y_batch.size)

    return sum_loss, sum_accuracy
Example #13
	def writeTweets2file(self, filePath):
		if len(filePath) <= 0:
			print "No tweets are collected!"
			return

		tweet_list = self.getText()

		pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval = len(tweet_list)).start()
		print "Begin to write tweets into file..."

		index = 0
		filewrite = open(filePath, 'w')
		for tweet in tweet_list:
			text = tweet.replace('\n', ' ') # replace all newlines
			text = text.replace(';', '.') # replace all semicolons with periods
			text = text.replace('\t', ' ') # replace all tabs
			filewrite.write(text)
			filewrite.write('\t' + 'surprise' + '\t' + 'sadness') # append tab-separated labels to match the Java simpleClassifier input format
			filewrite.write('\r\n')
			pbar.update(index+1)
			index += 1

		pbar.finish()
		filewrite.close()
		print "Finishing writing tweets!"
		print str(len(self._text)) + ' tweets have been written into the file: ' + filePath
Example #14
File: gan.py Project: Seb-Leb/Tars
    def gan_test(self, test_set, n_batch=None, verbose=False):
        if n_batch is None:
            n_batch = self.n_batch

        n_x = test_set[0].shape[0]
        nbatches = n_x // n_batch
        z_dim = (n_batch,) + self.hidden_dim
        loss_all = []

        if verbose:
            pbar = ProgressBar(maxval=nbatches).start()
        for i in range(nbatches):
            start = i * n_batch
            end = start + n_batch
            batch_x = [_x[start:end] for _x in test_set]
            batch_z =\
                self.rng.uniform(-1., 1.,
                                 size=z_dim).astype(batch_x[0].dtype)

            _x = [batch_z] + batch_x
            loss = self.test(*_x)
            loss_all.append(np.array(loss))

            if verbose:
                pbar.update(i)

        loss_all = np.mean(loss_all, axis=0)
        return loss_all
Example #15
File: gan.py Project: Seb-Leb/Tars
    def train(self, train_set, freq=1, verbose=False):
        n_x = len(train_set[0])
        nbatches = n_x // self.n_batch
        z_dim = (self.n_batch,) + self.hidden_dim
        loss_all = []

        if verbose:
            pbar = ProgressBar(maxval=nbatches).start()
        for i in range(nbatches):
            start = i * self.n_batch
            end = start + self.n_batch
            batch_x = [_x[start:end] for _x in train_set]
            batch_z =\
                self.rng.uniform(-1., 1.,
                                 size=z_dim).astype(batch_x[0].dtype)
            _x = [batch_z] + batch_x
            loss = self.p_train(*_x)
            loss = self.d_train(*_x)
            loss_all.append(np.array(loss))

            if verbose:
                pbar.update(i)

        loss_all = np.mean(loss_all, axis=0)
        return loss_all
Example #16
def remove_padding():
    """
    Remove paddings
    :return: nothing
    """
    problem_folder = 'lamellarity'
    out_dir = '/home/sergii/Documents/microscopic_data/' + problem_folder + '/images/without_padding/'
    in_dir = '/home/sergii/Documents/microscopic_data/' + problem_folder + '/images/particles/'
    path_to_json = '/home/sergii/Documents/microscopic_data/' + problem_folder + '/particles_repaired_2.json'

    print '...Reading dataframe at %s' % path_to_json
    df = pd.read_json(path_to_json)

    print '...Starting processing images'

    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
    os.makedirs(out_dir)

    total_count = df.shape[0]
    pbar = ProgressBar(widgets=[Percentage(), Bar()], maxval=total_count).start()
    counter = 0
    for _, row in df.iterrows():
        img_name = row['Image']
        padding = row['Padding']
        _remove_padding(in_dir + img_name,
                        out_dir + img_name,
                        padding)
        counter += 1
        pbar.update(counter)
    pbar.finish()
Example #17
    def create_zip_files(self, dirs):
        """Create zip files."""
        logmsg.header('Creating zip files...', self.logger)

        # Create output directory to hold our zips
        output_dir = 'Zips'
        if not os.path.exists(output_dir):
            os.mkdir(output_dir)

        num_zips = 0

        # Create a progress bar
        pbar = ProgressBar(term_width=80, maxval=len(dirs)).start()

        for d in dirs:
            self.logger.debug('Zipping: "%s"' % d)
            parent_dir = os.path.join(os.path.dirname(d).split(os.path.sep)[-1], '')
            if parent_dir == self.input_dir:
                parent_dir = ''
            output_file = os.path.join(output_dir, parent_dir, os.path.basename(d))
            shutil.make_archive(output_file, format="zip", root_dir=d)
            num_zips += 1

            # Update progress bar
            pbar.update(num_zips)

        # Ensure progress bar is finished
        pbar.finish()
        time_elapsed = "(Time Elapsed: {0})".format(secs_to_mins(pbar.seconds_elapsed))

        logmsg.success('Created {0} zip files {1}'.format(num_zips,
                                                          time_elapsed), self.logger)
Example #18
def dir_walk(target_dir=None, quiet=None):
    '''recursively walk a directory containing cti and return the stats'''
    files = find_files('*.xml', resolve_path(target_dir))
    if not quiet:
        widgets = ['Directory Walk: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
                   ' ', ETA()]
        progress = ProgressBar(widgets=widgets, maxval=len(files)).start()
    cooked_stix_objs = {'campaigns': set(), 'courses_of_action': set(), \
                        'exploit_targets': set(), 'incidents': set(), \
                        'indicators': set(), 'threat_actors': set(), \
                        'ttps': set()}
    cooked_cybox_objs = dict()
    for i, file_ in enumerate(files, start=1):
        try:
            stix_package = file_to_stix(file_)
            (raw_stix_objs, raw_cybox_objs) = \
                process_stix_pkg(stix_package)
            for k in raw_stix_objs.keys():
                cooked_stix_objs[k].update(raw_stix_objs[k])
            for k in raw_cybox_objs.keys():
                if not k in cooked_cybox_objs.keys():
                    cooked_cybox_objs[k] = set()
                cooked_cybox_objs[k].update(raw_cybox_objs[k])
            if not quiet:
                progress.update(i)
        except Exception:
            # skip files that fail to parse
            continue
    if not quiet:
        progress.finish()
    return (cooked_stix_objs, cooked_cybox_objs)
Example #19
class Euler:
    def __init__(self, solver, tf, shift=False):
        self.solver = solver
        self.tf = tf
        self.shift = shift
        self.pbar = ProgressBar(maxval=self.tf)

    def integrate(self):
        pass

    def savedata(self):
        # filename format EQUATION_ORDER_SCHEME_N_TIME.npz
        filename = "./data/%s_%s_%s_%s_%s.npz" % (
            self.solver.f.__name__,
            self.solver.order,
            self.solver.scheme,
            self.solver.N,
            self.tf,
        )
        np.savez(filename, x=self.solver.xc, u=self.solver.u, uactual=Complex().init(self.solver.xc, self.tf))
        print filename

    def run(self):
        t = 0.0
        while t < self.tf:
            dt = self.solver.calc_dt()
            residue = self.solver.calc_residue()
            self.solver.u += residue * dt
            self.pbar.update(t)
            t += dt
        self.savedata()
Example #20
 def retrieve(self):
     '''
     Execute the jobs
     '''
     numleft = self.total
     writer = Writer((0,self.numCores))
     width = int(math.floor(term.width/3))
     name = 'Total'
     writer.write('{0:3} {1}: Queued'.format('',name[:width]+' '*max(0,width-len(name))))
     pbar = ProgressBar(widgets=['{0:3} {1}: '.format('',name[:width]+' '*max(0,width-len(name))),' ',SimpleProgress(),' ',Percentage(),' ',Bar(),' ',Timer()],fd=writer,term_width=term.width, maxval=self.total).start()
     try:
         while True:
            numleft_new = self.jobsRemaining()
            if numleft_new < numleft:
                pbar.update(self.total-numleft_new)
            numleft = numleft_new
            if numleft==0: break
            time.sleep(0.1)
         theResult = [r.get(9999999999) for r in self.results]
     except:
         e = sys.exc_info()[0]
         self.pool.terminate()
         print term.exit_fullscreen
         print e
         theResult = []
     else:
         print term.exit_fullscreen
         self.pool.close()
     self.pool.join()
     return theResult
Example #21
def mongodb2shape(mongodb_server, mongodb_port, mongodb_db, mongodb_collection, output_shape, query_filter):
    """Convert a mongodb collection (all elements must have same attributes) to a shapefile"""
    print ' Converting a mongodb collection to a shapefile '
    connection = Connection(mongodb_server, mongodb_port)
    print 'Getting database MongoDB %s...' % mongodb_db
    db = connection[mongodb_db]
    print 'Getting the collection %s...' % mongodb_collection
    collection = db[mongodb_collection]
    print 'Exporting %s elements in collection to shapefile...' % collection.count()
    drv = ogr.GetDriverByName("ESRI Shapefile")
    ds = drv.CreateDataSource(output_shape)
    lyr = ds.CreateLayer('test', None, ogr.wkbPolygon)
    print 'Shapefile %s created...' % ds.name
    cursor = collection.find(query_filter)
    # define the progressbar
    pbar = ProgressBar(collection.count()).start()
    k=0
    # iterate the features in the collection and copy them to the shapefile
    # for simplicity we export only the geometry to the shapefile
    # if we would like to store also the other fields we should have created a metadata element with fields datatype info
    for element in cursor:
        element_geom = element['geom']
        feat = ogr.Feature(lyr.GetLayerDefn())
        feat.SetGeometry(ogr.CreateGeometryFromWkt(element_geom))
        lyr.CreateFeature(feat)
        feat.Destroy()
        k = k + 1
        pbar.update(k)
    pbar.finish()
    print '%s features loaded in shapefile from MongoDb.' % lyr.GetFeatureCount()
Example #22
    def write_wav(self):
        """
        Synthesizes the analysis to a WAV file in the synthesis directory
        """
        print("Performing Resynthesis...")
        progress = ProgressBar(
                widgets=[Percentage(), Bar()],
                maxval=self.num_samples
                ).start()


        sines = []
        for i in range(len(self.bins)):
            data = [sin(2 * pi * self.bins[i][0] * (x / self.frate))
                    for x in range(self.num_samples)]
            # scale each partial by its bin amplitude; rebinding the loop
            # variable (as the original did) left the list unchanged
            data = [s * self.bins[i][1] for s in data]
            sines.append(data)
        samples = []
        for i in range(len(sines[0])):
            s = 0
            for j in range(len(sines)):
                s += sines[j][i]
            samples.append(s)
            progress.update(i+1)
        samples = scale(samples, -1.0, 1.0)
        wavfile.write(
                "../synthesis/" + self.wav_name + "_resynth.wav",
                self.sample_rate,
                array(samples)
                )
        progress.finish()
Example #23
def extractFeatures(data, n):
    logging.info('Features: extracting {0}...'.format(n))

    # create DF
    columns = []
    col_names = ['open', 'high', 'low', 'close', 'volume']
    for col_name in col_names:
        for m in xrange(1, n+1):
            columns.append('{0}_{1}'.format(col_name, m))
    # pprint(columns)
    df = pd.DataFrame(dtype=float, columns=columns)

    pb = ProgressBar(maxval=len(data)).start()
    for i in xrange(n, len(data)+1):
        pb.update(i)
        slice = data.ix[i-n:i]
        # print slice
        scale(slice, axis=0, copy=False)
        # print slice
        cntr = 0
        item = {}
        for slice_index, slice_row in slice.iterrows():
            cntr += 1
            # print slice_index
            # print slice_row
            for col in slice.columns:
                item['{0}_{1}'.format(col, cntr)] = slice_row[col]
        # pprint(item)
        df.loc[i] = item
        # break
    pb.finish()

    logging.info('Features: extracted')
    return df
Example #24
    def import_stack(self, file, isImport, isForce, rbundles, isUseMajor):
        try:
            if isImport:
                printer.out("Importing template from [" + file + "] archive ...")
            else:
                if constants.TMP_WORKING_DIR in str(file):
                    printer.out("Creating template from temporary [" + file + "] archive ...")
                else:
                    printer.out("Creating template from [" + file + "] archive ...")
            file = open(file, "r")

            # The following code could not be used for the moment
            # appImport = applianceImport()
            # appImport.imported = isImport
            # appImport.forceRw = isForce
            # appImport.reuseBundles = rbundles
            # appImport.useMajor = isUseMajor
            # appImport = self.api.Users(self.login).Imports.Import(appImport)

            appImport = self.api.Users(self.login).Imports.Import(
                Imported=isImport, Force=isForce, Reusebundles=rbundles, Usemajor=isUseMajor
            )
            if appImport is None:
                if isImport:
                    printer.out("error importing appliance", printer.ERROR)
                else:
                    printer.out("error creating appliance", printer.ERROR)
                return 2
            else:
                status = self.api.Users(self.login).Imports(appImport.dbId).Uploads.Upload(file)
                progress = ProgressBar(widgets=[Percentage(), Bar()], maxval=100).start()
                while not (status.complete or status.error):
                    progress.update(status.percentage)
                    status = self.api.Users(self.login).Imports(appImport.dbId).Status.Get()
                    time.sleep(2)
                progress.finish()
                if status.error:
                    if isImport:
                        printer.out("Template import: " + status.message + "\n" + status.errorMessage, printer.ERROR)
                        if status.detailedError:
                            printer.out(status.detailedErrorMsg)
                    else:
                        printer.out("Template create: " + status.message + "\n" + status.errorMessage, printer.ERROR)
                else:
                    if isImport:
                        printer.out("Template import: DONE", printer.OK)
                    else:
                        printer.out("Template create: DONE", printer.OK)

                    # get appliance import
                    appImport = self.api.Users(self.login).Imports(appImport.dbId).Get()
                    printer.out("Template URI: " + appImport.referenceUri)
                    printer.out("Template Id : " + generics_utils.extract_id(appImport.referenceUri))

                return 0
        except IOError as e:
            printer.out("File error: " + str(e), printer.ERROR)
            return 2
        except Exception as e:
            return handle_uforge_exception(e)
Example #25
 def _load_data(self, strpcons, dt_start, dt_end, n, spec_date):
     all_data = OrderedDict()     
     max_window = -1
     logger.info("loading data...")
     pbar = ProgressBar().start()
     for i, pcon  in enumerate(strpcons):
         #print "load data: %s" % pcon
         if pcon in spec_date:
             dt_start = spec_date[pcon][0]
             dt_end = spec_date[pcon][1]
         assert(dt_start < dt_end)
         if n:
             wrapper = self._data_manager.get_last_bars(pcon, n)
         else:
             wrapper = self._data_manager.get_bars(pcon, dt_start, dt_end)
         if len(wrapper) == 0:
             continue 
         all_data[pcon] = DataContext(wrapper)
         max_window = max(max_window, len(wrapper))
         pbar.update(i*100.0/len(strpcons))
         #progressbar.log('')
     if n:
         assert(max_window <= n) 
     pbar.finish()
     if len(all_data) == 0:
         assert(False)
         ## @TODO raise
     return all_data, max_window
Example #26
def EvaluateGenomeList_Serial(genome_list, evaluator, display=True):
    fitnesses = []
    count = 0

    if prbar_installed and display:
        widg = ['Individuals: ', Counter(), ' of ' + str(len(genome_list)),
                ' ', ETA(), ' ', AnimatedMarker()]
        progress = ProgressBar(maxval=len(genome_list), widgets=widg).start()

    for g in genome_list:
        f = evaluator(g)
        fitnesses.append(f)

        if display:
            if prbar_installed:
                progress.update(count+1)
            else:
                print 'Individuals: (%s/%s)' % (count, len(genome_list))

        count += 1

    if prbar_installed and display:
        progress.finish()

    return fitnesses
Example #27
def EvaluateGenomeList_Parallel(genome_list, evaluator, cores=4, display=True):
    fitnesses = []
    pool = mpc.Pool(processes=cores)
    curtime = time.time()

    if prbar_installed and display:
        widg = ['Individuals: ', Counter(),
                ' of ' + str(len(genome_list)), ' ', ETA(), ' ',
                AnimatedMarker()]
        progress = ProgressBar(maxval=len(genome_list), widgets=widg).start()

    for i, fitness in enumerate(pool.imap(evaluator, genome_list)):
        if prbar_installed and display:
            progress.update(i)
        else:
            if display:
                print 'Individuals: (%s/%s)' % (i, len(genome_list))

        if cvnumpy_installed:
            cv2.waitKey(1)
        fitnesses.append(fitness)

    if prbar_installed and display:
        progress.finish()

    elapsed = time.time() - curtime

    if display:
        print 'seconds elapsed: %s' % elapsed

    pool.close()
    pool.join()

    return fitnesses
Example #28
def dark_spot_gridsearch(observer, stars, min_altitude, alt_resolution=0.5, az_resolution=4):

    azs = np.deg2rad(np.arange(0, 360, az_resolution))
    alts = np.deg2rad(np.arange(min_altitude, 91, alt_resolution))
    light = []
    coords = []

    prog = ProgressBar(maxval=len(azs) * len(alts)).start()
    for i, az in enumerate(azs):
        for j, alt in enumerate(alts):
            coords.append((az, alt))
            light.append(light_in_fov(az, alt, stars, observer))
            prog.update(i * len(alts) + j)

    light = np.array(light)
    coords = np.array(coords)
    azs = coords[:, 0]
    alts = coords[:, 1]

    min_index = np.argmin(light)
    az = azs[min_index]
    alt = alts[min_index]

    ra, dec = observer.radec_of(az, alt)

    return az, alt, ra, dec
Example #29
 def sample_many(self, num_samples = 2000):
     """
     Generates a given number of trajectories, using the method sample(). 
     Returns a fixmat with the generated data.
     
     Parameters:
         num_samples : int, optional
             The number of trajectories that shall be generated.
     """     
     x = []
     y = []
     fix = []
     sample = []
     
     # XXX: Delete ProgressBar
     pbar = ProgressBar(widgets=[Percentage(),Bar()], maxval=num_samples).start()
     
     for s in xrange(0, num_samples):
         for i, (xs, ys) in enumerate(self.sample()):
             x.append(xs)
             y.append(ys)
             fix.append(i+1)
             sample.append(s)
         pbar.update(s+1)
         
     fields = {'fix':np.array(fix), 'y':np.array(y), 'x':np.array(x)}
     param = {'pixels_per_degree':self.fm.pixels_per_degree}
     out =  fixmat.VectorFixmatFactory(fields, param)
     return out
Example #30
def EvaluateGenomeList_Serial(genome_list, evaluator):
    fitnesses = []
    curtime = time.time()
    
    if prbar_installed:
        widg = ['Individuals: ', Counter(), ' of ' + str(len(genome_list)), ' ', ETA(), ' ', AnimatedMarker()]
        progress = ProgressBar(maxval=len(genome_list), widgets=widg).start()
        
    count = 0
    for g in genome_list:
        f = evaluator(g)
        fitnesses.append(f)
        
        if prbar_installed:
            progress.update(count)
        else:
            print ('Individuals: (',count,'/',len(genome_list),')')
            
        count += 1
        
    if prbar_installed:
        progress.finish()
        
    elapsed = time.time() - curtime
    print ('seconds elapsed:', elapsed)
    return (fitnesses, elapsed)
Example #31
 def setUp(self):
     self.p = ProgressBar()
Example #32
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        video_path = os.path.join(args.vis_path,
                                  '%s.avi' % eval_path.replace('/', '.'))
        image_path = os.path.join(args.vis_path,
                                  '%s' % eval_path.replace('/', '.'))
        print('Saving video to', video_path)
        print('Saving image to', image_path)
        video_shape = (1280, 480) if fancy else (1200, 600)
        out = cv2.VideoWriter(video_path, fourcc, 14., video_shape)
        os.system('mkdir -p ' + image_path)

        print('min: %.4f, max: %.4f' % (np.min(result), np.max(result)))
        lim_low = np.min(result)
        lim_high = np.max(result) * 1. / 2.

        bar = ProgressBar(max_value=touch_rec.shape[0])
        for i in bar(range(touch_rec.shape[0])):
            touch_raw_cur = touch_raw_rec[i]
            touch_cur = touch_rec[i, args.obs_window // 2]
            touch_cal = result[i, 0]

            touch_raw_render = vis.render(
                touch_raw_cur,
                lim_low=np.min(np.median(touch_raw_rec, 0)) + 10,
                lim_high=800,
                fancy=fancy)
            '''
            touch_render = vis.render(
                touch_cur, lim_low=0., lim_high=24, text='raw', fancy=fancy)
            '''
            result_render = vis.render(touch_cal,
Example #33
    def process_file(self, input_file):

        block = Block(default_compress_level=self.config.compress_level)
        resultados = []
        self.input_file = input_file
        size_test_file = os.path.getsize(self.input_file)

        self.spool_types = {
            "fixed":
            SpoolFixedRecordLength(self.input_file,
                                   buffer_size=self.config.buffer_size,
                                   encoding=self.config.encoding,
                                   newpage_code=self.config.EOP),
            "fcfc":
            SpoolHostReprint(self.input_file,
                             buffer_size=self.config.buffer_size,
                             encoding=self.config.encoding)
        }

        compresiones = [
            e for e in block.compressor.available_types
            if e[0] == self.config.compress_type
        ]
        encriptados = [
            e for e in block.cipher.available_types
            if e[0] == self.config.cipher_type
        ]

        mode = "ab"

        r = ReportMatcher(self.config.report_cfg)
        reports = []
        for encriptado in encriptados:
            for compress in compresiones:

                start = time.time()
                paginas = 0

                # file_name	= "{0}.{1}.oerm".format(self.config.output_path, slugify("{0}.{1}".format(compress[1], encriptado[1]), "_"))
                file_name = os.path.join(
                    self.config.output_path,
                    generate_filename(self.config.file_mask) + ".oerm")

                db = Database(
                    file=file_name,
                    mode=mode,
                    default_compress_method=compress[0],
                    default_compress_level=self.config.compress_level,
                    default_encription_method=encriptado[0],
                    pages_in_container=self.config.pages_in_group)

                file_size = os.path.getsize(file_name)
                reportname_anterior = ""

                widgets = [
                    os.path.basename(self.input_file), ': ',
                    FormatLabel(
                        '%(value)d bytes de %(max_value)d (%(percentage)0.2f)'
                    ),
                    Bar(marker='#', left='[', right=']'), ' ',
                    ETA(), ' ',
                    FileTransferSpeed()
                ]

                p_size = 0
                with ProgressBar(max_value=size_test_file,
                                 widgets=widgets) as bar:
                    spool = self.spool_types[self.config.file_type]
                    with spool as s:
                        for page in s:
                            p_size += len(page)
                            bar.update(p_size)
                            data = r.match(page)
                            reportname = data[0]

                            if reportname not in reports:
                                reports.append(reportname)

                            if reportname != reportname_anterior:
                                rpt_id = db.get_report(reportname)
                                if rpt_id:
                                    db.set_report(reportname)
                                else:
                                    db.add_report(reporte=reportname,
                                                  sistema=data[1],
                                                  departamento=data[2],
                                                  fecha=data[3])
                                reportname_anterior = reportname

                            paginas = paginas + 1
                            db.add_page(page)

                    db.close()

                compress_time = time.time() - start
                compress_size = os.path.getsize(file_name) - file_size

                resultados.append([
                    "[{0}] {1} ({2}p/cont.)".format(
                        compress[0], compress[1], self.config.pages_in_group),
                    ("" if encriptado[0] == 0 else encriptado[1]),
                    float(size_test_file),
                    float(compress_size),
                    (compress_size / size_test_file) * 100,
                    paginas / compress_time,
                    len(reports)
                ])

        tablestr = tabulate(tabular_data=resultados,
                            headers=[
                                "Algoritmo", "Encript.", "Real (bytes)",
                                "Compr. (bytes)", "Ratio", "Compr. Pg/Seg",
                                "Reportes"
                            ],
                            floatfmt="8.2f",
                            tablefmt="psql",
                            numalign="right",
                            stralign="left",
                            override_cols_fmt=[
                                None, None, ",.0f", ",.0f", ",.2f", ",.2f",
                                ",.2f"
                            ])
        return tablestr
Example #34
original_images = []

for root, dirs, filenames in os.walk(path):
    for filename in filenames:
        original_images.append(os.path.join(root, filename))

original_images = sorted(original_images)

print('num:', len(original_images))

f = open('check_error.txt', 'w+')

error_images = []

progress = ProgressBar()

for filename in progress(original_images):

    check = imghdr.what(filename)

    if check is None:
        f.write(filename)

        f.write('\n')

        error_images.append(filename)

print(len(error_images))

f.seek(0)
Example #35
s = len(time_series.keys()) - 2
#Get number of simulations
n_simul = len(np.unique(time_series.n_simulation))
#Filter networks
keys = list(networks.keys())
junk = [i for i in keys if not (i.startswith('sub') or i.startswith('prod'))]
#Get rid of junk
networks = networks.drop(columns=junk)
#Initialize storing object
names_strains = ['s' + str(i) for i in range(s)]
column_names = ["n_simulation", "t"] + names_strains
inter_evol_df = pd.DataFrame(columns=column_names)
c_evol_df = pd.DataFrame(columns=column_names)
f_evol_df = pd.DataFrame(columns=column_names)
surv_vector = np.array([])
pbar = ProgressBar()
print('Calculating evolution of interactions during community assembly')
for i in pbar(range(n_simul)):
    time_series_i = time_series[time_series.n_simulation == i].reset_index(
        drop=True)
    networks_i = networks.iloc[i * s:(i + 1) * s, :].reset_index(drop=True)
    #Get evolution of richness during community assembly
    richness = evol_richness(abundances_t=time_series_i)
    #Get positions where extinctions happen
    indx = np.where(richness[:-1] != richness[1:])[0]
    #Get values of time where extinctions happen
    t_ext = time_series_i.t[indx]
    t_points = len(t_ext)
    t_ext_ind = np.array(t_ext.index)
    f_evol, c_evol, inter_evol = interaction_evolution(time_series_i,
                                                       networks_i,
Example #36
 def setUp(self):
     self.p = ProgressBar(**self.custom)
Example #37
class CustomizedTestCase(unittest.TestCase):
    """
    ProgressBar custom:
    start = 0
    end = 100
    width = 20
    fill = '#'
    blank = '.'
    format = '%(progress)s%% [%(fill)s%(blank)s]'
    incremental = True
    """
    custom = {
        'end': 100,
        'width': 20,
        'fill': '#',
        'format': '%(progress)s%% [%(fill)s%(blank)s]'
    }

    def setUp(self):
        self.p = ProgressBar(**self.custom)

    def tearDown(self):
        del (self.p)

    def test_initialization(self):
        """
        >>> p = ProgressBar(custom)
        >>> p
        0% [....................]
        """
        self.assertEqual(str(self.p), '0% [....................]')

    def test_increment(self):
        """
        >>> p = ProgressBar(custom)
        >>> p + 1
        1% [....................]
        """
        self.p + 1
        self.assertEqual(str(self.p), '1% [....................]')
        self.p + 4
        self.assertEqual(str(self.p), '5% [#...................]')

    def test_reset(self):
        """
        >>> p = ProgressBar(custom)
        >>> p += 8
        >>> p.reset()
        0% [....................]
        """
        self.p += 8
        self.p.reset()
        self.assertEqual(str(self.p), '0% [....................]')

    def test_full_progress(self):
        """
        >>> p = ProgressBar()
        >>> p + 10
        100% [####################]
        """
        self.p + 100
        self.assertEqual(str(self.p), '100% [####################]')
        self.p + 100
        self.assertEqual(str(self.p), '100% [####################]')
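
Examples #31, #36 and #37 test a small hand-rolled ProgressBar class rather than the progressbar package, but the class itself is not included in these snippets. A minimal sketch that would satisfy the assertions in CustomizedTestCase, with the interface inferred from the docstrings (`+`, `+=`, `reset()`, `str()`) and guessed defaults, might look like:

class ProgressBar(object):
    # Sketch only: defaults are assumptions, the interface is inferred from
    # the docstrings in CustomizedTestCase above.
    def __init__(self, start=0, end=10, width=12, fill='=', blank='.',
                 format='%(progress)s%% [%(fill)s%(blank)s]', incremental=True):
        self.start = start
        self.end = end
        self.width = width
        self.fill = fill
        self.blank = blank
        self.format = format
        self.incremental = incremental
        self.progress = start

    def __add__(self, increment):
        # advance by `increment` units, clamping at `end`, and return self so
        # that `p + 1` mutates the bar the way the tests expect
        self.progress = min(self.end, self.progress + increment)
        return self

    __iadd__ = __add__

    def reset(self):
        self.progress = self.start
        return self

    def __str__(self):
        percent = int(self.progress * 100 / self.end)
        filled = int(self.width * percent / 100)
        return self.format % {'progress': percent,
                              'fill': self.fill * filled,
                              'blank': self.blank * (self.width - filled)}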
Example #38
def getlistoffileitems(rootpath):
    '''
	scan the root and return a list of file items
	-these are not validated show or non show yet
	-make assumption shows have Season directory in them
	-non shows do not
	'''
    shows = []
    nonshows = []
    dirtodel = []
    fileitems = os.listdir(rootpath)
    fileitems.sort()
    max = int(len(fileitems))
    pbar = ProgressBar()
    print("Scanning root directory")
    quickrun = 0  # remove later
    fh = open("filestodelete.txt", "w")
    fh2 = open("showfiles.txt", "w")

    #looping through root directory
    for item in pbar(fileitems):
        if item[0:8] != "_UNPACK_":
            if os.path.isdir(rootpath + "\\" + item):
                isshow = False
                numfiles = 0
                subfileitems = os.listdir(rootpath + "\\" + item)
                if numfiles == 0:
                    numfiles = len(subfileitems)
                    if numfiles == 0:
                        dirtodel.append(rootpath + item)

                #loop through directory and check all files
                #for "Season" indicating a show dir
                for subfileitem in subfileitems:
                    if "season" in subfileitem.lower():
                        isshow = True

                if (isshow):
                    shows.append(item)
                    fh2.write(item)
                    fh2.write("\n")
                else:
                    nonshows.append(item)
                    fh.write(rootpath + item + "\\\\" + subfileitem)
                    fh.write("\n")
                    fh.write(str(numfiles))
                    fh.write("\n")

                if numfiles == 0:
                    fh2.write("DEL: " + rootpath + item)

                subfileitem = ""

                #some code to shorten test time
                quickrun += 1
                if quickrun > 10000:
                    break

    fh2.close()
    fh.close()
    return shows, nonshows, dirtodel
Example #39
def insert_documents(data):
    """ Insert all provided documents. Checks if the document has been manually
    changed before - if it has, and the new document does not match, it is
    marked as a conflict """
    factory = ModelFactory.get_instance()
    factory.set_db()

    temp_col = Config.get_mongo_collection("temp_scraped")
    manual_col = Config.get_mongo_collection("manual")
    unknown_col = Config.get_mongo_collection("unknown")
    prod_col = Config.get_mongo_collection("prod")
    conflict_col = Config.get_mongo_collection("conflicts")

    print("Starting insertion of {} documents".format(len(data)))
    pbar = ProgressBar()
    for i, doc in enumerate(pbar(data)):
        factory.post_document(doc, temp_col)
    print("Successfully inserted {} documents".format(i + 1))

    manual_docs = factory.get_collection(manual_col).find()

    conflicts = []
    for manual_doc in manual_docs:
        if "id" in manual_doc:
            idx = manual_doc["id"]
        else:
            continue

        # Mark corresponding entry in temp collection as manually changed
        factory.get_database() \
               .get_collection(temp_col) \
               .update_one({"id": idx}, {"$set": {"manually_changed": True}})

        prod_doc = next(
            factory.get_collection(prod_col).find({"id": idx}), None)
        temp_doc = next(
            factory.get_collection(temp_col).find({"id": idx}), None)

        if prod_doc and temp_doc:
            if not temp_doc["content"] == prod_doc["content"]:
                title = temp_doc["content"]["title"]
                conflicts.append({"id": idx, "title": title})

    print("Conflicts: {}".format(conflicts))
    factory.get_collection(conflict_col).create_index([("id", 1)], unique=True)
    for conflict in conflicts:
        try:
            factory.post_document(conflict, conflict_col)
        except pymongo.errors.DuplicateKeyError:
            # In case there are duplicate, unresolved conflicts
            pass

    # Update production collection
    db = factory.get_database()
    try:
        db.get_collection(prod_col).rename("old_prod")
    except pymongo.errors.OperationFailure:
        # If the prod collection does not exist
        pass

    try:
        db.get_collection(temp_col).rename(prod_col)
    except Exception as e:
        print("Failed to update production db collection")
        print(e)
        db.get_collection("old_prod").rename(prod_col)
    finally:
        db.get_collection("old_prod").drop()
        db.get_collection(temp_col).drop()

    # Update all indexes
    factory.set_index(prod_col)
    factory.set_index(manual_col)
    factory.set_index(temp_col)
    # Removes duplicates
    factory.get_collection(unknown_col).create_index([("query_text", 1)],
                                                     unique=True)

    return conflicts
Example #40
    def run(self):

        self.initialize()
        self.iters = 0

        for epoch in range(self.args.epoch_size):
            data_style_A, data_style_B = shuffle_data(self.data_style_A,
                                                      self.data_style_B)

            widgets = ['epoch #%d|' % epoch, Percentage(), Bar(), ETA()]
            pbar = ProgressBar(maxval=self.n_batches, widgets=widgets)
            pbar.start()

            for i in range(self.n_batches):

                pbar.update(i)

                self.generator_A.zero_grad()
                self.generator_B.zero_grad()
                self.discriminator_A.zero_grad()
                self.discriminator_B.zero_grad()

                self.A_path = data_style_A[i * self.args.batch_size:(i + 1) *
                                           self.args.batch_size]
                self.B_path = data_style_B[i * self.args.batch_size:(i + 1) *
                                           self.args.batch_size]

                A, B = self.get_images()
                A = Variable(torch.FloatTensor(A))
                B = Variable(torch.FloatTensor(B))

                if self.cuda:
                    A = A.cuda()
                    B = B.cuda()

                AB = self.generator_B(A)
                BA = self.generator_A(B)

                ABA = self.generator_A(AB)
                BAB = self.generator_B(BA)

                # Reconstruction Loss
                self.recon_loss_A = self.recon_criterion(ABA, A)
                self.recon_loss_B = self.recon_criterion(BAB, B)

                # Real/Fake GAN Loss (A)
                A_dis_real, A_feats_real = self.discriminator_A(A)
                A_dis_fake, A_feats_fake = self.discriminator_A(BA)

                self.dis_loss_A, self.gen_loss_A = self.get_gan_loss(
                    A_dis_real, A_dis_fake)
                self.fm_loss_A = self.get_fm_loss(A_feats_real, A_feats_fake)

                # Real/Fake GAN Loss (B)
                B_dis_real, B_feats_real = self.discriminator_B(B)
                B_dis_fake, B_feats_fake = self.discriminator_B(AB)

                self.dis_loss_B, self.gen_loss_B = self.get_gan_loss(
                    B_dis_real, B_dis_fake)
                self.fm_loss_B = self.get_fm_loss(B_feats_real, B_feats_fake)

                # Total Loss
                if self.iters < self.args.gan_curriculum:
                    rate = self.args.starting_rate
                else:
                    rate = self.args.default_rate

                self.gen_loss_A_total = (
                    self.gen_loss_B * 0.1 + self.fm_loss_B * 0.9) * (
                        1. - rate) + self.recon_loss_A * rate
                self.gen_loss_B_total = (
                    self.gen_loss_A * 0.1 + self.fm_loss_A * 0.9) * (
                        1. - rate) + self.recon_loss_B * rate

                if self.args.model_arch == 'discogan':
                    self.gen_loss = self.gen_loss_A_total + self.gen_loss_B_total
                    self.dis_loss = self.dis_loss_A + self.dis_loss_B
                elif self.args.model_arch == 'recongan':
                    self.gen_loss = self.gen_loss_A_total
                    self.dis_loss = self.dis_loss_B
                elif self.args.model_arch == 'recongan_reverse':
                    self.gen_loss = self.gen_loss_B_total
                    self.dis_loss = self.dis_loss_A
                elif self.args.model_arch == 'gan':
                    self.gen_loss = (self.gen_loss_B * 0.1 +
                                     self.fm_loss_B * 0.9)
                    self.dis_loss = self.dis_loss_B

                self.finish_iteration()
                self.iters += 1
Example #41
def color_images_full(model, b_size=32):
    """
    Function that colors images with approaches on full images.
    Function is used on reg-full and reg-full-vgg approaches.

    Parameters
    ----------
    model : keras.engine.training.Model
        Model for image colorization
    b_size : int
        Size of bach of images
    """
    abs_file_path = get_abs_path(data_origin)
    images = get_image_list(abs_file_path)

    # get list of images to color
    num_of_images = len(images)

    #progress bar
    pbar = ProgressBar(maxval=num_of_images,
                       widgets=[Percentage(), ' ',
                                Bar(), ' ',
                                ETA()])
    pbar.start()

    # for each batch
    for batch_n in range(int(math.ceil(num_of_images / b_size))):
        _b_size = b_size if (
            batch_n + 1) * b_size < num_of_images else num_of_images % b_size

        # load images
        original_size_images = []
        all_images_l = np.zeros((_b_size, 224, 224, 1))
        for i in range(_b_size):
            # get image
            image_lab = load_images(
                os.path.join(abs_file_path, images[batch_n * b_size + i]))
            original_size_images.append(image_lab[:, :, 0])
            image_lab_resized = resize_image_lab(image_lab, (224, 224))
            all_images_l[i, :, :, :] = image_lab_resized[:, :, 0][:, :,
                                                                  np.newaxis]

        # prepare images for a global network
        all_vgg = np.zeros((_b_size, 224, 224, 3))
        for i in range(_b_size):
            all_vgg[i, :, :, :] = np.tile(all_images_l[i], (1, 1, 1, 3))

        # color
        if model.name == "reg_full_vgg":  # vgg has no global network
            color_im = model.predict(all_vgg, batch_size=b_size)
        else:
            color_im = model.predict([all_images_l, all_vgg],
                                     batch_size=b_size)

        # save all images
        abs_save_path = get_abs_path(data_destination)
        for i in range(_b_size):
            # to rgb
            original_im_bw = original_size_images[i]
            h, w = original_im_bw.shape

            # workaround for not suitable shape while resizing
            small_images = np.concatenate((all_images_l[i], color_im[i]),
                                          axis=2)
            colored_im = resize_image_lab(small_images, (h, w))

            lab_im = np.concatenate(
                (original_im_bw[:, :, np.newaxis], colored_im[:, :, 1:]),
                axis=2)
            im_rgb = color.lab2rgb(lab_im)

            # save
            scipy.misc.toimage(im_rgb, cmin=0.0,
                               cmax=1.0).save(abs_save_path + model.name +
                                              "_" +
                                              images[batch_n * b_size + i])

        # update progress bar
        pbar.update(min((batch_n + 1) * b_size, num_of_images))

    # stop progress bar
    pbar.finish()
Example #42
    def train(self, epochs, train_loader, val_loader):
        """
        Método para entrenar en modelo
        :param epochs: (int) numero de épocas que se entrenará el modelo
        :param train_loader: (Data.Loader) Iterador con los datos provenientes del data_set
               ref: http://pytorch.org/docs/0.3.1/data.html#torch.utils.data.DataLoader
        :param val_loader: (Data.Loader) Iterador con los datos que se utilizarán para validar el entrenamiento
               ref: http://pytorch.org/docs/0.3.1/data.html#torch.utils.data.DataLoader
        """

        total_batch_number = len(train_loader)
        val_loss_prev = 1e5

        for epoch_idx in range(1, epochs + 1):
            sys.stdout.write("epoch {}/{}\n".format(epoch_idx, epochs))

            with ProgressBar(widgets=[Percentage(), Bar()],
                             maxval=total_batch_number) as pbar:

                for batch_idx, (x, y) in enumerate(train_loader):
                    x, y = Variable(x), Variable(y)
                    if self.use_cuda:
                        x, y = x.cuda(), y.cuda()

                    y_pred = self.model(x)
                    loss = self.loss_metric(y_pred, y)
                    self.optimizer.zero_grad()
                    loss.backward()
                    self.optimizer.step()

                    pbar.update(batch_idx + 1)

            sys.stdout.write("epoch: {}, train loss: {:.6f}\n".format(
                epoch_idx, loss.data[0]))

            # Compute the loss and accuracy on the validation set
            correct_cnt = 0
            total_cnt = 0
            for batch_idx, (x, y) in enumerate(val_loader):
                if self.use_cuda:
                    x, y = x.cuda(), y.cuda()
                x, target = Variable(x, volatile=True), Variable(y,
                                                                 volatile=True)
                y_pred = self.model(x)
                loss = self.loss_metric(y_pred, target)
                _, pred_label = torch.max(y_pred.data, 1)
                total_cnt += x.data.size()[0]
                correct_cnt += (pred_label == target.data).sum()

            sys.stdout.write(
                "\nepoch: {}, validation loss: {:.6f}, acc: {:.3f}\n\n".format(
                    epoch_idx, loss.data[0], correct_cnt * 1.0 / total_cnt))

            ## Save the model
            val_loss = loss.data[0]
            is_best = val_loss_prev > val_loss

            ## We could save only every K epochs instead of every epoch ...
            self.save_checkpoint(
                self.model.state_dict(), is_best,
                self.root_models + self.model.__name__() +
                "_epoch{}_val_loss{:.6f}.pt".format(epoch_idx + 1, val_loss))

            val_loss_prev = val_loss if val_loss < val_loss_prev else val_loss_prev

        sys.stdout.write("\nTerminó el entrenamiento ...\n")

        ## Save the last model, whether it is the best or not, in case training is to be resumed
        self.save_checkpoint(
            self.model.state_dict(), False,
            self.root_models + self.model.__name__() + "_finalmodel.pt")
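A minimal usage sketch for the train method above (an assumption, not part of the original example): the method only needs two PyTorch DataLoaders plus an already-constructed object that exposes model, optimizer, loss_metric, use_cuda, root_models and save_checkpoint.

import torch
import torch.utils.data as data

# toy tensors standing in for a real dataset
x = torch.randn(256, 10)
y = (torch.rand(256) > 0.5).long()  # binary class labels
dataset = data.TensorDataset(x, y)
train_loader = data.DataLoader(dataset, batch_size=32, shuffle=True)
val_loader = data.DataLoader(dataset, batch_size=32, shuffle=False)

# `trainer` is hypothetical: any instance of the class defining train() above
# trainer.train(epochs=5, train_loader=train_loader, val_loader=val_loader)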
Example #43
0
def run():
    import sys
    if sys.argv[1] == 'dhfr':
        [system, positions, testsystem_name] = dhfr()
    elif sys.argv[1] == 'tip3p':
        [system, positions, testsystem_name] = tip3p()

    precision = sys.argv[2]
    platform_name = sys.argv[3]

    print('%s %s : contains %d particles' %
          (testsystem_name, precision, system.getNumParticles()))
    print('')

    # Remove CMMotionRemover and barostat
    indices_to_remove = list()
    for index in range(system.getNumForces()):
        force = system.getForce(index)
        force_name = force.__class__.__name__
        print(force_name)
        if force_name in ['MonteCarloBarostat', 'CMMotionRemover']:
            print('Removing %s (force index %d)' % (force_name, index))
            indices_to_remove.append(index)
    indices_to_remove.reverse()
    for index in indices_to_remove:
        system.removeForce(index)
    print('')

    # Add barostat
    barostat = openmm.MonteCarloBarostat(pressure, temperature, frequency)

    # Create OpenMM context
    print('Creating context...')
    from openmmtools import integrators
    integrator = integrators.VelocityVerletIntegrator(timestep)
    integrator.setConstraintTolerance(1.0e-8)
    platform = openmm.Platform.getPlatformByName(platform_name)
    platform.setPropertyDefaultValue('Precision', precision)
    if platform_name == 'CUDA':
        platform.setPropertyDefaultValue('DeterministicForces', 'true')
        print('Using deterministic forces...')
    context = openmm.Context(system, integrator, platform)

    context.setPositions(positions)
    print('')

    # Get PME parameters
    print('Retrieving PME parameters...')
    for force in system.getForces():
        if force.__class__.__name__ == 'NonbondedForce':
            nbforce = force
            break
    pme_parameters = nbforce.getPMEParametersInContext(context)
    print(pme_parameters)

    # Flush
    sys.stdout.flush()

    # Equilibrate with barostat
    print('equilibrating...')
    barostat.setFrequency(frequency)
    from progressbar import Percentage, Bar, ETA, RotatingMarker
    widgets = [
        'equilibration: ',
        Percentage(), ' ',
        Bar(marker=RotatingMarker()), ' ',
        ETA()
    ]
    progress = ProgressBar(widgets=widgets)
    for iteration in progress(range(nequil)):
        context.setVelocitiesToTemperature(temperature)
        integrator.step(nequilsteps)

    # Get positions, velocities, and box vectors
    print('')
    state = context.getState(getPositions=True, getVelocities=True)
    box_vectors = state.getPeriodicBoxVectors()
    positions = state.getPositions(asNumpy=True)
    velocities = state.getVelocities(asNumpy=True)
    del context, integrator

    # Remove CMMotionRemover and barostat
    indices_to_remove = list()
    for index in range(system.getNumForces()):
        force = system.getForce(index)
        force_name = force.__class__.__name__
        print(force_name)
        if force_name in ['MonteCarloBarostat', 'CMMotionRemover']:
            print('Removing %s (force index %d)' % (force_name, index))
            indices_to_remove.append(index)
    indices_to_remove.reverse()
    for index in indices_to_remove:
        system.removeForce(index)

    #
    integrator = integrators.VelocityVerletIntegrator(timestep)
    integrator.setConstraintTolerance(1.0e-8)
    context = openmm.Context(system, integrator, platform)
    context.setPeriodicBoxVectors(*box_vectors)
    context.setPositions(positions)
    context.setVelocities(velocities)

    # Open NetCDF file for writing.
    ncfile = netcdf.Dataset(
        'work-%s-%s-%s.nc' % (testsystem_name, precision, platform_name), 'w')
    ncfile.createDimension('nwork', 0)  # extensible dimension
    ncfile.createDimension('nworkvals', nworkvals + 1)
    ncfile.createVariable('work', np.float32, ('nwork', 'nworkvals'))
    work = np.zeros([nwork, nworkvals + 1], np.float32)
    for i in range(nwork):
        context.setVelocitiesToTemperature(temperature)
        integrator.step(nequilsteps)  # equilibrate
        state = context.getState(getEnergy=True)
        initial_energy = state.getPotentialEnergy() + state.getKineticEnergy()
        widgets = [
            'Work %5d / %5d: ' % (i, nwork),
            Percentage(), ' ',
            Bar(marker=RotatingMarker()), ' ',
            ETA()
        ]
        progress = ProgressBar(widgets=widgets)
        for workval in progress(range(nworkvals)):
            integrator.step(nworksteps)
            state = context.getState(getEnergy=True)
            current_energy = state.getPotentialEnergy(
            ) + state.getKineticEnergy()
            work[i, workval + 1] = (current_energy - initial_energy) / kT
            ncfile.variables['work'][i, workval + 1] = work[i, workval + 1]
        print(work[i, :])
        ncfile.sync()
Example #44
0
def zippy_attack(url):
    global zippy_secret_attempts
    global zippy_conn_attempts
    zippy_req = urllib.request.Request(url)
    try:
        zippy_data = retry_urlopen(zippy_req)
        if zippy_data.status == 200:
            zippy_html = str(zippy_data.read())
            zippy_soup = BeautifulSoup(zippy_html)
            if not zippy_soup.title.text == "Zippyshare.com - ":
                zippy_dl = zippy_soup.find('a', id="dlbutton")
                if not zippy_dl is None:
                    zippy_js = zippy_soup.find_all('script')
                    for js in zippy_js:
                        if re.match('\\\\n   var somffunction',
                                    js.text) or re.match(
                                        '\\\\n   var otfunction', js.text):
                            a = re.search('var a = (\d*)\;', js.text)
                            if a.group(1):
                                if args.v:
                                    print(colors.OKGREEN +
                                          "Attemping to break secret" +
                                          colors.ENDC)
                                secret = int(a.group(1))
                                download_secret = str(
                                    int((secret % 78956) * (secret % 3) + 18))
                                url_info = url.split('/')
                                download_server = str(
                                    url_info[2].split('.')[0])
                                download_file = str(url_info[4])
                                zippy_title = zippy_soup.title.text.split(
                                    ' - ')
                                zippy_title.pop(0)
                                download_name = " ".join(zippy_title)
                                download_name = urllib.parse.quote(
                                    download_name)
                                url = "http://" + download_server + ".zippyshare.com/d/" + download_file + "/" + download_secret + "/" + download_name
                                test_req = urllib.request.Request(
                                    url=url, method='HEAD')
                                test_data = urllib.request.urlopen(test_req)
                                content_type = test_data.headers[
                                    'content-type'].split(';')
                                if content_type[0] == "application/x-download":
                                    if args.v:
                                        print(colors.OKBLUE + "\tSuccess" +
                                              colors.ENDC)
                                    widgets = [
                                        " " + " ".join(zippy_title) + " ",
                                        Percentage(), ' ',
                                        Bar(), ' ',
                                        ETA(), ' ',
                                        FileTransferSpeed()
                                    ]
                                    pbar = ProgressBar(widgets=widgets)

                                    def dlProgress(count, blockSize,
                                                   totalSize):
                                        if pbar.maxval is None:
                                            pbar.maxval = totalSize
                                            pbar.start()

                                        pbar.update(
                                            min(count * blockSize, totalSize))

                                    dl, headers = urllib.request.urlretrieve(
                                        url,
                                        " ".join(zippy_title),
                                        reporthook=dlProgress)
                                    pbar.finish()
                                elif zippy_secret_attempts <= zippy_secret_attempts_max:
                                    if args.v:
                                        print(colors.WARNING + "\tFailed" +
                                              colors.ENDC)
                                    zippy_secret_attempts += 1
                                    zippy_attack(url)
                                else:
                                    print(
                                        colors.FAIL +
                                        "Reached max secret attempts, exiting"
                                        + colors.ENDC)
                                    exit(0)

                else:
                    print(colors.WARNING + "Can't find download button..." +
                          colors.ENDC)
            else:
                print(colors.WARNING + "Dead link" + colors.ENDC)
        else:
            print(colors.WARNING + "Bad status code: " +
                  str(zippy_data.status) + colors.ENDC)

    except URLError:
        if zippy_conn_attempts <= zippy_conn_attempts_max:
            if args.v:
                print(colors.WARNING +
                      "Connection refused, let's wait 5 seconds and retry" +
                      colors.ENDC)
            zippy_conn_attempts += 1
            time.sleep(5)
            zippy_attack(url)
        else:
            print(colors.FAIL + "Reached connection retry limit, exiting" +
                  colors.ENDC)
            exit(0)
Example #45
0
 def progress_bar(self, total):
     widget = ["AutoCutMovie: ", Percentage(), Bar("#"), Timer(), " ", ETA()]
     bar = ProgressBar(widgets=widget, maxval=total).start()
     return bar
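A small standalone sketch (an assumption, not taken from the original class) of how the bar returned by progress_bar() is typically driven by its caller:

import time
from progressbar import ProgressBar, Percentage, Bar, Timer, ETA

widget = ["AutoCutMovie: ", Percentage(), Bar("#"), Timer(), " ", ETA()]
bar = ProgressBar(widgets=widget, maxval=100).start()
for i in range(100):
    time.sleep(0.01)  # stand-in for the per-item work
    bar.update(i + 1)
bar.finish()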
Example #46
0
def color_images_part(model):
    """
    Function that colorizes images using the approaches that operate on image parts.
    It is used for the reg-parts, class-wo-weights and class-with-weights approaches.

    Parameters
    ----------
    model : keras.engine.training.Model
        Model for image colorization
    """

    # get images to color
    test_set_dir_path = get_abs_path(data_origin)

    image_list = get_image_list(test_set_dir_path)
    num_of_images = len(image_list)

    # init progress bar
    pbar = ProgressBar(maxval=num_of_images,
                       widgets=[Percentage(), ' ',
                                Bar(), ' ',
                                ETA()])
    pbar.start()

    # repeat for each image
    for i in range(num_of_images):
        # get image
        image_lab = load_images(os.path.join(test_set_dir_path, image_list[i]))
        image_l = image_lab[:, :, 0]
        h, w = image_l.shape

        # split the image into a list of overlapping 32x32 slices
        slices_dim_h = int(math.ceil(h / 32))
        slices_dim_w = int(math.ceil(w / 32))

        slices = np.zeros((slices_dim_h * slices_dim_w * 4, 32, 32, 1))
        for a in range(slices_dim_h * 2 - 1):
            for b in range(slices_dim_w * 2 - 1):
                part = image_l[a * 32 // 2:a * 32 // 2 + 32,
                               b * 32 // 2:b * 32 // 2 + 32]
                # zero-pad slices that extend past the image edges
                _part = np.zeros((32, 32))
                _part[:part.shape[0], :part.shape[1]] = part

                slices[a * slices_dim_w * 2 + b] = _part[:, :, np.newaxis]

        # lower the original's dimensions to 224x224 to feed vgg and repeat L across 3 channels
        image_lab_224_b = resize_image_lab(image_lab, (224, 224))
        image_l_224 = np.repeat(image_lab_224_b[:, :, 0, np.newaxis],
                                3,
                                axis=2).astype(float)

        # combine both inputs into a single list
        input_data = [
            slices,
            np.array([
                image_l_224,
            ] * slices_dim_h * slices_dim_w * 4)
        ]

        # predict
        predictions_ab = model.predict(input_data, batch_size=32)

        # for histograms -> transformation from hist to ab
        if model.name == "class_wo_weights" or model.name == "class_with_weights":
            indices = np.argmax(predictions_ab[:, :, :, :], axis=3)

            predictions_a = indices // 20 * 10 - 100 + 5
            predictions_b = indices % 20 * 10 - 100 + 5  # +5 to set in the middle box
            predictions_ab = np.stack((predictions_a, predictions_b), axis=3)

        # reshape back to original size
        original_size_im = np.zeros((slices_dim_h * 32, slices_dim_w * 32, 2))
        o_h, o_w = original_size_im.shape[:2]

        for n in range(predictions_ab.shape[0]):
            a, b = n // (slices_dim_w * 2) * 16, n % (slices_dim_w * 2) * 16

            if a + 32 > o_h or b + 32 > o_w:
                continue  # it is empty edge

            # weight decision
            if a == 0 and b == 0:
                weight = weight_top_left
            elif a == 0 and b == o_w - 32:
                weight = weight_top_right
            elif a == 0:
                weight = weight_top
            elif a == o_h - 32 and b == 0:
                weight = weight_bottom_left
            elif b == 0:
                weight = weight_left
            elif a == o_h - 32 and b == o_w - 32:
                weight = weight_bottom_right
            elif a == o_h - 32:
                weight = weight_bottom
            elif b == o_w - 32:
                weight = weight_right
            else:
                weight = weight_m

            im_a = predictions_ab[n, :, :, 0] * weight
            im_b = predictions_ab[n, :, :, 1] * weight

            original_size_im[a:a + 32, b:b + 32, :] += np.stack((im_a, im_b),
                                                                axis=2)

        # make original shape image
        original_size_im = original_size_im[:h, :w]

        # to rgb
        color_im = np.concatenate(
            (image_l[:, :, np.newaxis], original_size_im), axis=2)
        im_rgb = color.lab2rgb(color_im)

        # save
        abs_save_path = get_abs_path(data_destination)
        scipy.misc.toimage(im_rgb, cmin=0.0,
                           cmax=1.0).save(abs_save_path + model.name + "_" +
                                          image_list[i])

        # update progress bar
        pbar.update(i + 1)

    # finish progress bar
    pbar.finish()
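The weight_* arrays used in the blending above are globals that are not shown in this example. A plausible construction (an assumption, not the original code) builds 32x32 masks so that the four overlapping predictions, placed on a 16-pixel stride, sum to roughly one at every pixel:

import numpy as np

# 1D profile: fades in over the first 16 pixels and out over the last 16
ramp = np.concatenate((np.linspace(0.0, 1.0, 16), np.linspace(1.0, 0.0, 16)))
# 1D profile for tiles touching the image border: full weight on the border side
edge = np.concatenate((np.ones(16), np.linspace(1.0, 0.0, 16)))

weight_m = np.outer(ramp, ramp)         # interior tiles fade on all sides
weight_top_left = np.outer(edge, edge)  # corner tile keeps full weight at the border
weight_top = np.outer(edge, ramp)       # top-edge tile fades only left/right and downwards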
Example #47
0
def rest_allspec(overwrite=False):
    """Load and interpolate *ALL* HST FOS/GHRS starburst spectra
    on to the same rest-frame wavelength grid
    """

    path = join(datapath.hstfos_path(), _subpath, 'corrected')

    # check output files
    bands = _allinone_rest_bands
    for thisband in bands:
        # check outfiles
        outfile = allinone_rest_filename(thisband)
        if isfile(outfile) and not overwrite:
           print "File {0} exists. Use overwrite to overwrite it.".format(outfile)
           return -1
        # print "Will write into these files: {0}".format(outfile)

    # read in the starburst catalog
    objs_ori = starburst_readin()
    nobj = objs_ori.size

    # make a temporary new catalog
    objs_dtype = [('RA', 'f8'),
                  ('DEC', 'f8'),
                  ('Z', 'f8'),
                  ('gal', 'S15')]
    objs = np.zeros(nobj, dtype=objs_dtype)
    objs['RA'] = 0.
    objs['DEC'] = 0.
    objs['Z'] = 0.
    objs['gal'] = objs_ori['gal']

    # read in master wavelength grid
    master_wave = (aio.allinone_wave_readin())[0]['WAVE']
    master_loglam = np.log10(master_wave)
    nwave = master_wave.size

    # initialization, nobj second dimension because of NMF traditions
    rest_allflux = np.zeros((nwave, nobj))
    rest_allivar = np.zeros((nwave, nobj))

    # Wavelength
    wave_pos = np.array([1000., 3300.])
    rest_loc = np.searchsorted(master_wave, wave_pos)
    newloglam = master_loglam[rest_loc[0]:rest_loc[1]]
    flux = np.zeros((objs.size, newloglam.size))
    ivar = np.zeros((objs.size, newloglam.size))

    pbar = ProgressBar(maxval=nobj).start()
    # Progress bar
    for (iobj, thisobj)  in zip(np.arange(objs.size), objs):
        pbar.update(iobj)
        thisdata = readspec_rest(thisobj)
        inloglam = np.log10(thisdata['wave'])
        influx = thisdata['flux']
        inivar = 1./np.power(thisdata['error'], 2)
        (rest_allflux[rest_loc[0]:rest_loc[1], iobj], rest_allivar[rest_loc[0]:rest_loc[1], iobj]) = specutils.interpol_spec(inloglam, influx, inivar, newloglam)

    #Progress bar
    pbar.finish()

    # write out
    print "Now I am writing everything out..."
    allinone_rest_writeout(objs, master_wave, rest_allflux, rest_allivar, overwrite=overwrite)
Example #48
0
    def scan(self,
             clock_en1=True,
             pixels=512,
             clock_en2=True,
             trigger_en=True,
             measure_direction=True,
             offset=15,
             mask_steps=4,
             PrmpVbpDac=80,
             vthin2Dac=0,
             columns=[True] * 16,
             vthin1Dac=80,
             preCompVbnDac=50,
             mask_filename='',
             **kwargs):
        '''Scan loop
        Parameters
        ----------
        mask : int
            Number of mask steps.
        repeat : int
            Number of injections.
        '''
        inj_factor = 1.0
        INJ_LO = 0.0
        #try:
        #dut = Dut(ScanBase.get_basil_dir(self)+'/examples/lab_devices/agilent33250a_pyserial.yaml')
        #dut.init()
        #logging.info('Connected to '+str(dut['Pulser'].get_info()))
        #except RuntimeError:
        #INJ_LO = 0.0#0.2
        #inj_factor = 2.0
        #logging.info('External injector not connected. Switch to internal one')
        #self.dut['INJ_LO'].set_voltage(INJ_LO, unit='V')
        offset = offset - 1
        vthin1Dac = vthin1Dac + 1

        self.dut['global_conf']['PrmpVbpDac'] = 80
        self.dut['global_conf']['vthin1Dac'] = 255
        self.dut['global_conf']['vthin2Dac'] = 0
        self.dut['global_conf']['vffDac'] = 24
        self.dut['global_conf']['PrmpVbnFolDac'] = 51
        self.dut['global_conf']['vbnLccDac'] = 1
        self.dut['global_conf']['compVbnDac'] = 25
        self.dut['global_conf']['preCompVbnDac'] = 50

        self.dut.write_global()
        self.dut['control']['RESET'] = 0b01
        self.dut['control']['DISABLE_LD'] = 0
        self.dut['control']['PIX_D_CONF'] = 0
        self.dut['control'].write()

        self.dut['control']['CLK_OUT_GATE'] = 1
        self.dut['control']['CLK_BX_GATE'] = 1
        self.dut['control'].write()
        time.sleep(0.1)

        self.dut['control']['RESET'] = 0b11
        self.dut['control'].write()

        self.dut['global_conf']['OneSr'] = 1

        self.dut['global_conf']['TestHit'] = 0
        self.dut['global_conf']['SignLd'] = 0
        self.dut['global_conf']['InjEnLd'] = 0
        self.dut['global_conf']['TDacLd'] = 0
        self.dut['global_conf']['PixConfLd'] = 0
        self.dut.write_global()

        #self.dut['global_conf']['OneSr'] = 0  #all multi columns in parallel
        self.dut['global_conf']['ColEn'][:] = bitarray.bitarray([True] *
                                                                16)  #(columns)
        self.dut['global_conf']['ColSrEn'][:] = bitarray.bitarray([True] * 16)
        self.dut.write_global()

        self.dut['pixel_conf'].setall(False)
        self.dut.write_pixel()
        self.dut['global_conf']['InjEnLd'] = 1
        self.dut.write_global()
        self.dut['global_conf']['InjEnLd'] = 0

        mask_en = np.full([64, 64], False, dtype=np.bool)
        mask_tdac = np.full([64, 64], 16, dtype=np.uint8)
        ###
        if pixels > 1 and pixels <= 64:
            mask_en[1:2, :] = True
        ###
        if pixels == 1:
            mask_en[1][1] = True

        if mask_filename:
            logging.info('Using pixel mask from file: %s', mask_filename)

            with tb.open_file(mask_filename, 'r') as in_file_h5:
                mask_tdac = in_file_h5.root.scan_results.tdac_mask[:]
                if pixels > 64:
                    mask_en = in_file_h5.root.scan_results.en_mask[:]

        self.dut.write_en_mask(mask_en)
        self.dut.write_tune_mask(mask_tdac)
        self.dut.write_global()

        self.dut['global_conf']['OneSr'] = 0
        self.dut.write_global()

        self.dut['trigger'].set_delay(10000)  #trigger for no injection 10000
        self.dut['trigger'].set_width(16)  #16
        self.dut['trigger'].set_repeat(1)
        self.dut['trigger'].set_en(False)

        logging.debug('Configure TDC')
        self.dut['tdc']['RESET'] = True
        self.dut['tdc']['EN_TRIGGER_DIST'] = True
        self.dut['tdc']['ENABLE_EXTERN'] = False
        self.dut['tdc']['EN_ARMING'] = False
        self.dut['tdc']['EN_INVERT_TRIGGER'] = False
        self.dut['tdc']['EN_INVERT_TDC'] = False
        self.dut['tdc']['EN_WRITE_TIMESTAMP'] = True

        lmask = [1] + ([0] * (mask_steps - 1))
        lmask = lmask * ((64 * 64) / mask_steps + 1)
        lmask = lmask[:64 * 64]
        ranges = np.arange(0, (vthin1Dac - offset), 1)
        n = 0
        for ni in ranges:
            time.sleep(0.5)
            bv_mask = bitarray.bitarray(lmask)
            if measure_direction:
                vthin1Dac1 = vthin1Dac - n
            else:
                vthin1Dac1 = n + offset
            with self.readout(scan_param_id=vthin1Dac1):  #vthin1Dac-n):
                logging.info('Scan Parameter: %f (%d of %d)', vthin1Dac1,
                             n + 1, vthin1Dac - offset)
                pbar = ProgressBar(maxval=mask_steps).start()

                self.dut['global_conf']['vthin1Dac'] = 255
                self.dut['global_conf']['preCompVbnDac'] = 50
                self.dut['global_conf']['vthin2Dac'] = 0
                self.dut['global_conf']['PrmpVbpDac'] = 80
                self.dut.write_global()
                time.sleep(0.1)

                self.dut['pixel_conf'][:] = bv_mask
                self.dut.write_pixel_col()
                self.dut['global_conf']['InjEnLd'] = 0  #1
                #self.dut['global_conf']['PixConfLd'] = 0b11
                self.dut.write_global()

                bv_mask[1:] = bv_mask[0:-1]
                bv_mask[0] = 0
                self.dut['global_conf']['vthin1Dac'] = vthin1Dac1
                self.dut['global_conf']['preCompVbnDac'] = preCompVbnDac
                self.dut['global_conf']['vthin2Dac'] = vthin2Dac
                self.dut['global_conf']['PrmpVbpDac'] = PrmpVbpDac
                self.dut.write_global()
                time.sleep(0.1)

                #while not self.dut['inj'].is_done():
                #pass

                if trigger_en == True:
                    self.dut['trigger'].set_repeat(0)
                if clock_en1 == False:
                    self.dut['control']['CLK_BX_GATE'] = 0
                    self.dut['control'].write()
                if clock_en2 == False:
                    self.dut['control']['CLK_OUT_GATE'] = 0
                    self.dut['control'].write()
                if trigger_en == True:
                    self.dut['trigger'].start()

                self.dut['tdc'].ENABLE = True

                time.sleep(5)  #10

                self.dut['tdc'].ENABLE = False

                #n=0
                #if trigger_en == True:
                #while not self.dut['trigger'].is_done():
                #time.sleep(1)
                #n=n+1
                #print self.dut['trigger'].is_done() , n, "seconds"

                if clock_en1 == False:
                    self.dut['control']['CLK_BX_GATE'] = 1
                    self.dut['control'].write()
                if clock_en2 == False:
                    self.dut['control']['CLK_OUT_GATE'] = 1
                    self.dut['control'].write()
                #while not self.dut['trigger'].is_done():
                #pass
                n = n + 1

        scan_results = self.h5_file.create_group("/", 'scan_results',
                                                 'Scan Masks')
        self.h5_file.createCArray(scan_results, 'tdac_mask', obj=mask_tdac)
        self.h5_file.createCArray(scan_results, 'en_mask', obj=mask_en)
Example #49
0
class Installer(object):
    def __init__(self,
                 install_config,
                 maxy=0,
                 maxx=0,
                 iso_installer=False,
                 rpm_path="../stage/RPMS",
                 log_path="../stage/LOGS",
                 ks_config=None):
        self.install_config = install_config
        self.ks_config = ks_config
        self.iso_installer = iso_installer
        self.rpm_path = rpm_path
        self.log_path = log_path
        self.mount_command = "./mk-mount-disk.sh"
        self.prepare_command = "./mk-prepare-system.sh"
        self.finalize_command = "./mk-finalize-system.sh"
        self.install_package_command = "./mk-install-package.sh"
        self.chroot_command = "./mk-run-chroot.sh"
        self.setup_grub_command = "./mk-setup-grub.sh"
        self.unmount_disk_command = "./mk-unmount-disk.sh"

        if self.iso_installer:
            self.working_directory = "/mnt/photon-root"
        elif 'working_directory' in self.install_config:
            self.working_directory = self.install_config['working_directory']
        else:
            self.working_directory = "/mnt/photon-root"
        self.photon_root = self.working_directory + "/photon-chroot"

        self.restart_command = "shutdown"

        if self.iso_installer:
            self.output = open(os.devnull, 'w')
        else:
            self.output = None

        if self.iso_installer:
            #initializing windows
            self.maxy = maxy
            self.maxx = maxx
            self.height = 10
            self.width = 75
            self.progress_padding = 5

            self.progress_width = self.width - self.progress_padding
            self.starty = (self.maxy - self.height) / 2
            self.startx = (self.maxx - self.width) / 2
            self.window = Window(self.height,
                                 self.width,
                                 self.maxy,
                                 self.maxx,
                                 'Installing Photon',
                                 False,
                                 items=[])
            self.progress_bar = ProgressBar(
                self.starty + 3, self.startx + self.progress_padding / 2,
                self.progress_width)

        signal.signal(signal.SIGINT, self.exit_gracefully)

    # This will be called if the installer interrupted by Ctrl+C or exception
    def exit_gracefully(self, signal, frame):
        if self.iso_installer:
            self.progress_bar.hide()
            self.window.addstr(
                0, 0,
                'Oops, Installer got interrupted.\n\nPress any key to get to the bash...'
            )
            self.window.content_window().getch()

        modules.commons.dump(modules.commons.LOG_FILE_NAME)
        sys.exit(1)

    def install(self, params):
        try:
            return self.unsafe_install(params)
        except Exception as inst:
            if self.iso_installer:
                modules.commons.log(modules.commons.LOG_ERROR, repr(inst))
                self.exit_gracefully(None, None)
            else:
                raise

    def unsafe_install(self, params):

        if self.iso_installer:
            self.window.show_window()
            self.progress_bar.initialize('Initializing installation...')
            self.progress_bar.show()
            #self.rpm_path = "https://dl.bintray.com/vmware/photon_release_1.0_TP2_x86_64"
            if self.rpm_path.startswith(
                    "https://") or self.rpm_path.startswith("http://"):
                cmdoption = 's/baseurl.*/baseurl={}/g'.format(
                    self.rpm_path.replace('/', '\/'))
                process = subprocess.Popen([
                    'sed', '-i', cmdoption, '/etc/yum.repos.d/photon-iso.repo'
                ])
                retval = process.wait()
                if retval != 0:
                    modules.commons.log(modules.commons.LOG_INFO,
                                        "Failed to reset repo")
                    self.exit_gracefully(None, None)

            cmdoption = 's/cachedir=\/var/cachedir={}/g'.format(
                self.photon_root.replace('/', '\/'))
            process = subprocess.Popen(
                ['sed', '-i', cmdoption, '/etc/tdnf/tdnf.conf'])
            retval = process.wait()
            if retval != 0:
                modules.commons.log(modules.commons.LOG_INFO,
                                    "Failed to reset tdnf cachedir")
                self.exit_gracefully(None, None)
        self.execute_modules(modules.commons.PRE_INSTALL)

        self.initialize_system()

        if self.iso_installer:
            self.get_size_of_packages()
            selected_packages = self.install_config['packages']
            for package in selected_packages:
                self.progress_bar.update_message(
                    'Installing {0}...'.format(package))
                process = subprocess.Popen([
                    'tdnf', 'install', package, '--installroot',
                    self.photon_root, '--nogpgcheck', '--assumeyes'
                ],
                                           stdout=self.output,
                                           stderr=subprocess.STDOUT)
                retval = process.wait()
                # 0: success; 137: package already installed; 65: package not found in repo.
                if retval != 0 and retval != 137:
                    modules.commons.log(
                        modules.commons.LOG_ERROR,
                        "Failed install: {} with error code {}".format(
                            package, retval))
                    self.exit_gracefully(None, None)
                self.progress_bar.increment(self.size_of_packages[package])
        else:
            #install packages
            for rpm in self.rpms_tobeinstalled:
                # We already installed the filesystem in the preparation
                if rpm['package'] == 'filesystem':
                    continue
                return_value = self.install_package(rpm['filename'])
                if return_value != 0:
                    self.exit_gracefully(None, None)

        if self.iso_installer:
            self.progress_bar.show_loading('Finalizing installation')

        self.finalize_system()

        if not self.install_config['iso_system']:
            # Execute post installation modules
            self.execute_modules(modules.commons.POST_INSTALL)

            # install grub
            try:
                if self.install_config['boot'] == 'bios':
                    process = subprocess.Popen([
                        self.setup_grub_command, '-w', self.photon_root,
                        "bios", self.install_config['disk']['disk'],
                        self.install_config['disk']['root'],
                        self.install_config['disk']['boot'],
                        self.install_config['disk']['bootdirectory']
                    ],
                                               stdout=self.output)
                elif self.install_config['boot'] == 'efi':
                    process = subprocess.Popen([
                        self.setup_grub_command, '-w', self.photon_root, "efi",
                        self.install_config['disk']['disk'],
                        self.install_config['disk']['root'],
                        self.install_config['disk']['boot'],
                        self.install_config['disk']['bootdirectory']
                    ],
                                               stdout=self.output)
            except:
                #fall back to bios if the 'boot' config value is not set.
                process = subprocess.Popen([
                    self.setup_grub_command, '-w', self.photon_root, "bios",
                    self.install_config['disk']['disk'],
                    self.install_config['disk']['root'],
                    self.install_config['disk']['boot'],
                    self.install_config['disk']['bootdirectory']
                ],
                                           stdout=self.output)

            retval = process.wait()

            self.update_fstab()

        command = [self.unmount_disk_command, '-w', self.photon_root]
        if not self.install_config['iso_system']:
            command.extend(self.generate_partitions_param(reverse=True))
        process = subprocess.Popen(command, stdout=self.output)
        retval = process.wait()

        if self.iso_installer:
            self.progress_bar.hide()
            self.window.addstr(
                0, 0,
                'Congratulations, Photon has been installed in {0} secs.\n\nPress any key to continue to boot...'
                .format(self.progress_bar.time_elapsed))
            if self.ks_config == None:
                self.window.content_window().getch()

        return ActionResult(True, None)

    def copy_rpms(self):
        # prepare the RPMs list
        rpms = []
        for root, dirs, files in os.walk(self.rpm_path):
            for name in files:
                file = os.path.join(root, name)
                size = os.path.getsize(file)
                rpms.append({'filename': name, 'path': file, 'size': size})

        self.rpms_tobeinstalled = []
        selected_packages = self.install_config['packages']
        for package in selected_packages:
            pattern = package + '-[0-9]*.rpm'
            if (package == 'glibc'):
                pattern2 = pattern
            else:
                pattern2 = package + '-[a-z][0-9]*.rpm'
            for rpm in rpms:
                if fnmatch.fnmatch(rpm['filename'],
                                   pattern) or fnmatch.fnmatch(
                                       rpm['filename'], pattern2):
                    rpm['package'] = package
                    self.rpms_tobeinstalled.append(rpm)
                    break
        # Copy the rpms
        for rpm in self.rpms_tobeinstalled:
            shutil.copy(rpm['path'], self.photon_root + '/RPMS/')

    def copy_files(self):
        # Make the photon_root directory if it does not exist
        process = subprocess.Popen(['mkdir', '-p', self.photon_root],
                                   stdout=self.output)
        retval = process.wait()

        # Copy the installer files
        process = subprocess.Popen(
            ['cp', '-r', "../installer", self.photon_root], stdout=self.output)
        retval = process.wait()

        # Create the rpms directory
        process = subprocess.Popen(['mkdir', '-p', self.photon_root + '/RPMS'],
                                   stdout=self.output)
        retval = process.wait()
        self.copy_rpms()

    def bind_installer(self):
        # Make the photon_root/installer directory if it does not exist
        process = subprocess.Popen(
            ['mkdir', '-p',
             os.path.join(self.photon_root, "installer")],
            stdout=self.output)
        retval = process.wait()
        # The function finalize_system will access the file /installer/mk-finalize-system.sh after chroot to photon_root.
        # Bind the /installer folder to self.photon_root/installer, so that after chroot to photon_root,
        # the file can still be accessed as /installer/mk-finalize-system.sh.
        process = subprocess.Popen([
            'mount', '--bind', '/installer',
            os.path.join(self.photon_root, "installer")
        ],
                                   stdout=self.output)
        retval = process.wait()

    def update_fstab(self):
        fstab_file = open(os.path.join(self.photon_root, "etc/fstab"), "w")
        fstab_file.write("#system\tmnt-pt\ttype\toptions\tdump\tfsck\n")

        for partition in self.install_config['disk']['partitions']:
            options = 'defaults'
            dump = 1
            fsck = 2

            if 'mountpoint' in partition and partition['mountpoint'] == '/':
                options = options + ',barrier,noatime,noacl,data=ordered'
                fsck = 1

            if partition['filesystem'] == 'swap':
                mountpoint = 'swap'
                dump = 0
                fsck = 0
            else:
                mountpoint = partition['mountpoint']

            fstab_file.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(
                partition['path'], mountpoint, partition['filesystem'],
                options, dump, fsck))
        # Add the cdrom entry
        fstab_file.write("/dev/cdrom\t/mnt/cdrom\tiso9660\tro,noauto\t0\t0\n")

        fstab_file.close()

    def generate_partitions_param(self, reverse=False):
        if reverse:
            step = -1
        else:
            step = 1
        params = []
        for partition in self.install_config['disk']['partitions'][::step]:
            if partition["filesystem"] == "swap":
                continue

            params.extend([
                '--partitionmountpoint', partition["path"],
                partition["mountpoint"]
            ])
        return params

    def initialize_system(self):
        #Setup the disk
        if (not self.install_config['iso_system']):
            command = [self.mount_command, '-w', self.photon_root]
            command.extend(self.generate_partitions_param())
            process = subprocess.Popen(command, stdout=self.output)
            retval = process.wait()

        if self.iso_installer:
            self.bind_installer()
            process = subprocess.Popen(
                [self.prepare_command, '-w', self.photon_root, 'install'],
                stdout=self.output)
            retval = process.wait()
        else:
            self.copy_files()
            #Setup the filesystem basics
            process = subprocess.Popen(
                [self.prepare_command, '-w', self.photon_root],
                stdout=self.output)
            retval = process.wait()

    def finalize_system(self):
        #Copy resolv.conf into the chroot and finalize the system
        shutil.copy("/etc/resolv.conf", self.photon_root + '/etc/.')
        process = subprocess.Popen([
            self.chroot_command, '-w', self.photon_root, self.finalize_command,
            '-w', self.photon_root
        ],
                                   stdout=self.output)
        retval = process.wait()
        if self.iso_installer:

            modules.commons.dump(modules.commons.LOG_FILE_NAME)
            shutil.copy(modules.commons.LOG_FILE_NAME,
                        self.photon_root + '/var/log/')

            # unmount the installer directory
            process = subprocess.Popen(
                ['umount',
                 os.path.join(self.photon_root, "installer")],
                stdout=self.output)
            retval = process.wait()
            # remove the installer directory
            process = subprocess.Popen(
                ['rm', '-rf',
                 os.path.join(self.photon_root, "installer")],
                stdout=self.output)
            retval = process.wait()
            # Disable the swap file
            process = subprocess.Popen(['swapoff', '-a'], stdout=self.output)
            retval = process.wait()
            # remove the tdnf cache directory and the swapfile.
            process = subprocess.Popen(
                ['rm', '-rf',
                 os.path.join(self.photon_root, "cache")],
                stdout=self.output)

    def install_package(self, package_name):
        rpm_params = ''

        os.environ["RPMROOT"] = self.rpm_path
        rpm_params = rpm_params + ' --force '
        rpm_params = rpm_params + ' --root ' + self.photon_root
        rpm_params = rpm_params + ' --dbpath /var/lib/rpm '

        if ('type' in self.install_config and
            (self.install_config['type']
             in ['micro', 'minimal'])) or self.install_config['iso_system']:
            rpm_params = rpm_params + ' --excludedocs '

        process = subprocess.Popen([
            self.install_package_command, '-w', self.photon_root, package_name,
            rpm_params
        ],
                                   stdout=self.output)

        return process.wait()

    def execute_modules(self, phase):
        modules_paths = glob.glob('modules/m_*.py')
        for mod_path in modules_paths:
            module = mod_path.replace('/', '.', 1)
            module = os.path.splitext(module)[0]
            try:
                __import__(module)
                mod = sys.modules[module]
            except ImportError:
                modules.commons.log(modules.commons.LOG_ERROR,
                                    'Error importing module {}'.format(module))
                continue

            # modules are disabled by default
            if not hasattr(mod, 'enabled') or mod.enabled == False:
                modules.commons.log(modules.commons.LOG_INFO,
                                    "module {} is not enabled".format(module))
                continue
            # check for the install phase
            if not hasattr(mod, 'install_phase'):
                modules.commons.log(
                    modules.commons.LOG_ERROR,
                    "Error: can not defind module {} phase".format(module))
                continue
            if mod.install_phase != phase:
                modules.commons.log(
                    modules.commons.LOG_INFO,
                    "Skipping module {0} for phase {1}".format(module, phase))
                continue
            if not hasattr(mod, 'execute'):
                modules.commons.log(
                    modules.commons.LOG_ERROR,
                    "Error: not able to execute module {}".format(module))
                continue
            mod.execute(module, self.ks_config, self.install_config,
                        self.photon_root)

    def get_install_size_of_a_package(self, name_size_pairs, package):
        modules.commons.log(modules.commons.LOG_INFO,
                            "Find the install size of: {} ".format(package))
        for index, name in enumerate(name_size_pairs, start=0):
            if name[name.find(":") + 1:].strip() == package.strip():
                item = name_size_pairs[index + 1]
                size = item[item.find("(") + 1:item.find(")")]
                return int(size)
        raise LookupError(
            "Cannot find package {} in the repo.".format(package))

    def get_size_of_packages(self):
        #call tdnf info to get the install size of all the packages.
        process = subprocess.Popen(
            ['tdnf', 'info', '--installroot', self.photon_root],
            stdout=subprocess.PIPE)
        out, err = process.communicate()
        if err != None and err != 0:
            modules.commons.log(
                modules.commons.LOG_ERROR,
                "Failed to get infomation from : {} with error code {}".format(
                    package, err))

        name_size_pairs = re.findall("(?:^Name.*$)|(?:^.*Install Size.*$)",
                                     out, re.M)
        selected_packages = self.install_config['packages']
        self.size_of_packages = {}
        progressbar_num_items = 0
        for package in selected_packages:
            size = self.get_install_size_of_a_package(name_size_pairs, package)
            progressbar_num_items += size
            self.size_of_packages[package] = size
        self.progress_bar.update_num_items(progressbar_num_items)

    def run(self, command, comment=None):
        if comment != None:
            modules.commons.log(modules.commons.LOG_INFO,
                                "Installer: {} ".format(comment))
            self.progress_bar.update_loading_message(comment)

        modules.commons.log(modules.commons.LOG_INFO,
                            "Installer: {} ".format(command))
        process = subprocess.Popen([command],
                                   shell=True,
                                   stdout=subprocess.PIPE)
        out, err = process.communicate()
        if err != None and err != 0 and "systemd-tmpfiles" not in command:
            modules.commons.log(
                modules.commons.LOG_ERROR,
                "Installer: failed in {} with error code {}".format(
                    command, err))
            modules.commons.log(modules.commons.LOG_ERROR, out)
            self.exit_gracefully(None, None)

        return err
Example #50
0
    shutil.copy2(
        Path(project_dir) / 'libcblite.so.sym', f'./libcblite-{args.version}')
    os.chdir(workspace)

    package_name = f'{args.product}-{args.edition}-{args.version}-{args.bld_num}-{args.os}.tar.gz'
    print()
    print(f"=== Creating {workspace}/{package_name} package ===")
    print()

    os.chdir(str(workspace_path / 'build_release'))
    shutil.copy2(
        workspace_path / 'product-texts' / 'mobile' / 'couchbase-lite' /
        'license' / f'LICENSE_{args.edition}.txt',
        f'libcblite-{args.version}/LICENSE.txt')

    pbar = ProgressBar(maxval=3)
    pbar.start()
    with tarfile.open(f'{workspace}/{package_name}', 'w:gz') as tar:
        tar.add(f'libcblite-{args.version}/include', recursive=True)
        pbar.update(1)
        tar.add(f'libcblite-{args.version}/lib', recursive=True)
        pbar.update(2)
        tar.add(f'libcblite-{args.version}/LICENSE.txt')
        pbar.update(3)
        pbar.finish()

    symbols_package_name = f'{args.product}-{args.edition}-{args.version}-{args.bld_num}-{args.os}-symbols.tar.gz'
    with tarfile.open(f'{workspace}/{symbols_package_name}', 'w:gz') as tar:
        tar.add(f'libcblite-{args.version}/libcblite.so.sym')

    os.chdir(workspace)
Example #51
0
def upload_files(binary_filename, updown_client):
    """Upload a binary file to the Store.

    Submit a file to the Store upload service and return the
    corresponding upload_id.
    """
    result = {'success': False, 'errors': []}

    try:
        binary_file_size = os.path.getsize(binary_filename)
        binary_file = open(binary_filename, 'rb')
        encoder = MultipartEncoder(
            fields={
                'binary': ('filename', binary_file, 'application/octet-stream')
            }
        )

        # Create a progress bar that looks like: Uploading foo [==  ] 50%
        progress_bar = ProgressBar(
            widgets=['Uploading {} '.format(binary_filename),
                     Bar(marker='=', left='[', right=']'), ' ', Percentage()],
            maxval=os.path.getsize(binary_filename))
        progress_bar.start()
        # Print a newline so the progress bar has some breathing room.
        logger.info('')

        # Create a monitor for this upload, so that progress can be displayed
        monitor = MultipartEncoderMonitor(
            encoder, functools.partial(_update_progress_bar, progress_bar,
                                       binary_file_size))

        # Begin upload
        response = updown_client.upload(monitor)

        # Make sure progress bar shows 100% complete
        progress_bar.finish()

        if response.ok:
            response_data = response.json()
            result.update({
                'success': response_data.get('successful', True),
                'upload_id': response_data['upload_id'],
                'binary_filesize': os.path.getsize(binary_filename),
                'source_uploaded': False,
            })
        else:
            logger.error(
                'There was an error uploading the package.\n'
                'Reason: %s\n'
                'Text: %s',
                response.reason, response.text)
            result['errors'] = [response.text]
    except Exception as err:
        logger.exception(
            'An unexpected error was found while uploading files.')
        result['errors'] = [str(err)]
    finally:
        # Close the open file
        binary_file.close()

    return result
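The _update_progress_bar helper passed to functools.partial above is not shown in the example. A plausible sketch (an assumption, not the original implementation), given that MultipartEncoderMonitor invokes its callback with the monitor object:

def _update_progress_bar(progress_bar, maximum_value, monitor):
    # monitor.bytes_read counts the bytes sent so far; cap it at the file size so
    # the bar never exceeds its maxval (the multipart body is slightly larger than
    # the file itself).
    progress_bar.update(min(monitor.bytes_read, maximum_value))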
Example #52
0
    def setup(self):
        """
        Checks the location of the jar files.
        Spawns the server as a process.
        """
        jars = [
            "stanford-corenlp-3.2.0.jar", "stanford-corenlp-3.2.0-models.jar",
            "joda-time.jar", "xom.jar", "jollyday.jar"
        ]

        # if CoreNLP libraries are in a different directory,
        # change the corenlp_path variable to point to them
        corenlp_path = os.path.relpath(__file__).split(
            '/')[0] + "/stanford-corenlp-full-2013-06-20/"
        #corenlp_path = "stanford-corenlp-full-2013-06-20/"

        java_path = "java"
        classname = "edu.stanford.nlp.pipeline.StanfordCoreNLP"
        # include the properties file, so you can change defaults
        # but any changes in output format will break parse_parser_results()
        props = "-props " + os.path.relpath(__file__).split(
            '/')[0] + "/default.properties"

        # add and check classpaths
        jars = [corenlp_path + jar for jar in jars]
        for jar in jars:
            if not os.path.exists(jar):
                print "Error! Cannot locate %s" % jar
                sys.exit(1)

        # On Windows, change the classpath separator from ':' to ';'
        # spawn the server
        start_corenlp = "%s -Xmx2500m -cp %s %s %s" % (
            java_path, ':'.join(jars), classname, props)
        if VERBOSE: print start_corenlp
        self.corenlp = pexpect.spawn(start_corenlp)

        # show progress bar while loading the models
        widgets = ['Loading Models: ', Fraction()]
        pbar = ProgressBar(widgets=widgets, maxval=4,
                           force_update=True).start()
        self.corenlp.expect("done.",
                            timeout=20)  # Load pos tagger model (~5sec)
        pbar.update(1)
        self.corenlp.expect("done.",
                            timeout=200)  # Load NER-all classifier (~33sec)
        pbar.update(2)
        self.corenlp.expect("done.",
                            timeout=600)  # Load NER-muc classifier (~60sec)
        pbar.update(3)
        self.corenlp.expect("done.",
                            timeout=600)  # Load CoNLL classifier (~50sec)
        pbar.update(4)
        #        self.corenlp.expect("done.", timeout=200) # Loading PCFG (~3sec)
        #        pbar.update(5)
        self.corenlp.expect("Entering interactive shell.")
        pbar.finish()
Example #53
0
else:
    fname = sys.argv[1]
    if len(sys.argv) >= 4:
        acctid = sys.argv[2]
        secret = sys.argv[3]

if acctid is None:
    acctid = raw_input("AWS_ACCESS_KEY_ID: ").strip()

if secret is None:
    secret = raw_input("AWS_SECRET_ACCESS_KEY: ").strip()

bucket = "kroll.appcelerator.com"
key = os.path.basename(fname)
conn = S3Connection(acctid, secret)
bucket = conn.get_bucket(bucket)
k = bucket.new_key(key)

pbar = ProgressBar().start()
try:

    def progress_callback(current, total):
        pbar.update(int(100 * (float(current) / float(total))))

    k.set_contents_from_filename(fname,
                                 cb=progress_callback,
                                 num_cb=100,
                                 policy='public-read')
finally:
    pbar.finish()
Example #54
0
            # fill inbox
            done = s.query(tables.clangs.program_id) \
              .filter(tables.clangs.clang == args.clang)
            todo = s.query(tables.programs) \
              .filter(~tables.programs.id.in_(done)) \
              .order_by(tables.programs.date_added) \
              .limit(BATCH_SIZE)

            for program in todo:
                inbox.append(program)

        for tables in tablesets:
            if args.recheck:
                q = s.query(tables.clang_stderrs)
                for stderr in ProgressBar(max_value=q.count())(q):
                    assertion_ = util.get_assertion(s, tables.clang_assertions,
                                                    stderr.stderr)
                    unreachable_ = util.get_unreachable(
                        s, tables.clang_unreachables, stderr.stderr)
                    terminate_ = util.get_terminate(s, tables.clang_terminates,
                                                    stderr.stderr)

                    errs = sum(1 if x else 0
                               for x in [assertion_, unreachable_, terminate_])
                    if errs > 1:
                        raise LookupError(
                            f"Multiple errors types found in: {stderr}\n\n" +
                            f"Assertion: {assertion_}\n" +
                            f"Unreachable: {unreachable_}\n" +
                            f"Terminate: {terminate_}")
Example #55
0
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--category',
                        '-c',
                        help='category name',
                        required=True)
    parser.add_argument('--level',
                        '-l',
                        type=int,
                        help='level id',
                        required=True)
    parser.add_argument('--load_ckpt',
                        '-k',
                        help='Path to a check point file for load',
                        required=True)
    parser.add_argument('--model', '-m', help='Model to use', required=True)
    parser.add_argument('--setting',
                        '-x',
                        help='Setting to use',
                        required=True)
    parser.add_argument('--batch_size',
                        '-b',
                        help='Batch size during testing',
                        default=8,
                        type=int)
    parser.add_argument('--save_ply',
                        '-s',
                        help='Save results as ply',
                        action='store_true')
    parser.add_argument('--save_dir',
                        '-o',
                        help='The output directory',
                        type=str,
                        default=None)
    parser.add_argument('--save_num_shapes',
                        '-u',
                        help='how many shapes to visualize',
                        default=20,
                        type=int)
    args = parser.parse_args()
    print(args)

    if args.save_ply:
        if os.path.exists(args.save_dir):
            print('ERROR: folder %s exists! Please check and delete!' %
                  args.save_dir)
            exit(1)
        os.mkdir(args.save_dir)

    model = importlib.import_module(args.model)
    setting_path = os.path.join(os.path.dirname(__file__), args.model)
    sys.path.append(setting_path)
    setting = importlib.import_module(args.setting)

    sample_num = setting.sample_num
    batch_size = args.batch_size

    args.data_folder = '../../data/sem_seg_h5/'

    # Load all test data
    args.filelist = os.path.join(args.data_folder,
                                 '%s-%d' % (args.category, args.level),
                                 'test_files.txt')
    data_test, _, label_gt = data_utils.load_seg(args.filelist)
    num_shape = data_test.shape[0]
    print('Loaded data: %s shapes in total to test.' % num_shape)

    # Load current category + level statistics
    with open(
            '../../stats/after_merging_label_ids/%s-level-%d.txt' %
        (args.category, args.level), 'r') as fin:
        setting.num_class = len(fin.readlines()) + 1  # with "other"
        print('NUM CLASS: %d' % setting.num_class)

    ######################################################################
    # Placeholders
    is_training = tf.placeholder(tf.bool, name='is_training')
    pts_fts = tf.placeholder(tf.float32,
                             shape=(batch_size, sample_num, setting.data_dim),
                             name='points')
    ######################################################################

    ######################################################################
    pts_fts_sampled = pts_fts
    points_sampled = pts_fts_sampled
    features_sampled = None

    net = model.Net(points_sampled, features_sampled, is_training, setting)
    seg_probs_op = tf.nn.softmax(net.logits, name='seg_probs')

    # for restore model
    saver = tf.train.Saver()

    parameter_num = np.sum(
        [np.prod(v.shape.as_list()) for v in tf.trainable_variables()])
    print('{}-Parameter number: {:d}.'.format(datetime.now(), parameter_num))

    # Create a session
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False
    sess = tf.Session(config=config)

    # Load the model
    ckptstate = tf.train.get_checkpoint_state(args.load_ckpt)
    if ckptstate is not None:
        LOAD_MODEL_FILE = os.path.join(
            args.load_ckpt, os.path.basename(ckptstate.model_checkpoint_path))
        saver.restore(sess, LOAD_MODEL_FILE)
        print("Model loaded in file: %s" % LOAD_MODEL_FILE)
    else:
        print("Fail to load modelfile: %s" % args.load_ckpt)

    # Start the testing
    print('{}-Testing...'.format(datetime.now()))

    num_batch = (num_shape - 1) // batch_size + 1
    pts_batch = np.zeros((batch_size, sample_num, 3), dtype=np.float32)

    avg_acc = 0.0
    avg_cnt = 0

    shape_iou_tot = 0.0
    shape_iou_cnt = 0

    part_intersect = np.zeros((setting.num_class), dtype=np.float32)
    part_union = np.zeros((setting.num_class), dtype=np.float32)

    bar = ProgressBar()
    all_seg_probs = []
    for batch_idx in bar(range(num_batch)):
        start_idx = batch_idx * batch_size
        end_idx = min((batch_idx + 1) * batch_size, num_shape)

        pts_batch[:end_idx - start_idx, ...] = data_test[start_idx:end_idx]

        seg_probs = sess.run(seg_probs_op,
                             feed_dict={
                                 pts_fts: pts_batch,
                                 is_training: False
                             })
        seg_probs = seg_probs[:end_idx - start_idx]
        all_seg_probs.append(seg_probs)

        seg_res = np.argmax(seg_probs[:, :, 1:], axis=-1) + 1

        avg_acc += np.sum(
            np.mean((seg_res == label_gt[start_idx:end_idx]) |
                    (label_gt[start_idx:end_idx] == 0),
                    axis=-1))
        avg_cnt += end_idx - start_idx

        seg_gt = label_gt[start_idx:end_idx]
        seg_res[seg_gt == 0] = 0

        for i in range(end_idx - start_idx):
            cur_pred = seg_res[i]
            cur_gt = seg_gt[i]

            cur_shape_iou_tot = 0.0
            cur_shape_iou_cnt = 0
            for j in range(1, setting.num_class):
                cur_gt_mask = (cur_gt == j)
                cur_pred_mask = (cur_pred == j)

                has_gt = (np.sum(cur_gt_mask) > 0)
                has_pred = (np.sum(cur_pred_mask) > 0)

                if has_gt or has_pred:
                    intersect = np.sum(cur_gt_mask & cur_pred_mask)
                    union = np.sum(cur_gt_mask | cur_pred_mask)
                    iou = intersect / union

                    cur_shape_iou_tot += iou
                    cur_shape_iou_cnt += 1

                    part_intersect[j] += intersect
                    part_union[j] += union

            if cur_shape_iou_cnt > 0:
                cur_shape_miou = cur_shape_iou_tot / cur_shape_iou_cnt
                shape_iou_tot += cur_shape_miou
                shape_iou_cnt += 1

        if args.save_ply and start_idx < args.save_num_shapes:
            for i in range(start_idx, min(end_idx, args.save_num_shapes)):
                out_fn = os.path.join(args.save_dir, 'shape-%02d-pred.ply' % i)
                data_utils.save_ply_property(data_test[i],
                                             seg_res[i - start_idx],
                                             setting.num_class, out_fn)
                out_fn = os.path.join(args.save_dir, 'shape-%02d-gt.ply' % i)
                data_utils.save_ply_property(data_test[i], label_gt[i],
                                             setting.num_class, out_fn)

    all_seg_probs = np.vstack(all_seg_probs)
    np.save('out.npy', all_seg_probs)

    print('{}-Done!'.format(datetime.now()))

    print('Average Accuracy: %f' % (avg_acc / avg_cnt))
    print('Shape mean IoU: %f' % (shape_iou_tot / shape_iou_cnt))

    part_iou = np.divide(part_intersect[1:], part_union[1:])
    mean_part_iou = np.mean(part_iou)
    print('Category mean IoU: %f, %s' % (mean_part_iou, str(part_iou)))

    out_list = ['%3.1f' % (item * 100) for item in part_iou.tolist()]
    print('%3.1f;%3.1f;%3.1f;%s' %
          (avg_acc * 100 / avg_cnt, shape_iou_tot * 100 / shape_iou_cnt,
           mean_part_iou * 100, '[' + ', '.join(out_list) + ']'))
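The per-shape IoU bookkeeping inside the batch loop above can be distilled into a small NumPy-only helper; a sketch following the same conventions (label 0 means "other" and is ignored):

import numpy as np

def shape_mean_iou(pred, gt, num_class):
    # Mean IoU over the parts present in either prediction or ground truth.
    iou_tot, iou_cnt = 0.0, 0
    for j in range(1, num_class):
        gt_mask, pred_mask = (gt == j), (pred == j)
        if gt_mask.any() or pred_mask.any():
            intersect = np.sum(gt_mask & pred_mask)
            union = np.sum(gt_mask | pred_mask)
            iou_tot += intersect / union
            iou_cnt += 1
    return iou_tot / iou_cnt if iou_cnt else 0.0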
Example #56
0
def _upload_files(sca_client, name, data, result):
    data['name'] = name
    response = sca_client.snap_upload(data)
    if response.ok:
        response_data = response.json()
        status_url = response_data['status_url']

        # This is just a waiting game, so we'll show an indeterminate
        # AnimatedMarker for it.
        progress_indicator = ProgressBar(
            widgets=['Checking package status... ', AnimatedMarker()],
            maxval=7)
        progress_indicator.start()

        # Execute the package scan in another thread so we can update the
        # progress indicator.
        with ThreadPoolExecutor(max_workers=1) as executor:
            future = executor.submit(get_scan_data, sca_client, status_url)

            count = 0
            while not future.done():
                # Annoyingly, there doesn't seem to be a way to actually
                # make a progress indicator that will go on forever, so we
                # need to restart this one each time we reach the end of
                # its animation.
                if count >= 7:
                    progress_indicator.start()
                    count = 0

                # Actually update the progress indicator
                progress_indicator.update(count)
                count += 1
                time.sleep(0.15)

            # Grab the results from the package scan
            completed, data = future.result()

        progress_indicator.finish()

        if completed:
            message = data.get('message', '')
            if not message:
                result['success'] = True
                result['revision'] = data.get('revision')
            else:
                result['errors'] = [message]
        else:
            result['errors'] = [
                'Package scan took too long.',
            ]
            status_web_url = response_data.get('web_status_url')
            if status_web_url:
                result['errors'].append(
                    'Please check the status later at: {}.'.format(
                        status_web_url),
                )
        result['application_url'] = data.get('application_url', '')
    else:
        logger.error(
            'There was an error uploading the application.\n'
            'Reason: {}\n'
            'Text: {}'.format(response.reason, response.text))
        result['errors'] = [response.text]
    return result
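The indeterminate-progress pattern above (an AnimatedMarker with a small maxval that gets restarted whenever the counter wraps) generalizes to any long-running callable; a sketch, with wait_with_spinner as a hypothetical helper name:

import time
from concurrent.futures import ThreadPoolExecutor

from progressbar import AnimatedMarker, ProgressBar

def wait_with_spinner(fn, *args):
    bar = ProgressBar(widgets=['Working... ', AnimatedMarker()], maxval=7)
    bar.start()
    with ThreadPoolExecutor(max_workers=1) as executor:
        future = executor.submit(fn, *args)
        count = 0
        while not future.done():
            if count >= 7:  # restart once the marker animation reaches maxval
                bar.start()
                count = 0
            bar.update(count)
            count += 1
            time.sleep(0.15)
        result = future.result()
    bar.finish()
    return result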
Example #57
0
def generate_subtitles(  # pylint: disable=too-many-locals,too-many-arguments
    source_path,
    output=None,
    concurrency=DEFAULT_CONCURRENCY,
    src_language=DEFAULT_SRC_LANGUAGE,
    dst_language=DEFAULT_DST_LANGUAGE,
    subtitle_file_format=DEFAULT_SUBTITLE_FORMAT,
    api_key=None,
):
    """
    Given an input audio/video file, generate subtitles in the specified language and format.
    """
    audio_filename, audio_rate = extract_audio(source_path)

    regions = find_speech_regions(audio_filename)

    pool = multiprocessing.Pool(concurrency)
    converter = FLACConverter(source_path=audio_filename)
    recognizer = SpeechRecognizer(language=src_language,
                                  rate=audio_rate,
                                  api_key=GOOGLE_SPEECH_API_KEY)

    transcripts = []
    if regions:
        try:
            widgets = [
                "Converting speech regions to FLAC files: ",
                Percentage(), ' ',
                Bar(), ' ',
                ETA()
            ]
            pbar = ProgressBar(widgets=widgets, maxval=len(regions)).start()
            extracted_regions = []
            for i, extracted_region in enumerate(pool.imap(converter,
                                                           regions)):
                extracted_regions.append(extracted_region)
                pbar.update(i)
            pbar.finish()

            widgets = [
                "Performing speech recognition: ",
                Percentage(), ' ',
                Bar(), ' ',
                ETA()
            ]
            pbar = ProgressBar(widgets=widgets, maxval=len(regions)).start()

            for i, transcript in enumerate(
                    pool.imap(recognizer, extracted_regions)):
                transcripts.append(transcript)
                pbar.update(i)
            pbar.finish()

            if src_language.split("-")[0] != dst_language.split("-")[0]:
                if api_key:
                    google_translate_api_key = api_key
                    translator = Translator(dst_language,
                                            google_translate_api_key,
                                            dst=dst_language,
                                            src=src_language)
                    prompt = "Translating from {0} to {1}: ".format(
                        src_language, dst_language)
                    widgets = [prompt, Percentage(), ' ', Bar(), ' ', ETA()]
                    pbar = ProgressBar(widgets=widgets,
                                       maxval=len(regions)).start()
                    translated_transcripts = []
                    for i, transcript in enumerate(
                            pool.imap(translator, transcripts)):
                        translated_transcripts.append(transcript)
                        pbar.update(i)
                    pbar.finish()
                    transcripts = translated_transcripts
                else:
                    print(
                        "Error: Subtitle translation requires specified Google Translate API key. "
                        "See --help for further information.")
                    return 1

        except KeyboardInterrupt:
            pbar.finish()
            pool.terminate()
            pool.join()
            print("Cancelling transcription")
            raise

    timed_subtitles = [(r, t) for r, t in zip(regions, transcripts) if t]
    formatter = FORMATTERS.get(subtitle_file_format)
    formatted_subtitles = formatter(timed_subtitles)

    dest = output

    if not dest:
        base = os.path.splitext(source_path)[0]
        dest = "{base}.{format}".format(base=base, format=subtitle_file_format)

    with open(dest, 'wb') as output_file:
        output_file.write(formatted_subtitles.encode("utf-8"))

    os.remove(audio_filename)

    return dest
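The Percentage/Bar/ETA plus pool.imap combination is repeated three times above; it can be factored into one helper. A sketch, with imap_with_progress as a hypothetical name and any picklable worker function assumed:

from progressbar import Bar, ETA, Percentage, ProgressBar

def imap_with_progress(pool, func, items, label):
    widgets = [label, Percentage(), ' ', Bar(), ' ', ETA()]
    pbar = ProgressBar(widgets=widgets, maxval=len(items)).start()
    results = []
    for i, result in enumerate(pool.imap(func, items)):
        results.append(result)
        pbar.update(i + 1)
    pbar.finish()
    return results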
    organization_id) + '&y=' + str(year_range)
organizations_url = 'http://api.aiddata.org/data/origin/organizations?'
json_orgs = json.load(urllib2.urlopen(organizations_url))
donating_org = ''

# Finds the organization based on the id
for org in json_orgs['hits']:
    donating_org = org['name']
    print 'Creating map for ' + donating_org

    json_result = json.load(urllib2.urlopen(url))
    num_projects = json_result['project_count']
    count = 0
    totamt = 0
    country_dict = {}
    pbar = ProgressBar(maxval=num_projects).start()

    # Iterates over the projects from the AidData API in chunks of 50, the max size allowed by the API
    while (count < num_projects):
        project_info = getProjectData(count, organization_id, year_range)
        for project in project_info['items']:
            # Only looks at projects that have transaction values
            if 'transactions' in project:
                for transactions in project['transactions']:
                    # Ignores projects that don't indicate a recipient country
                    if 'tr_receiver_country' in transactions and transactions[
                            'tr_receiver_country']['iso3'] != '':
                        donor = transactions['tr_funding_org']['name']
                        receiver = transactions['tr_receiver_country']['iso3']
                        amount = transactions['tr_constant_value']
                        totamt += amount
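The chunked pagination above (fetch 50 projects at a time while a ProgressBar tracks the total project count) can be sketched independently of the AidData specifics; fetch_page here is a hypothetical stand-in for getProjectData:

from progressbar import ProgressBar

PAGE_SIZE = 50  # the API's maximum page size, per the comment above

def fetch_all_projects(fetch_page, total):
    # fetch_page(offset) is assumed to return a dict with an 'items' list.
    pbar = ProgressBar(maxval=total).start()
    projects, count = [], 0
    while count < total:
        projects.extend(fetch_page(count)['items'])
        count += PAGE_SIZE
        pbar.update(min(count, total))
    pbar.finish()
    return projects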
Example #59
0
def with_example22():
    try:
        with ProgressBar(maxval=-1) as progress:
            progress.start()
    except ValueError:
        pass
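For contrast, the same context-manager form with a valid maxval runs to completion; a small sketch, not part of the original test:

from progressbar import ProgressBar

def with_valid_maxval():
    with ProgressBar(maxval=10) as progress:
        progress.start()
        for i in range(10):
            progress.update(i + 1)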
Example #60
0
def svg_heatmap(data, filename, row_labels=None, box_size=4,
                index=None,
                cmap=ISH, norm_rows_by=None, draw_row_labels=False,
                col_sep='', box_height=None, total_width=None,
                draw_box=False, draw_name=False, data_names=None,
                progress_bar=False,
                max_width=np.inf,
                spacers=None,
                cmap_by_prefix=None,
                split_columns=False,
                vspacer=30,
                hatch_nan=True, hatch_size=20,
                first_col='', last_col=''):
    """
    Draw heatmap as an SVG file stored in filename

    *data* can be either a 2D array-like type (list of lists, numpy array,
    pandas DataFrame, etc), or a tuple of 2D array-likes, in which case a
    separator will be added between each one in the output

    *cmap* is a matplotlib-like colormap (i.e. a callable that expects floats
    in the range 0.0-1.0.), or an iterable of the same length as the tuple
    *data* containing colormaps

    *row_labels* can be supplied; otherwise they will be detected from the first
    item in *data*, if available, and will be blank if not.

    If *total_width* is supplied, width of each dataset in *data* will be
    scaled to that constant. If *box_height* is supplied, the height of each
    row will be *box_height*, otherwise it will be equal to the width of each
    element. If neither are supplied, elements will be squares equal to
    *box_size*. IT IS STRONGLY RECOMMENDED that if if supplying *total_width*,
    *box_height* also be specified, but this is not enforced.

    *draw_row_labels*, if True, will label the rows on the right-hand side. As
    of 2013/09/03, this won't scale the SVG properly, so the resulting file
    won't display properly when included in an HTML element.

    *spacers* is the distance between adjacent datasets. It can either be a
    number, in which case it will apply to all datasets, or an iterable for
    different distances. If the iterable is shorter than the number of
    datasets, the last value will be repeated.

    """
    import svgwrite as svg
    import pandas as pd

    if split_columns and isinstance(data, pd.DataFrame):
        from Utils import sel_startswith
        colnames = list(sorted(
            {col.split(col_sep)[0] for col in data.columns}))
        data = tuple(
            data.select(**sel_startswith(colname)) for colname in colnames
        )
    elif not isinstance(data, tuple):
        data = (data,)

    rows, cols = np.shape(data[0])
    if index is not None:
        rows = len(index)
    if box_height is None:
        box_height = box_size

    if total_width is not None and max_width is not np.inf:
        dwg = svg.Drawing(filename,
                          size=(max_width,
                                np.ceil((len(data) * total_width)/max_width)
                                * (box_height+vspacer)))
    else:
        dwg = svg.Drawing(filename)
    dwg.add(svg.base.Title(path.basename(filename)))

    pat = dwg.pattern(id='hatch', insert=(0, 0), size=(hatch_size, hatch_size),
                      patternUnits='userSpaceOnUse')
    g = pat.add(dwg.g(style="fill:none; stroke:#B0B0B0; stroke-width:1"))
    g.add(dwg.path(('M0,0', 'l{hatch},{hatch}'.format(hatch=hatch_size))))
    g.add(dwg.path(('M{hatch2},0 l{hatch2},{hatch2}'.format(hatch2=hatch_size/2).split())))
    g.add(dwg.path(('M0,{hatch2} l{hatch2},{hatch2}'.format(hatch2=hatch_size/2).split())))

    dwg.add(pat)

    if row_labels is None:
        if index is not None:
            row_labels = index
        elif hasattr(data[0], 'index'):
            row_labels = data[0].index
        else:
            row_labels = ['' for row in range(rows)]

    if box_height is None:
        box_height = box_size

    if not hasattr(cmap, "__len__"):
        cmap = [cmap for frame in data]

    if data_names is None:
        data_names = ["" for frame in data]

    if len(cmap) != len(data):
        raise ValueError("cmap and data should be the same length")

    if not hasattr(spacers, "__len__"):
        spacers = [spacers]
    else:
        spacers = list(spacers)
    while len(spacers) < len(data):
        spacers.append(spacers[-1])

    if not isinstance(norm_rows_by, tuple):
        norm_rows_by = repeat(norm_rows_by)

    x_start = 0
    y_start = 0
    y_diff = 0
    if progress_bar:
        from progressbar import ProgressBar
        # Materialize the iterator so its length is known for the progress bar.
        iterator = list(zip(data, cmap, data_names, norm_rows_by, spacers))
        pbar = ProgressBar(maxval=len(iterator)*rows).start()
        pbar_val = 0
    else:
        iterator = zip(data, cmap, data_names, norm_rows_by, spacers)

    for frame, c_cmap, name, normer, spacer in iterator:
        if frame is None:
            # Treat a None frame purely as horizontal spacing and skip drawing.
            if total_width is not None:
                if spacer is None:
                    x_start += total_width * 1.1
                else:
                    x_start += total_width + spacer
            else:
                if spacer is None:
                    x_start += box_size
                else:
                    x_start += spacer
            if x_start > max_width:
                x_start = 0
                y_start += y_diff
            continue
        frame = pd.DataFrame(frame)
        if normer is None:
            norm_data = frame.copy()
        elif normer == 'mean':
            norm_data = frame.divide(frame.dropna(axis=1).mean(axis=1)+10, axis=0)
        elif normer == 'max':
            norm_data = frame.divide(frame.dropna(axis=1).max(axis=1)+10, axis=0)
        elif normer == 'center0':
            norm_data = (0.5 +
                         0.5 * frame.divide(frame.dropna(axis=1).abs().max(axis=1),
                                      axis=0)
                        )
        elif index is not None and hasattr(normer, "ix"):
            norm_data = frame.divide(normer.ix[index], axis=0)
        elif hasattr(normer, "__len__") and len(normer) == rows:
            norm_data = frame.divide(normer, axis=0)

        elif hasattr(normer, "__len__"):
            raise TypeError("norm_rows_by should be the same shape "
                            "as the number of rows")
        else:
            norm_data = frame.divide(normer, axis=0)

        if not c_cmap or str(c_cmap).lower() == 'default':
            c_cmap = ISH

        new_rows, new_cols = np.shape(frame)
        if hasattr(frame, 'index'):
            col_labels = frame.columns
        else:
            col_labels = ['' for col in range(new_cols)]
        if new_rows != rows:
            raise ValueError("All input elements must have the same number of"
                             " rows (and same row meanings --unchecked)")

        if total_width is not None:
            box_size = total_width / float(new_cols)

        for i in range(rows):
            if progress_bar:
                pbar.update(pbar_val)
                pbar_val += 1
            prefix = col_labels[0][:col_labels[0].find(col_sep)]
            if cmap_by_prefix:
                c_cmap = cmap_by_prefix(prefix)
            for j in range(new_cols):
                g = dwg.g()
                g.add(svg.base.Title("{}, {}: {:.2f}".format(row_labels[i],
                                                             col_labels[j],
                                                             frame.ix[i, j])))
                hatch = not isfinite(norm_data.ix[i, j])
                if hatch:
                    n = 0
                    norm_data.ix[i, j] = 0
                    if j > 0:
                        norm_data.ix[i, j] += norm_data.ix[i, j-1]
                        n += 1
                    if j + 1 < len(norm_data.columns):
                        norm_data.ix[i, j] += norm_data.ix[i, j+1]
                        n += 1
                    norm_data.ix[i, j] /= n
                g.add(dwg.rect((x_start + box_size*j, y_start + i*box_height),
                               (box_size, box_height),
                               style="fill:#{:02x}{:02x}{:02x}"
                               .format(*[int(255*x) for x in
                                         c_cmap(norm_data.ix[i, j])])))
                dwg.add(g)
                if hatch_nan and hatch:
                    g.add(dwg.rect((x_start + box_size*j,
                                    y_start + i*box_height),
                                   (box_size, box_height),
                                   style="fill:url(#hatch)"
                                  )
                         )
                col_base = col_labels[j][:col_labels[j].find(col_sep)]
                if col_base != prefix:
                    prefix = col_base
                    if cmap_by_prefix:
                        c_cmap = cmap_by_prefix(prefix)
                    g.add(dwg.line((x_start + box_size * j,
                                    y_start + i * box_height),
                                   (x_start + box_size * j,
                                    y_start + (i + 1) * box_height),
                                   style="stroke-width:{}; stroke:#000000"
                                   .format(.1 * box_size)))
        dwg.add(dwg.text(first_col, (x_start,
                                     y_start + (i + 1) * box_height)))
        dwg.add(dwg.text(last_col, (x_start + (new_cols - 1) * box_size,
                                    y_start + (i + 1) * box_height)))
        if draw_box:
            dwg.add(dwg.rect((x_start, y_start + 0),
                             (new_cols*box_size, rows*box_height),
                             style="stroke-width:1; "
                             "stroke:#000000; fill:none"))
        if draw_name:
            dwg.add(dwg.text(name,
                             (x_start + box_size * new_cols / 2.0,
                              y_start + box_height * (rows) + 13),
                             style="text-anchor: middle;"))

        if total_width is not None:
            if spacer is None:
                x_start += total_width * 1.1
            else:
                x_start += total_width + spacer
        else:
            if spacer is None:
                x_start += new_cols * box_size + box_size
            else:
                x_start += new_cols * box_size + spacer

        y_diff = new_rows * box_height + 30
        if x_start + total_width >= max_width:
            x_start = 0
            y_start += new_rows*box_height + vspacer

    if draw_row_labels:
        for i in range(rows):
            dwg.add(dwg.text(row_labels[i],
                             (x_start, y_start + i*box_height+box_height),
                             style='font-size:{}'.format(box_height),
                            ))
    if progress_bar:
        pbar.finish()
    dwg.saveas(filename)
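A hypothetical usage sketch for the function above; the DataFrame contents and filename are made up, and it assumes svgwrite plus an older pandas that still provides the .ix indexer used internally:

import numpy as np
import pandas as pd
# from <this module> import svg_heatmap  # module name not given above

frame = pd.DataFrame(np.random.rand(4, 6),
                     index=['gene%d' % i for i in range(4)],
                     columns=['s1_%d' % j for j in range(6)])
svg_heatmap(frame, 'heatmap.svg',
            draw_row_labels=True,
            box_height=10, total_width=120,
            progress_bar=True)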