Esempio n. 1
0
def prep_errors_bar(num_update_errors,
                    tot_reads,
                    suppress_progress,
                    curr_num_reads=0,
                    start_time=None):
    """Set up the tqdm read-progress bar plus an in-place error header.

    :param num_update_errors: number of dynamic error lines to reserve above
        the bar (0 disables the header; forced to 0 when progress is off).
    :param tot_reads: total number of reads for the bar.
    :param suppress_progress: when True no bar is created at all.
    :param curr_num_reads: initial bar position.
    :param start_time: optional timestamp to back-date the bar's rate clock.
    :returns: (bar, prog_prefix, bar_header) — any of which may be None.
    """
    if num_update_errors > 0 and not suppress_progress:
        # Reserve blank terminal lines that the dynamic error messages will
        # later overwrite in place.
        sys.stderr.write('\n' * (num_update_errors + 1))
    bar = prog_prefix = bar_header = None
    if suppress_progress:
        num_update_errors = 0
    else:
        bar = tqdm(total=tot_reads,
                   smoothing=0,
                   initial=curr_num_reads,
                   unit='read',
                   dynamic_ncols=True)
        if start_time is not None:
            bar.start_t = start_time
    if num_update_errors > 0:
        # Cursor-up escapes followed by a carriage return position writes at
        # the top of the reserved error area.
        prog_prefix = _term_move_up() * (num_update_errors + 1) + '\r'
        bar_header = '{} most common unsuccessful read types:'.format(
            num_update_errors)
        # Write the failed-read header above the bar.
        # NOTE(review): format_fail_summ is defined elsewhere in the project.
        bar.write(prog_prefix +
                  format_fail_summ(bar_header, num_errs=num_update_errors),
                  file=sys.stderr)

    return bar, prog_prefix, bar_header
Esempio n. 2
0
def print_progress_tqdm(hps, metrics, pbar, is_notebook, clear=True):
    """Custom writer for tqdm which prints winning metrics
    to console after each iteration.

    Uses a hack for tqdm.write(): https://github.com/tqdm/tqdm/issues/520

    # Arguments:
        hps: dict of hyperparameters
        metrics: dict of hyperparameters+metrics
        pbar: a tqdm progressbar
        is_notebook: boolean if automl-gs is running in a Notebook.
        clear: if writing should clear existing output
    """
    # Only the entries that are metrics proper (not hyperparameters).
    metric_lines = [
        '{}: {}'.format(key, val)
        for key, val in metrics.items()
        if key not in hps
    ]
    console_str = "\nMetrics:\n" + '\n'.join(metric_lines)

    # In a notebook there is no cursor control, so the move-up escape is
    # replaced with an empty string.
    move_up_char = _term_move_up() if not is_notebook else ''
    if clear:
        # Rewind over the previous block so it gets overwritten in place.
        pbar.write(move_up_char * (console_str.count('\n') + 2))

    pbar.write(console_str)
Esempio n. 3
0
class TqdmFile(object):
    """File-like shim that funnels writes through ``tqdm.tqdm.write``.

    A trailing carriage return is deferred: it is converted to a newline and
    remembered, so the NEXT write is prefixed with a cursor-up escape and
    overwrites the line in place.
    """
    file = None
    # Cursor-up escape plus carriage return, applied after a pending '\r'.
    prefix = _term_move_up() + '\r'

    def __init__(self, file):
        self.file = file
        self._carriage_pending = False

    def write(self, line):
        """Write *line*, emulating in-place '\r' updates via tqdm."""
        text = line
        if self._carriage_pending:
            text = self.prefix + text
            self._carriage_pending = False

        if text.endswith('\r'):
            # Defer the overwrite to the next call; end this one cleanly.
            self._carriage_pending = True
            text = text[:-1] + '\n'

        tqdm.tqdm.write(text, file=self.file, end='')

    def flush(self):
        fn = getattr(self.file, "flush", None)
        return fn() if fn is not None else None

    def isatty(self):
        fn = getattr(self.file, "isatty", None)
        return fn() if fn is not None else False

    def close(self):
        fn = getattr(self.file, "close", None)
        return fn() if fn is not None else None
    def set_attributes(self):
        """Ensure the DivisionLevel hierarchy and the national Division exist,
        caching each record on the instance for later use.

        Levels are chained country -> state -> {district, county}, with
        township and precinct nested under county.
        """
        self.NATIONAL_LEVEL, created = DivisionLevel.objects.get_or_create(
            name=DivisionLevel.COUNTRY)
        self.STATE_LEVEL, created = DivisionLevel.objects.get_or_create(
            name=DivisionLevel.STATE, parent=self.NATIONAL_LEVEL)
        self.DISTRICT_LEVEL, created = DivisionLevel.objects.get_or_create(
            name=DivisionLevel.DISTRICT, parent=self.STATE_LEVEL)
        self.COUNTY_LEVEL, created = DivisionLevel.objects.get_or_create(
            name=DivisionLevel.COUNTY, parent=self.STATE_LEVEL)

        # Other fixtures
        self.TOWNSHIP_LEVEL, created = DivisionLevel.objects.get_or_create(
            name=DivisionLevel.TOWNSHIP, parent=self.COUNTY_LEVEL)
        # BUG FIX: the (object, created) tuple returned by get_or_create was
        # previously stored directly, so PRECINCT_LEVEL was a tuple while
        # every sibling attribute held a DivisionLevel instance.
        self.PRECINCT_LEVEL, created = DivisionLevel.objects.get_or_create(
            name=DivisionLevel.PRECINCT, parent=self.COUNTY_LEVEL)

        self.NATION, created = Division.objects.get_or_create(
            code="00",
            name="United States of America",
            label="United States of America",
            short_label="USA",
            level=self.NATIONAL_LEVEL,
        )

        # ANSI cursor-up + carriage return used to rewrite tqdm lines in place.
        self.TQDM_PREFIX = _term_move_up() + "\r"
        # Census cartographic-boundary shapefile base URL ({} = vintage year).
        self.SHP_SOURCE_BASE = "https://www2.census.gov/geo/tiger/GENZ{}/shp/"
        self.DOWNLOAD_DIRECTORY = "./tmp/data/geography/"
Esempio n. 5
0
 def clear(self):
     """Mark the display done and wipe any meter lines that were drawn.

     The cursor is always rewound over the meter area afterwards, whether
     or not the lines were actually blanked.
     """
     with self.lock:
         self.done = True
         if self.drawn:
             # CR, blank the line with spaces, CR again, then newline.
             wipe = '\r' + ' ' * self.ncols + '\r' + '\n'
             for _ in self.meters:
                 self.fp.write(wipe)
         # Rewind the cursor to the top of the meter area.
         self.fp.write(_unicode(_term_move_up() * len(self.meters)))
Esempio n. 6
0
 def draw(self):
     """Render one status line per meter, then rewind the cursor above them."""
     with self.lock:
         # Skip drawing during the initial grace period, or once clear()
         # has marked the display as finished.
         too_early = (time.time() - self.creation_time) < self.delay_draw
         if too_early or self.done:
             return
         for meter in self.meters:
             tqdm.status_printer(self.fp)(meter)
             self.fp.write('\n')
         # Move the cursor back up so the next draw overwrites in place.
         self.fp.write(_unicode(_term_move_up() * len(self.meters)))
         self.drawn = True
Esempio n. 7
0
def update_progress_kuma(progress):
    """Redraw the Kuma-san ASCII mascot at a horizontal offset that tracks
    the progress bar's completion ratio."""
    tqdm.write(_term_move_up() * 10)  # rewind over the previous drawing
    # Left padding proportional to progress (40 cols reserved for the art).
    pad = " " * int(progress.n / progress.total * (progress.ncols - 40))

    art = [
        '    _______________',
        '   |               |',
        '   |  KUMA-SAN IS  |',
        '   |  OPTIMIZING!  |',
        '   |   {:>3}/{:<3}     |'.format(progress.n, progress.total),
        '   |________|',
        ' ( )  ( )||',
        ' ( •(エ)•)|| ',
        ' /      づ',
    ]
    for line in art:
        tqdm.write(pad + line)
Esempio n. 8
0
class DummyTqdmFile:
    """File-like object that redirects writes through ``tqdm.write`` so plain
    print output does not corrupt active progress bars.

    It remembers how many lines the previous "refreshable" message occupied,
    so the next one can move the cursor up and overwrite it in place.
    """

    # ANSI escape that moves the cursor up one line.
    MOVE_UP = tqdm_utils._term_move_up()

    def __init__(self, fobj):
        # Underlying stream (e.g. the real sys.stdout).
        self.fobj = fobj
        # True while a refreshable (overwritable) message is being emitted.
        self._updating = False
        # Line count of the message currently being written.
        self.tmp_cnt = 0
        # Line count of the last fully written refreshable message.
        self.line_cnt = 0
        # Outermost tqdm bar observed during the previous write, if any.
        self.open_bar = None

    def updating(self):
        """Enter refreshable mode: subsequent writes may be overwritten."""
        self.tmp_cnt = 0
        self._updating = True

    def updated(self):
        """Leave refreshable mode, remembering how many lines were used."""
        self._updating = False
        self.line_cnt = self.tmp_cnt

    def flush(self):
        # Forget the previous message so the next write does not rewind.
        self.line_cnt = 0

    def write(self, msg):
        """Write *msg* via tqdm, rewinding over the previous refreshable
        message when in updating mode."""
        tqdm_instances = getattr(tqdm, '_instances', [])
        # Find the most outer instance, i.e. the one with the smallest
        # absolute screen position.
        outer_bar = None
        pos = len(tqdm_instances)
        for inst in tqdm_instances:
            if abs(inst.pos) <= pos:
                outer_bar = inst
                # BUG FIX: narrow the threshold so the loop keeps the
                # minimum-|pos| instance rather than the last one examined.
                pos = abs(inst.pos)

        # If the previously tracked bar was closed, its lines are gone —
        # do not try to rewind over them.
        if self.open_bar is not None:
            if self.open_bar not in tqdm_instances:
                self.line_cnt = 0
        self.open_bar = outer_bar

        # Record line count if we are printing refreshable messages.
        if self._updating:
            self.tmp_cnt += msg.count('\n')
            if self.line_cnt > 0:
                msg = DummyTqdmFile.MOVE_UP * self.line_cnt + msg

        # Pad to the terminal width so stale characters get overwritten.
        cols = _get_cols()
        msg = msg.rstrip().ljust(cols)

        self.line_cnt = 0
        if msg.rstrip():
            tqdm.write(msg, file=self.fobj)
        self.fobj.flush()
Esempio n. 9
0
def test_train():
    """Smoke-test GraphLSTMNet construction on the hands2017 hand graph and
    exercise TQDMHelper output alongside a live tqdm bar."""
    # 21 joint hand graph as used in hands2017 dataset
    hand_edges = [("TMCP", "Wrist"), ("IMCP", "Wrist"), ("MMCP", "Wrist"), ("RMCP", "Wrist"),
                  ("PMCP", "Wrist"),
                  ("IMCP", "MMCP"), ("MMCP", "RMCP"), ("RMCP", "PMCP"),
                  ("TMCP", "TPIP"), ("TPIP", "TDIP"), ("TDIP", "TTIP"),
                  ("IMCP", "IPIP"), ("IPIP", "IDIP"), ("IDIP", "ITIP"),
                  ("MMCP", "MPIP"), ("MPIP", "MDIP"), ("MDIP", "MTIP"),
                  ("RMCP", "RPIP"), ("RPIP", "RDIP"), ("RDIP", "RTIP"),
                  ("PMCP", "PPIP"), ("PPIP", "PDIP"), ("PDIP", "PTIP")]

    # joint order as used in hands2017 dataset
    joint_index = {"Wrist": 0,
                   "TMCP": 1, "IMCP": 2, "MMCP": 3, "RMCP": 4, "PMCP": 5,
                   "TPIP": 6, "TDIP": 7, "TTIP": 8,
                   "IPIP": 9, "IDIP": 10, "ITIP": 11,
                   "MPIP": 12, "MDIP": 13, "MTIP": 14,
                   "RPIP": 15, "RDIP": 16, "RTIP": 17,
                   "PPIP": 18, "PDIP": 19, "PTIP": 20}

    nxgraph = glstm.GraphLSTMNet.create_nxgraph(hand_edges, num_units=3,
                                                index_dict=joint_index)
    from unit_tests import plot_nxgraph as plot
    graph_lstm_net = glstm.GraphLSTMNet(nxgraph, shared_weights=glstm.NEIGHBOUR_CONNECTIONS_SHARED)
    for node in nxgraph:
        print(node, nxgraph.nodes[node][glstm._INDEX])

    from tqdm import tqdm
    from tqdm._utils import _term_move_up
    from time import sleep
    sleep(1)
    prefix = _term_move_up() + '\r'

    def progress_values():
        # Generator wrapping a transient tqdm bar.
        yield from tqdm(range(50), leave=False)

    helper = TQDMHelper()
    helper.start()
    for _ in range(2):
        for value in progress_values():
            helper.write("I am %i" % value)
            sleep(.05)
    helper.stop()

    print("Done.")
    sleep(2)
Esempio n. 10
0
    def train_network(self):
        """Train the model for 200 epochs, reporting validation loss and
        accuracy every 10% of the way through a tqdm progress bar."""
        total_epochs = 200
        print("Training the model ({} epochs) ...".format(total_epochs))
        starting_time = time.time()

        border = "-" * 50
        # Cursor-up + blank line + CR: erases the previous border so status
        # lines can be rewritten in place above the bar.
        clear_border = _term_move_up() + "\r" + " " * len(border) + "\r"
        print(border)
        print(border)

        progress_bar = trange(total_epochs)
        for epoch in progress_bar:
            # --- training step (dropout active) ---
            self.model.train()
            self.optimizer.zero_grad()
            predictions = self.model(self.features, self.adjacency_matrix)
            train_loss = functional.nll_loss(predictions[self.index_train],
                                             self.labels[self.index_train])
            train_accuracy = self.accuracy(predictions[self.index_train],
                                           self.labels[self.index_train])

            train_loss.backward()
            self.optimizer.step()

            # --- validation step (dropout off) ---
            self.model.eval()
            predictions = self.model(self.features, self.adjacency_matrix)
            validation_loss = functional.nll_loss(
                predictions[self.index_validation],
                self.labels[self.index_validation])
            validation_accuracy = self.accuracy(
                predictions[self.index_validation],
                self.labels[self.index_validation])

            # Report every 10% of the run.
            if epoch % (total_epochs / 10) == 0:
                progress_bar.write(
                    clear_border +
                    "epoch:{} loss:{:.9f} accuracy:{:.2f}".format(
                        epoch, validation_loss, validation_accuracy))
                progress_bar.write(border)

        print("Total time elapsed during training: {:.4f}s".format(
            time.time() - starting_time))
Esempio n. 11
0
# --- Thumbnail / HDR preview generation settings ---

thumbnailExtension = ".jpg"

# Output widths in pixels for thumbnails and HDR previews.
thumbnailWidth = 270
hdrWidth = 8192
hdrBlurWidth = 4096

# Tile size used when processing images in chunks.
tileSize = 64

# Blur / resize parameters.
# NOTE(review): filter names ("bspline", "mitchell") presumably follow the
# image-processing library's naming — confirm against its docs.
blurAmountX = 25.0
blurAmountY = 35.0
blurFilter = "bspline"
resizeFilter = "mitchell"

errorFlag = 0
# Queue collecting results from worker threads.
# NOTE(review): `Queue.Queue` is the Python 2 module name (`queue` in Py3).
threadResult = Queue.Queue()
# ANSI cursor-up + CR used to rewrite the previous tqdm line in place.
prefix = _term_move_up() + '\r'
# Custom tqdm bar format string.
barFormat = "{desc:<9}{percentage:3.0f}%|{bar}| {n_fmt:<4}/{total_fmt:<4} {elapsed}<{remaining:<6}, {rate_fmt:<11}{postfix}"

# Recognised texture / HDR / preview file extensions.
textureExts = [".jpg", ".exr", ".tif", ".jpeg", ".hdr", ".png"]
hdrExts = [".exr", ".hdr"]
hdrPrevExts = [".jpg"]
# excludeFolders      = ["_ADAPTLOOKDEV_", "_SRC"]

# -------------------------------------------------------------------------------------
# Get & Set Pathes
# -------------------------------------------------------------------------------------

# Project folders resolved via the (project-local) `folder` helper.
rootFolder = folder.rootDir("")
adaptFolder = folder.rootDir("_ADAPTLOOKDEV_")
hdrFolder = folder.rootDir("_ADAPTLOOKDEV_/lighting/hdr")
hdrPrevFolder = folder.rootDir("_ADAPTLOOKDEV_/lighting/hdr/previews")
        np.array(eval_data[:, 1],
                 dtype='float32').reshape(eval_data.shape[0], 1))
    net.load_state_dict(torch.load(ckpt_path))
    net.eval()
    eval_outputs = net(eval_inputs)
    print(eval_outputs.data.numpy())
    print(eval_targets)
    print(
        torch.mean(
            torch.true_divide(torch.abs(eval_outputs - eval_targets),
                              eval_targets)).item())
    exit()

# Epoch-level progress bar over the whole training run.
pbar = tqdm(range(total_epochs))
border = "=" * 50
# Cursor-up + blank + CR: erases the previous status line so per-epoch
# messages can be rewritten in place above the bar.
clear_border = _term_move_up() + "\r" + " " * len(border) + "\r"
for epoch in pbar:  # loop over the dataset multiple times

    running_loss = 0.0
    for i, data in enumerate(train_loader, 0):
        # get the inputs; data is a list of [inputs, labels]
        inputs, labels = data

        # NOTE(review): `Variable(..., volatile=True)` is the legacy pre-0.4
        # PyTorch API and disables autograd — suspicious inside a training
        # loop where a backward pass is presumably intended; confirm.
        inputs = Variable(inputs, volatile=True).cuda()
        labels = Variable(labels, volatile=True).cuda()

        # zero the parameter gradients
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = net(inputs)
        #loss = criterion(outputs, labels)
Esempio n. 13
0
def printout(comment, info_type = 0, info_list = None, fp = "", msg_token = "", color = "", end = None, overwrite = False):
    """Emit a message to one or more sinks, optionally reading user input.

    Parameters
    ----------
    comment : str
        Text to output (or the input() prompt when info_type is 1 or 2).
    info_type : int, optional
        0 = output only; 1 = prompt for input (normal); 2 = prompt for
        input (credentials: the value is masked in the log file).
    info_list : list of int, optional
        Four flags [stdout, slack, line, file]; 1 enables the sink.
        Defaults to [1, 0, 0, 1].
    fp : file object, optional
        Open file used by the file sink (flag index 3).
    msg_token : str, optional
        Slack webhook URL or LINE Notify token, depending on the flag set.
    color : str, optional
        Key into color_dict for terminal colouring.
    end : str, optional
        Passed through to print().
    overwrite : bool, optional
        When True, rewrite the previous terminal line via tqdm.

    Returns
    -------
    str
        The value read from input() (empty string when nothing was read).
    """
    # BUG FIX: the default was a mutable list shared across calls.
    if info_list is None:
        info_list = [1, 0, 0, 1]
    # BUG FIX: `val` was unbound (UnboundLocalError at `return val`) when
    # info_type was not one of 0/1/2.
    val = ""

    # No input variable requested: output only.
    if info_type == 0:
        # Console standard output.
        if info_list[0] == 1:
            if overwrite:
                # Move the cursor up and blank the previous line before
                # rewriting it in place via tqdm.
                border = "=" * 100
                prefix = _term_move_up() + '\r' + " " * len(border) + "\r"
                tqdm.write(prefix + comment)
            else:
                print(color_dict[color] + comment + Color.END, end = end)

        # Slack notification.
        if info_list[1] == 1:
            data = {'text': str(comment)}
            requests.post(msg_token, data = json.dumps(data))

        # LINE notification.
        if info_list[2] == 1:
            line_notify_api = 'https://notify-api.line.me/api/notify'
            headers = {'Authorization': f'Bearer {msg_token}'}
            data = {'message': str(comment)}
            requests.post(line_notify_api, headers = headers, data = data)

        # File output.
        if info_list[3] == 1:
            print(color_dict[color] + comment + Color.END, end = end, file = fp, flush=True)

    # Input requested (normal: info_type == 1, credentials: info_type == 2).
    elif info_type == 1 or info_type == 2:
        # Prompt on the console and capture the response.
        val = input(comment)

        # File output (credentials are masked beyond the first 3 chars).
        if info_list[3] == 1 and info_type == 1:
            print(comment + val, file = fp, flush=True)
        elif info_list[3] == 1 and info_type == 2:
            if len(val) > 3:
                print(comment + val[0:3] + "*" * (len(val) - 3), file = fp, flush=True)
            else:
                print(comment + val, file = fp, flush=True)

    return val
Esempio n. 14
0
import multiprocessing as mp
import threading as th
from tqdm import tqdm
from tqdm._utils import _term_move_up

# ANSI escape that moves the cursor up one line, cached at import time.
up = _term_move_up()

# Create global parallelism locks to avoid racing issues with parallel bars
# works only if fork available (Linux, MacOSX, but not on Windows)
try:
    mp_lock = mp.RLock()  # multiprocessing lock
except ImportError:  # pragma: no cover
    # multiprocessing not available on this platform/build
    mp_lock = None
except OSError:  # pragma: no cover
    # e.g. sandboxed environments where OS semaphores cannot be created
    mp_lock = None
try:
    th_lock = th.RLock()  # thread lock
except OSError:  # pragma: no cover
    th_lock = None


class TqdmDefaultWriteLock(object):
    """
    Provide a default write lock for thread and multiprocessing safety.
    Works only on platforms supporting `fork` (so Windows is excluded).
    On Windows, you need to supply the lock from the parent to the children as
    an argument to joblib or the parallelism lib you use.
    """
    def __init__(self):
        global mp_lock, th_lock
        # Keep only the locks that could actually be created at import time.
        candidates = (mp_lock, th_lock)
        self.locks = [lock for lock in candidates if lock is not None]
Esempio n. 15
0
 def __init__(self):
     # Cache tqdm's terminal helpers once: the ANSI cursor-up + CR prefix
     # used to rewrite the previous line, and a callable reporting the
     # current terminal width for dynamic resizing.
     from tqdm._utils import _term_move_up, _environ_cols_wrapper
     self._r_prefix = _term_move_up() + '\r'
     self._dynamic_ncols = _environ_cols_wrapper()