Example no. 1
    def manual_lock(self, fpath, expiration_check=True):
        """
        Spins until a lock is acquired on a file. `fpath` can be absolute or relative and error checking will be done for you.
        Also checks if a lock is expired (as long as `expiration_check=True`) and automatically unlocks it if so.
        Doesn't return anything.
        *Lock must be freed with fs.unlock() at the end of use.*
        See `lock()` for a contextmanager version of this that unlocks for you at the end; it should be used the vast majority of the time.
        """
        #if self.verbose: print(f"manual_lock({fpath}) called")
        paths = self.get_paths(fpath)
        if self.ismetafile(fpath):
            return  # we don't lock lock files and guard extensions, but it's useful to be able to call lock() on them when doing things like iterating over directories

        tstart = time.time()
        # spinlock with _try_lock() attempts
        while not self._try_lock(paths.lock):
            if expiration_check:
                self._unlock_if_expired(paths)
            if time.time() - tstart > 5:
                print("Been waiting for a lock for over 5 seconds...")

        # If we just locked a directory, we now lock all sub items
        if self.isdir(fpath):
            if self.verbose:
                util.green(
                    "took a directory lock, so now taking locks on all sub items"
                )
            for item in self.listdir(fpath):
                self.manual_lock(join(fpath, item),
                                 expiration_check=expiration_check)
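
A minimal usage sketch of the pairing the docstring describes, assuming the `SyncedFS` API shown in the other examples (`manual_lock` must always be matched with `fs.unlock`, while the `lock()` contextmanager releases for you):

    fs = SyncedFS('mg', 'test', verbose=True)

    # manual variant: the caller is responsible for releasing the lock
    fs.manual_lock('testdir/testfile')
    try:
        with fs.open('testdir/testfile', 'w') as f:
            f.write('data')
    finally:
        fs.unlock('testdir/testfile')

    # contextmanager variant: preferred, unlocks automatically at the end
    with fs.lock('testdir/testfile'):
        with fs.open('testdir/testfile', 'w') as f:
            f.write('data')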
Example no. 2
def upload_sfs_file(username, password, filename):
    """
    Uploads a file to Snapchat FS.

    @username User who will own the file in Snapchat FS.
    @password Password of user.
    @filename Path of the file to upload.
    """
    print util.green('Uploading file ') + filename
    ss = SfsSession(username, password)
    ss.login()
    sfs_id = ss.generate_sfs_id(filename)
    ss.upload_image(filename, sfs_id)
    ss.send_image_to(username, sfs_id)
Example no. 3
File: main.py Project: DanSnow/DFS
def main(args):
    args['exit'] = threading.Event()
    args['sv'] = args['db'].server_list(args['defaults']['domain'])
    args['svlink'] = linkserver
    args['svlock'] = threading.RLock()
    for s in args['sv'].values():
        cli = linkserver(s, args['defaults']['domain'])
        if not cli.ping():
            print "%s: %s [%s]" % (s['name'], s['host'], util.red("failed"))
            sys.exit(1)
        print "%s: %s [%s]" % (s['name'], s['host'], util.green("OK"))
        cli.close()
    chksvth = threading.Thread(target=checkserver,
                               name='chksvth',
                               args=(args, ))
    chksvth.daemon = True
    chksvth.start()

    try:
        server.main(args)
    except KeyboardInterrupt:
        print ""
        print "Caught KeyboardInterrupt, closing..."
        args['exit'].set()
        chksvth.join()
    if args['defaults']['debug']:
        print "main exited"
Example no. 4
def exec_cmd(cmd, in_content=None, remote=None, pipe=True, print_cmd=False):
    if print_cmd:
        print green('[' + (remote if remote else 'localhost') +
                    '] [exec_cmd] ') + cmd

    if remote:
        # scp sh file to remote because the cmd may contain any kinds of quotes
        tmp_fd, tmp_fn = tempfile.mkstemp('.sh')  # mkstemp (not mkdtemp): we need a file descriptor, not a directory
        remote_tmp_fn = '/tmp/rain_tmpscript_' + os.path.basename(tmp_fn)
        try:
            os.write(tmp_fd, cmd)
            os.close(tmp_fd)
            exec_cmd('scp %s %s:%s' % (tmp_fn, remote, remote_tmp_fn))
        finally:
            os.unlink(tmp_fn)
        # run the uploaded script on the remote host (it is cleaned up below)
        cmd = "ssh %s 'sh %s'" % (remote, remote_tmp_fn)

    if not pipe:
        code = os.system(cmd)
        if code != 0:
            raise ExecError(code, '', '')
        return '', ''

    p = subprocess.Popen(cmd,
                         shell=True,
                         stdin=subprocess.PIPE if in_content else None,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)

    if in_content:
        p.stdin.write(in_content)
        p.stdin.close()

    stdout = p.stdout.read() if p.stdout else ''
    stderr = p.stderr.read() if p.stderr else ''
    code = p.wait()
    if code != 0:
        raise ExecError(code, stdout, stderr)

    if remote:
        # clean the remote tmp sh file
        try:
            exec_cmd("ssh %s 'rm -f %s'" % (remote, remote_tmp_fn))
        except ExecError, e:
            print 'Failed to clean remote file %s, reason:%s' % (remote_tmp_fn,
                                                                 e.message)
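
A hedged usage sketch for `exec_cmd` as defined above (the host name and commands are illustrative; remote mode assumes passwordless ssh):

    # run locally, capturing output; raises ExecError on a nonzero exit code
    stdout, stderr = exec_cmd('ls -l /tmp', print_cmd=True)

    # feed data to the command on stdin
    stdout, _ = exec_cmd('wc -l', in_content='a\nb\nc\n')

    # unpiped run: output goes straight to the terminal
    exec_cmd('make', pipe=False)

    # run on a remote host by shipping the command as a script
    exec_cmd('df -h', remote='node1')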
Example no. 5
def upload_sfs_file(username, password, filename):
    """
    Uploads a file to Snapchat FS.

    @username User who will own the file in Snapchat FS.
    @password Password of user.
    @filename Path of the file to upload.
    """
    with open(filename) as f:
        data = f.read()

    basename = os.path.basename(filename)
    print util.green('Uploading file ') + basename
    ss = SfsSession(username, password)
    ss.login()
    sfs_id = ss.generate_sfs_id(basename, data)
    ss.upload_image(data, sfs_id)
    ss.send_image_to(username, sfs_id)
Example no. 6
    def show_progress(self, opt, it, loss):
        time_elapsed = util.get_time(time.time() - self.time_start)
        print("it {0}/{1}, lr:{3}, loss:{4}, time:{2}".format(
            util.cyan("{}".format(it + 1)),
            opt.to_it,
            util.green("{0}:{1:02d}:{2:05.2f}".format(*time_elapsed)),
            util.yellow("{:.2e}".format(opt.lr_pmo)),
            util.red("{:.4e}".format(loss.all)),
        ))
Example no. 7
    def show_progress(self, opt, ep, loss):
        [lr] = self.sched.get_lr()
        time_elapsed = util.get_time(time.time() - self.time_start)
        print("ep {0}/{1}, lr:{3}, loss:{4}, time:{2}".format(
            util.cyan("{}".format(ep + 1)),
            opt.to_epoch,
            util.green("{0}:{1:02d}:{2:05.2f}".format(*time_elapsed)),
            util.yellow("{:.2e}".format(lr)),
            util.red("{:.4e}".format(loss.all)),
        ))
Example no. 8
def login(args, data):
    psk = data['pkt'].get('psk')

    if psk == hashlib.md5(args['defaults']['psk']).hexdigest():
        print "Auth %s From %s" % (util.green("Success"), str(data['addr']))
        data['sock'].sendall(packet.Packet({}, 'OK').tostr())
        return
    print "Auth %s From %s (psk: %s)" % (util.red("Failed"), str(
        data['addr']), str(psk))
    data['sock'].sendall(packet.Packet({}, 'REJECT').tostr())
Example no. 9
    def _try_lock(self, lockpath):
        """
        Makes a single attempt to take a lock, returning True if taken and False if the lock is busy.
        If successful, we add the lock to self.lock_time and increment its lock_count.
        Works for both locks and guards.
        """
        lock_time = time.time()
        lockmsg = f"{os.getpid()} {lock_time} {self.name}"
        #if self.verbose: print(f"attempting to lock {lockpath}")

        # check if we already have the lock
        if lockpath in self.lock_time:
            self.lock_count[lockpath] += 1
            if self.verbose:
                print("we already own this lock, incrementing lock_count")
            return True  # we let people take a lock multiple times if they already own it, incrementing `self.lock_count[lockpath]` each time

        # check for directory locks all along the path that we're locking
        dirs = lockpath[len(self.root):].split('/')
        for i in range(len(dirs)):
            # rejoin with '/' and re-prefix the root so the existence check uses a real path
            dirlock = self.root + '/'.join(dirs[:i + 1]) + LOCK_EXTENSION
            if isfile(dirlock):
                return False  # someone locked a directory along the path that we're trying to lock, so we give up

        # try to take the lock
        try:
            # open in 'x' mode (O_CREAT | O_EXCL) so it fails if the file already exists
            with open(lockpath, 'x') as f:
                f.write(lockmsg)
                if self.verbose:
                    util.green(f"LOCKED {self.get_paths(lockpath).relative}")
            self.lock_time[lockpath] = lock_time
            self.lock_count[lockpath] = 1
            return True  # we got the lock!
        except FileExistsError:
            return False  # we did not get the lock
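
The heart of `_try_lock` is the `open(lockpath, 'x')` call; a standalone sketch of that O_EXCL lock-file technique (function names here are illustrative):

    import os
    import time

    def try_lock(lockpath):
        # 'x' mode maps to O_CREAT | O_EXCL, so exactly one process can
        # create the file; every other process gets FileExistsError
        try:
            with open(lockpath, 'x') as f:
                f.write("%d %f" % (os.getpid(), time.time()))
            return True
        except FileExistsError:
            return False

    def unlock(lockpath):
        os.remove(lockpath)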
Example no. 10
    @contextmanager  # generator-based contextmanager (contextlib)
    def load(self):
        """
        This is an extremely important contextmanager. If you do any attr-mutation
        operation you probably want to use it. It holds a lock the whole time and
        loads the whole shared object into local memory for the duration of the
        contextmanager. Any getattr/setattr calls will use this local version.
        This contextmanager recurses with no issues. Nobody else can access the
        shared object as long as you have it loaded, since load() takes a lock().
        Why this is so important:
            If you do sharedobj.mylist.pop() outside of this contextmanager, the
            'mylist' attr will be loaded from disk, then pop() will be called on
            it, but it won't be written back to disk.
            If you have it wrapped in a load(), then all of the attributes will be
            loaded into self.attr_dict and the 'mylist' attr will simply be taken
            from this local dict, so when pop() is called it will modify the mylist
            copy in the local dict, and at the very end everything will get flushed
            back to disk when load() ends.
        """
        with self.lock():
            self.load_count += 1
            if self.fs.verbose:
                util.yellow(f'load(load_count={self.load_count})')
            if self.load_count == 1:  # if this is the first load since being unloaded
                if self.fs.verbose: util.green('--loaded--')
                self.attr_dict = self.fs.load_nosplay(self.abspath)
            try:
                yield None
            finally:
                self.load_count -= 1
                if self.fs.verbose:
                    util.yellow(f'unload(load_count={self.load_count})')
                if self.load_count == 0:  # unload for real
                    self.fs.save_nosplay(self.attr_dict, self.abspath)
                    if self.fs.verbose: util.purple('--unloaded--')
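
A usage sketch of the hazard and the fix the docstring describes (assuming `obj` is a shared object backed by this class):

    obj.mylist.pop()      # lost update: pops a copy loaded from disk, never written back

    with obj.load():      # takes the lock, loads attrs into obj.attr_dict
        obj.mylist.pop()  # mutates the local copy
    # on exit, attr_dict is flushed back to disk, so the pop persists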
Example no. 11
def download_all_sfs(username, password, target_dir):
    """
    Downloads all files managed by Snapchat FS, writing them to `target_dir`
    
    @username Username that owns the files.
    @password Password for the account specified by `username`
    @target_dir Where to download the files to.
    """
    # get all downloadable files tracked by Snapchat FS
    files = all_downloadable_sfs_files(username, password)

    # download each file in sequence; if we find two files with the same
    # name, we give the file a name that includes a hash of the contents
    filenames_downloaded = set()
    for filename, content_hash, received_id, snap in files:
        try:
            data = snap.download()
            if filename not in filenames_downloaded:
                print(util.green("Downloading snap ") + "%s" % filename)
                path = os.path.join(target_dir, filename)
            else:
                print(util.green("Downloading snap ") + ("%s " % filename) +
                      util.red("but filename is not unique; ") +
                      ("downloading as: %s" % (filename + "-" + content_hash)))
                path = os.path.join(target_dir, filename + "-" + content_hash)

            filenames_downloaded.add(filename)
            with open(path, 'w') as w:
                w.write(data)

        except Exception as e:
            print("Failed to download %s: %s" % (filename, e))
            raise
Example no. 12
def dirtest():
    util.green("DIRTEST")
    fs = SyncedFS('mg', 'test', verbose=True)
    fs.wipe()
    # directories

    # populate testdir
    fs.mkdir('testdir')
    with fs.open('testdir/testfile', 'w') as f:
        f.write('test')
    with fs.open('testdir/testfile2', 'w') as f:
        f.write('test')

    # populate testdir/subdir
    fs.mkdir('testdir/subdir')
    with fs.open('testdir/subdir/subfile1', 'w') as f:
        f.write('test')
    with fs.open('testdir/subdir/subfile2', 'w') as f:
        f.write('test')

    # populate testdir/subdir/subdir
    fs.mkdir('testdir/subdir/subdir')
    with fs.open('testdir/subdir/subdir/ss1', 'w') as f:
        f.write('test')
    with fs.open('testdir/subdir/subdir/ss2', 'w') as f:
        f.write('test')

    # lock testdir/subdir
    fs.manual_lock('testdir/subdir')
    # lock testdir/subdir/subfile1
    fs.manual_lock('testdir/subdir/subfile1')
    # lock testdir
    fs.manual_lock('testdir')
    fs.unlock('testdir/subdir')
    fs.unlock('testdir')
    fs.unlock('testdir/subdir/subfile1')
    fs.no_locks()
Example no. 13
def set():

    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--problem_name",
                        default="Finance",
                        help="name for system to control")
    parser.add_argument("--graph",
                        default="oneshot",
                        help="which graph to use")
    parser.add_argument("--debug", action="store_true", help="")
    parser.add_argument("--gpu", default="False", help="use cuda tensors")
    parser.add_argument("--recurrent", action="store_true", help="recurrent")
    parser.add_argument(
        "--load_timestamp",
        default=None,
        help="timestamp of previous experiment you want to load")
    parser.add_argument(
        "--test_only",
        default="False",
        help="Run test only. For this load_timestamp is mandatory")

    args = parser.parse_args()

    # --- below are automatically set ---
    # if args.seed is not None:
    #     seed = args.seed
    #     random.seed(seed)
    #     os.environ['PYTHONHASHSEED'] = str(seed)
    #     np.random.seed(seed)
    # args.problem_name += "_seed{}".format(args.seed)

    opt_dict = {name: value for (name, value) in args._get_kwargs()}
    opt = argparse.Namespace(**opt_dict)

    # print configurations
    for o in vars(opt):
        print(util.green(o), ":", util.yellow(getattr(opt, o)))
    print()

    return opt
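
The `args._get_kwargs()` roundtrip above rebuilds a Namespace from the parsed arguments through a private method; a sketch of the same result using only public argparse API:

    opt = argparse.Namespace(**vars(args))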
Example no. 14
def train_model(hyperparam, x_train, x_cv, y_train, y_cv, num_epochs, display=True, save=False):
  print(util.green(str(hyperparam)))
  if save:
    saver = tf.train.Saver()
  init = tf.global_variables_initializer()
  sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options))
  sess.run(init)
  summaries = []
  for j in range(num_epochs):
    train_loss, train_acc = train_epoch(x_train, y_train, hyperparam, sess)
    cv_loss, cv_acc = cross_validate(x_cv, y_cv, hyperparam, sess)
    summaries.append([train_loss, train_acc, cv_loss, cv_acc])
    if display and j % DISPLAY_STEP == 0:
      print("Train Loss: %f\nCV Loss: %f\nTrain Accuracy: %s\nCV Accuracy: %s\nEPOCH: %d\n"
            % (train_loss, cv_loss, train_acc, cv_acc, j))
    if save and j % 50 == 0:
      saver.save(sess, output_dir(hyperparam) + '/model_%d.ckpt'%(j))
  if save:
    saver.save(sess, output_dir(hyperparam) + '/model_%d.ckpt'%(j))
  return np.array(summaries), sess
Example no. 15
def get_or_create_labels():
    labels = []

    # Loading heuristics from the file system.
    labels_fs = dict()
    for label_type in os.scandir(HEURISTICS_DIR):
        if label_type.is_dir() and not label_type.name.startswith('.'):
            for label in os.scandir(label_type.path):
                if label.is_file() and not label.name.startswith('.'):
                    with open(label.path) as file:
                        pattern = file.read()
                    label_fs = {
                        'name': os.path.splitext(label.name)[0],
                        'type': label_type.name,
                        'pattern': pattern,
                    }
                    labels_fs[(label_fs['type'], label_fs['name'])] = label_fs

    # Loading labels from the database.
    labels_db = db.query(db.Label).options(
        selectinload(db.Label.heuristic).options(
            selectinload(
                db.Heuristic.executions).defer('output').defer('user'))).all()

    status = {
        'File System': len(labels_fs),
        'Database': len(labels_db),
        'Added': 0,
        'Deleted': 0,
        'Updated': 0,
    }

    i = 0
    # Updating labels that still exist in the file system; deleting the ones that do not
    for label in labels_db:
        label_fs = labels_fs.pop((label.type, label.name), None)
        if label_fs is not None:
            # Print progress information
            i += 1
            progress = '{:.2%}'.format(i / status['File System'])
            print(f'[{progress}] Adding label {label.type}/{label.name}:',
                  end=' ')
            labels.append(label)

            # Check if pattern has changed and, in this case, remove executions that are not accepted and verified
            heuristic = label.heuristic
            if heuristic.pattern != label_fs['pattern']:
                heuristic.pattern = label_fs['pattern']
                count = 0
                for execution in list(heuristic.executions):
                    if not (execution.isValidated and execution.isAccepted):
                        # The commit does not invalidate the objects for performance reasons,
                        # so we need to remove the relationships before deleting the execution.
                        execution.heuristic = None
                        execution.version = None

                        db.delete(execution)
                        count += 1
                print(
                    green(f'heuristic updated ({count} executions removed).'))
                status['Updated'] += 1
            else:
                print(yellow('already done.'))
        else:
            db.delete(label)
            status['Deleted'] += 1

    # Adding missing labels in the database
    for label_fs in labels_fs.values():
        # Print progress information
        i += 1
        progress = '{:.2%}'.format(i / status['File System'])
        print(
            f'[{progress}] Adding label {label_fs["type"]}/{label_fs["name"]}:',
            end=' ')

        label = db.create(db.Label,
                          name=label_fs['name'],
                          type=label_fs['type'])
        db.create(db.Heuristic,
                  pattern=label_fs['pattern'],
                  label=label,
                  executions=[])
        labels.append(label)
        print(green('ok.'))
        status['Added'] += 1

    status['Total'] = len(labels)
    print_results(status)
    commit()
    return sorted(labels,
                  key=lambda item: (item.type.lower(), item.name.lower()))
Example no. 16
def get_or_create_projects():
    projects = []

    # Loading projects from the Excel.
    df = pd.read_excel(ANNOTATED_FILE, keep_default_na=False)
    df = df[df.discardReason == ''].reset_index(drop=True)
    projects_excel = dict()
    for i, project_excel in df.iterrows():
        projects_excel[(project_excel['owner'],
                        project_excel['name'])] = project_excel

    # Loading projects from the database.
    projects_db = db.query(db.Project).options(
        load_only('id', 'owner', 'name'),
        selectinload(db.Project.versions).load_only('id')).all()

    status = {
        'Excel': len(projects_excel),
        'Database': len(projects_db),
        'Added': 0,
        'Deleted': 0,
        'Repository not found': 0,
        'Git error': 0
    }

    i = 0
    # Keeping projects that still exist in the Excel file; deleting the ones that do not
    for project in projects_db:
        if projects_excel.pop((project.owner, project.name), None) is not None:
            # Print progress information
            i += 1
            progress = '{:.2%}'.format(i / status['Excel'])
            print(
                f'[{progress}] Adding project {project.owner}/{project.name}:',
                end=' ')
            projects.append(project)
            print(yellow('already done.'))
        else:
            db.delete(project)
            status['Deleted'] += 1

    # Adding missing projects in the database
    for project_excel in projects_excel.values():
        # Print progress information
        i += 1
        progress = '{:.2%}'.format(i / status['Excel'])
        print(
            f'[{progress}] Adding project {project_excel["owner"]}/{project_excel["name"]}:',
            end=' ')

        project_dict = {
            k: v
            for k, v in project_excel.to_dict().items()
            if k not in ['url', 'isSoftware', 'discardReason']
        }
        project_dict['createdAt'] = str(project_dict['createdAt'])
        project_dict['pushedAt'] = str(project_dict['pushedAt'])
        try:
            os.chdir(REPOS_DIR + os.sep + project_dict['owner'] + os.sep +
                     project_dict['name'])
            p = subprocess.run(REVPARSE_COMMAND, capture_output=True)
            if p.stderr:
                raise subprocess.CalledProcessError(p.returncode,
                                                    REVPARSE_COMMAND, p.stdout,
                                                    p.stderr)

            project = db.create(db.Project, **project_dict)
            db.create(db.Version,
                      sha1=p.stdout.decode().strip(),
                      isLast=True,
                      project=project)
            projects.append(project)
            print(green('ok.'))
            status['Added'] += 1
        except NotADirectoryError:
            print(red('repository not found.'))
            status['Repository not found'] += 1
        except subprocess.CalledProcessError as ex:
            print(red('Git error.'))
            status['Git error'] += 1
            if CODE_DEBUG:
                print(ex.stderr)

    status['Total'] = len(projects)
    print_results(status)
    commit()
    return sorted(projects,
                  key=lambda item: (item.owner.lower(), item.name.lower()))
Example no. 17
def main(mode):
    fs = SyncedFS('mg', 'test', verbose=True)
    fs.unlock_all()

    # message writing test
    with fs.open('test', 'w') as f:
        print("START")
        f.write('message1\n')
        f.write('message2')
        print("END")

    # make sure that message was written successfully
    with fs.open('test', 'r') as f:
        assert f.read() == 'message1\nmessage2'

    # recursive locks test
    with fs.lock('test'):
        with fs.lock('test'):
            with fs.open('test', 'w') as f:
                f.write('message')

    # make sure that message was written successfully
    with fs.open('test', 'r') as f:
        assert f.read() == 'message'

    # multiprocess tests
    if mode == 1:
        with fs.open('test', 'w') as f:
            print("START")
            print(
                'Now launch this process again (preferably multiple copies) within 7 seconds!'
            )
            sleep(7)
            f.write('message')
            print("END")
    elif mode == 2:
        util.green("Parent trying to get lock")
        fs.manual_lock('test2')  # take a lock and don't release it
        if fs.verbose: util.green("Parent got lock")
        if os.fork() != 0:
            util.green("Parent sleeping with lock")
            # need to sleep a while in the parent, otherwise the lock will be
            # released when the child clears the parent, since it's an old pid
            sleep(15)
            util.green("Parent exiting")
            exit(0)
        else:
            sleep(1)
            # fresh instance so we don't inherit fs.locks from the parent
            fs = SyncedFS('mg', 'test', verbose=True)
            util.green("Child trying to get lock")
            # take the lock in the child; this should succeed after 10 seconds
            fs.manual_lock('test2')
            util.green("Child got lock")
            fs.unlock('test2')  # cleanup
            util.green("Child released lock")

    # make sure we aren't still holding any locks
    fs.no_locks()
Example no. 18
def prompt(choices, mode='*'):
    if mode not in PROMPT_MODES:
        raise ValueError("mode '{}' is invalid".format(mode))

    if len(choices) > 26:
        raise ValueError("too many choices")

    if mode == '*':
        header = "select zero or more:"
        max, min = float('inf'), 0

    elif mode == '+':
        header = "select one or more:"
        max, min = float('inf'), 1

    elif mode in [1, '1']:
        header = "select one:"
        max, min = 1, 1

    elif mode == '?':
        header = "select zero or one:"
        max, min = 1, 0

    letters = list(map(lambda x: chr(ord('a') + x), range(len(choices))))

    num_selections = 0
    selections = []         # unique indices into choices list

    while num_selections < min or num_selections < max:
        util.print(util.green(header))

        for i in range(len(choices)):
            if i in selections:
                choice = " × "
            else:
                choice = "   "

            choice += str(letters[i]) + '. ' + str(choices[i])

            if i in selections:
                choice = util.yellow(choice)

            util.print(choice)

        try:
            sel = input(util.green("make a selection (or ! to commit): "))
        except KeyboardInterrupt:
            util.exit(util.ERR_INTERRUPTED)

        if sel == '!':
            if num_selections < min:
                util.error("can't stop now; you must make "
                           "{} {}".format(min,
                                          util.plural("selection", min)))
                continue
            else:
                break

        try:
            if letters.index(sel) in selections:
                selections.remove(letters.index(sel))
                continue

            selections.append(letters.index(sel))
            num_selections += 1

        except ValueError:
            if sel == '':
                util.print("make a selection (or ! to commit)")
            else:
                util.error("invalid selection: not in list")
            continue

    return selections
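
A hedged usage example for `prompt` (the choice strings are illustrative, and `PROMPT_MODES` is assumed to contain the four modes handled above):

    colors = ['red', 'green', 'blue']
    picked = prompt(colors, mode='+')  # pick letters, then '!' to commit
    for i in picked:                   # returns indices into the choices list
        util.print(colors[i])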
Example no. 19
def main():
    """
    This program can be useful to fix collisions (due to data migrated from case insensitive to case sensitive file
    systems) and to update the workspace to the most recent version.
    """
    print(f'Loading repositories from {ANNOTATED_FILE}.')
    info_repositories = pd.read_excel(ANNOTATED_FILE, keep_default_na=False)
    info_repositories = info_repositories[info_repositories.discardReason ==
                                          ''].reset_index(drop=True)

    status = {
        'Success': 0,
        'Collided': 0,
        'Repository not found': 0,
        'Git error': 0,
        'Total': len(info_repositories)
    }

    print(f'Resetting {status["Total"]} repositories...')
    for i, row in info_repositories.iterrows():
        # Print progress information
        progress = '{:.2%}'.format(i / status["Total"])
        print(
            f'[{progress}] Processing repository {row["owner"]}/{row["name"]}:',
            end=' ')

        try:
            target = REPOS_DIR + os.sep + row['owner'] + os.sep + row['name']
            os.chdir(target)

            # Removes lock file, if they exist
            try:
                os.remove('.git/index.lock')
            except OSError:
                pass

            cmd = ['git', 'config', 'core.precomposeunicode', 'false']
            process = subprocess.run(cmd, capture_output=True)
            if process.stderr:
                raise subprocess.CalledProcessError(process.returncode, cmd,
                                                    process.stdout,
                                                    process.stderr)

            cmd = ['git', 'reset', '--hard', '-q', 'origin/HEAD']
            process = subprocess.run(cmd, capture_output=True)
            if process.stderr:
                raise subprocess.CalledProcessError(process.returncode, cmd,
                                                    process.stdout,
                                                    process.stderr)

            cmd = ['git', 'clean', '-d', '-f', '-x', '-q']
            process = subprocess.run(cmd, capture_output=True)
            if process.stderr:
                raise subprocess.CalledProcessError(process.returncode, cmd,
                                                    process.stdout,
                                                    process.stderr)

            process = subprocess.run(['git', 'status', '-s'],
                                     capture_output=True)
            if process.stdout or process.stderr:
                print(yellow('collided.'))
                status['Collided'] += 1
                if CODE_DEBUG:
                    print(process.stdout)
                    print(process.stderr)
            else:
                print(green('ok.'))
                status['Success'] += 1
        except NotADirectoryError:
            print(red('repository not found.'))
            status['Repository not found'] += 1
        except subprocess.CalledProcessError as ex:
            print(red('Git error.'))
            status['Git error'] += 1
            if CODE_DEBUG:
                print(ex.stderr)

    print('\n*** Processing results ***')
    for k, v in status.items():
        print(f'{k}: {v}')

    print("\nFinished.")
Example no. 20
def set():

    # parse input arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("--eval", action="store_true", help="evaluation phase")
    parser.add_argument("--group", default="0", help="name for group")
    parser.add_argument("--name",
                        default="debug",
                        help="name for model instance")
    parser.add_argument("--seed", type=int, default=0, help="fix random seed")
    parser.add_argument("--gpu", type=int, default=0, help="GPU device")
    parser.add_argument("--cpu", action="store_true", help="use CPU only")
    parser.add_argument("--load", default=None, help="load (pre)trained model")
    # dataset
    parser.add_argument("--rendering-path",
                        default="data/rendering",
                        help="path to ShapeNet rendering")
    parser.add_argument("--pointcloud-path",
                        default="data/customShapeNet",
                        help="path to ShapeNet 3D point cloud")
    parser.add_argument("--sun360-path",
                        default="data/background",
                        help="path to SUN360 background")
    parser.add_argument("--seq-path",
                        default="data/sequences",
                        help="path to RGB sequences for evaluation")
    parser.add_argument("--category",
                        default=None,
                        help="train on specific category")
    parser.add_argument("--num-workers",
                        type=int,
                        default=8,
                        help="number of data loading threads")
    parser.add_argument("--size",
                        default="224x224",
                        help="rendered image size")
    parser.add_argument("--sfm",
                        action="store_true",
                        help="use coordinate system mapping from SfM output")
    parser.add_argument("--init-idx",
                        type=int,
                        default=27,
                        help="initial frame index")
    parser.add_argument("--noise",
                        type=float,
                        default=None,
                        help="gaussian noise in coordinate system mapping")
    # visualization
    parser.add_argument("--log-tb",
                        action="store_true",
                        help="output loss in TensorBoard")
    parser.add_argument("--log-visdom",
                        action="store_true",
                        help="visualize mesh in Visdom")
    parser.add_argument("--vis-server",
                        default="http://localhost",
                        help="visdom port server")
    parser.add_argument("--vis-port",
                        type=int,
                        default=8097,
                        help="visdom port number")
    parser.add_argument("--video",
                        action="store_true",
                        help="write video sequence with optimized mesh")
    # AtlasNet
    parser.add_argument("--num-prim",
                        type=int,
                        default=25,
                        help="number of primitives")
    parser.add_argument("--num-points",
                        type=int,
                        default=100,
                        help="number of points (per primitive)")
    parser.add_argument("--num-meshgrid",
                        type=int,
                        default=5,
                        help="number of regular grids for mesh")
    parser.add_argument("--sphere",
                        action="store_true",
                        help="use closed sphere for AtlasNet")
    parser.add_argument("--sphere-densify",
                        type=int,
                        default=3,
                        help="densify levels")
    parser.add_argument("--imagenet-enc",
                        action="store_true",
                        help="initialize with pretrained ResNet encoder")
    parser.add_argument("--pretrained-dec",
                        default=None,
                        help="initialize with pretrained AtlasNet decoder")
    # photometric optimization
    parser.add_argument(
        "--batch-size-pmo",
        type=int,
        default=-1,
        help="batch size for photometric optimization (-1 for all)")
    parser.add_argument("--lr-pmo",
                        type=float,
                        default=1e-3,
                        help="base learning rate for photometric optimization")
    parser.add_argument("--code",
                        type=float,
                        default=None,
                        help="penalty on code difference")
    parser.add_argument("--scale",
                        type=float,
                        default=None,
                        help="penalty on scale")
    parser.add_argument("--to-it",
                        type=int,
                        default=100,
                        help="run optimization to iteration number")
    parser.add_argument(
        "--avg-frame",
        action="store_true",
        help="average photo. loss across frames instead of sampled pixels")
    # AtlasNet training
    parser.add_argument("--batch-size",
                        type=int,
                        default=32,
                        help="input batch size")
    parser.add_argument(
        "--aug-transl",
        type=int,
        default=None,
        help="augment with random translation (for new dataset)")
    parser.add_argument("--lr-pretrain",
                        type=float,
                        default=1e-4,
                        help="base learning rate")
    parser.add_argument("--lr-decay",
                        type=float,
                        default=1.0,
                        help="learning rate decay")
    parser.add_argument("--lr-step",
                        type=int,
                        default=100,
                        help="learning rate decay step size")
    parser.add_argument("--from-epoch",
                        type=int,
                        default=0,
                        help="train from epoch number")
    parser.add_argument("--to-epoch",
                        type=int,
                        default=500,
                        help="train to epoch number")
    opt = parser.parse_args()

    # --- below are automatically set ---
    if opt.seed is not None:
        np.random.seed(opt.seed)
        torch.manual_seed(opt.seed)
        torch.cuda.manual_seed_all(opt.seed)
        opt.name += "_seed{}".format(opt.seed)
    opt.device = "cpu" if opt.cpu or not torch.cuda.is_available(
    ) else "cuda:{}".format(opt.gpu)
    opt.H, opt.W = [int(s) for s in opt.size.split("x")]

    if opt.sphere:
        opt.num_prim = 1
    opt.num_points_all = opt.num_points * opt.num_prim

    # print configurations
    for o in sorted(vars(opt)):
        print(util.green(o), ":", util.yellow(getattr(opt, o)))
    print()

    return opt
Example no. 21
def main():
    db.connect()

    print(f'Loading projects from {ANNOTATED_FILE}.')
    projects = get_or_create_projects()

    print(f'\nLoading heuristics from {HEURISTICS_DIR}.')
    labels = get_or_create_labels()

    # Indexing executions by label heuristic and project version.
    executions = index_executions(labels)

    status = {
        'Success': 0,
        'Skipped': 0,
        'Repository not found': 0,
        'Git error': 0,
        'Total': len(labels) * len(projects)
    }

    print(
        f'\nProcessing {len(labels)} heuristics over {len(projects)} projects.'
    )
    for i, label in enumerate(labels):
        heuristic = label.heuristic
        for j, project in enumerate(projects):
            # TODO: fix this to deal with multiple versions
            version = project.versions[0]

            # Print progress information
            progress = '{:.2%}'.format(
                (i * len(projects) + (j + 1)) / status['Total'])
            print(
                f'[{progress}] Searching for {label.name} in {project.owner}/{project.name}:',
                end=' ')

            # Try to get a previous execution
            execution = executions.get((heuristic, version), None)
            if not execution:
                try:
                    os.chdir(REPOS_DIR + os.sep + project.owner + os.sep +
                             project.name)
                    cmd = GREP_COMMAND + [
                        HEURISTICS_DIR + os.sep + label.type + os.sep +
                        label.name + '.txt'
                    ]
                    p = subprocess.run(cmd, capture_output=True)
                    if p.stderr:
                        raise subprocess.CalledProcessError(
                            p.returncode, cmd, p.stdout, p.stderr)
                    db.create(db.Execution,
                              output=p.stdout.decode(errors='replace').replace(
                                  '\x00', '\uFFFD'),
                              version=version,
                              heuristic=heuristic,
                              isValidated=False,
                              isAccepted=False)
                    print(green('ok.'))
                    status['Success'] += 1
                except NotADirectoryError:
                    print(red('repository not found.'))
                    status['Repository not found'] += 1
                except subprocess.CalledProcessError as ex:
                    print(red('Git error.'))
                    status['Git error'] += 1
                    if CODE_DEBUG:
                        print(ex.stderr)
            else:  # Execution already exists
                print(yellow('already done.'))
                status['Skipped'] += 1
        commit()

    print_results(status)
    db.close()
Example no. 22
def main():
    for f in get_tests():
        print(util.green(f))
        fp = os.path.join(qa_dir, f)
        subprocess.check_call([sys.executable, fp])