def main():
    """Parse command line arguments and decide what should be done."""
    desc = _("""p4gf_lfs_http_server.py handles LFS requests over HTTP.
Typically it is run via a web server and protected by some form of user
authentication. The environment variable REMOTE_USER must be set to
the name of a valid Perforce user, which is taken to be the user
performing a pull or push operation.
""")
    epilog = _("""If the --port argument is given then a simple HTTP server
will be started, listening on the specified port. In lieu of REMOTE_USER, the
user name is extracted from the URI, which starts with "/~", followed by the
user name. To stop the server, send a terminating signal to the process.
""")
    log_l10n()
    parser = p4gf_util.create_arg_parser(desc, epilog=epilog)
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        help=_('port on which to listen for LFS reqeuests'))
    args = parser.parse_args()
    if args.port:
        LOG.info("Listening for LFS-HTTP requests on port %s, pid=%s",
                 args.port, os.getpid())
        httpd = wsgiref.simple_server.make_server('', args.port, app_wrapper)
        print(_('Serving on port {port}...').format(port=args.port))
        p4gf_http_common.wsgi_install_signal_handler(httpd)
        p4gf_proc.install_stack_dumper()
        httpd.serve_forever()
    else:
        # Assume we are running inside a web server...
        p4gf_proc.install_stack_dumper()
        _handle_cgi()
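# Example invocation (an assumed deployment detail, not from the original
# source): a standalone listener for testing might be started as
#
#     python p4gf_lfs_http_server.py --port 8081
#
# in which case the user name comes from the "/~<user>" URI prefix rather
# than from REMOTE_USER.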
def pacemaker(view_name, event):
    """
    As long as the event flag is clear, update the heartbeat of the named lock.
    """
    # Running in a separate process, need to establish our own P4 connection
    # and set up a heartbeat-only lock to update the heartbeat of the lock
    # associated with the view.
    p4gf_proc.install_stack_dumper()
    LOG.getChild("pacemaker").debug("starting for lock {}".format(view_name))
    p4 = None
    try:
        p4 = p4gf_create_p4.create_p4(client=p4gf_util.get_object_client_name(), connect=False)
        lock = CounterLock(p4, view_name, heartbeat_only=True)
        while not event.is_set():
            with p4gf_create_p4.Connector(p4):
                lock.update_heartbeat()
            event.wait(HEART_RATE)
    # pylint: disable=W0703
    # Catching too general exception
    except Exception as e:
        LOG.getChild("pacemaker").error("error occurred: {}".format(str(e)))
    finally:
        LOG.getChild("pacemaker").debug("stopping for view {}".format(view_name))
        if p4:
            p4gf_create_p4.destroy(p4)
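# Minimal usage sketch (not from the original source): pacemaker() is designed
# to run in a separate process, with the parent signalling shutdown by setting
# the shared event. The view name below is a placeholder.
def _pacemaker_example(view_name='example_repo'):
    """Start pacemaker() in a child process, then stop it again."""
    import multiprocessing

    stop_event = multiprocessing.Event()
    child = multiprocessing.Process(target=pacemaker, args=(view_name, stop_event))
    child.start()
    # ... foreground work that holds the named lock would happen here ...
    stop_event.set()    # pacemaker's loop exits after its current wait
    child.join()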
def main():
    """
    Parse command line arguments and decide what should be done.
    """
    desc = _("""p4gf_http_server.py handles http(s) requests. Typically it
is run via a web server and protected by some form of user
authentication. The environment variable REMOTE_USER must be set to
the name of a valid Perforce user, which is taken to be the user
performing a pull or push operation.
""")
    epilog = _("""If the --user argument is given then a simple HTTP server
will be started, listening on the port specified by --port. The
REMOTE_USER value will be set to the value given to the --user
argument. To stop the server, send a terminating signal to the process.
""")
    log_l10n()
    parser = p4gf_util.create_arg_parser(desc, epilog=epilog)
    parser.add_argument('-u',
                        '--user',
                        help=_('value for REMOTE_USER variable'))
    parser.add_argument('-p',
                        '--port',
                        type=int,
                        default=8000,
                        help=_('port on which to listen (default 8000)'))
    args = parser.parse_args()
    if args.user:
        LOG.debug(
            "Listening for HTTP requests on port {} as user {}, pid={}".format(
                args.port, args.user, os.getpid()))
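        # functools.partial pre-binds the user name, so the resulting wrapper
        # is called by wsgiref with just (environ, start_response), the
        # standard WSGI application signature.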
        wrapper = functools.partial(_app_wrapper, args.user)
        httpd = wsgiref.simple_server.make_server(
            '', args.port, wrapper, handler_class=GitFusionRequestHandler)
        print(_('Serving on port {}...').format(args.port))

        def _signal_handler(signum, _frame):
            """
            Ensure the web server is shut down properly.
            """
            LOG.info("received signal {}, pid={}, exiting".format(
                signum, os.getpid()))
            httpd.server_close()
            sys.exit(0)

        LOG.debug("installing HTTP server signal handler, pid={}".format(
            os.getpid()))
        signal.signal(signal.SIGHUP, _signal_handler)
        signal.signal(signal.SIGINT, _signal_handler)
        signal.signal(signal.SIGQUIT, _signal_handler)
        signal.signal(signal.SIGTERM, _signal_handler)
        signal.signal(signal.SIGTSTP, _signal_handler)
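        # SIGKILL cannot be caught, so a hard kill bypasses server_close();
        # any of the signals above triggers an orderly shutdown instead.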

        p4gf_proc.install_stack_dumper()
        httpd.serve_forever()
    else:
        # Assume we are running inside a web server...
        _handle_cgi()
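# Example invocation (an assumed deployment detail, not from the original
# source): a standalone test server with REMOTE_USER forced to "super" might
# be started as
#
#     python p4gf_http_server.py --user super --port 8080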
def _double_fork(func):
    """
    do the UNIX double-fork magic, see Stevens' "Advanced
    Programming in the UNIX Environment" for details (ISBN 0201563177)
    http://www.erlenstar.demon.co.uk/unix/faq_2.html#SEC16
    """
    # Flush before forking rather than later so that buffered contents don't
    # get written twice.
    sys.stderr.flush()

    try:
        pid = os.fork()
        if pid > 0:
            # main/parent process
            return
    except OSError as e:
        sys.stderr.write(
            _('fork #1 failed: %d (%s)\n') % (e.errno, e.strerror))
        sys.exit(1)

    # decouple from parent environment
    os.setsid()
    os.umask(0)

    # do second fork
    try:
        pid = os.fork()
        if pid > 0:
            # exit from second parent
            os._exit(0)
    except OSError as e:
        sys.stderr.write(
            _('fork #2 failed: %d (%s)\n') % (e.errno, e.strerror))
        os._exit(1)

    # redirect standard file descriptors
    sys.stdout.flush()
    sys.stderr.flush()
    si = open('/dev/null', 'r')
    so = open('/dev/null', 'a+')
    se = open('/dev/null', 'a+')
    os.dup2(si.fileno(), sys.stdin.fileno())
    os.dup2(so.fileno(), sys.stdout.fileno())
    os.dup2(se.fileno(), sys.stderr.fileno())

    # call the given function
    p4gf_proc.install_stack_dumper()
    func()
    os._exit(0)
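# Illustrative call (placeholder worker name, not from the original source):
#
#     _double_fork(functools.partial(run_background_task, view_name))
#
# The caller returns immediately; the grandchild, now detached from the
# controlling terminal with stdio redirected to /dev/null, runs the callable
# and exits.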
def main(poll_only=False):
    """set up repo.

    repo_name_git    is the untranslated repo name
    repo_name        is the translated repo name
    """
    p4gf_proc.install_stack_dumper()
    p4gf_util.log_environ(LOG, os.environ, "SSH")

    encoding = sys.getfilesystemencoding()
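    # Python commonly reports 'ascii' here when the process runs under the
    # POSIX "C" locale, e.g. when LANG and LC_ALL are unset.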
    if encoding == 'ascii':
        # This encoding is wrong and will eventually lead to problems.
        _report_error(
            _("Using 'ascii' file encoding will ultimately result in errors, "
              "please set LANG/LC_ALL to 'utf-8' in environment configuration."
              ))
        return os.EX_CONFIG

    server = AuthServer(poll_only)
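    # The exception handlers below map failures onto sysexits-style codes
    # where one exists (os.EX_USAGE == 64, os.EX_SOFTWARE == 70,
    # os.EX_CONFIG == 78, os.EX_OK == 0); a few paths still return bare 1 or 2.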

    try:
        return server.process()
    except p4gf_server_common.BadRequestException as e:
        _report_error(str(e))
        return os.EX_USAGE
    except p4gf_server_common.PerforceConnectionFailed:
        _report_error(_("Perforce connection failed"))
        return 2
    except p4gf_server_common.SpecialCommandException:
        return os.EX_OK
    except p4gf_server_common.RepoNotFoundException as e:
        _report_error(str(e))
        return 1
    except p4gf_server_common.RepoInitFailedException as e:
        _report_error(str(e))
        return 1
    except p4gf_server_common.ReadOnlyInstanceException as e:
        _report_error(str(e))
        return 1
    except p4gf_server_common.MissingSubmoduleImportUrlException:
        _report_error(
            _('Stream imports require an ssh-url'
              ' be configured. Contact your administrator.'))
        return 0
    except p4gf_atomic_lock.LockConflict as lc:
        _report_error(str(lc))
    return os.EX_SOFTWARE
def main(poll_only=False):
    """set up repo for a view
       view_name_git    is the untranslated repo name
       view_name        is the translated repo name
    """
    p4gf_proc.install_stack_dumper()
    _log_environ(os.environ)
    with p4gf_server_common.ExceptionAuditLogger(), p4gf_create_p4.Closer():
        LOG.debug(p4gf_log.memory_usage())
        start_time = time.time()
        args = parse_args(sys.argv[1:])
        if not args:
            return 1

        is_push = 'upload' not in args.command[0]
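        # git-upload-pack serves fetch/clone; anything else (git-receive-pack)
        # is treated as a push.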

        # Record the p4 user in the environment. We use the environment to pass
        # it to the git-invoked hook. We don't have to set ctx.authenticated_p4user
        # because Context.__init__() reads it from the environment, which we set here.
        os.environ[p4gf_const.P4GF_AUTH_P4USER] = args.user

        # view_name_git    is the untranslated repo name
        # view_name        is the translated repo name

        # print "args={}".format(args)
        view_name_git = args.options[-1]
        # translate '/' ':' ' '  .. etc .. for internal view_name
        view_name = p4gf_translate.TranslateReponame.git_to_repo(view_name_git)
        LOG.debug("public view_name: {0}   internal view_name: {1}".
                format(view_name_git, view_name))


        p4gf_util.reset_git_enviro()
        p4 = p4gf_create_p4.create_p4()
        if not p4:
            return 2
        LOG.debug("connected to P4: %s", p4)

        p4gf_server_common.check_readiness(p4)

        p4gf_server_common.check_lock_perm(p4)

        if not p4gf_server_common.check_protects(p4):
            p4gf_server_common.raise_p4gf_perm()

        if p4gf_server_common.run_special_command(view_name, p4, args.user):
            return 0

        # Initialize the external process launcher early, before allocating lots
        # of memory, and just after all other conditions have been checked.
        p4gf_proc.init()
        # Prepare for possible spawn of GitMirror worker process by forking
        # now before allocating lots of memory.
        p4gf_gitmirror.setup_spawn(view_name)
        # Kick off garbage collection debugging, if enabled.
        p4gf_gc.init_gc()

        if poll_only:
            view_perm = None
        else:
            # Go no further, create NOTHING, if user not authorized.
            # We use the translated internal view name here for perm authorization
            required_perm = p4gf_server_common.COMMAND_TO_PERM[args.command[0]]
            view_perm = p4gf_group.ViewPerm.for_user_and_view(p4, args.user,
                        view_name, required_perm)
            p4gf_server_common.check_authorization(p4, view_perm, args.user, args.command[0],
                                                   view_name)

        # Create Git Fusion server depot, user, config. NOPs if already created.
        p4gf_init.init(p4)

        write_motd()

        # view_name is the internal view_name (identical when no special characters exist)
        before_lock_time = time.time()
        with p4gf_lock.view_lock(p4, view_name) as view_lock:
            after_lock_time = time.time()

            # Create Git Fusion per-repo client view mapping and config.
            #
            # NOPs if already created.
            # Create the empty directory that will hold the git repo.
            init_repo_status = p4gf_init_repo.init_repo(p4, view_name, view_lock)
            if init_repo_status == p4gf_init_repo.INIT_REPO_OK:
                repo_created = True
            elif init_repo_status == p4gf_init_repo.INIT_REPO_EXISTS:
                repo_created = False
            else:
                return 1

            # If authorization came from default, not explicit group
            # membership, copy that authorization to a group now. Could
            # not do this until after p4gf_init_repo() has a chance to
            # create not-yet-existing groups.
            if view_perm:
                view_perm.write_if(p4)

            # Now that we have valid git-fusion-user and
            # git-fusion-<view> client, replace our temporary P4
            # connection with a more permanent Context, shared for the
            # remainder of this process.
            with p4gf_context.create_context(view_name, view_lock) as ctx:
                LOG.debug("reconnected to P4, p4gf=%s", ctx.p4gf)

                # Find directory paths to feed to git.
                ctx.log_context()

                # cd into the work directory. Not all git functions react well
                # to --work-tree=xxxx.
                cwd = os.getcwd()
                os.chdir(ctx.view_dirs.GIT_WORK_TREE)

                # Only copy from Perforce to Git if no other process is cloning
                # from this Git repo right now.
                shared_in_progress = p4gf_lock.shared_host_view_lock_exists(ctx.p4, view_name)
                if not shared_in_progress:
                    # Copy any recent changes from Perforce to Git.
                    try:
                        LOG.debug("bare: No git-upload-pack in progress, force non-bare"
                                  " before update Git from Perforce.")
                        p4gf_git.set_bare(False)
                        p4gf_copy_p2g.copy_p2g_ctx(ctx)
                        p4gf_init_repo.process_imports(ctx)

                        # Now is also an appropriate time to clear out any stale Git
                        # Swarm reviews. We're pre-pull, pre-push: a time when we have
                        # exclusive write access to the Git repo.
                        GSReviewCollection.delete_refs_for_closed_reviews(ctx)

                    except p4gf_lock.LockCanceled as lc:
                        LOG.warning(str(lc))
                    except:
                        # Dump failure to log, BEFORE cleanup, just in case
                        # cleanup ALSO fails and throws its own error (which
                        # happens if we're out of memory).
                        LOG.error(traceback.format_exc())

                        if repo_created:
                            # Return to the original working directory to allow the
                            # config code to call os.getcwd() without dying, since
                            # we are about to delete the current working directory.
                            os.chdir(cwd)
                            p4gf_server_common.cleanup_client(ctx, view_name)
                        raise

                if poll_only:
                    code = os.EX_OK
                else:

                    git_caller = functools.partial(_call_git, args, ctx)
                    try:

                        # Deep in call_git(), we grab a 'p4 reviews' lock on
                        # ctx.clientmap's LHS. Switch that clientmap to our
                        # full union view to prevent simultaneous 'git push'es
                        # from clobbering each other in some shared depot
                        # branch. Must include all lightweight branches, too.
                        ctx.switch_client_view_to_union()

                        exclusive = 'upload' not in args.command[0]
                        code = p4gf_call_git.call_git(
                                git_caller, ctx, view_name, view_lock, exclusive)
                        if is_push:
                            GSReviewCollection.post_push(ctx)
                    except p4gf_atomic_lock.LockConflict as lc:
                        sys.stderr.write("{}\n".format(lc))
                        code = os.EX_SOFTWARE

            p4gf_gc.process_garbage(NTR('at end of auth_server'))
            if LOG.isEnabledFor(logging.DEBUG):
                end_time = time.time()
                frm = NTR("Runtime: preparation {} ms, lock acquisition {} ms,"
                          " processing {} ms")
                LOG.debug(frm.format(before_lock_time - start_time,
                                    after_lock_time - before_lock_time,
                                    end_time - after_lock_time))
        return code
def main():
    """Copy incoming Git commits to Perforce changelists."""
    _log_environ(os.environ)
    log_l10n()
    LOG.debug("main() running, pid={}".format(os.getpid()))
    p4gf_proc.install_stack_dumper()
    for h in ['-?', '-h', '--help']:
        if h in sys.argv:
            print(_('Git Fusion pre-receive hook.'))
            return 2
    with p4gf_create_p4.Closer():
        p4gf_version.print_and_exit_if_argv()
        p4 = p4gf_create_p4.create_p4()
        if not p4:
            return 2
        p4gf_util.reset_git_enviro(p4)

        view_name = p4gf_util.cwd_to_view_name()
        view_lock = p4gf_lock.view_lock_heartbeat_only(p4, view_name)
        with p4gf_context.create_context(view_name, view_lock) as ctx:

            # this script is called by git while a context and temp clients
            # are already in use.  Don't sabotage that context by deleting
            # the temp clients from here.
            ctx.cleanup_client_pool = False

            # Read each input line (usually only one unless pushing multiple branches)
            # and convert to a list of "tuples" from which we can assign branches.
            prl = []
            delete_prl = []
            while True:
                line = sys.stdin.readline()
                if not line:
                    break
                LOG.debug('main() raw pre-receive-tuple: {}'.format(line))
                prt = PreReceiveTuple.from_line(line)
                if int(prt.new_sha1, 16) == 0:
                    delete_prl.append(prt)
                else:
                    prl.append(prt)

            # Initialize the external process launcher early, before allocating lots
            # of memory, and just after all other conditions have been checked.
            p4gf_proc.init()
            # Prepare for possible spawn of GitMirror worker process by forking
            # now before allocating lots of memory.
            p4gf_gitmirror.setup_spawn(view_name)
            # Kick off garbage collection debugging, if enabled.
            p4gf_gc.init_gc()

            # Reject attempt to delete any fully populated branch defined in
            # p4gf_config. Git Fusion never edits p4gf_config, so Git Fusion never
            # deletes fully populated branches. Edit p4gf_config yourself if you
            # want to remove a branch from history.
            for prt in delete_prl:
                git_branch_name = prt.git_branch_name()
                if not git_branch_name:
                    continue
                branch = ctx.git_branch_name_to_branch(git_branch_name)
                if not branch:
                    LOG.debug('attempt to delete branch {} which does not exist'
                              .format(git_branch_name))
                    break
                if not branch.is_lightweight:
                    raise RuntimeError(_('Cannot delete branches defined in'
                                         ' Git Fusion repo config file: {}')
                                       .format(git_branch_name))

            # Swarm review creates new Git merge commits. Must occur before branch
            # assignment so that the review reference can be moved to the new merge
            # commit.
            gsreview_coll = GSReviewCollection.from_prl(ctx, prl)
            if gsreview_coll:
                gsreview_coll.pre_copy_to_p4(prl)

            # Assign branches to each of the received commits for pushed branches; skip deletes.
            if prl:
                assigner = Assigner(ctx.branch_dict(), prl, ctx)
                assigner.assign()

            # For each of the heads being pushed, copy their commits to Perforce.
            if prl:
                try:
                    err = _copy(ctx,
                                prl=prl,
                                assigner=assigner,
                                gsreview_coll=gsreview_coll)  # branch push
                    if err:
                        return _clean_exit(err)
                except RuntimeError as err:
                    # Log the error. The return call below eats the error and stack trace.
                    LOG.exception(NTR("_copy() raised exception."))
                    return _clean_exit(err)
            # For each of the heads being deleted, remove the branch definition from p4gf_config2
            if delete_prl:
                p4gf_call_git.prohibit_interrupt(view_name, os.getpid())
                try:
                    err = _delete(ctx, delete_prl)     # branch delete
                    if err:
                        return _clean_exit(err)
                except RuntimeError as err:
                    # Log the error. The return call below eats the error and stack trace.
                    LOG.exception(NTR("_delete() raised exception."))
                    return _clean_exit(err)
            # Process all of the tags at once.
            err = p4gf_tag.process_tags(ctx, prl + delete_prl)
            if err:
                return _clean_exit(err)

            # If we have any new Git Swarm review references that
            # auth/http_server must rename, send a list of such
            # references across process boundary, via a file.
            if gsreview_coll:
                gsreview_coll.to_file()

            p4gf_gc.process_garbage("at end of pre_receive_hook")
            p4gf_gc.report_objects(NTR("at end of pre_receive_hook"))

        return 0
    def do_it(self):
        """Perform all of the setup, processing, and clean up.

        :rtype: int
        :return: status code for the process upon exit.

        """
        p4gf_util.log_environ(LOG, os.environ, self.label)
        log_l10n()
        p4gf_proc.install_stack_dumper()
        # Kick off garbage collection debugging, if enabled.
        p4gf_mem_gc.init_gc()

        # Use ExitStack to avoid deeply nested code.
        with ExitStack() as stack:
            stack.enter_context(p4gf_create_p4.Closer())
            p4 = p4gf_create_p4.create_p4_temp_client()
            if not p4:
                return 2
            repo_name = p4gf_path.cwd_to_repo_name()
            p4gf_util.reset_git_enviro()

            # Initialize the external process launcher early, before
            # allocating lots of memory, and just after all other
            # conditions have been checked.
            p4gf_proc.init()

            # Assume that something bad will happen (especially with preflight).
            exit_code = os.EX_SOFTWARE
            try:
                p4gf_log.configure_for_repo(repo_name)
                gid = os.environ[p4gf_const.P4GF_FORK_PUSH]
                self.before_p4key_lock(repo_name)
                with p4gf_lock.RepoLock(p4, repo_name,
                                        group_id=gid) as repo_lock:
                    # Work to be done with the p4key lock...
                    self.context = p4gf_context.create_context(repo_name)
                    self.context.p4gf = p4
                    self.context.repo_lock = repo_lock
                    self.context.foruser = os.getenv(p4gf_const.P4GF_FORUSER)
                    stack.enter_context(self.context)
                    self.before()
                    exit_code = self.process()
                if self.after_requires_write_lock():
                    # Work to be done without the p4key lock, but with the
                    # write lock. Note that we release the p4key lock
                    # before acquiring the write lock to avoid deadlock
                    # with the foreground process, which always gets the
                    # repo read/write lock _before_ acquiring the p4key
                    # lock. Hence all this complication with the locks.
                    with p4gf_git_repo_lock.write_lock(repo_name):
                        self.after(exit_code)
                else:
                    # The after() method does not need a write lock...
                    self.after(exit_code)
            finally:
                self.cleanup()
                p4gf_proc.stop()

        # Random tasks after all of the locks have been released.
        msg = NTR("at end of {hook}").format(hook=self.label)
        p4gf_mem_gc.process_garbage(msg)
        p4gf_mem_gc.report_objects(msg)
        return exit_code