def update_with_transaction(mirror, **kwargs):
    """
    Update a RepoMirrorConfig row while rolling its sync_transaction_id for
    optimistic-locking purposes.

    Only whitelisted attributes found in `kwargs` are applied; anything else
    is ignored. The UPDATE is guarded by the mirror's current
    sync_transaction_id, so a concurrent writer makes this call a no-op.

    Returns the refreshed RepoMirrorConfig on success, or None when the row
    could not be updated (e.g. the transaction id no longer matched).
    """
    # RepoMirrorConfig attributes which can be modified by callers.
    mutable_attributes = (
        'is_enabled',
        'mirror_type',
        'external_reference',
        'external_registry_username',
        'external_registry_password',
        'external_registry_config',
        'sync_interval',
        'sync_start_date',
        'sync_expiration_date',
        'sync_retries_remaining',
        'sync_status',
        'sync_transaction_id',
    )

    # Collect the requested changes, consuming them from kwargs.
    updates = {}
    for attribute in mutable_attributes:
        if attribute in kwargs:
            updates[attribute] = kwargs.pop(attribute)

    # Always roll the transaction id so any other writer is invalidated.
    updates['sync_transaction_id'] = uuid_generator()

    query = RepoMirrorConfig.update(updates).where(
        RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
        RepoMirrorConfig.id == mirror.id,
    )

    # Apply the change(s); only hand back the object if the guarded row matched.
    if not query.execute():
        return None
    return RepoMirrorConfig.get_by_id(mirror.id)
def test_release_mirror(initialized_db):
    """
    Releasing a mirror with a FAIL status decrements sync_retries_remaining
    while keeping the scheduled start date; once retries are exhausted the
    counter resets and the next sync is pushed into the future.
    """
    disable_existing_mirrors()
    mirror, repo = create_mirror_repo_robot(["updated", "created"], repo_name="first")

    # mysql rounds the milliseconds on update so force that to happen now
    query = RepoMirrorConfig.update(
        sync_start_date=mirror.sync_start_date).where(
            RepoMirrorConfig.id == mirror.id)
    query.execute()
    mirror = RepoMirrorConfig.get_by_id(mirror.id)
    original_sync_start_date = mirror.sync_start_date

    assert mirror.sync_retries_remaining == 3

    # First two failures: counter decrements, start date unchanged.
    mirror = release_mirror(mirror, RepoMirrorStatus.FAIL)
    assert mirror.sync_retries_remaining == 2
    assert mirror.sync_start_date == original_sync_start_date

    mirror = release_mirror(mirror, RepoMirrorStatus.FAIL)
    assert mirror.sync_retries_remaining == 1
    assert mirror.sync_start_date == original_sync_start_date

    # Third failure exhausts the retries: counter refills and the next sync
    # start date moves forward.
    mirror = release_mirror(mirror, RepoMirrorStatus.FAIL)
    assert mirror.sync_retries_remaining == 3
    assert mirror.sync_start_date > original_sync_start_date
def update_sync_status_to_cancel(mirror):
    """
    Force a SYNCING or SYNC_NOW mirror back to the NEVER_RUN state.

    The claim is taken unconditionally — the existing sync_transaction_id is
    ignored and a new one is written. Returns the refreshed RepoMirrorConfig,
    or None when the mirror is not in a cancellable state or the row could
    not be updated.
    """
    cancellable = (RepoMirrorStatus.SYNCING, RepoMirrorStatus.SYNC_NOW)
    if mirror.sync_status not in cancellable:
        return None

    query = RepoMirrorConfig.update(
        sync_transaction_id=uuid_generator(),
        sync_status=RepoMirrorStatus.NEVER_RUN,
        sync_expiration_date=None,
    ).where(RepoMirrorConfig.id == mirror.id)

    if not query.execute():
        return None
    return RepoMirrorConfig.get_by_id(mirror.id)
def _delete_user_linked_data(user):
    """
    Remove or detach all rows that reference the given user (or organization)
    so the account row itself can be deleted afterwards.

    Each cleanup phase runs in its own transaction; teams/OAuth apps only
    apply to organizations, team membership only to plain users.
    """
    if user.organization:
        # Delete the organization's teams.
        with db_transaction():
            for team in Team.select().where(Team.organization == user):
                team.delete_instance(recursive=True)

        # Delete any OAuth approvals and tokens associated with the user.
        with db_transaction():
            for app in OAuthApplication.select().where(OAuthApplication.organization == user):
                app.delete_instance(recursive=True)
    else:
        # Remove the user from any teams in which they are a member.
        TeamMember.delete().where(TeamMember.user == user).execute()

    # Delete any repository buildtriggers where the user is the connected user.
    with db_transaction():
        triggers = RepositoryBuildTrigger.select().where(RepositoryBuildTrigger.connected_user == user)
        for trigger in triggers:
            trigger.delete_instance(recursive=True, delete_nullable=False)

    # Delete any mirrors with robots owned by this user. This must happen
    # before the robots themselves are removed below.
    with db_transaction():
        robots = list(list_namespace_robots(user.username))
        RepoMirrorConfig.delete().where(RepoMirrorConfig.internal_robot << robots).execute()

    # Delete any robots owned by this user.
    with db_transaction():
        robots = list(list_namespace_robots(user.username))
        for robot in robots:
            robot.delete_instance(recursive=True, delete_nullable=True)

    # Null out any service key approvals. We technically lose information here, but its better than
    # failing, and only occurs if a superuser is being deleted.
    ServiceKeyApproval.update(approver=None).where(ServiceKeyApproval.approver == user).execute()
def update_sync_status_to_sync_now(mirror):
    """
    Transition a mirror into the SYNC_NOW state, ensuring at least one retry
    remains so a worker will actually pick it up. Not permitted while a sync
    is in progress.

    Returns the refreshed RepoMirrorConfig on success, or None when the
    mirror is SYNCING or the guarded update did not apply (transaction id
    mismatch).
    """
    if mirror.sync_status == RepoMirrorStatus.SYNCING:
        return None

    # Guarantee at least one sync attempt remains.
    remaining = mirror.sync_retries_remaining
    if remaining < 1:
        remaining = 1

    query = RepoMirrorConfig.update(
        sync_transaction_id=uuid_generator(),
        sync_status=RepoMirrorStatus.SYNC_NOW,
        sync_expiration_date=None,
        sync_retries_remaining=remaining,
    ).where(
        RepoMirrorConfig.id == mirror.id,
        RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
    )

    if not query.execute():
        return None
    return RepoMirrorConfig.get_by_id(mirror.id)
def robot_has_mirror(robot):
    """
    Return True when at least one RepoMirrorConfig uses `robot` as its
    internal robot account, False otherwise.
    """
    try:
        RepoMirrorConfig.get(internal_robot=robot)
    except RepoMirrorConfig.DoesNotExist:
        return False
    return True
def wrapper(*args, **kwargs):
    """
    Run `func` with every RepoMirrorConfig temporarily disabled, then
    re-enable them all afterwards.

    Bug fix: the re-enable pass now runs in a `finally` block so mirrors are
    restored even when `func` raises (previously an exception left every
    mirror disabled). Note that ALL mirrors are re-enabled afterwards,
    including any that were already disabled beforehand — this matches the
    original behavior.
    """
    for mirror in RepoMirrorConfig.select():
        mirror.is_enabled = False
        mirror.save()
    try:
        func(*args, **kwargs)
    finally:
        # Restore mirrors even if func() raised; the exception still propagates.
        for mirror in RepoMirrorConfig.select():
            mirror.is_enabled = True
            mirror.save()
def enable_mirroring_for_repository(
    repository,
    root_rule,
    internal_robot,
    external_reference,
    sync_interval,
    external_registry_username=None,
    external_registry_password=None,
    external_registry_config=None,
    is_enabled=True,
    sync_start_date=None,
):
    """
    Create a RepoMirrorConfig and set the Repository to the MIRROR state.

    The robot must belong to the repository's own namespace. Registry
    credentials, when given, are stored wrapped in DecryptedValue. If a
    config already exists for the repository (IntegrityError on create), the
    existing config is returned instead of raising.

    Raises DataModelException when the robot belongs to another namespace or
    the repository state could not be changed.
    """
    assert internal_robot.robot

    # The mirroring robot must live in the same namespace as the repository.
    namespace, _ = parse_robot_username(internal_robot.username)
    if namespace != repository.namespace_user.username:
        raise DataModelException("Cannot use robot for mirroring")

    with db_transaction():
        # Create the RepoMirrorConfig
        try:
            username = (
                DecryptedValue(external_registry_username) if external_registry_username else None
            )
            password = (
                DecryptedValue(external_registry_password) if external_registry_password else None
            )
            mirror = RepoMirrorConfig.create(
                repository=repository,
                root_rule=root_rule,
                is_enabled=is_enabled,
                internal_robot=internal_robot,
                external_reference=external_reference,
                external_registry_username=username,
                external_registry_password=password,
                external_registry_config=external_registry_config or {},
                sync_interval=sync_interval,
                sync_start_date=sync_start_date or datetime.utcnow(),
            )
        except IntegrityError:
            # A config already exists for this repository; return it as-is.
            return RepoMirrorConfig.get(repository=repository)

        # Change Repository state to mirroring mode as needed
        if repository.state != RepositoryState.MIRROR:
            query = Repository.update(state=RepositoryState.MIRROR).where(
                Repository.id == repository.id
            )
            if not query.execute():
                raise DataModelException("Could not change the state of the repository")

        return mirror
def update_sync_status(mirror, sync_status):
    """
    Set the mirror's sync_status, rolling its sync_transaction_id.

    Returns the refreshed RepoMirrorConfig, or None when the guarded update
    did not apply (another process rolled the transaction id first).
    """
    rows_updated = RepoMirrorConfig.update(
        sync_transaction_id=uuid_generator(),
        sync_status=sync_status,
    ).where(
        RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
        RepoMirrorConfig.id == mirror.id,
    ).execute()

    return RepoMirrorConfig.get_by_id(mirror.id) if rows_updated else None
def get_eligible_mirrors():
    """
    Returns the RepoMirrorConfig that are ready to run now. This includes those that are:

    1. Not currently syncing but whose start time is in the past
    2. Status of "sync now"
    3. Currently marked as syncing but whose expiration time is in the past
    """
    now = datetime.utcnow()

    # Case 2: explicitly requested syncs that no worker has claimed yet
    # (`>> None` is peewee's IS NULL test).
    immediate_candidates_filter = (
        RepoMirrorConfig.sync_status == RepoMirrorStatus.SYNC_NOW) & (
            RepoMirrorConfig.sync_expiration_date >> None)

    # Case 1: enabled, unclaimed mirrors whose scheduled start has passed and
    # which still have retries remaining.
    ready_candidates_filter = (
        (RepoMirrorConfig.sync_start_date <= now) &
        (RepoMirrorConfig.sync_retries_remaining > 0) &
        (RepoMirrorConfig.sync_status != RepoMirrorStatus.SYNCING) &
        (RepoMirrorConfig.sync_expiration_date >> None) &
        (RepoMirrorConfig.is_enabled == True))

    # Case 3: mirrors claimed by a worker whose claim has expired; they are
    # eligible to be picked up again.
    expired_candidates_filter = (
        (RepoMirrorConfig.sync_start_date <= now) &
        (RepoMirrorConfig.sync_retries_remaining > 0) &
        (RepoMirrorConfig.sync_status == RepoMirrorStatus.SYNCING) &
        (RepoMirrorConfig.sync_expiration_date <= now) &
        (RepoMirrorConfig.is_enabled == True))

    # Only repositories in MIRROR state are considered; oldest scheduled
    # start date first.
    return (RepoMirrorConfig.select().join(Repository).where(
        Repository.state == RepositoryState.MIRROR).where(
            immediate_candidates_filter | ready_candidates_filter |
            expired_candidates_filter).order_by(
                RepoMirrorConfig.sync_start_date.asc()))
def get_mirror(repository):
    """
    Look up the RepoMirrorConfig for the given Repository; None when absent.
    """
    mirror = None
    try:
        mirror = RepoMirrorConfig.get(repository=repository)
    except RepoMirrorConfig.DoesNotExist:
        pass
    return mirror
def expire_mirror(mirror):
    """
    Set the mirror to synchronize ASAP and reset its failure count.

    The guarded update refuses to touch a mirror that is currently SYNCING,
    and also fails if another process has rolled the transaction id.

    Returns the refreshed RepoMirrorConfig, or None when the update did not
    apply.
    """
    # Set the next-sync date to now.
    query = (RepoMirrorConfig.update(
        sync_transaction_id=uuid_generator(),
        sync_expiration_date=datetime.utcnow(),
        sync_retries_remaining=MAX_SYNC_RETRIES).where(
            RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
            RepoMirrorConfig.id == mirror.id,
            # Bug fix: the original compared `RepoMirrorConfig.state`, a field
            # that does not exist on this model (`state` lives on Repository).
            # The intended guard — do not expire a mirror mid-sync — is on
            # sync_status, as used everywhere else in this module.
            RepoMirrorConfig.sync_status != RepoMirrorStatus.SYNCING))

    # Fetch and return the latest updates
    if query.execute():
        return RepoMirrorConfig.get_by_id(mirror.id)

    # Unable to update expiration date. Perhaps another process has claimed it?
    return None  # TODO: Raise some Exception?
def get_mirror(repository):
    """
    Return the RepoMirrorConfig associated with the given Repository — with
    its robot User (left outer join) and root RepoMirrorRule pre-fetched —
    or None if it doesn't exist.
    """
    query = (
        RepoMirrorConfig.select(RepoMirrorConfig, User, RepoMirrorRule)
        .join(User, JOIN.LEFT_OUTER)
        .switch(RepoMirrorConfig)
        .join(RepoMirrorRule)
        .where(RepoMirrorConfig.repository == repository)
    )
    try:
        return query.get()
    except RepoMirrorConfig.DoesNotExist:
        return None
def release_mirror(mirror, sync_status):
    """
    Return a mirror to the queue and update its status.

    Upon success, move next sync to be at the next interval in the future.

    Failures remain with current date to ensure they are picked up for repeat attempt. After
    MAX_SYNC_RETRIES, the next sync will be moved ahead as if it were a success. This is to allow a
    daily sync, for example, to retry the next day. Without this, users would need to manually run
    syncs to clear failure state.
    """
    if sync_status == RepoMirrorStatus.FAIL:
        retries = max(0, mirror.sync_retries_remaining - 1)

    # NOTE(review): `retries` is only bound on the FAIL path above; a
    # sync_status other than FAIL/SUCCESS would raise NameError on the
    # `retries < 1` test below — confirm callers only pass those two.
    if sync_status == RepoMirrorStatus.SUCCESS or retries < 1:
        # Success (or retries exhausted): schedule the next sync at the next
        # interval boundary after `now`, measured from the original
        # sync_start_date, and refill the retry counter.
        now = datetime.utcnow()
        delta = now - mirror.sync_start_date
        delta_seconds = (delta.days * 24 * 60 * 60) + delta.seconds
        next_start_date = now + timedelta(
            seconds=mirror.sync_interval - (delta_seconds % mirror.sync_interval))
        retries = MAX_SYNC_RETRIES
    else:
        # Failure with retries remaining: keep the current start date so the
        # mirror is picked up again promptly.
        next_start_date = mirror.sync_start_date

    # Guarded release: only applies if we still hold the transaction id.
    query = RepoMirrorConfig.update(
        sync_transaction_id=uuid_generator(),
        sync_status=sync_status,
        sync_start_date=next_start_date,
        sync_expiration_date=None,
        sync_retries_remaining=retries,
    ).where(
        RepoMirrorConfig.id == mirror.id,
        RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
    )

    if query.execute():
        return RepoMirrorConfig.get_by_id(mirror.id)

    # Unable to release Mirror. Has it been claimed by another process?
    return None
def claim_mirror(mirror):
    """
    Attempt to take an exclusive claim on the mirror: mark it SYNCING with a
    fresh expiration window while rolling its sync_transaction_id.

    Returns the refreshed RepoMirrorConfig on success, or None when another
    process claimed the mirror first.
    """
    # The claim is valid until this expiration time.
    expiration = datetime.utcnow() + timedelta(seconds=MAX_SYNC_DURATION)

    claimed = RepoMirrorConfig.update(
        sync_status=RepoMirrorStatus.SYNCING,
        sync_expiration_date=expiration,
        sync_transaction_id=uuid_generator(),
    ).where(
        RepoMirrorConfig.id == mirror.id,
        RepoMirrorConfig.sync_transaction_id == mirror.sync_transaction_id,
    ).execute()

    if claimed:
        return RepoMirrorConfig.get_by_id(mirror.id)
    return None  # Another process must have claimed the mirror faster.
def process(resources):
    """
    Reconcile desired repository-mirror configurations against the database.

    `resources` is a list of dicts describing, per entry, the target
    repository, robot account, external source, and sync schedule. Missing
    mirrors are created; existing ones are updated field-by-field with a
    human-readable message appended for each change.

    Returns a `(payload, http_status)` tuple; the payload carries `failed`,
    `changed`, and the `meta` list of change messages.
    """
    response = []
    changed = False

    for resource in resources:
        p_state = resource["state"]
        p_user = resource["user"]
        p_external_user = resource["external_user"]
        p_external_password = resource["external_password"]
        p_external_reference = resource["external_reference"]
        p_external_tag = resource["external_tag"]
        p_internal_robot = resource["internal_robot"]
        p_internal_namespace = resource["internal_namespace"]
        p_internal_repository = resource["internal_repository"]
        p_sync_start_date = resource["sync_start_date"]
        p_sync_interval = resource["sync_interval"]
        p_is_enabled = resource["is_enabled"]

        # "now" is a convenience alias; otherwise expect "YYYY-MM-DD HH:MM".
        if p_sync_start_date == "now":
            p_sync_start_date = datetime.now()
        else:
            p_sync_start_date = datetime.strptime(p_sync_start_date, "%Y-%m-%d %H:%M")

        user = model.user.get_user(p_user)
        if user is None:
            return {"failed": True, "msg": "User '%s' does not exist" % (p_user)}, 400

        name = "%s/%s" % (p_internal_namespace, p_internal_repository)
        repository = model.repository.get_repository(
            p_internal_namespace, p_internal_repository
        )
        if repository is None:
            return (
                {
                    "failed": True,
                    "msg": "Destination repository '%s/%s' does not exist"
                    % (p_internal_namespace, p_internal_repository),
                },
                400,
            )

        robot = model.user.lookup_robot(p_internal_robot)
        if robot is None:
            return (
                {
                    "failed": True,
                    "msg": "Robot '%s' does not exist" % (p_internal_robot),
                },
                400,
            )

        # TODO: Move this to repository
        repository.state = RepositoryState.MIRROR
        repository.save()

        # NOTE(review): only "present" is handled; any other p_state value
        # (e.g. "absent") currently falls through without action — confirm
        # whether deletion support is intended.
        if p_state == "present":
            try:
                rule = RepoMirrorRule.get(repository=repository)
            except RepoMirrorRule.DoesNotExist:
                rule = None

            try:
                config = RepoMirrorConfig.get(repository=repository)
            except RepoMirrorConfig.DoesNotExist:
                config = None

            if rule is None or config is None:
                # No (complete) mirror yet: create rule + config from scratch.
                changed = True
                rule = RepoMirrorRule.create(
                    repository=repository,
                    rule_type=RepoMirrorRuleType.TAG_GLOB_CSV,
                    rule_value=p_external_tag,
                )
                config = RepoMirrorConfig.create(
                    repository=repository,
                    root_rule=rule,
                    internal_robot=robot,
                    external_reference=p_external_reference,
                    external_registry_username=p_external_user,
                    external_registry_password=p_external_password,
                    external_registry_config={},
                    sync_start_date=p_sync_start_date,
                    # Bug fix: honor the requested interval instead of a
                    # hard-coded 30; the update path below already applies
                    # p_sync_interval.
                    sync_interval=p_sync_interval,
                    sync_retries_remaining=3,
                    sync_status=RepoMirrorStatus.NEVER_RUN,
                    is_enabled=p_is_enabled,
                )
                response.append("Repository mirror '%s' created" % name)
            else:
                rule_changed = False
                config_changed = False

                if rule.rule_value != p_external_tag:
                    rule_changed = True
                    rule.rule_value = p_external_tag
                    response.append(
                        "Repository mirror '%s' source tag updated to '%s'"
                        % (name, p_external_tag)
                    )

                if config.internal_robot != robot:
                    config_changed = True
                    config.internal_robot = robot
                    response.append(
                        "Repository mirror '%s' robot updated to '%s'"
                        % (name, p_internal_robot)
                    )

                if config.external_reference != p_external_reference:
                    config_changed = True
                    config.external_reference = p_external_reference
                    response.append(
                        "Repository mirror '%s' source registry reference updated to '%s'"
                        % (name, p_external_reference)
                    )

                # Bug fix: the original also compared config.external_namespace
                # and config.external_repository against p_external_namespace /
                # p_external_repository, but those local variables were never
                # assigned (guaranteed NameError on every update) and the
                # fields are not part of the config created above. The external
                # source is fully described by external_reference, handled just
                # above, so those comparisons were removed.

                if config.external_registry_username != p_external_user:
                    config_changed = True
                    config.external_registry_username = p_external_user
                    response.append(
                        "Repository mirror '%s' source username updated to '%s'"
                        % (name, p_external_user)
                    )

                if config.external_registry_password != p_external_password:
                    config_changed = True
                    config.external_registry_password = p_external_password
                    response.append(
                        "Repository mirror '%s' source password updated" % (name)
                    )

                if p_sync_start_date:
                    if config.sync_start_date != p_sync_start_date:
                        config_changed = True
                        config.sync_start_date = p_sync_start_date
                        response.append(
                            "Repository mirror '%s' sync start date updated" % (name)
                        )

                if config.is_enabled != p_is_enabled:
                    config_changed = True
                    config.is_enabled = p_is_enabled
                    response.append(
                        "Repository mirror '%s' enabled flag updated" % (name)
                    )

                # These are (re)applied unconditionally, so an existing mirror
                # is always saved and always reported as changed.
                config.sync_status = RepoMirrorStatus.NEVER_RUN
                config.sync_interval = p_sync_interval
                config.external_registry_config = {}
                config_changed = True

                if rule_changed:
                    rule.save()
                    changed = True
                if config_changed:
                    config.save()
                    changed = True

    return {"failed": False, "changed": changed, "meta": response}, 200
def disable_existing_mirrors():
    """
    Flip the is_enabled flag off on every RepoMirrorConfig row.
    """
    for mirror in RepoMirrorConfig.select().execute():
        mirror.is_enabled = False
        mirror.save()
def get_max_id_for_repo_mirror_config():
    """
    Gets the maximum id for repository mirroring.
    """
    max_id = fn.Max(RepoMirrorConfig.id)
    return RepoMirrorConfig.select(max_id).scalar()
def _failed_inspect_call(tag):
    """
    Build one expected `skopeo inspect` invocation for `tag` together with its
    simulated "manifest unknown" failure result.
    """
    return {
        "args": [
            "/usr/bin/skopeo",
            "inspect",
            "--tls-verify=True",
            "docker://registry.example.com/namespace/repository:%s" % tag,
        ],
        "results": SkopeoResults(
            False,
            [],
            "",
            'time="2019-09-18T13:29:40Z" level=fatal msg="Error reading manifest %s in registry.example.com/namespace/repository: manifest unknown: manifest unknown"'
            % tag,
        ),
    }


def test_inspect_error_mirror(run_skopeo_mock, initialized_db, app):
    """
    Test for no tag for skopeo inspect.

    The mirror is processed four times, asserting that the remaining syncs
    decrement until next sync is bumped to the future, confirming the fourth
    is never processed.
    """

    def skopeo_test(args, proxy):
        # Consume the next expected call; on mismatch push it back so the
        # post-run assertions can observe the unconsumed expectation.
        skopeo_call = None
        try:
            skopeo_call = skopeo_calls.pop(0)
            assert args == skopeo_call["args"]
            assert proxy == {}
            return skopeo_call["results"]
        except Exception as e:
            # Bug fix: guard against `skopeo_call` being unbound when the
            # pop itself raised (empty list), which previously masked the
            # real failure with a NameError.
            if skopeo_call is not None:
                skopeo_calls.append(skopeo_call)
            raise e

    run_skopeo_mock.side_effect = skopeo_test
    worker = RepoMirrorWorker()

    mirror, repo = create_mirror_repo_robot(["7.1"])

    # Calls 1-3: every failed sync decrements sync_retries_remaining; the
    # third failure exhausts the retries, which resets the counter to 3 and
    # pushes the next sync start date into the future.
    for expected_retries in (2, 1, 3):
        skopeo_calls = [_failed_inspect_call("7.1"), _failed_inspect_call("latest")]
        worker._process_mirrors()
        mirror = RepoMirrorConfig.get_by_id(mirror.id)
        assert [] == skopeo_calls
        assert expected_retries == mirror.sync_retries_remaining

    # Call 4: the next sync is now scheduled in the future, so the mirror is
    # NOT processed and both queued skopeo expectations remain unconsumed.
    skopeo_calls = [_failed_inspect_call("7.1"), _failed_inspect_call("latest")]
    worker._process_mirrors()
    mirror = RepoMirrorConfig.get_by_id(mirror.id)
    assert 2 == len(skopeo_calls)
    assert 3 == mirror.sync_retries_remaining
def get_min_id_for_repo_mirror_config():
    """
    Gets the minimum id for a repository mirroring.
    """
    min_id = fn.Min(RepoMirrorConfig.id)
    return RepoMirrorConfig.select(min_id).scalar()
def ask_disable_namespace(username, queue_name):
    """
    Interactively disable a user/organization namespace.

    Prints a summary of the namespace's queued builds and build triggers,
    asks for confirmation, then (in one transaction) marks the user disabled
    and deletes the namespace's builds, build triggers, mirror
    configurations, and queued build items.

    Returns the (now disabled) user on success, or None when the operator
    declines the confirmation prompt. Raises Exception for an unknown
    namespace.
    """
    user = model.user.get_namespace_user(username)
    if user is None:
        raise Exception("Unknown user or organization %s" % username)

    if not user.enabled:
        print("NOTE: Namespace %s is already disabled" % username)

    # Count live queue items under this namespace's queue prefix
    # (`%%` is a literal SQL wildcard in the prefix pattern).
    queue_prefix = "%s/%s/%%" % (queue_name, username)
    existing_queue_item_count = (QueueItem.select().where(
        QueueItem.queue_name**queue_prefix).where(
            QueueItem.available == 1,
            QueueItem.retries_remaining > 0,
            QueueItem.processing_expires > datetime.now(),
        ).count())

    repository_trigger_count = (
        RepositoryBuildTrigger.select().join(Repository).where(
            Repository.namespace_user == user).count())

    print("=============================================")
    print("For namespace %s" % username)
    print("=============================================")

    print("User %s has email address %s" % (username, user.email))
    print("User %s has %s queued builds in their namespace" %
          (username, existing_queue_item_count))
    print("User %s has %s build triggers in their namespace" %
          (username, repository_trigger_count))

    confirm_msg = (
        "Would you like to disable this user and delete their triggers and builds? [y/N]> "
    )
    letter = str(input(confirm_msg))
    if letter.lower() != "y":
        print("Action canceled")
        return

    print("=============================================")

    triggers = []
    # Bug fix: initialize `mirrors` so the summary line below does not raise
    # a NameError when the namespace has no repositories (triggers and
    # count_removed were already pre-initialized; mirrors was missed).
    mirrors = []
    count_removed = 0
    with db_transaction():
        user.enabled = False
        user.save()

        repositories_query = Repository.select().where(
            Repository.namespace_user == user)
        if len(repositories_query.clone()):
            builds = list(RepositoryBuild.select().where(
                RepositoryBuild.repository << list(repositories_query)))

            triggers = list(RepositoryBuildTrigger.select().where(
                RepositoryBuildTrigger.repository << list(repositories_query)))

            mirrors = list(RepoMirrorConfig.select().where(
                RepoMirrorConfig.repository << list(repositories_query)))

            # Delete all builds for the user's repositories.
            if builds:
                RepositoryBuild.delete().where(
                    RepositoryBuild.id << builds).execute()

            # Delete all build triggers for the user's repositories.
            if triggers:
                RepositoryBuildTrigger.delete().where(
                    RepositoryBuildTrigger.id << triggers).execute()

            # Delete all mirrors for the user's repositories.
            if mirrors:
                RepoMirrorConfig.delete().where(
                    RepoMirrorConfig.id << mirrors).execute()

            # Delete all queue items for the user's namespace.
            dockerfile_build_queue = WorkQueue(queue_name, tf, has_namespace=True)
            count_removed = dockerfile_build_queue.delete_namespaced_items(
                user.username)

    info = (user.username, len(triggers), count_removed, len(mirrors))
    print(
        "Namespace %s disabled, %s triggers deleted, %s queued builds removed, %s mirrors deleted"
        % info)

    return user