From 39ffd9977e2f6cb1ca1757e59173fc93e0eab72c Mon Sep 17 00:00:00 2001 From: Kuang-che Wu Date: Fri, 18 Oct 2024 23:32:08 +0800 Subject: sync: reduce multiprocessing serialization overhead Background: - Manifest object is large (for projects like Android) in terms of serialization cost and size (more than 1mb). - Lots of Project objects usually share only a few manifest objects. Before this CL, Project objects were passed to workers via function parameters. Function parameters are pickled separately (in chunk). In other words, manifests are serialized again and again. The major serialization overhead of repo sync was O(manifest_size * projects / chunksize) This CL uses the following tricks to reduce serialization overhead. - All projects are pickled in one invocation. Because Project objects share manifests, the pickle library remembers which objects are already seen and avoids the serialization cost. - Pass the Project objects to workers at worker initialization time. And pass project index as function parameters instead. The number of workers is much smaller than the number of projects. - Worker init state is shared on Linux (fork based). So it requires zero serialization for Project objects. On Linux (fork based), the serialization overhead is O(projects) --- one int per project On Windows (spawn based), the serialization overhead is O(manifest_size * min(workers, projects)) Moreover, use chunksize=1 to avoid the chance that some workers are idle while other workers still have more than one job in their chunk queue. Using 2.7k projects as the baseline, originally "repo sync" no-op sync takes 31s for fetch and 25s for checkout on my Linux workstation. With this CL, it takes 12s for fetch and 1s for checkout. 
Bug: b/371638995 Change-Id: Ifa22072ea54eacb4a5c525c050d84de371e87caa Reviewed-on: https://gerrit-review.googlesource.com/c/git-repo/+/439921 Tested-by: Kuang-che Wu Reviewed-by: Josip Sokcevic Commit-Queue: Kuang-che Wu --- command.py | 50 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 4 deletions(-) (limited to 'command.py') diff --git a/command.py b/command.py index fa48264b..2a2ce138 100644 --- a/command.py +++ b/command.py @@ -12,6 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. +import contextlib import multiprocessing import optparse import os @@ -70,6 +71,14 @@ class Command: # migrated subcommands can set it to False. MULTI_MANIFEST_SUPPORT = True + # Shared data across parallel execution workers. + _parallel_context = None + + @classmethod + def get_parallel_context(cls): + assert cls._parallel_context is not None + return cls._parallel_context + def __init__( self, repodir=None, @@ -242,9 +251,36 @@ class Command: """Perform the action, after option parsing is complete.""" raise NotImplementedError - @staticmethod + @classmethod + @contextlib.contextmanager + def ParallelContext(cls): + """Obtains the context, which is shared to ExecuteInParallel workers. + + Callers can store data in the context dict before invocation of + ExecuteInParallel. The dict will then be shared to child workers of + ExecuteInParallel. + """ + assert cls._parallel_context is None + cls._parallel_context = {} + try: + yield + finally: + cls._parallel_context = None + + @classmethod + def _SetParallelContext(cls, context): + cls._parallel_context = context + + @classmethod def ExecuteInParallel( - jobs, func, inputs, callback, output=None, ordered=False + cls, + jobs, + func, + inputs, + callback, + output=None, + ordered=False, + chunksize=WORKER_BATCH_SIZE, ): """Helper for managing parallel execution boiler plate. @@ -269,6 +305,8 @@ class Command: output: An output manager. 
May be progress.Progess or color.Coloring. ordered: Whether the jobs should be processed in order. + chunksize: The number of jobs processed in batch by parallel + workers. Returns: The |callback| function's results are returned. @@ -278,12 +316,16 @@ class Command: if len(inputs) == 1 or jobs == 1: return callback(None, output, (func(x) for x in inputs)) else: - with multiprocessing.Pool(jobs) as pool: + with multiprocessing.Pool( + jobs, + initializer=cls._SetParallelContext, + initargs=(cls._parallel_context,), + ) as pool: submit = pool.imap if ordered else pool.imap_unordered return callback( pool, output, - submit(func, inputs, chunksize=WORKER_BATCH_SIZE), + submit(func, inputs, chunksize=chunksize), ) finally: if isinstance(output, progress.Progress): -- cgit v1.2.3-54-g00ecf