/home/lnzliplg/www/multiprocessing.zip
PK{��\�U�DDshared_memory.pynu�[���"""Provides shared memory for direct access across processes.

The API of this package is currently provisional. Refer to the
documentation for details.
"""


__all__ = [ 'SharedMemory', 'ShareableList' ]


from functools import partial
import mmap
import os
import errno
import struct
import secrets

if os.name == "nt":
    import _winapi
    _USE_POSIX = False
else:
    import _posixshmem
    _USE_POSIX = True


_O_CREX = os.O_CREAT | os.O_EXCL  # create exclusively: fail if the name already exists

# FreeBSD (and perhaps other BSDs) limit names to 14 characters.
_SHM_SAFE_NAME_LENGTH = 14

# Shared memory block name prefix
if _USE_POSIX:
    _SHM_NAME_PREFIX = '/psm_'
else:
    _SHM_NAME_PREFIX = 'wnsm_'


def _make_filename():
    """Return a freshly generated random name for a shared memory block.

    The name is the platform prefix plus a hex token, sized so the full
    name stays within the BSD-safe length limit.
    """
    # Each random byte becomes two hex characters in the final name.
    token_nbytes = (_SHM_SAFE_NAME_LENGTH - len(_SHM_NAME_PREFIX)) // 2
    assert token_nbytes >= 2, '_SHM_NAME_PREFIX too long'
    candidate = _SHM_NAME_PREFIX + secrets.token_hex(token_nbytes)
    assert len(candidate) <= _SHM_SAFE_NAME_LENGTH
    return candidate


class SharedMemory:
    """Creates a new shared memory block or attaches to an existing
    shared memory block.

    Every shared memory block is assigned a unique name.  This enables
    one process to create a shared memory block with a particular name
    so that a different process can attach to that same shared memory
    block using that same name.

    As a resource for sharing data across processes, shared memory blocks
    may outlive the original process that created them.  When one process
    no longer needs access to a shared memory block that might still be
    needed by other processes, the close() method should be called.
    When a shared memory block is no longer needed by any process, the
    unlink() method should be called to ensure proper cleanup."""

    # Defaults; enables close() and unlink() to run without errors.
    _name = None
    _fd = -1
    _mmap = None
    _buf = None
    _flags = os.O_RDWR
    _mode = 0o600
    # POSIX shm names carry a leading "/" internally; Windows names do not.
    _prepend_leading_slash = True if _USE_POSIX else False

    def __init__(self, name=None, create=False, size=0):
        """Create a new block or attach to an existing one.

        name:   Name of the block to attach to; may only be None when
                create=True, in which case a random unused name is chosen.
        create: When True, request a brand-new block (O_CREAT | O_EXCL).
        size:   Requested size in bytes; must be > 0 when creating.  The
                effective size reported afterwards may differ (it is read
                back from the OS below).
        """
        if not size >= 0:
            raise ValueError("'size' must be a positive integer")
        if create:
            # O_EXCL makes creation fail if a block by this name exists.
            self._flags = _O_CREX | os.O_RDWR
            if size == 0:
                raise ValueError("'size' must be a positive number different from zero")
        if name is None and not self._flags & os.O_EXCL:
            raise ValueError("'name' can only be None if create=True")

        if _USE_POSIX:

            # POSIX Shared Memory

            if name is None:
                # Generate random names until shm_open succeeds; a
                # FileExistsError means the candidate name was taken.
                while True:
                    name = _make_filename()
                    try:
                        self._fd = _posixshmem.shm_open(
                            name,
                            self._flags,
                            mode=self._mode
                        )
                    except FileExistsError:
                        continue
                    self._name = name
                    break
            else:
                name = "/" + name if self._prepend_leading_slash else name
                self._fd = _posixshmem.shm_open(
                    name,
                    self._flags,
                    mode=self._mode
                )
                self._name = name
            try:
                if create and size:
                    # A freshly created POSIX shm object starts at size 0.
                    os.ftruncate(self._fd, size)
                # When attaching, adopt the actual size set by the creator.
                stats = os.fstat(self._fd)
                size = stats.st_size
                self._mmap = mmap.mmap(self._fd, size)
            except OSError:
                self.unlink()
                raise

            # Register with the resource tracker so leaked blocks can be
            # cleaned up when the process exits.
            from .resource_tracker import register
            register(self._name, "shared_memory")

        else:

            # Windows Named Shared Memory

            if create:
                while True:
                    temp_name = _make_filename() if name is None else name
                    # Create and reserve shared memory block with this name
                    # until it can be attached to by mmap.
                    h_map = _winapi.CreateFileMapping(
                        _winapi.INVALID_HANDLE_VALUE,
                        _winapi.NULL,
                        _winapi.PAGE_READWRITE,
                        (size >> 32) & 0xFFFFFFFF,
                        size & 0xFFFFFFFF,
                        temp_name
                    )
                    try:
                        # CreateFileMapping can succeed on a preexisting
                        # mapping; GetLastError distinguishes that case.
                        last_error_code = _winapi.GetLastError()
                        if last_error_code == _winapi.ERROR_ALREADY_EXISTS:
                            if name is not None:
                                raise FileExistsError(
                                    errno.EEXIST,
                                    os.strerror(errno.EEXIST),
                                    name,
                                    _winapi.ERROR_ALREADY_EXISTS
                                )
                            else:
                                # Random name collided; try another one.
                                continue
                        self._mmap = mmap.mmap(-1, size, tagname=temp_name)
                    finally:
                        _winapi.CloseHandle(h_map)
                    self._name = temp_name
                    break

            else:
                self._name = name
                # Dynamically determine the existing named shared memory
                # block's size which is likely a multiple of mmap.PAGESIZE.
                h_map = _winapi.OpenFileMapping(
                    _winapi.FILE_MAP_READ,
                    False,
                    name
                )
                try:
                    p_buf = _winapi.MapViewOfFile(
                        h_map,
                        _winapi.FILE_MAP_READ,
                        0,
                        0,
                        0
                    )
                finally:
                    _winapi.CloseHandle(h_map)
                size = _winapi.VirtualQuerySize(p_buf)
                self._mmap = mmap.mmap(-1, size, tagname=name)

        self._size = size
        self._buf = memoryview(self._mmap)

    def __del__(self):
        # Best-effort release of this process's handles; never let an
        # OSError escape from the finalizer.
        try:
            self.close()
        except OSError:
            pass

    def __reduce__(self):
        # Unpickling re-attaches (create=False) to the same named block.
        return (
            self.__class__,
            (
                self.name,
                False,
                self.size,
            ),
        )

    def __repr__(self):
        return f'{self.__class__.__name__}({self.name!r}, size={self.size})'

    @property
    def buf(self):
        "A memoryview of contents of the shared memory block."
        return self._buf

    @property
    def name(self):
        "Unique name that identifies the shared memory block."
        reported_name = self._name
        # Strip the slash added internally on POSIX so callers see the
        # same name they supplied.
        if _USE_POSIX and self._prepend_leading_slash:
            if self._name.startswith("/"):
                reported_name = self._name[1:]
        return reported_name

    @property
    def size(self):
        "Size in bytes."
        return self._size

    def close(self):
        """Closes access to the shared memory from this instance but does
        not destroy the shared memory block."""
        if self._buf is not None:
            # The memoryview must be released before the mmap can close.
            self._buf.release()
            self._buf = None
        if self._mmap is not None:
            self._mmap.close()
            self._mmap = None
        if _USE_POSIX and self._fd >= 0:
            os.close(self._fd)
            self._fd = -1

    def unlink(self):
        """Requests that the underlying shared memory block be destroyed.

        In order to ensure proper cleanup of resources, unlink should be
        called once (and only once) across all processes which have access
        to the shared memory block.

        This is a no-op on Windows builds: only POSIX blocks are
        explicitly unlinked here."""
        if _USE_POSIX and self._name:
            from .resource_tracker import unregister
            _posixshmem.shm_unlink(self._name)
            unregister(self._name, "shared_memory")

# Encoding used by ShareableList when storing str values and packing formats.
_encoding = "utf8"

class ShareableList:
    """Pattern for a mutable list-like object shareable via a shared
    memory block.  It differs from the built-in list type in that these
    lists can not change their overall length (i.e. no append, insert,
    etc.)

    Because values are packed into a memoryview as bytes, the struct
    packing format for any storable value must require no more than 8
    characters to describe its format.

    Buffer layout (see the _offset_* properties below):
      - 8 bytes:          list length ("q")
      - 8 bytes per item: allocated byte count for that item ("q")
      - variable:         the packed item data itself
      - 8 bytes per item: that item's struct packing format ("8s")
      - 1 byte per item:  that item's back-transform code ("b")
    """

    # struct format per supported type; str/bytes formats take a byte
    # count via %-substitution, the pad-heavy bool/None formats occupy a
    # full 8-byte slot.
    _types_mapping = {
        int: "q",
        float: "d",
        bool: "xxxxxxx?",
        str: "%ds",
        bytes: "%ds",
        None.__class__: "xxxxxx?x",
    }
    # Every item slot is padded to a multiple of 8 bytes.
    _alignment = 8
    # Maps the stored 1-byte code back to a function that reconstructs
    # the original Python object from the raw unpacked value.
    _back_transforms_mapping = {
        0: lambda value: value,                   # int, float, bool
        1: lambda value: value.rstrip(b'\x00').decode(_encoding),  # str
        2: lambda value: value.rstrip(b'\x00'),   # bytes
        3: lambda _value: None,                   # None
    }

    @staticmethod
    def _extract_recreation_code(value):
        """Used in concert with _back_transforms_mapping to convert values
        into the appropriate Python objects when retrieving them from
        the list as well as when storing them."""
        if not isinstance(value, (str, bytes, None.__class__)):
            return 0
        elif isinstance(value, str):
            return 1
        elif isinstance(value, bytes):
            return 2
        else:
            return 3  # NoneType

    def __init__(self, sequence=None, *, name=None):
        """Create a new list from *sequence*, or attach to the existing
        shared memory block called *name* when sequence is None."""
        if sequence is not None:
            # One struct format per item; str/bytes formats embed a byte
            # count rounded up past the item length to a multiple of
            # _alignment.
            _formats = [
                self._types_mapping[type(item)]
                    if not isinstance(item, (str, bytes))
                    else self._types_mapping[type(item)] % (
                        self._alignment * (len(item) // self._alignment + 1),
                    )
                for item in sequence
            ]
            self._list_len = len(_formats)
            # NOTE(review): assert is stripped under -O, so an over-long
            # format (from a very large str/bytes item) would then pass
            # silently — consider raising ValueError instead.
            assert sum(len(fmt) <= 8 for fmt in _formats) == self._list_len
            self._allocated_bytes = tuple(
                    self._alignment if fmt[-1] != "s" else int(fmt[:-1])
                    for fmt in _formats
            )
            _recreation_codes = [
                self._extract_recreation_code(item) for item in sequence
            ]
            # Total buffer size: length field + size metainfo + data +
            # packing formats + back-transform codes.
            requested_size = struct.calcsize(
                "q" + self._format_size_metainfo +
                "".join(_formats) +
                self._format_packing_metainfo +
                self._format_back_transform_codes
            )

        else:
            requested_size = 8  # Some platforms require > 0.

        if name is not None and sequence is None:
            # Attach to an existing block created by another process.
            self.shm = SharedMemory(name)
        else:
            self.shm = SharedMemory(name, create=True, size=requested_size)

        if sequence is not None:
            _enc = _encoding
            # Header: list length followed by the per-item slot sizes.
            struct.pack_into(
                "q" + self._format_size_metainfo,
                self.shm.buf,
                0,
                self._list_len,
                *(self._allocated_bytes)
            )
            # The item values themselves.
            struct.pack_into(
                "".join(_formats),
                self.shm.buf,
                self._offset_data_start,
                *(v.encode(_enc) if isinstance(v, str) else v for v in sequence)
            )
            # Per-item packing formats (fixed 8 bytes each).
            struct.pack_into(
                self._format_packing_metainfo,
                self.shm.buf,
                self._offset_packing_formats,
                *(v.encode(_enc) for v in _formats)
            )
            # Per-item back-transform codes (1 byte each).
            struct.pack_into(
                self._format_back_transform_codes,
                self.shm.buf,
                self._offset_back_transform_codes,
                *(_recreation_codes)
            )

        else:
            self._list_len = len(self)  # Obtains size from offset 0 in buffer.
            self._allocated_bytes = struct.unpack_from(
                self._format_size_metainfo,
                self.shm.buf,
                1 * 8  # skip the leading list-length field
            )

    def _get_packing_format(self, position):
        "Gets the packing format for a single value stored in the list."
        position = position if position >= 0 else position + self._list_len
        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")

        v = struct.unpack_from(
            "8s",
            self.shm.buf,
            self._offset_packing_formats + position * 8
        )[0]
        # Stored formats are NUL-padded to 8 bytes; strip before decoding.
        fmt = v.rstrip(b'\x00')
        fmt_as_str = fmt.decode(_encoding)

        return fmt_as_str

    def _get_back_transform(self, position):
        "Gets the back transformation function for a single value."

        position = position if position >= 0 else position + self._list_len
        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")

        transform_code = struct.unpack_from(
            "b",
            self.shm.buf,
            self._offset_back_transform_codes + position
        )[0]
        transform_function = self._back_transforms_mapping[transform_code]

        return transform_function

    def _set_packing_format_and_transform(self, position, fmt_as_str, value):
        """Sets the packing format and back transformation code for a
        single value in the list at the specified position."""

        position = position if position >= 0 else position + self._list_len
        if (position >= self._list_len) or (self._list_len < 0):
            raise IndexError("Requested position out of range.")

        struct.pack_into(
            "8s",
            self.shm.buf,
            self._offset_packing_formats + position * 8,
            fmt_as_str.encode(_encoding)
        )

        transform_code = self._extract_recreation_code(value)
        struct.pack_into(
            "b",
            self.shm.buf,
            self._offset_back_transform_codes + position,
            transform_code
        )

    def __getitem__(self, position):
        """Return the item at *position*, converted back to its original
        Python type."""
        try:
            # Slicing _allocated_bytes also yields the right offset for
            # negative positions.
            offset = self._offset_data_start \
                     + sum(self._allocated_bytes[:position])
            (v,) = struct.unpack_from(
                self._get_packing_format(position),
                self.shm.buf,
                offset
            )
        except IndexError:
            raise IndexError("index out of range")

        back_transform = self._get_back_transform(position)
        v = back_transform(v)

        return v

    def __setitem__(self, position, value):
        """Replace the item at *position*; a str/bytes value must fit in
        the bytes originally allocated for that slot."""
        try:
            offset = self._offset_data_start \
                     + sum(self._allocated_bytes[:position])
            current_format = self._get_packing_format(position)
        except IndexError:
            raise IndexError("assignment index out of range")

        if not isinstance(value, (str, bytes)):
            new_format = self._types_mapping[type(value)]
            encoded_value = value
        else:
            encoded_value = (value.encode(_encoding)
                             if isinstance(value, str) else value)
            if len(encoded_value) > self._allocated_bytes[position]:
                raise ValueError("bytes/str item exceeds available storage")
            if current_format[-1] == "s":
                # Slot already holds a string format of the right width.
                new_format = current_format
            else:
                new_format = self._types_mapping[str] % (
                    self._allocated_bytes[position],
                )

        self._set_packing_format_and_transform(
            position,
            new_format,
            value
        )
        struct.pack_into(new_format, self.shm.buf, offset, encoded_value)

    def __reduce__(self):
        # Unpickling attaches to the same shared memory block by name.
        return partial(self.__class__, name=self.shm.name), ()

    def __len__(self):
        # The length lives in the first 8 bytes of the buffer.
        return struct.unpack_from("q", self.shm.buf, 0)[0]

    def __repr__(self):
        return f'{self.__class__.__name__}({list(self)}, name={self.shm.name!r})'

    @property
    def format(self):
        "The struct packing format used by all currently stored values."
        return "".join(
            self._get_packing_format(i) for i in range(self._list_len)
        )

    @property
    def _format_size_metainfo(self):
        "The struct packing format used for metainfo on storage sizes."
        return f"{self._list_len}q"

    @property
    def _format_packing_metainfo(self):
        "The struct packing format used for the values' packing formats."
        return "8s" * self._list_len

    @property
    def _format_back_transform_codes(self):
        "The struct packing format used for the values' back transforms."
        return "b" * self._list_len

    @property
    def _offset_data_start(self):
        # Data begins after the length field plus one "q" per item.
        return (self._list_len + 1) * 8  # 8 bytes per "q"

    @property
    def _offset_packing_formats(self):
        # Packing formats follow the variable-width data section.
        return self._offset_data_start + sum(self._allocated_bytes)

    @property
    def _offset_back_transform_codes(self):
        # Transform codes follow the 8-bytes-per-item format section.
        return self._offset_packing_formats + self._list_len * 8

    def count(self, value):
        "L.count(value) -> integer -- return number of occurrences of value."

        return sum(value == entry for entry in self)

    def index(self, value):
        """L.index(value) -> integer -- return first index of value.
        Raises ValueError if the value is not present."""

        for position, entry in enumerate(self):
            if value == entry:
                return position
        else:
            raise ValueError(f"{value!r} not in this container")
PK{��\���}j-j-heap.pynu�[���#
# Module which supports allocation of memory from an mmap
#
# multiprocessing/heap.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import bisect
from collections import defaultdict
import mmap
import os
import sys
import tempfile
import threading

from .context import reduction, assert_spawning
from . import util

__all__ = ['BufferWrapper']

#
# Inheritable class which wraps an mmap, and from which blocks can be allocated
#

if sys.platform == 'win32':

    import _winapi

    class Arena(object):
        """
        A shared memory area backed by anonymous memory (Windows).
        """

        # Process-local sequence used to build distinct tagnames.
        _rand = tempfile._RandomNameSequence()

        def __init__(self, size):
            self.size = size
            # Try up to 100 candidate tagnames; a nonzero GetLastError()
            # after mmap() means the tagname already existed.
            for i in range(100):
                name = 'pym-%d-%s' % (os.getpid(), next(self._rand))
                buf = mmap.mmap(-1, size, tagname=name)
                if _winapi.GetLastError() == 0:
                    break
                # We have reopened a preexisting mmap.
                buf.close()
            else:
                raise FileExistsError('Cannot find name for new mmap')
            self.name = name
            self.buffer = buf
            # Pickled state: enough to reopen the same mapping by name.
            self._state = (self.size, self.name)

        def __getstate__(self):
            # Only picklable while spawning a child process.
            assert_spawning(self)
            return self._state

        def __setstate__(self, state):
            self.size, self.name = self._state = state
            # Reopen existing mmap
            self.buffer = mmap.mmap(-1, self.size, tagname=self.name)
            # XXX Temporarily preventing buildbot failures while determining
            # XXX the correct long-term fix. See issue 23060
            #assert _winapi.GetLastError() == _winapi.ERROR_ALREADY_EXISTS

else:

    class Arena(object):
        """
        A shared memory area backed by a temporary file (POSIX).
        """

        # Prefer a memory-backed filesystem when available (Linux tmpfs).
        if sys.platform == 'linux':
            _dir_candidates = ['/dev/shm']
        else:
            _dir_candidates = []

        def __init__(self, size, fd=-1):
            self.size = size
            self.fd = fd
            if fd == -1:
                # Arena is created anew (if fd != -1, it means we're coming
                # from rebuild_arena() below)
                self.fd, name = tempfile.mkstemp(
                     prefix='pym-%d-'%os.getpid(),
                     dir=self._choose_dir(size))
                # Unlink immediately: the file stays accessible through
                # the open fd, so no name is left behind on disk.
                os.unlink(name)
                # Close the fd when this Arena is garbage-collected.
                util.Finalize(self, os.close, (self.fd,))
                os.ftruncate(self.fd, size)
            self.buffer = mmap.mmap(self.fd, self.size)

        def _choose_dir(self, size):
            # Choose a non-storage backed directory if possible,
            # to improve performance
            for d in self._dir_candidates:
                st = os.statvfs(d)
                if st.f_bavail * st.f_frsize >= size:  # enough free space?
                    return d
            return util.get_temp_dir()

    def reduce_arena(a):
        """Pickle an Arena by duplicating its file descriptor."""
        if a.fd == -1:
            raise ValueError('Arena is unpicklable because '
                             'forking was enabled when it was created')
        return rebuild_arena, (a.size, reduction.DupFd(a.fd))

    def rebuild_arena(size, dupfd):
        """Recreate an Arena in the receiving process from a duplicated fd."""
        return Arena(size, dupfd.detach())

    # Make Arena instances picklable via the fd-duplication path above.
    reduction.register(Arena, reduce_arena)

#
# Class allowing allocation of chunks of memory from arenas
#

class Heap(object):
    """Allocator that carves blocks out of large mmap-backed Arenas.

    Free blocks are indexed by size and by their start/stop offsets so
    that freed neighbours can be merged and reused.  A block is the
    tuple `(Arena, start, stop)`.
    """

    # Minimum malloc() alignment
    _alignment = 8

    _DISCARD_FREE_SPACE_LARGER_THAN = 4 * 1024 ** 2  # 4 MB
    _DOUBLE_ARENA_SIZE_UNTIL = 4 * 1024 ** 2

    def __init__(self, size=mmap.PAGESIZE):
        # Pid of the process that owns this heap's state (see malloc()).
        self._lastpid = os.getpid()
        self._lock = threading.Lock()
        # Current arena allocation size
        self._size = size
        # A sorted list of available block sizes in arenas
        self._lengths = []

        # Free block management:
        # - map each block size to a list of `(Arena, start, stop)` blocks
        self._len_to_seq = {}
        # - map `(Arena, start)` tuple to the `(Arena, start, stop)` block
        #   starting at that offset
        self._start_to_block = {}
        # - map `(Arena, stop)` tuple to the `(Arena, start, stop)` block
        #   ending at that offset
        self._stop_to_block = {}

        # Map arenas to their `(Arena, start, stop)` blocks in use
        self._allocated_blocks = defaultdict(set)
        self._arenas = []

        # List of pending blocks to free - see comment in free() below
        self._pending_free_blocks = []

        # Statistics
        self._n_mallocs = 0
        self._n_frees = 0

    @staticmethod
    def _roundup(n, alignment):
        """Round *n* up to the next multiple of *alignment*."""
        # alignment must be a power of 2
        mask = alignment - 1
        return (n + mask) & ~mask

    def _new_arena(self, size):
        # Create a new arena with at least the given *size*
        length = self._roundup(max(self._size, size), mmap.PAGESIZE)
        # We carve larger and larger arenas, for efficiency, until we
        # reach a large-ish size (roughly L3 cache-sized)
        if self._size < self._DOUBLE_ARENA_SIZE_UNTIL:
            self._size *= 2
        util.info('allocating a new mmap of length %d', length)
        arena = Arena(length)
        self._arenas.append(arena)
        return (arena, 0, length)

    def _discard_arena(self, arena):
        # Possibly delete the given (unused) arena
        length = arena.size
        # Reusing an existing arena is faster than creating a new one, so
        # we only reclaim space if it's large enough.
        if length < self._DISCARD_FREE_SPACE_LARGER_THAN:
            return
        blocks = self._allocated_blocks.pop(arena)
        assert not blocks
        # Remove the arena's single whole-length free block from all
        # three indexes before dropping the arena itself.
        del self._start_to_block[(arena, 0)]
        del self._stop_to_block[(arena, length)]
        self._arenas.remove(arena)
        seq = self._len_to_seq[length]
        seq.remove((arena, 0, length))
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)

    def _malloc(self, size):
        # returns a large enough block -- it might be much larger
        i = bisect.bisect_left(self._lengths, size)
        if i == len(self._lengths):
            # No free block is big enough; carve a fresh arena.
            return self._new_arena(size)
        else:
            length = self._lengths[i]
            seq = self._len_to_seq[length]
            block = seq.pop()
            if not seq:
                del self._len_to_seq[length], self._lengths[i]

        # The block is no longer free: drop it from the offset indexes.
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]
        return block

    def _add_free_block(self, block):
        # make block available and try to merge with its neighbours in the arena
        (arena, start, stop) = block

        try:
            prev_block = self._stop_to_block[(arena, start)]
        except KeyError:
            pass
        else:
            # A free block ends exactly where ours starts: merge left.
            start, _ = self._absorb(prev_block)

        try:
            next_block = self._start_to_block[(arena, stop)]
        except KeyError:
            pass
        else:
            # A free block starts exactly where ours ends: merge right.
            _, stop = self._absorb(next_block)

        block = (arena, start, stop)
        length = stop - start

        try:
            self._len_to_seq[length].append(block)
        except KeyError:
            self._len_to_seq[length] = [block]
            bisect.insort(self._lengths, length)

        self._start_to_block[(arena, start)] = block
        self._stop_to_block[(arena, stop)] = block

    def _absorb(self, block):
        # deregister this block so it can be merged with a neighbour
        (arena, start, stop) = block
        del self._start_to_block[(arena, start)]
        del self._stop_to_block[(arena, stop)]

        length = stop - start
        seq = self._len_to_seq[length]
        seq.remove(block)
        if not seq:
            del self._len_to_seq[length]
            self._lengths.remove(length)

        return start, stop

    def _remove_allocated_block(self, block):
        """Forget that *block* is in use; discard its arena if now empty."""
        arena, start, stop = block
        blocks = self._allocated_blocks[arena]
        blocks.remove((start, stop))
        if not blocks:
            # Arena is entirely free, discard it from this process
            self._discard_arena(arena)

    def _free_pending_blocks(self):
        # Free all the blocks in the pending list - called with the lock held.
        while True:
            try:
                block = self._pending_free_blocks.pop()
            except IndexError:
                break
            self._add_free_block(block)
            self._remove_allocated_block(block)

    def free(self, block):
        # free a block returned by malloc()
        # Since free() can be called asynchronously by the GC, it could happen
        # that it's called while self._lock is held: in that case,
        # self._lock.acquire() would deadlock (issue #12352). To avoid that, a
        # trylock is used instead, and if the lock can't be acquired
        # immediately, the block is added to a list of blocks to be freed
        # synchronously sometimes later from malloc() or free(), by calling
        # _free_pending_blocks() (appending and retrieving from a list is not
        # strictly thread-safe but under CPython it's atomic thanks to the GIL).
        if os.getpid() != self._lastpid:
            raise ValueError(
                "My pid ({0:n}) is not last pid {1:n}".format(
                    os.getpid(),self._lastpid))
        if not self._lock.acquire(False):
            # can't acquire the lock right now, add the block to the list of
            # pending blocks to free
            self._pending_free_blocks.append(block)
        else:
            # we hold the lock
            try:
                self._n_frees += 1
                self._free_pending_blocks()
                self._add_free_block(block)
                self._remove_allocated_block(block)
            finally:
                self._lock.release()

    def malloc(self, size):
        # return a block of right size (possibly rounded up)
        if size < 0:
            raise ValueError("Size {0:n} out of range".format(size))
        if sys.maxsize <= size:
            raise OverflowError("Size {0:n} too large".format(size))
        if os.getpid() != self._lastpid:
            self.__init__()                     # reinitialize after fork
        with self._lock:
            self._n_mallocs += 1
            # allow pending blocks to be marked available
            self._free_pending_blocks()
            size = self._roundup(max(size, 1), self._alignment)
            (arena, start, stop) = self._malloc(size)
            real_stop = start + size
            if real_stop < stop:
                # if the returned block is larger than necessary, mark
                # the remainder available
                self._add_free_block((arena, real_stop, stop))
            self._allocated_blocks[arena].add((start, real_stop))
            return (arena, start, real_stop)

#
# Class wrapping a block allocated out of a Heap -- can be inherited by child process
#

class BufferWrapper(object):
    """Wrapper for a block allocated out of the shared module-level Heap.

    Instances may be inherited by child processes; the underlying block
    is released back to the heap when the wrapper is finalized.
    """

    # Single heap shared by every BufferWrapper in this process tree.
    _heap = Heap()

    def __init__(self, size):
        if size < 0:
            raise ValueError("Size {0:n} out of range".format(size))
        if sys.maxsize <= size:
            raise OverflowError("Size {0:n} too large".format(size))
        block = BufferWrapper._heap.malloc(size)
        # Arrange for the block to be freed when this wrapper is collected.
        util.Finalize(self, BufferWrapper._heap.free, args=(block,))
        self._state = (block, size)

    def create_memoryview(self):
        """Return a memoryview exposing exactly the requested bytes."""
        block, nbytes = self._state
        arena, start, _stop = block
        return memoryview(arena.buffer)[start:start + nbytes]
PK{��\�0�"�"(__pycache__/process.cpython-38.opt-2.pycnu�[���U

e5d�.�@s8ddddgZddlZddlZddlZddlZddlZddlmZzej�	e�
��ZWnek
rldZYnXdd�Z
dd�Zd	d�Zd
d�ZGdd�de�ZGd
d�de�ZGdd�de�ZGdd�de�Zdae�ae�d�ae�a[iZeej� ��D]0\Z!Z"e!dd�dkr�de!kr�de!��ee"<q�e�Z#dS)�BaseProcess�current_process�active_children�parent_process�N)�WeakSetcCstS�N)�_current_process�r	r	�//usr/lib64/python3.8/multiprocessing/process.pyr%scCst�tt�Sr)�_cleanup�list�	_childrenr	r	r	r
r+scCstSr)�_parent_processr	r	r	r
r3scCs*tt�D]}|j��dk	rt�|�qdSr)rr
�_popen�poll�discard)�pr	r	r
r=src@s�eZdZdd�Zddddifdd�dd�Zdd	�Zd
d�Zdd
�Zdd�Zdd�Z	d+dd�Z
dd�Zdd�Ze
dd��Zejdd��Ze
dd��Zejdd��Ze
dd��Zejd d��Ze
d!d"��Ze
d#d$��ZeZe
d%d&��Zd'd(�Zd,d)d*�ZdS)-rcCst�dSr)�NotImplementedError��selfr	r	r
�_PopenMszBaseProcess._PopenNr	)�daemoncCs�tt�}tj|f|_tj��|_t��|_tj	|_
d|_d|_||_
t|�|_t|�|_|p�t|�jdd�dd�|jD��|_|dk	r�||_t�|�dS)NF�-�:css|]}t|�VqdSr)�str)�.0�ir	r	r
�	<genexpr>^sz'BaseProcess.__init__.<locals>.<genexpr>)�next�_process_counterr�	_identity�_config�copy�os�getpid�_parent_pid�name�_parent_namer�_closed�_target�tuple�_args�dict�_kwargs�type�__name__�join�_namer�	_dangling�add)r�group�targetr&�args�kwargsr�countr	r	r
�__init__Ps 


�zBaseProcess.__init__cCs|jrtd��dS)Nzprocess object is closed)r(�
ValueErrorrr	r	r
�
_check_closedcszBaseProcess._check_closedcCs|jr|j|j|j�dSr)r)r+r-rr	r	r
�rungszBaseProcess.runcCs>|��t�|�|�|_|jj|_|`|`|`t	�
|�dSr)r;rrr�sentinel�	_sentinelr)r+r-r
r3rr	r	r
�startns
zBaseProcess.startcCs|��|j��dSr)r;r�	terminaterr	r	r
r@�szBaseProcess.terminatecCs|��|j��dSr)r;r�killrr	r	r
rA�szBaseProcess.killcCs*|��|j�|�}|dk	r&t�|�dSr)r;r�waitr
r)r�timeout�resr	r	r
r0�szBaseProcess.joincCsJ|��|tkrdS|jdkr"dS|j��}|dkr8dSt�|�dSdS)NTF)r;rrrr
r)r�
returncoder	r	r
�is_alive�s


zBaseProcess.is_alivecCsH|jdk	r>|j��dkr td��|j��d|_|`t�|�d|_dS)Nz^Cannot close a process while it is still running. You should first call join() or terminate().T)rrr:�closer>r
rr(rr	r	r
rG�s


zBaseProcess.closecCs|jSr�r1rr	r	r
r&�szBaseProcess.namecCs
||_dSrrH)rr&r	r	r
r&�scCs|j�dd�S)NrF)r!�getrr	r	r
r�szBaseProcess.daemoncCs||jd<dS)Nr�r!)rZdaemonicr	r	r
r�scCs
|jdS�N�authkeyrJrr	r	r
rL�szBaseProcess.authkeycCst|�|jd<dSrK)�AuthenticationStringr!)rrLr	r	r
rL�scCs"|��|jdkr|jS|j��Sr)r;rrrr	r	r
�exitcode�s
zBaseProcess.exitcodecCs*|��|tkrt��S|jo$|jjSdSr)r;rr#r$r�pidrr	r	r
�ident�szBaseProcess.identcCs4|��z|jWStk
r.td�d�YnXdS)Nzprocess not started)r;r>�AttributeErrorr:rr	r	r
r=�s
zBaseProcess.sentinelcCs�d}|tkrd}nL|jrd}n@|jt��kr2d}n,|jdkrBd}n|j��}|dk	rZd}nd}t|�jd|j	g}|jdk	r�|�
d|jj�|�
d|j�|�
|�|dk	r�t�
||�}|�
d	|�|jr�|�
d
�dd�|�S)
NZstarted�closed�unknown�initialZstoppedzname=%rzpid=%sz	parent=%szexitcode=%srz<%s>� )rr(r%r#r$rrr.r/r1�appendrO�_exitcode_to_namerIrr0)rrNZstatus�infor	r	r
�__repr__s0




zBaseProcess.__repr__c
Csvddlm}m}�z>z�|jdk	r,|�|j�t	�
d�at�a
|��t}|at|j|j|�atjrnt����z|j��|��W5~X|�d�z|��d}W5|��XWn�tk
�r}zJ|js�d}n:t|jdt�r�|jd}nt j!�"t#|jd�d�d}W5d}~XYn2d}ddl$}t j!�"d|j%�|�&�YnXW5t��|�d|�|��X|S)N�)�util�contextz process exiting with exitcode %dz child process calling self.run()r�
zProcess %s:
)'�r[r\�	threadingZ	_shutdownrXZ_flush_std_streamsZ
_start_methodZ_force_start_method�	itertoolsr8r�setr
Z_close_stdinr�_ParentProcessr'r%rZ_HAVE_THREAD_NATIVE_IDZmain_threadZ_set_native_idZ_finalizer_registry�clearZ_run_after_forkersZ_exit_functionr<�
SystemExitr6�
isinstance�int�sys�stderr�writer�	tracebackr&�	print_exc)rZparent_sentinelr[r\rNZold_process�erjr	r	r
�
_bootstrap"sR

�


zBaseProcess._bootstrap)N)N)r/�
__module__�__qualname__rr9r;r<r?r@rAr0rFrG�propertyr&�setterrrLrNrPrOr=rYrmr	r	r	r
rGsB�







	


c@seZdZdd�ZdS)rMcCs,ddlm}|�dkrtd��tt|�ffS)NrZ)�get_spawning_popenzJPickling an AuthenticationString object is disallowed for security reasons)r\rr�	TypeErrorrM�bytes)rrrr	r	r
�
__reduce__Xs
�zAuthenticationString.__reduce__N)r/rnrorur	r	r	r
rMWsrMc@s6eZdZdd�Zdd�Zedd��Zd
dd	�ZeZdS)rbcCs4d|_||_||_d|_d|_d|_||_i|_dS)Nr	F)r r1�_pidr%rr(r>r!)rr&rOr=r	r	r
r9hsz_ParentProcess.__init__cCsddlm}||jgdd�S�Nr)rB)rC�Zmultiprocessing.connectionrBr>)rrBr	r	r
rFrsz_ParentProcess.is_alivecCs|jSr)rvrr	r	r
rPvsz_ParentProcess.identNcCs ddlm}||jg|d�dSrwrx)rrCrBr	r	r
r0zsz_ParentProcess.join)N)	r/rnror9rFrprPr0rOr	r	r	r
rbfs


rbc@seZdZdd�Zdd�ZdS)�_MainProcesscCs8d|_d|_d|_d|_d|_tt�d��dd�|_dS)Nr	ZMainProcessF� z/mp)rLZ	semprefix)	r r1r%rr(rMr#�urandomr!rr	r	r
r9�s�z_MainProcess.__init__cCsdSrr	rr	r	r
rG�sz_MainProcess.closeN)r/rnror9rGr	r	r	r
ry�sryrZ�ZSIG�_r)$�__all__r#rg�signalr`r_Z_weakrefsetr�path�abspath�getcwdZORIGINAL_DIR�OSErrorrrrr�objectrrtrMrbryrrr8rrar
rWr�__dict__�itemsr&Zsignumr2r	r	r	r
�<module>
s@�


!
PK{��\��L��a�a__pycache__/pool.cpython-38.pycnu�[���U

e5d�~�@sdddgZddlZddlZddlZddlZddlZddlZddlZddlZddlm	Z	ddl
mZddl
mZm
Z
ddlmZd	Zd
ZdZdZe��Zd
d�Zdd�ZGdd�de�ZGdd�d�Zdd�ZGdd�de�Zd+dd�Zdd�ZGdd �d e�Z Gd!d�de!�Z"Gd"d#�d#e!�Z#e#Z$Gd$d%�d%e#�Z%Gd&d'�d'e!�Z&Gd(d)�d)e&�Z'Gd*d�de"�Z(dS),�Pool�
ThreadPool�N)�Empty�)�util)�get_context�TimeoutError)�wait�INIT�RUN�CLOSE�	TERMINATEcCstt|��S�N)�list�map��args�r�,/usr/lib64/python3.8/multiprocessing/pool.py�mapstar/srcCstt�|d|d��S)Nrr)r�	itertools�starmaprrrr�starmapstar2src@seZdZdd�Zdd�ZdS)�RemoteTracebackcCs
||_dSr��tb)�selfrrrr�__init__:szRemoteTraceback.__init__cCs|jSrr�rrrr�__str__<szRemoteTraceback.__str__N)�__name__�
__module__�__qualname__rrrrrrr9src@seZdZdd�Zdd�ZdS)�ExceptionWithTracebackcCs0t�t|�||�}d�|�}||_d||_dS)N�z

"""
%s""")�	traceback�format_exception�type�join�excr)rr)rrrrr@s
zExceptionWithTraceback.__init__cCst|j|jffSr)�rebuild_excr)rrrrr�
__reduce__Esz!ExceptionWithTraceback.__reduce__N)r r!r"rr+rrrrr#?sr#cCst|�|_|Sr)r�	__cause__)r)rrrrr*Hs
r*cs0eZdZdZ�fdd�Zdd�Zdd�Z�ZS)�MaybeEncodingErrorzVWraps possible unpickleable errors, so they can be
    safely sent through the socket.cs.t|�|_t|�|_tt|��|j|j�dSr)�reprr)�value�superr-r)rr)r/��	__class__rrrTs

zMaybeEncodingError.__init__cCsd|j|jfS)Nz(Error sending result: '%s'. Reason: '%s')r/r)rrrrrYs�zMaybeEncodingError.__str__cCsd|jj|fS)Nz<%s: %s>)r2r rrrr�__repr__]szMaybeEncodingError.__repr__)r r!r"�__doc__rrr3�
__classcell__rrr1rr-Psr-rFc
Cs�|dk	r(t|t�r|dks(td�|���|j}|j}t|d�rR|j��|j	��|dk	rb||�d}|dks~|�r�||k�r�z
|�}	Wn(t
tfk
r�t�
d�Y�q�YnX|	dkr�t�
d��q�|	\}
}}}
}zd||
|�f}WnHtk
�r0}z(|�r|tk	�rt||j�}d|f}W5d}~XYnXz||
||f�WnRtk
�r�}z2t||d�}t�
d	|�||
|d|ff�W5d}~XYnXd}	}
}}}
}|d7}qft�
d
|�dS)NrzMaxtasks {!r} is not valid�_writerrz)worker got EOFError or OSError -- exitingzworker got sentinel -- exitingTFz0Possible encoding error while sending result: %szworker exiting after %d tasks)�
isinstance�int�AssertionError�format�put�get�hasattrr6�close�_reader�EOFError�OSErrorr�debug�	Exception�_helper_reraises_exceptionr#�
__traceback__r-)�inqueue�outqueue�initializer�initargsZmaxtasks�wrap_exceptionr;r<Z	completed�task�job�i�funcr�kwds�result�e�wrappedrrr�workerasN�





�$
rScCs|�dS)z@Pickle-able helper function for use by _guarded_task_generation.Nr)ZexrrrrD�srDcs2eZdZdZdd��fdd�
Z�fdd�Z�ZS)�
_PoolCachez�
    Class that implements a cache for the Pool class that will notify
    the pool management threads every time the cache is emptied. The
    notification is done by the use of a queue that is provided when
    instantiating the cache.
    N��notifiercs||_t�j||�dSr)rVr0r)rrVrrOr1rrr�sz_PoolCache.__init__cs t��|�|s|j�d�dSr)r0�__delitem__rVr;)r�itemr1rrrW�sz_PoolCache.__delitem__)r r!r"r4rrWr5rrr1rrT�srTc@s�eZdZdZdZedd��ZdLdd�Zej	e
fd	d
�Zdd�Zd
d�Z
edd��Zedd��Zdd�Zedd��Zedd��Zdd�Zdd�Zdifdd�ZdMdd �ZdNd!d"�ZdOd#d$�Zd%d&�ZdPd(d)�ZdQd*d+�Zdiddfd,d-�ZdRd.d/�ZdSd0d1�ZedTd2d3��Ze d4d5��Z!ed6d7��Z"ed8d9��Z#ed:d;��Z$d<d=�Z%d>d?�Z&d@dA�Z'dBdC�Z(edDdE��Z)e dFdG��Z*dHdI�Z+dJdK�Z,dS)UrzS
    Class which supports an async version of applying functions to arguments.
    TcOs|j||�Sr��Process)�ctxrrOrrrrZ�szPool.ProcessNrcCsg|_t|_|pt�|_|��t��|_|j��|_	t
|j	d�|_||_||_
||_|dkrjt��phd}|dkrztd��|dk	r�t|�s�td��||_z|��WnHtk
r�|jD]}|jdkr�|��q�|jD]}|��q؂YnX|��}tjtj|j|j|j|j|j|j|j |j!|j
|j|j|j"||j	fd�|_#d|j#_$t%|j#_|j#�&�tjtj'|j|j(|j!|j|jfd�|_)d|j)_$t%|j)_|j)�&�tjtj*|j!|j+|jfd�|_,d|j,_$t%|j,_|j,�&�t-j.||j/|j|j |j!|j|j	|j#|j)|j,|jf	dd�|_0t%|_dS)	NrUrz&Number of processes must be at least 1zinitializer must be a callable��targetrT�)rZexitpriority)1�_poolr
�_stater�_ctx�
_setup_queues�queue�SimpleQueue�
_taskqueue�_change_notifierrT�_cache�_maxtasksperchild�_initializer�	_initargs�os�	cpu_count�
ValueError�callable�	TypeError�
_processes�_repopulate_poolrC�exitcode�	terminater(�_get_sentinels�	threadingZThreadr�_handle_workersrZ�_inqueue�	_outqueue�_wrap_exception�_worker_handler�daemonr�start�
_handle_tasks�
_quick_put�
_task_handler�_handle_results�
_quick_get�_result_handlerrZFinalize�_terminate_pool�
_terminate)r�	processesrHrI�maxtasksperchild�context�p�	sentinelsrrrr�s�





��
��
�
��z
Pool.__init__cCs>|j|kr:|d|��t|d�t|dd�dk	r:|j�d�dS)Nz&unclosed running multiprocessing pool )�sourcerf)r`�ResourceWarning�getattrrfr;)rZ_warnrrrr�__del__s

�zPool.__del__c	Cs0|j}d|j�d|j�d|j�dt|j��d�	S)N�<�.z state=z pool_size=�>)r2r!r"r`�lenr_)r�clsrrrr3sz
Pool.__repr__cCs|jjg}|jjg}||�Sr)rxr?rf)rZtask_queue_sentinelsZself_notifier_sentinelsrrrrts

zPool._get_sentinelscCsdd�|D�S)NcSsg|]}t|d�r|j�qS)�sentinel)r=r�)�.0rSrrr�
<listcomp>s
�z.Pool._get_worker_sentinels.<locals>.<listcomp>r�Zworkersrrr�_get_worker_sentinelss�zPool._get_worker_sentinelscCsPd}ttt|���D]6}||}|jdk	rt�d|�|��d}||=q|S)z�Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        FN�cleaning up worker %dT)�reversed�ranger�rrrrBr()�poolZcleanedrMrSrrr�_join_exited_workerss
zPool._join_exited_workerscCs0|�|j|j|j|j|j|j|j|j|j	|j
�
Sr)�_repopulate_pool_staticrarZrpr_rwrxrirjrhryrrrrrq.s�zPool._repopulate_poolc

Csft|t|��D]P}
||t||||||	fd�}|j�dd�|_d|_|��|�|�t�	d�qdS)z�Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        r\rZZ
PoolWorkerTzadded workerN)
r�r�rS�name�replacer{r|�appendrrB)r[rZr�r�rFrGrHrIr�rJrM�wrrrr�7s��
zPool._repopulate_pool_staticc

Cs*t�|�r&t�||||||||||	�
dS)zEClean up any exited workers and start replacements for them.
        N)rr�r�)
r[rZr�r�rFrGrHrIr�rJrrr�_maintain_poolJs
�zPool._maintain_poolcCs4|j��|_|j��|_|jjj|_|jjj|_	dSr)
rardrwrxr6�sendr~r?�recvr�rrrrrbVszPool._setup_queuescCs|jtkrtd��dS)NzPool not running)r`rrmrrrr�_check_running\s
zPool._check_runningcCs|�|||���S)zT
        Equivalent of `func(*args, **kwds)`.
        Pool must be running.
        )�apply_asyncr<)rrNrrOrrr�apply`sz
Pool.applycCs|�||t|���S)zx
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        )�
_map_asyncrr<�rrN�iterable�	chunksizerrrrgszPool.mapcCs|�||t|���S)z�
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        )r�rr<r�rrrrnszPool.starmapcCs|�||t|||�S)z=
        Asynchronous version of `starmap()` method.
        )r�r�rrNr�r��callback�error_callbackrrr�
starmap_asyncvs�zPool.starmap_asyncc
csjz,d}t|�D]\}}||||fifVqWn8tk
rd}z||dt|fifVW5d}~XYnXdS)z�Provides a generator of tasks for imap and imap_unordered with
        appropriate handling for iterables which throw exceptions during
        iteration.���rN)�	enumeraterCrD)rZ
result_jobrNr�rM�xrQrrr�_guarded_task_generation~szPool._guarded_task_generationrcCs�|��|dkr:t|�}|j�|�|j||�|jf�|S|dkrPtd�|���t	�
|||�}t|�}|j�|�|jt|�|jf�dd�|D�SdS)zP
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        rzChunksize must be 1+, not {0:n}css|]}|D]
}|Vq
qdSrr�r��chunkrXrrr�	<genexpr>�szPool.imap.<locals>.<genexpr>N)r��IMapIteratorrer;r��_job�_set_lengthrmr:r�
_get_tasksr�rrNr�r�rP�task_batchesrrr�imap�s4�������z	Pool.imapcCs�|��|dkr:t|�}|j�|�|j||�|jf�|S|dkrPtd�|���t	�
|||�}t|�}|j�|�|jt|�|jf�dd�|D�SdS)zL
        Like `imap()` method but ordering of results is arbitrary.
        rzChunksize must be 1+, not {0!r}css|]}|D]
}|Vq
qdSrrr�rrrr��sz&Pool.imap_unordered.<locals>.<genexpr>N)r��IMapUnorderedIteratorrer;r�r�r�rmr:rr�rr�rrr�imap_unordered�s0������zPool.imap_unorderedcCs6|��t|||�}|j�|jd|||fgdf�|S)z;
        Asynchronous version of `apply()` method.
        rN)r��ApplyResultrer;r�)rrNrrOr�r�rPrrrr��szPool.apply_asynccCs|�||t|||�S)z9
        Asynchronous version of `map()` method.
        )r�rr�rrr�	map_async�s�zPool.map_asyncc
Cs�|��t|d�st|�}|dkrJtt|�t|j�d�\}}|rJ|d7}t|�dkrZd}t�|||�}t||t|�||d�}	|j	�
|�|	j||�df�|	S)zY
        Helper function to implement map, starmap and their async counterparts.
        �__len__N�rr�r�)
r�r=r�divmodr�r_rr��	MapResultrer;r�r�)
rrNr�Zmapperr�r�r�Zextrar�rPrrrr��s,
����zPool._map_asynccCs"t||d�|��s|��qdS)N)�timeout)r	�emptyr<)r��change_notifierr�rrr�_wait_for_updates�szPool._wait_for_updatescCspt��}|jtks |rX|jtkrX|�|||||||	|
||�
|�|�|
�}|�||�q|�d�t	�
d�dS)Nzworker handler exiting)ru�current_threadr`rr
r�r�r�r;rrB)r��cache�	taskqueuer[rZr�r�rFrGrHrIr�rJr�r��threadZcurrent_sentinelsrrrrv�s�
zPool._handle_workersc

Cspt��}t|jd�D]�\}}d}z�|D]�}|jtkrBt�d�q�z||�Wq&tk
r�}
zB|dd�\}	}z||	�	|d|
f�Wnt
k
r�YnXW5d}
~
XYq&Xq&|r�t�d�|r�|dnd}||d�W�qW�
�q
W5d}}}	Xqt�d�z6t�d�|�d�t�d	�|D]}|d��q.Wn tk
�r`t�d
�YnXt�d�dS)Nz'task handler found thread._state != RUN�Fzdoing set_length()rr�ztask handler got sentinelz/task handler sending sentinel to result handlerz(task handler sending sentinel to workersz/task handler got OSError when sending sentinelsztask handler exiting)
rur��iterr<r`rrrBrC�_set�KeyErrorr;rA)
r�r;rGr�r�r�ZtaskseqZ
set_lengthrKrLrQ�idxr�rrrr}sB






zPool._handle_tasksc	Cs�t��}z
|�}Wn$ttfk
r6t�d�YdSX|jtkr`|jtksTt	d��t�d�q�|dkrtt�d�q�|\}}}z||�
||�Wntk
r�YnXd}}}q|�rR|jtk�rRz
|�}Wn$ttfk
r�t�d�YdSX|dk�rt�d�q�|\}}}z||�
||�Wntk
�rBYnXd}}}q�t|d��r�t�d�z,t
d�D]}|j���s��q�|��qrWnttfk
�r�YnXt�d	t|�|j�dS)
Nz.result handler got EOFError/OSError -- exitingzThread not in TERMINATEz,result handler found thread._state=TERMINATEzresult handler got sentinelz&result handler ignoring extra sentinelr?z"ensuring that outqueue is not full�
z7result handler exiting: len(cache)=%s, thread._state=%s)rur�rAr@rrBr`rr
r9r�r�r=r�r?�pollr�)rGr<r�r�rKrLrM�objrrrr�:s^











�zPool._handle_resultsccs0t|�}tt�||��}|s dS||fVqdSr)r��tupler�islice)rN�it�sizer�rrrr�vs
zPool._get_taskscCstd��dS)Nz:pool objects cannot be passed between processes or pickled)�NotImplementedErrorrrrrr+s�zPool.__reduce__cCs2t�d�|jtkr.t|_t|j_|j�d�dS)Nzclosing pool)rrBr`rrrzrfr;rrrrr>�s


z
Pool.closecCst�d�t|_|��dS)Nzterminating pool)rrBr
r`r�rrrrrs�s
zPool.terminatecCsjt�d�|jtkrtd��n|jttfkr4td��|j��|j	��|j
��|jD]}|��qXdS)Nzjoining poolzPool is still runningzIn unknown state)rrBr`rrmrr
rzr(rr�r_)rr�rrrr(�s






z	Pool.joincCs@t�d�|j��|��r<|j��r<|j��t�	d�qdS)Nz7removing tasks from inqueue until task handler finishedr)
rrBZ_rlock�acquire�is_aliver?r�r��time�sleep)rF�task_handlerr�rrr�_help_stuff_finish�s



zPool._help_stuff_finishc
CsXt�d�t|_|�d�t|_t�d�|�||t|��|��sXt|	�dkrXtd��t|_|�d�|�d�t�d�t	�
�|k	r�|��|r�t|dd�r�t�d�|D]}
|
j
dkr�|
��q�t�d�t	�
�|k	r�|��t�d	�t	�
�|k	�r|��|�rTt|dd��rTt�d
�|D](}
|
���r*t�d|
j�|
���q*dS)Nzfinalizing poolz&helping task handler/workers to finishrz.Cannot have cache with result_hander not alivezjoining worker handlerrszterminating workerszjoining task handlerzjoining result handlerzjoining pool workersr�)rrBr
r`r;r�r�r�r9rur�r(r=rrrs�pid)r�r�rFrGr�r�Zworker_handlerr�Zresult_handlerr�r�rrrr��sB


�









zPool._terminate_poolcCs|��|Sr)r�rrrr�	__enter__�szPool.__enter__cCs|��dSr)rs)r�exc_typeZexc_valZexc_tbrrr�__exit__�sz
Pool.__exit__)NNrNN)N)N)NNN)r)r)NNN)NNN)N)-r r!r"r4ry�staticmethodrZr�warnings�warnrr�r3rtr�r�rqr�r�rbr�r�rrr�r�r�r�r�r�r�r��classmethodrvr}r�r�r+r>rsr(r�r�r�r�rrrrr�sx
�
P

	



�


�

�
�


-
;


5c@s@eZdZdd�Zdd�Zdd�Zddd	�Zdd
d�Zdd
�ZdS)r�cCs>||_t��|_tt�|_|j|_||_||_	||j|j<dSr)
r_ruZEvent�_event�next�job_counterr�rg�	_callback�_error_callback)rr�r�r�rrrr�s

zApplyResult.__init__cCs
|j��Sr)r�Zis_setrrrr�ready�szApplyResult.readycCs|��std�|���|jS)Nz{0!r} not ready)r�rmr:�_successrrrr�
successful�szApplyResult.successfulNcCs|j�|�dSr)r�r	�rr�rrrr	�szApplyResult.waitcCs,|�|�|��st�|jr"|jS|j�dSr)r	r�rr��_valuer�rrrr<�s
zApplyResult.getcCsZ|\|_|_|jr$|jr$|�|j�|jr<|js<|�|j�|j��|j|j=d|_dSr)	r�r�r�r�r��setrgr�r_�rrMr�rrrr�s

zApplyResult._set)N)N)	r r!r"rr�r�r	r<r�rrrrr��s	

	r�c@seZdZdd�Zdd�ZdS)r�cCshtj||||d�d|_dg||_||_|dkrNd|_|j��|j|j	=n||t
||�|_dS)Nr�Tr)r�rr�r��
_chunksize�_number_leftr�r�rgr��bool)rr�r��lengthr�r�rrrrs
�
zMapResult.__init__cCs�|jd8_|\}}|rv|jrv||j||j|d|j�<|jdkr�|jrZ|�|j�|j|j=|j��d|_	nL|s�|jr�d|_||_|jdkr�|j
r�|�
|j�|j|j=|j��d|_	dS)NrrF)r�r�r�r�r�rgr�r�r�r_r�)rrMZsuccess_result�successrPrrrr�$s&







zMapResult._setN)r r!r"rr�rrrrr�s
r�c@s:eZdZdd�Zdd�Zddd�ZeZdd	�Zd
d�ZdS)
r�cCsT||_t�t���|_tt�|_|j|_t	�
�|_d|_d|_
i|_||j|j<dS)Nr)r_ruZ	ConditionZLock�_condr�r�r�rg�collections�deque�_items�_index�_length�	_unsorted)rr�rrrrBs

zIMapIterator.__init__cCs|Srrrrrr�__iter__MszIMapIterator.__iter__NcCs�|j��z|j��}Wnztk
r�|j|jkr>d|_td�|j�|�z|j��}Wn2tk
r�|j|jkr�d|_td�t	d�YnXYnXW5QRX|\}}|r�|S|�dSr)
r�r��popleft�
IndexErrorr�rr_�
StopIterationr	r)rr�rXr�r/rrrr�Ps&zIMapIterator.nextc	Cs�|j��|j|krn|j�|�|jd7_|j|jkrb|j�|j�}|j�|�|jd7_q,|j��n
||j|<|j|jkr�|j|j	=d|_
W5QRXdS�Nr)r�r�r�r�r�pop�notifyrrgr�r_r�rrrr�hs


zIMapIterator._setc	CsB|j�2||_|j|jkr4|j��|j|j=d|_W5QRXdSr)r�rr�rrgr�r_)rr�rrrr�ys

zIMapIterator._set_length)N)	r r!r"rrr��__next__r�r�rrrrr�@s
r�c@seZdZdd�ZdS)r�c	CsV|j�F|j�|�|jd7_|j��|j|jkrH|j|j=d|_W5QRXdSr)	r�r�r�r�rrrgr�r_r�rrrr��s

zIMapUnorderedIterator._setN)r r!r"r�rrrrr��sr�c@sVeZdZdZedd��Zddd�Zdd	�Zd
d�Zedd
��Z	edd��Z
dd�ZdS)rFcOsddlm}|||�S)NrrY)ZdummyrZ)r[rrOrZrrrrZ�szThreadPool.ProcessNrcCst�||||�dSr)rr)rr�rHrIrrrr�szThreadPool.__init__cCs,t��|_t��|_|jj|_|jj|_dSr)rcrdrwrxr;r~r<r�rrrrrb�s


zThreadPool._setup_queuescCs
|jjgSr)rfr?rrrrrt�szThreadPool._get_sentinelscCsgSrrr�rrrr��sz ThreadPool._get_worker_sentinelscCsFz|jdd�qWntjk
r(YnXt|�D]}|�d�q2dS)NF)�block)r<rcrr�r;)rFr�r�rMrrrr��szThreadPool._help_stuff_finishcCst�|�dSr)r�r�)rr�r�r�rrrr��szThreadPool._wait_for_updates)NNr)r r!r"ryr�rZrrbrtr�r�r�rrrrr�s




)NrNF))�__all__r�rrkrcrur�r%r�rr$rrrZ
connectionr	r
rrr
�countr�rrrCrr#r*r-rSrD�dictrT�objectrr�ZAsyncResultr�r�r�rrrrr�<module>
sN	�
-=)+EPK{��\j,"'))*__pycache__/reduction.cpython-38.opt-2.pycnu�[���U

e5d(%�@sddlmZddlZddlZddlZddlZddlZddlZddlZddl	m
Z
ddddd	gZejd
kp�e
ed�o�e
ed�o�e
ejd
�ZGdd�dej�ZejZd6dd	�Zejd
k�redddg7ZddlZd7dd�dd�Zdd�Zdd�Zdd�ZGdd�de�ZnHedddg7ZddlZejdkZdd�Zdd�Zd d�Zd!d�Zd"d�Zd#d$�ZGd%d&�d&�Z ee!e �j"�e�d'd(�Z#ee!e$j%�e#�ee!e&j'�e#�d)d*�Z(d+d,�Z)eej*e(�ejd
k�r�d-d.�Z+d/d0�Z,eeje+�nd1d.�Z+d2d0�Z,eeje+�Gd3d4�d4ed5�Z-dS)8�)�ABCMetaN�)�context�send_handle�recv_handle�ForkingPickler�register�dump�win32ZCMSG_LEN�
SCM_RIGHTS�sendmsgcsFeZdZiZejZ�fdd�Zedd��Z	eddd��Z
ejZ�Z
S)	rcs*t�j|�|j��|_|j�|j�dS�N)�super�__init__�_copyreg_dispatch_table�copy�dispatch_table�update�_extra_reducers��self�args��	__class__��1/usr/lib64/python3.8/multiprocessing/reduction.pyr&szForkingPickler.__init__cCs||j|<dSr
)r)�cls�type�reducerrrr+szForkingPickler.registerNcCs t��}|||��|�|��Sr
)�io�BytesIOr	�	getbuffer)r�obj�protocolZbufrrr�dumps0szForkingPickler.dumps)N)�__name__�
__module__�__qualname__r�copyregrrr�classmethodrr$�pickle�loads�
__classcell__rrrrr!s
cCst||��|�dSr
)rr	)r"�filer#rrrr	:s�	DupHandle�	duplicate�steal_handleF)�source_processcCs6t��}|dkr|}|dkr |}t�|||d|tj�S)Nr)�_winapi�GetCurrentProcess�DuplicateHandle�DUPLICATE_SAME_ACCESS)�handleZtarget_processZinheritabler1Zcurrent_processrrrr/Gs�c	CsFt�tjd|�}z$t�||t��ddtjtjB�W�St�|�XdS�NFr)r2�OpenProcess�PROCESS_DUP_HANDLE�CloseHandler4r3r5�DUPLICATE_CLOSE_SOURCE)Z
source_pidr6Zsource_process_handlerrrr0Ss�
�cCst|tj|�}|�|�dSr
)r.r2r5�send)�connr6�destination_pidZdhrrrr_scCs|����Sr
)�recv�detach)r=rrrrdsc@seZdZddd�Zdd�ZdS)r.Nc	Cs\|dkrt��}t�tjd|�}zt�t��|||dd�|_W5t�|�X||_	||_
dSr7)�os�getpidr2r8r9r:r4r3�_handle�_access�_pid)rr6�access�pid�procrrrrjs�
zDupHandle.__init__c	CsZ|jt��kr|jSt�tjd|j�}z"t�||jt�	�|j
dtj�W�St�|�XdS)NF)rErArBrCr2r8r9r:r4r3rDr;)rrHrrrr@ys
��zDupHandle.detach)N)r%r&r'rr@rrrrr.hs
�DupFd�sendfds�recvfds�darwincCsVt�d|�}tt|�dg�}|�|gtjtj|fg�trR|�d�dkrRt	d��dS)N�i�r�Az%did not receive acknowledgement of fd)
�array�bytes�lenr�socket�
SOL_SOCKETr�ACKNOWLEDGEr?�RuntimeError)�sockZfds�msgrrrrJ�s
c	Cst�d�}|j|}|�dt�|��\}}}}|s:|s:t�z�trJ|�d�t|�dkrft	dt|���|d\}}	}
|tj
kr�|	tjkr�t|
�|jdkr�t�|�
|
�t|�d|dkr�td�t|�|d���t|�WSWnttfk
r�YnXt	d��dS)	NrMrrOzreceived %d items of ancdatarrNz Len is {0:n} but msg[0] is {1!r}zInvalid data received)rP�itemsizeZrecvmsgrSZ
CMSG_SPACE�EOFErrorrUr<rRrVrTr�
ValueErrorZ	frombytes�AssertionError�format�list�
IndexError)rW�size�aZ
bytes_sizerXZancdata�flagsZaddrZ
cmsg_levelZ	cmsg_typeZ	cmsg_datarrrrK�s<


�
�
��c	Cs2t�|��tjtj��}t||g�W5QRXdSr
)rS�fromfd�fileno�AF_UNIX�SOCK_STREAMrJ)r=r6r>�srrrr�sc
Cs<t�|��tjtj��}t|d�dW5QR�SQRXdS)Nrr)rSrcrdrerfrK)r=rgrrrr�scCsFt��}|dk	r |�|�|��Str:ddlm}|�|�Std��dS)Nr)�resource_sharerz&SCM_RIGHTS appears not to be available)rZget_spawning_popenrIZduplicate_for_child�HAVE_SEND_HANDLE�rhr[)�fdZ	popen_objrhrrrrI�s
cCs2|jdkrt|j|jjffSt|j|jjffSdSr
)�__self__�getattrr�__func__r%��mrrr�_reduce_method�s
rqc@seZdZdd�ZdS)�_CcCsdSr
r)rrrr�f�sz_C.fN)r%r&r'rsrrrrrr�srrcCst|j|jffSr
)rm�__objclass__r%rorrr�_reduce_method_descriptor�srucCst|j|j|jpiffSr
)�_rebuild_partial�funcr�keywords)�prrr�_reduce_partial�srzcCstj|f|�|�Sr
)�	functools�partial)rwrrxrrrrv�srvcCsddlm}t||�ffS)Nr)�	DupSocket)rhr}�_rebuild_socket)rgr}rrr�_reduce_socket�srcCs|��Sr
)r@)Zdsrrrr~�sr~cCs"t|���}t||j|j|jffSr
)rIrdr~�familyr�proto)rg�dfrrrr�scCs|��}tj||||d�S)N)rd)r@rS)r�r�rr�rkrrrr~�sc@s`eZdZeZeZeZeZeZej	dkr4e
Z
eZeZne
Z
eZeZeZeZeZeZeZdd�ZdS)�AbstractReducerr
cGsNttt�j�t�tttj�t�tttj	�t�tt
jt�tt
j
t�dSr
)rrrrrsrqr^�appendru�int�__add__r{r|rzrSrrrrrrs
zAbstractReducer.__init__N)r%r&r'rrr	rr�sys�platformr0r/r.rJrKrIrqrurvrr~rrrrrr��s$
r�)�	metaclass)N)NF).�abcrr(r{rrAr*rSr�rjr�__all__r��hasattrriZPicklerrrr	r2r/r0rr�objectr.rPrUrJrKrIrqrrrrsrur^r�r�r�rzrvr|rr~r�rrrr�<module>
sj

�
�	
�#
PK{��\;P�vBB-__pycache__/sharedctypes.cpython-38.opt-2.pycnu�[���U

e5d��@sBddlZddlZddlmZddlmZddlmZmZejZ	dddd	d
dgZ
ejejej
ejejejejejejejejejejejd�Zd
d�Zdd�Zdd�Zddd�dd�Zddd�dd	�Zdd
�Zd&dd�Z dd�Z!dd�Z"dd�Z#dZ$iZ%e�&�Z'Gdd�de(�Z)Gd d!�d!e)�Z*Gd"d#�d#e)�Z+Gd$d%�d%e+�Z,dS)'�N�)�heap)�get_context)�	reduction�assert_spawning�RawValue�RawArray�Value�Array�copy�synchronized)�c�u�b�B�h�H�i�I�l�L�q�Q�f�dcCs t�|�}t�|�}t||d�S�N)�ctypes�sizeofrZ
BufferWrapper�
rebuild_ctype)�type_�size�wrapper�r"�4/usr/lib64/python3.8/multiprocessing/sharedctypes.py�
_new_value's

r$cGs<t�||�}t|�}t�t�|�dt�|��|j|�|S�Nr)�typecode_to_type�getr$r�memset�	addressofr�__init__)�typecode_or_type�argsr�objr"r"r#r,s

cCsjt�||�}t|t�rD||}t|�}t�t�|�dt�|��|S|t	|�}t|�}|j
|�|SdSr%)r&r'�
isinstance�intr$rr(r)r�lenr*)r+�size_or_initializerrr-�resultr"r"r#r6s

T)�lock�ctxcGsXt|f|��}|dkr|S|dkr4|p*t�}|��}t|d�sJtd|��t|||d�S�NF)TN�acquirez%r has no method 'acquire')r4)rr�RLock�hasattr�AttributeErrorr)r+r3r4r,r-r"r"r#r	Fs

cCsTt||�}|dkr|S|dkr0|p&t�}|��}t|d�sFtd|��t|||d�Sr5)rrr7r8r9r)r+r1r3r4r-r"r"r#r
Ts


cCstt|��}|t�|�d<|Sr%)r$�typerZpointer)r-Znew_objr"r"r#rbscCs�|pt�}t|tj�r"t|||�St|tj�rR|jtjkrFt|||�St	|||�St
|�}zt|}WnRtk
r�dd�|j
D�}dd�|D�}d|j}t
|tf|�}t|<YnX||||�SdS)NcSsg|]}|d�qS)rr")�.0Zfieldr"r"r#�
<listcomp>vsz synchronized.<locals>.<listcomp>cSsi|]}|t|��qSr")�
make_property)r;�namer"r"r#�
<dictcomp>wsz synchronized.<locals>.<dictcomp>�Synchronized)rr.rZ_SimpleCDatar@r
�_type_�c_char�SynchronizedString�SynchronizedArrayr:�class_cache�KeyErrorZ_fields_�__name__�SynchronizedBase)r-r3r4�clsZscls�namesrZ	classnamer"r"r#rgs 

cCs@t|�t|tj�r(t|j|j|jffStt|�|jdffSdSr)	rr.rr
rrA�_wrapperZ_length_r:)r-r"r"r#�reduce_ctype�srLcCs8|dk	r||}t�|t�|��}|�|�}||_|Sr)�_ForkingPickler�registerrLZcreate_memoryviewZfrom_bufferrK)rr!ZlengthZbufr-r"r"r#r�s
rcCsPz
t|WStk
rJi}tt|fd|�||t|<||YSXdS)N�)�
prop_cacherF�exec�template)r>rr"r"r#r=�s
r=z�
def get%s(self):
    self.acquire()
    try:
        return self._obj.%s
    finally:
        self.release()
def set%s(self, value):
    self.acquire()
    try:
        self._obj.%s = value
    finally:
        self.release()
%s = property(get%s, set%s)
c@sFeZdZddd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	dS)rHNcCsB||_|r||_n|ptdd�}|��|_|jj|_|jj|_dS)NT)Zforce)�_obj�_lockrr7r6�release)�selfr-r3r4r"r"r#r*�s

zSynchronizedBase.__init__cCs
|j��Sr)rT�	__enter__�rVr"r"r#rW�szSynchronizedBase.__enter__cGs|jj|�Sr)rT�__exit__)rVr,r"r"r#rY�szSynchronizedBase.__exit__cCst|�t|j|jffSr)rrrSrTrXr"r"r#�
__reduce__�szSynchronizedBase.__reduce__cCs|jSr�rSrXr"r"r#�get_obj�szSynchronizedBase.get_objcCs|jSr)rTrXr"r"r#�get_lock�szSynchronizedBase.get_lockcCsdt|�j|jfS)Nz<%s wrapper for %s>)r:rGrSrXr"r"r#�__repr__�szSynchronizedBase.__repr__)NN)
rG�
__module__�__qualname__r*rWrYrZr\r]r^r"r"r"r#rH�s

rHc@seZdZed�ZdS)r@�valueN)rGr_r`r=rar"r"r"r#r@�sr@c@s4eZdZdd�Zdd�Zdd�Zdd�Zd	d
�ZdS)rDcCs
t|j�Sr)r0rSrXr"r"r#�__len__�szSynchronizedArray.__len__c
Cs&|�|j|W5QR�SQRXdSrr[)rVrr"r"r#�__getitem__�szSynchronizedArray.__getitem__c	Cs|�||j|<W5QRXdSrr[)rVrrar"r"r#�__setitem__�szSynchronizedArray.__setitem__c
Cs*|�|j||�W5QR�SQRXdSrr[)rV�start�stopr"r"r#�__getslice__�szSynchronizedArray.__getslice__c	Cs"|�||j||�<W5QRXdSrr[)rVrerf�valuesr"r"r#�__setslice__�szSynchronizedArray.__setslice__N)rGr_r`rbrcrdrgrir"r"r"r#rD�s
rDc@seZdZed�Zed�ZdS)rCra�rawN)rGr_r`r=rarjr"r"r"r#rC�srC)NN)-r�weakref�rr�contextrrZForkingPicklerrM�__all__rBZc_wcharZc_byteZc_ubyteZc_shortZc_ushortZc_intZc_uintZc_longZc_ulongZ
c_longlongZc_ulonglongZc_floatZc_doubler&r$rrr	r
rrrLrr=rRrP�WeakKeyDictionaryrE�objectrHr@rDrCr"r"r"r#�<module>
sL�


	 PK{��\���h�*�*,__pycache__/synchronize.cpython-38.opt-1.pycnu�[���U

e5dZ-�@s,ddddddgZddlZddlZddlZddlZddlZdd	lmZdd
lmZddlm	Z	zddlm
Z
mZWnek
r�ed
��YnXe
ed��\ZZej
jZGdd�de�Z
Gdd�de
�ZGdd�de�ZGdd�de
�ZGdd�de
�ZGdd�de�ZGdd�de�ZGdd�dej�ZdS)�Lock�RLock�	Semaphore�BoundedSemaphore�	Condition�Event�N�)�context)�process)�util)�SemLock�
sem_unlinkz�This platform lacks a functioning sem_open implementation, therefore, the required synchronization primitives needed will not function, see issue 3770.�c@s\eZdZe��Zdd�Zedd��Zdd�Z	dd�Z
d	d
�Zdd�Zd
d�Z
edd��ZdS)rc	Cs�|dkrtj��}|��}tjdkp*|dk}td�D]>}z t�||||�	�|�}|_
Wntk
rlYq4Xq|q4td��t�
d|j�|��tjdkr�dd�}	t�||	�|j
jdk	r�dd	lm}
|
|j
jd
�tj|tj|j
jfdd�dS)
N�win32�fork�dzcannot find name for semaphorezcreated semlock with handle %scSs|j��dS�N)�_semlock�_after_fork)�obj�r�3/usr/lib64/python3.8/multiprocessing/synchronize.pyrGsz%SemLock.__init__.<locals>._after_forkr)�register�	semaphorer)Zexitpriority)r	Z_default_contextZget_contextZget_start_method�sys�platform�range�_multiprocessingr�
_make_namer�FileExistsErrorr�debug�handle�
_make_methodsZregister_after_fork�name�resource_trackerrZFinalize�_cleanup)�self�kind�value�maxvalue�ctxr#Z
unlink_now�i�slrrrrr�__init__2s8
�
�zSemLock.__init__cCs"ddlm}t|�||d�dS)Nr)�
unregisterr)r$r.r
)r#r.rrrr%TszSemLock._cleanupcCs|jj|_|jj|_dSr)r�acquire�release�r&rrrr"Zs
zSemLock._make_methodscCs
|j��Sr)r�	__enter__r1rrrr2^szSemLock.__enter__cGs|jj|�Sr)r�__exit__�r&�argsrrrr3aszSemLock.__exit__cCsDt�|�|j}tjdkr,t���|j�}n|j}||j|j	|j
fS)Nr)r	�assert_spawningrrrZget_spawning_popenZduplicate_for_childr!r'r)r#)r&r,�hrrr�__getstate__ds

zSemLock.__getstate__cCs,tjj|�|_t�d|d�|��dS)Nz recreated blocker with handle %rr)rrZ_rebuildrrr r"�r&�staterrr�__setstate__mszSemLock.__setstate__cCsdt��jdttj�fS)Nz%s-%sZ	semprefix)r
�current_processZ_config�nextr�_randrrrrrrs�zSemLock._make_nameN)�__name__�
__module__�__qualname__�tempfileZ_RandomNameSequencer>r-�staticmethodr%r"r2r3r8r;rrrrrr.s"
	rc@s&eZdZd	dd�Zdd�Zdd�ZdS)
rrcCstj|t|t|d�dS�N�r*)rr-�	SEMAPHORE�
SEM_VALUE_MAX�r&r(r*rrrr-}szSemaphore.__init__cCs
|j��Sr)r�
_get_valuer1rrr�	get_value�szSemaphore.get_valuecCs8z|j��}Wntk
r&d}YnXd|jj|fS)N�unknownz<%s(value=%s)>)rrI�	Exception�	__class__r?�r&r(rrr�__repr__�s

zSemaphore.__repr__N)r)r?r@rAr-rJrOrrrrr{s
c@seZdZddd�Zdd�ZdS)rrcCstj|t|||d�dSrD�rr-rFrHrrrr-�szBoundedSemaphore.__init__cCs>z|j��}Wntk
r&d}YnXd|jj||jjfS)NrKz<%s(value=%s, maxvalue=%s)>)rrIrLrMr?r)rNrrrrO�s
�zBoundedSemaphore.__repr__N)r�r?r@rAr-rOrrrrr�s
c@seZdZdd�Zdd�ZdS)rcCstj|tdd|d�dS�NrrErP�r&r*rrrr-�sz
Lock.__init__cCs�zf|j��r8t��j}t��jdkrd|dt��j7}n,|j��dkrLd}n|j��dkr`d}nd}Wnt	k
r~d}YnXd	|j
j|fS)
N�
MainThread�|r�Noner�SomeOtherThread�SomeOtherProcessrKz<%s(owner=%s)>)r�_is_miner
r<r#�	threading�current_threadrI�_countrLrMr?)r&r#rrrrO�s


z
Lock.__repr__NrQrrrrr�sc@seZdZdd�Zdd�ZdS)rcCstj|tdd|d�dSrR)rr-�RECURSIVE_MUTEXrSrrrr-�szRLock.__init__cCs�z||j��rBt��j}t��jdkr6|dt��j7}|j��}n8|j��dkrZd\}}n |j��dkrrd\}}nd\}}Wnt	k
r�d\}}YnXd	|j
j||fS)
NrTrUr)rVrr)rW�nonzero)rXr^)rKrK�<%s(%s, %s)>)rrYr
r<r#rZr[r\rIrLrMr?)r&r#�countrrrrO�s



zRLock.__repr__NrQrrrrr�sc@sleZdZddd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	ddd�Z
ddd�Zdd�Zddd�Z
dS)rNcCs>|p
|��|_|�d�|_|�d�|_|�d�|_|��dS�Nr)r�_lockr�_sleeping_count�_woken_count�_wait_semaphorer")r&�lockr*rrrr-�s
zCondition.__init__cCst�|�|j|j|j|jfSr)r	r6rbrcrdrer1rrrr8�s

�zCondition.__getstate__cCs |\|_|_|_|_|��dSr)rbrcrdrer"r9rrrr;�s
�
zCondition.__setstate__cCs
|j��Sr)rbr2r1rrrr2�szCondition.__enter__cGs|jj|�Sr)rbr3r4rrrr3�szCondition.__exit__cCs|jj|_|jj|_dSr)rbr/r0r1rrrr"�s
zCondition._make_methodscCsJz|jj��|jj��}Wntk
r4d}YnXd|jj|j|fS)NrKr_)rcrrIrdrLrMr?rb)r&Znum_waitersrrrrO�s

�
zCondition.__repr__c	Csj|j��|jj��}t|�D]}|j��qz|j�d|�W�S|j��t|�D]}|j��qTXdS)NT)	rcr0rbrr\rrdr/re)r&�timeoutr`r+rrr�wait�s

zCondition.waitrcCst|j�d�r|j�d�}qd}||krF|j�d�rF|j��|d7}q|rpt|�D]}|j��qR|j�d�rpqbdS)NFrr)rdr/rcrer0r)r&�n�resZsleepersr+rrr�notifys

zCondition.notifycCs|jtjd�dS)N)ri)rkr�maxsizer1rrr�
notify_all(szCondition.notify_allcCsd|�}|r|S|dk	r$t��|}nd}d}|s`|dk	rN|t��}|dkrNq`|�|�|�}q,|Sra)�time�	monotonicrh)r&Z	predicaterg�resultZendtimeZwaittimerrr�wait_for+s
zCondition.wait_for)N)N)r)N)r?r@rAr-r8r;r2r3r"rOrhrkrmrqrrrrr�s


c@s6eZdZdd�Zdd�Zdd�Zdd�Zdd
d�Zd	S)
rcCs |�|���|_|�d�|_dSra)rr�_condr�_flagrSrrrr-CszEvent.__init__c	CsD|j�4|j�d�r,|j��W5QR�dSW5QR�dSQRXdS�NFT)rrrsr/r0r1rrr�is_setGs

zEvent.is_setc	Cs6|j�&|j�d�|j��|j��W5QRXdS�NF)rrrsr/r0rmr1rrr�setNs
z	Event.setc	Cs"|j�|j�d�W5QRXdSrv)rrrsr/r1rrr�clearTszEvent.clearNc	Csh|j�X|j�d�r |j��n|j�|�|j�d�rP|j��W5QR�dSW5QR�dSQRXdSrt)rrrsr/r0rh)r&rgrrrrhXs
z
Event.wait)N)r?r@rAr-rurwrxrhrrrrrAs
c@sZeZdZddd�Zdd�Zdd�Zedd	��Zejd
d	��Zedd��Z	e	jd
d��Z	dS)�BarrierNc	CsRddl}ddlm}||�d�d�}|��}|�|||||f�d|_d|_dS)Nrr)�
BufferWrapperr+r)�struct�heaprzZcalcsizerr;�_stater\)	r&Zparties�actionrgr*r{rz�wrapperZcondrrrr-jszBarrier.__init__cCs.|\|_|_|_|_|_|j���d�|_dS)Nr+)�_parties�_action�_timeoutrr�_wrapperZcreate_memoryview�cast�_arrayr9rrrr;ss
�zBarrier.__setstate__cCs|j|j|j|j|jfSr)r�r�r�rrr�r1rrrr8xs�zBarrier.__getstate__cCs
|jdSra�r�r1rrrr}|szBarrier._statecCs||jd<dSrar�rNrrrr}�scCs
|jdS�Nrr�r1rrrr\�szBarrier._countcCs||jd<dSr�r�rNrrrr\�s)NN)
r?r@rAr-r;r8�propertyr}�setterr\rrrrryhs
	


ry)�__all__rZrrBrrn�r	r
rrr
�ImportError�listrr]rFrG�objectrrrrrrryrrrr�<module>s8�	Mo'PK{��\����*�*"__pycache__/process.cpython-38.pycnu�[���U

e5d�.�@s8ddddgZddlZddlZddlZddlZddlZddlmZzej�	e�
��ZWnek
rldZYnXdd�Z
dd�Zd	d�Zd
d�ZGdd�de�ZGd
d�de�ZGdd�de�ZGdd�de�Zdae�ae�d�ae�a[iZeej� ��D]0\Z!Z"e!dd�dkr�de!kr�de!��ee"<q�e�Z#dS)�BaseProcess�current_process�active_children�parent_process�N)�WeakSetcCstS)z@
    Return process object representing the current process
    )�_current_process�rr�//usr/lib64/python3.8/multiprocessing/process.pyr%scCst�tt�S)zN
    Return list of process objects corresponding to live child processes
    )�_cleanup�list�	_childrenrrrr	r+scCstS)z?
    Return process object representing the parent process
    )�_parent_processrrrr	r3scCs*tt�D]}|j��dk	rt�|�qdS�N)rr�_popen�poll�discard)�prrr	r
=sr
c@s�eZdZdZdd�Zddddifdd�dd�Zd	d
�Zdd�Zd
d�Zdd�Z	dd�Z
d,dd�Zdd�Zdd�Z
edd��Zejdd��Zedd��Zejdd��Zedd ��Zejd!d ��Zed"d#��Zed$d%��ZeZed&d'��Zd(d)�Zd-d*d+�ZdS).rz�
    Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`
    cCst�dSr)�NotImplementedError��selfrrr	�_PopenMszBaseProcess._PopenNr)�daemoncCs�|dkstd��tt�}tj|f|_tj��|_t��|_	tj
|_d|_d|_
||_t|�|_t|�|_|p�t|�jdd�dd�|jD��|_|dk	r�||_t�|�dS)Nz#group argument must be None for nowF�-�:css|]}t|�VqdSr)�str)�.0�irrr	�	<genexpr>^sz'BaseProcess.__init__.<locals>.<genexpr>)�AssertionError�next�_process_counterr�	_identity�_config�copy�os�getpid�_parent_pid�name�_parent_namer�_closed�_target�tuple�_args�dict�_kwargs�type�__name__�join�_namer�	_dangling�add)r�group�targetr'�args�kwargsr�countrrr	�__init__Ps"


�zBaseProcess.__init__cCs|jrtd��dS)Nzprocess object is closed)r)�
ValueErrorrrrr	�
_check_closedcszBaseProcess._check_closedcCs|jr|j|j|j�dS)zQ
        Method to be run in sub-process; can be overridden in sub-class
        N)r*r,r.rrrr	�rungszBaseProcess.runcCsz|��|jdkstd��|jt��ks0td��tj�d�rDtd��t	�|�
|�|_|jj|_|`
|`|`t�|�dS)z%
        Start child process
        Nzcannot start a process twicez:can only start a process object created by current processrz3daemonic processes are not allowed to have children)r<rrr&r$r%rr"�getr
r�sentinel�	_sentinelr*r,r.rr4rrrr	�startns��
zBaseProcess.startcCs|��|j��dS)zT
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        N)r<r�	terminaterrrr	rB�szBaseProcess.terminatecCs|��|j��dS)zT
        Terminate process; sends SIGKILL signal or uses TerminateProcess()
        N)r<r�killrrrr	rC�szBaseProcess.killcCsR|��|jt��kstd��|jdk	s0td��|j�|�}|dk	rNt�|�dS)z5
        Wait until child process terminates
        zcan only join a child processNzcan only join a started process)	r<r&r$r%rr�waitrr)r�timeout�resrrr	r1�szBaseProcess.joincCs`|��|tkrdS|jt��ks*td��|jdkr8dS|j��}|dkrNdSt�	|�dSdS)z1
        Return whether process is alive
        Tzcan only test a child processNF)
r<rr&r$r%rrrrr)r�
returncoderrr	�is_alive�s


zBaseProcess.is_alivecCsH|jdk	r>|j��dkr td��|j��d|_|`t�|�d|_dS)z�
        Close the Process object.

        This method releases resources held by the Process object.  It is
        an error to call this method if the child process is still running.
        Nz^Cannot close a process while it is still running. You should first call join() or terminate().T)rrr;�closer@rrr)rrrr	rI�s


zBaseProcess.closecCs|jSr)r2rrrr	r'�szBaseProcess.namecCst|t�std��||_dS)Nzname must be a string)�
isinstancerrr2)rr'rrr	r'�scCs|j�dd�S)z4
        Return whether process is a daemon
        rF)r"r>rrrr	r�szBaseProcess.daemoncCs |jdkstd��||jd<dS)z1
        Set whether process is a daemon
        Nzprocess has already startedr)rrr")rZdaemonicrrr	r�scCs
|jdS)N�authkey)r"rrrr	rK�szBaseProcess.authkeycCst|�|jd<dS)z2
        Set authorization key of process
        rKN)�AuthenticationStringr")rrKrrr	rK�scCs"|��|jdkr|jS|j��S)zM
        Return exit code of process or `None` if it has yet to stop
        N)r<rrrrrr	�exitcode�s
zBaseProcess.exitcodecCs*|��|tkrt��S|jo$|jjSdS)zU
        Return identifier (PID) of process or `None` if it has yet to start
        N)r<rr$r%r�pidrrrr	�ident�szBaseProcess.identcCs4|��z|jWStk
r.td�d�YnXdS)z{
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        zprocess not startedN)r<r@�AttributeErrorr;rrrr	r?�s
zBaseProcess.sentinelcCs�d}|tkrd}nL|jrd}n@|jt��kr2d}n,|jdkrBd}n|j��}|dk	rZd}nd}t|�jd|j	g}|jdk	r�|�
d|jj�|�
d|j�|�
|�|dk	r�t�
||�}|�
d	|�|jr�|�
d
�dd�|�S)
NZstarted�closed�unknown�initialZstoppedzname=%rzpid=%sz	parent=%szexitcode=%srz<%s>� )rr)r&r$r%rrr/r0r2�appendrN�_exitcode_to_namer>rr1)rrMZstatus�inforrr	�__repr__s0




zBaseProcess.__repr__c
Csvddlm}m}�z>z�|jdk	r,|�|j�t	�
d�at�a
|��t}|at|j|j|�atjrnt����z|j��|��W5~X|�d�z|��d}W5|��XWn�tk
�r}zJ|js�d}n:t|jdt�r�|jd}nt j!�"t#|jd�d�d}W5d}~XYn2d}ddl$}t j!�"d|j%�|�&�YnXW5t��|�d|�|��X|S)N�)�util�contextz process exiting with exitcode %dz child process calling self.run()r�
zProcess %s:
)'�rZr[�	threadingZ	_shutdownrWZ_flush_std_streamsZ
_start_methodZ_force_start_method�	itertoolsr9r �setrZ_close_stdinr�_ParentProcessr(r&r
Z_HAVE_THREAD_NATIVE_IDZmain_threadZ_set_native_idZ_finalizer_registry�clearZ_run_after_forkersZ_exit_functionr=�
SystemExitr7rJ�int�sys�stderr�writer�	tracebackr'�	print_exc)rZparent_sentinelrZr[rMZold_process�erhrrr	�
_bootstrap"sR

�


zBaseProcess._bootstrap)N)N)r0�
__module__�__qualname__�__doc__rr:r<r=rArBrCr1rHrI�propertyr'�setterrrKrMrOrNr?rXrkrrrr	rGsD�







	


c@seZdZdd�ZdS)rLcCs,ddlm}|�dkrtd��tt|�ffS)NrY)�get_spawning_popenzJPickling an AuthenticationString object is disallowed for security reasons)r[rq�	TypeErrorrL�bytes)rrqrrr	�
__reduce__Xs
�zAuthenticationString.__reduce__N)r0rlrmrtrrrr	rLWsrLc@s6eZdZdd�Zdd�Zedd��Zd
dd	�ZeZdS)racCs4d|_||_||_d|_d|_d|_||_i|_dS)NrF)r!r2�_pidr&rr)r@r")rr'rNr?rrr	r:hsz_ParentProcess.__init__cCsddlm}||jgdd�S)Nr�rD�rE�Zmultiprocessing.connectionrDr@)rrDrrr	rHrsz_ParentProcess.is_alivecCs|jSr)rurrrr	rOvsz_ParentProcess.identNcCs ddlm}||jg|d�dS)z6
        Wait until parent process terminates
        rrvrwNrx)rrErDrrr	r1zsz_ParentProcess.join)N)	r0rlrmr:rHrorOr1rNrrrr	rafs


rac@seZdZdd�Zdd�ZdS)�_MainProcesscCs8d|_d|_d|_d|_d|_tt�d��dd�|_dS)NrZMainProcessF� z/mp)rKZ	semprefix)	r!r2r&rr)rLr$�urandomr"rrrr	r:�s�z_MainProcess.__init__cCsdSrrrrrr	rI�sz_MainProcess.closeN)r0rlrmr:rIrrrr	ry�sryrY�ZSIG�_r)$�__all__r$re�signalr_r^Z_weakrefsetr�path�abspath�getcwdZORIGINAL_DIR�OSErrorrrrr
�objectrrsrLraryr
rr9r r`rrVr�__dict__�itemsr'Zsignumr3rrrr	�<module>
s@�


!
PK{��\w�5�x)x)%__pycache__/util.cpython-38.opt-2.pycnu�[���U

e5d~6�@s�ddlZddlZddlZddlZddlZddlZddlmZddlm	Z	ddddd	d
ddd
ddddddgZ
dZdZdZ
dZdZdZdZdadadd�Zdd�Zdd�Zdd�Zdd	�Zd@d d
�Zd!d"�Zd#d$�Ze�Zd%d&�Zd'd�Ze��Z e�!�Z"d(d)�Z#d*d�Z$iZ%e�!�Z&Gd+d�de'�Z(dAd,d-�Z)d.d
�Z*da+eee)e	j,e	j-fd/d0�Z.e�/e.�Gd1d�de'�Z0Gd2d�dej1�Z2ze�3d3�Z4Wne5k
�r�d4Z4YnXd5d�Z6d6d7�Z7d8d9�Z8d:d;�Z9d<d=�Z:d>d?�Z;dS)B�N)�_args_from_interpreter_flags�)�process�	sub_debug�debug�info�sub_warning�
get_logger�
log_to_stderr�get_temp_dir�register_after_fork�
is_exiting�Finalize�ForkAwareThreadLock�ForkAwareLocal�close_all_fds_except�SUBDEBUG�
SUBWARNING��
���multiprocessingz+[%(levelname)s/%(processName)s] %(message)sFcGstrtjt|f|��dS�N)�_logger�logr��msg�args�r�,/usr/lib64/python3.8/multiprocessing/util.pyr,scGstrtjt|f|��dSr)rr�DEBUGrrrr r0scGstrtjt|f|��dSr)rr�INFOrrrr r4scGstrtjt|f|��dSr)rrrrrrr r8scCs|ddl}|��z\tsj|�t�adt_ttd�rFt�	t
�t�t
�n$tj�
t
dif�tj�t
dif�W5|��XtS)Nr�
unregisterr)�loggingZ_acquireLockZ_releaseLockrZ	getLogger�LOGGER_NAMEZ	propagate�hasattr�atexitr#�_exit_function�registerZ
_exithandlers�remove�append)r$rrr r	<s



cCsJddl}t�}|�t�}|��}|�|�|�|�|rB|�|�dat	S)NrT)
r$r	Z	Formatter�DEFAULT_LOGGING_FORMATZ
StreamHandlerZsetFormatterZ
addHandlerZsetLevel�_log_to_stderrr)�levelr$ZloggerZ	formatterZhandlerrrr r
Ws



cCs tjdkrdSttd�rdSdS)NZlinuxTZgetandroidapilevelF)�sys�platformr&rrrr �#_platform_supports_abstract_socketsls


r1cCs@|sdSt|t�r|ddkSt|t�r4|ddkStd��dS)NFr�z(address type of {address!r} unrecognized)�
isinstance�bytes�str�	TypeError)Zaddressrrr �is_abstract_socket_namespacets

r7cCs&||�t��}|dk	r"d|jd<dS)N�tempdir)r�current_process�_config)�rmtreer8r9rrr �_remove_temp_dir�sr<cCsft��j�d�}|dkrbddl}ddl}|jdd�}td|�tdt	|j
|fdd�|t��jd<|S)Nr8rzpymp-)�prefixzcreated temp directory %si����)r�exitpriority)rr9r:�get�shutil�tempfileZmkdtemprrr<r;)r8r@rArrr r�s
�cCsftt���}|��|D]H\\}}}}z||�Wqtk
r^}ztd|�W5d}~XYqXqdS)Nz after forker raised exception %s)�list�_afterfork_registry�items�sort�	Exceptionr)rD�indexZident�func�obj�errr �_run_after_forkers�srKcCs|ttt�t|�|f<dSr)rC�next�_afterfork_counter�id)rIrHrrr r�sc@sBeZdZd
dd�Zdeeejfdd�Zdd�Z	d	d
�Z
dd�ZdS)rrNcCs�|dk	r&t|t�s&td�|t|����|dk	r>t�||�|_n|dkrNtd��||_	||_
|p`i|_|tt
�f|_t��|_|t|j<dS)Nz3Exitpriority ({0!r}) must be None or int, not {1!s}z+Without object, exitpriority cannot be None)r3�intr6�format�type�weakref�ref�_weakref�
ValueError�	_callback�_args�_kwargsrL�_finalizer_counter�_key�os�getpid�_pid�_finalizer_registry)�selfrI�callbackr�kwargsr>rrr �__init__�s"��

zFinalize.__init__cCs�z||j=Wntk
r(|d�YnbX|j|�krD|d�d}n$|d|j|j|j�|j|j|j�}d|_|_|_|_|_|SdS)Nzfinalizer no longer registeredz+finalizer ignored because different processz/finalizer calling %s with args %s and kwargs %s)rZ�KeyErrorr]rVrWrXrT)r_Zwrr^rr\�resrrr �__call__�s$��zFinalize.__call__cCsDzt|j=Wntk
r Yn Xd|_|_|_|_|_dSr)r^rZrcrTrVrWrX�r_rrr �cancel�s��zFinalize.cancelcCs
|jtkSr)rZr^rfrrr �still_active�szFinalize.still_activec	Cs�z|��}Wnttfk
r(d}YnX|dkr>d|jjSd|jjt|jd|j�f}|jrr|dt|j�7}|j	r�|dt|j	�7}|j
ddk	r�|dt|j
d�7}|dS)	Nz<%s object, dead>z<%s object, callback=%s�__name__z, args=z	, kwargs=rz, exitpriority=�>)rT�AttributeErrorr6�	__class__ri�getattrrVrWr5rXrZ)r_rI�xrrr �__repr__�s"
�zFinalize.__repr__)rNN)ri�
__module__�__qualname__rbr^rr[r\rergrhrorrrr r�s
�
c	s�tdkrdS�dkrdd��n�fdd���fdd�tt�D�}|jdd�|D]P}t�|�}|dk	rPtd|�z
|�WqPtk
r�d	dl}|��YqPXqP�dkr�t��dS)
NcSs|ddk	S�Nrr��prrr �<lambda>�z!_run_finalizers.<locals>.<lambda>cs|ddk	o|d�kSrrrrs)�minpriorityrr rurvcsg|]}�|�r|�qSrr)�.0�key)�frr �
<listcomp>#sz#_run_finalizers.<locals>.<listcomp>T)�reversez
calling %sr)	r^rBrEr?rrF�	traceback�	print_exc�clear)rw�keysry�	finalizerr}r)rzrwr �_run_finalizerss$



r�cCstp
tdkSr)�_exitingrrrr r
8scCs�ts�da|d�|d�|d�|�dk	rr|�D] }|jr0|d|j�|j��q0|�D]}|d|j�|��qX|d�|�dS)NTzprocess shutting downz2running all "atexit" finalizers with priority >= 0rz!calling terminate() for daemon %szcalling join() for process %sz)running the remaining "atexit" finalizers)r�Zdaemon�nameZ_popenZ	terminate�join)rrr��active_childrenr9rtrrr r(@s	



r(c@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
rcCs|��t|tj�dSr)�_resetrrrfrrr rbqszForkAwareThreadLock.__init__cCs"t��|_|jj|_|jj|_dSr)�	threadingZLock�_lock�acquire�releaserfrrr r�us

zForkAwareThreadLock._resetcCs
|j��Sr)r��	__enter__rfrrr r�zszForkAwareThreadLock.__enter__cGs|jj|�Sr)r��__exit__)r_rrrr r�}szForkAwareThreadLock.__exit__N)rirprqrbr�r�r�rrrr rpsc@seZdZdd�Zdd�ZdS)rcCst|dd��dS)NcSs
|j��Sr)�__dict__r)rIrrr ru�rvz)ForkAwareLocal.__init__.<locals>.<lambda>)rrfrrr rb�szForkAwareLocal.__init__cCst|�dfS)Nr)rQrfrrr �
__reduce__�szForkAwareLocal.__reduce__N)rirprqrbr�rrrr r�s�SC_OPEN_MAX�cCsNt|�dtg}|��tt|�d�D] }t�||d||d�q(dS)N���r)rB�MAXFDrE�range�lenr[�
closerange)�fds�irrr r�sc	Cs�tjdkrdSztj��Wnttfk
r4YnXz@t�tjtj�}zt|dd�t_Wnt�|��YnXWnttfk
r�YnXdS)NF)�closefd)	r/�stdin�close�OSErrorrUr[�open�devnull�O_RDONLY)�fdrrr �_close_stdin�s

r�c	CsTztj��Wnttfk
r&YnXztj��Wnttfk
rNYnXdSr)r/�stdout�flushrkrU�stderrrrrr �_flush_std_streams�sr�cCsxddl}tttt|���}t��\}}z6|�|t�	|�gd|dddddddd||ddd�W�St�|�t�|�XdS)NrTr�F)
�_posixsubprocess�tuple�sorted�maprOr[�piper�Z	fork_exec�fsencode)�pathrZpassfdsr�Zerrpipe_readZ
errpipe_writerrr �spawnv_passfds�s2
�
r�cGs|D]}t�|�qdSr)r[r�)r�r�rrr �	close_fds�sr�cCsZddlm}t��ddlm}|j��ddlm}|j	��t
�|��|��dS)Nr)�support)�
forkserver)�resource_tracker)
Ztestr�rZ_cleanuprr�Z_forkserverZ_stopr�Z_resource_trackerr�Z
gc_collectZ
reap_children)r�r�r�rrr �_cleanup_tests�s

r�)N)N)<r[�	itertoolsr/rRr'r��
subprocessr�r�__all__ZNOTSETrr!r"rr%r,rr-rrrrr	r
r1r7Zabstract_sockets_supportedr<rZWeakValueDictionaryrC�countrMrKrr^rY�objectrr�r
r�r�r9r(r)rZlocalr�sysconfr�rFrr�r�r�r�r�rrrr �<module>
s��

		V
,�
*



PK{��\Y]NZT%T%!__pycache__/queues.cpython-38.pycnu�[���U

e5d�-�@s�dddgZddlZddlZddlZddlZddlZddlZddlZddlm	Z	m
Z
ddlZddlm
Z
ddlmZejjZdd	lmZmZmZmZmZGd
d�de�Ze�ZGdd�de�ZGdd�de�ZdS)
�Queue�SimpleQueue�
JoinableQueue�N)�Empty�Full�)�
connection)�context)�debug�info�Finalize�register_after_fork�
is_exitingc@s�eZdZd*dd�Zdd�Zdd�Zdd	�Zd+dd
�Zd,dd�Zdd�Z	dd�Z
dd�Zdd�Zdd�Z
dd�Zdd�Zdd�Zd d!�Zed"d#��Zed$d%��Zed&d'��Zed(d)��ZdS)-rrcCs�|dkrddlm}||_tjdd�\|_|_|��|_t	�
�|_tj
dkrTd|_n
|��|_|�|�|_d|_|��tj
dkr�t|tj�dS)Nrr)�
SEM_VALUE_MAXF�Zduplex�win32)Zsynchronizer�_maxsizer�Pipe�_reader�_writer�Lock�_rlock�os�getpid�_opid�sys�platform�_wlockZBoundedSemaphore�_sem�
_ignore_epipe�_after_forkr
r��self�maxsize�ctx�r%�./usr/lib64/python3.8/multiprocessing/queues.py�__init__$s




zQueue.__init__cCs.t�|�|j|j|j|j|j|j|j|j	fS�N)
r	�assert_spawningrrrrrrrr�r"r%r%r&�__getstate__9s
�zQueue.__getstate__c	Cs0|\|_|_|_|_|_|_|_|_|��dSr()	rrrrrrrrr �r"�stater%r%r&�__setstate__>s�zQueue.__setstate__cCsbtd�t�t���|_t��|_d|_d|_	d|_
d|_d|_|j
j|_|jj|_|jj|_dS)NzQueue._after_fork()F)r
�	threading�	Conditionr�	_notempty�collections�deque�_buffer�_thread�_jointhread�_joincancelled�_closed�_closer�
send_bytes�_send_bytesr�
recv_bytes�_recv_bytes�poll�_pollr*r%r%r&r Cs


zQueue._after_forkTNc	Csf|jrtd|�d���|j�||�s(t�|j�.|jdkrB|��|j�	|�|j�
�W5QRXdS�NzQueue z
 is closed)r8�
ValueErrorr�acquirerr1r5�
_start_threadr4�append�notify�r"�obj�block�timeoutr%r%r&�putPs
z	Queue.putc	Cs�|jrtd|�d���|rH|dkrH|j�|��}W5QRX|j��nr|rXt��|}|j�||�sjt	�zB|r�|t��}|�
|�s�t	�n|�
�s�t	�|��}|j��W5|j��Xt�|�Sr@)
r8rArr=r�release�time�	monotonicrBrr?�_ForkingPickler�loads)r"rHrI�resZdeadliner%r%r&�get\s*
z	Queue.getcCs|j|jj��Sr()rr�_semlockZ
_get_valuer*r%r%r&�qsizevszQueue.qsizecCs
|��Sr(�r?r*r%r%r&�emptyzszQueue.emptycCs|jj��Sr()rrR�_is_zeror*r%r%r&�full}sz
Queue.fullcCs
|�d�S�NF)rQr*r%r%r&�
get_nowait�szQueue.get_nowaitcCs|�|d�SrX)rJ�r"rGr%r%r&�
put_nowait�szQueue.put_nowaitcCs2d|_z|j��W5|j}|r,d|_|�XdS)NT)r8r9r�close)r"r\r%r%r&r\�szQueue.closecCs.td�|jstd�|���|jr*|��dS)NzQueue.join_thread()zQueue {0!r} not closed)r
r8�AssertionError�formatr6r*r%r%r&�join_thread�szQueue.join_threadcCs6td�d|_z|j��Wntk
r0YnXdS)NzQueue.cancel_join_thread()T)r
r7r6Zcancel�AttributeErrorr*r%r%r&�cancel_join_thread�szQueue.cancel_join_threadc
Cs�td�|j��tjtj|j|j|j|j	|j
j|j|j
|jfdd�|_d|j_td�|j��td�|js�t|jtjt�|j�gdd�|_t|tj|j|jgd	d�|_dS)
NzQueue._start_thread()ZQueueFeederThread)�target�args�nameTzdoing self._thread.start()z... done self._thread.start()���)Zexitpriority�
)r
r4�clearr/ZThreadr�_feedr1r;rrr\r�_on_queue_feeder_errorrr5Zdaemon�startr7r�_finalize_join�weakref�refr6�_finalize_closer9r*r%r%r&rC�s<
��
�
�zQueue._start_threadcCs4td�|�}|dk	r(|��td�ntd�dS)Nzjoining queue threadz... queue thread joinedz... queue thread already dead)r
�join)Ztwr�threadr%r%r&rk�s
zQueue._finalize_joinc	Cs.td�|�|�t�|��W5QRXdS)Nztelling queue thread to quit)r
rD�	_sentinelrE)�buffer�notemptyr%r%r&rn�s
zQueue._finalize_closec
CsXtd�|j}|j}	|j}
|j}t}tjdkr<|j}
|j}nd}
z�|�z|sT|
�W5|	�Xzb|�}||kr�td�|�WWdSt�	|�}|
dkr�||�qb|
�z||�W5|�XqbWnt
k
r�YnXWq@tk
�rP}zV|�rt|dd�t
jk�rWY�6dSt��r.td|�WY�dS|��|||�W5d}~XYq@Xq@dS)Nz$starting thread to feed data to piperz%feeder thread got sentinel -- exiting�errnorzerror in queue thread: %s)r
rBrK�wait�popleftrqrrrN�dumps�
IndexError�	Exception�getattrrtZEPIPErr)rrrsr:Z	writelockr\Zignore_epipe�onerrorZ	queue_semZnacquireZnreleaseZnwaitZbpopleft�sentinelZwacquireZwreleaserG�er%r%r&rh�sN







zQueue._feedcCsddl}|��dS)z�
        Private API hook called when feeding data in the background thread
        raises an exception.  For overriding by concurrent.futures.
        rN)�	traceback�	print_exc)r}rGr~r%r%r&ri
szQueue._on_queue_feeder_error)r)TN)TN)�__name__�
__module__�__qualname__r'r+r.r rJrQrSrUrWrYr[r\r_rarC�staticmethodrkrnrhrir%r%r%r&r"s.



 
	

=c@s@eZdZddd�Zdd�Zdd�Zdd
d�Zdd
�Zdd�Zd	S)rrcCs*tj|||d�|�d�|_|��|_dS)N)r$r)rr'Z	Semaphore�_unfinished_tasksr0�_condr!r%r%r&r'#szJoinableQueue.__init__cCst�|�|j|jfSr()rr+r�r�r*r%r%r&r+(szJoinableQueue.__getstate__cCs,t�||dd��|dd�\|_|_dS)N���)rr.r�r�r,r%r%r&r.+szJoinableQueue.__setstate__TNc
Cs�|jrtd|�d���|j�||�s(t�|j�J|j�8|jdkrJ|��|j	�
|�|j��|j�
�W5QRXW5QRXdSr@)r8rArrBrr1r�r5rCr4rDr�rKrErFr%r%r&rJ/s

zJoinableQueue.putc	Cs@|j�0|j�d�std��|jj��r2|j��W5QRXdS)NFz!task_done() called too many times)r�r�rBrArRrVZ
notify_allr*r%r%r&�	task_done<s
zJoinableQueue.task_donec	Cs,|j�|jj��s|j��W5QRXdSr()r�r�rRrVrur*r%r%r&roCszJoinableQueue.join)r)TN)	r�r�r�r'r+r.rJr�ror%r%r%r&r!s


c@s<eZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
S)rcCsHtjdd�\|_|_|��|_|jj|_tj	dkr:d|_
n
|��|_
dS)NFrr)rrrrrrr>r?rrr)r"r$r%r%r&r'Ns


zSimpleQueue.__init__cCs
|��Sr(rTr*r%r%r&rUWszSimpleQueue.emptycCst�|�|j|j|j|jfSr()r	r)rrrrr*r%r%r&r+Zs
zSimpleQueue.__getstate__cCs"|\|_|_|_|_|jj|_dSr()rrrrr>r?r,r%r%r&r.^szSimpleQueue.__setstate__c	Cs&|j�|j��}W5QRXt�|�Sr()rrr<rNrO)r"rPr%r%r&rQbszSimpleQueue.getc	CsDt�|�}|jdkr"|j�|�n|j�|j�|�W5QRXdSr()rNrwrrr:rZr%r%r&rJhs


zSimpleQueue.putN)	r�r�r�r'rUr+r.rQrJr%r%r%r&rLs	)�__all__rrr/r2rLrlrtZqueuerrZ_multiprocessing�rr	Z	reductionZForkingPicklerrN�utilr
rrr
r�objectrrqrrr%r%r%r&�<module>
s$
v
+PK{��\�}�y�2�2(__pycache__/context.cpython-38.opt-1.pycnu�[���U

e5d�+�@s�ddlZddlZddlZddlmZddlmZdZGdd�de�ZGdd	�d	e�Z	Gd
d�de�Z
Gdd
�d
e�ZGdd�de�Z
Gdd�dej�ZGdd�de
�Zejdk�rRGdd�dej�ZGdd�dej�ZGdd�dej�ZGdd�de
�ZGdd�de
�ZGdd �d e
�Ze�e�e�d!�Zejd"k�rDeed#�Zneed$�Zn8Gd%d�dej�ZGd&d�de
�Zd#e�iZeed#�Zd'd(�Ze��Zd)d*�Zd+d,�Zd-d.�ZdS)/�N�)�process)�	reduction�c@seZdZdS)�ProcessErrorN��__name__�
__module__�__qualname__rrr�//usr/lib64/python3.8/multiprocessing/context.pyrsrc@seZdZdS)�BufferTooShortNrrrrrrsrc@seZdZdS)�TimeoutErrorNrrrrrr
sr
c@seZdZdS)�AuthenticationErrorNrrrrrrsrc@sXeZdZeZeZeZeZeej	�Z	eej
�Z
eej�Zdd�Zdd�Z
dCdd�Zdd	�Zd
d�ZdDd
d�ZdEdd�ZdFdd�Zdd�ZdGdd�ZdHdd�ZdIdd�Zdd�ZdJd d!�Zd"d#�Zd$d%�Zdd&�d'd(�Zdd&�d)d*�Zd+d,�Zd-d.�ZdKd/d0�Z d1d2�Z!d3d4�Z"d5d6�Z#dLd7d8�Z$dMd:d;�Z%dNd<d=�Z&e'd>d?��Z(e(j)d@d?��Z(dAdB�Z*dS)O�BaseContextcCs"t��}|dkrtd��n|SdS)z(Returns the number of CPUs in the systemNzcannot determine number of cpus)�os�	cpu_count�NotImplementedError)�selfZnumrrrr)s
zBaseContext.cpu_countcCs&ddlm}||��d�}|��|S)z�Returns a manager associated with a running server process

        The managers methods such as `Lock()`, `Condition()` and `Queue()`
        can be used to create shared objects.
        r)�SyncManager��ctx)Zmanagersr�get_context�start)rr�mrrr�Manager1szBaseContext.ManagerTcCsddlm}||�S)z1Returns two connection object connected by a piper)�Pipe)�
connectionr)rZduplexrrrrr<szBaseContext.PipecCsddlm}||��d�S)z#Returns a non-recursive lock objectr)�Lockr)�synchronizerr)rrrrrrAszBaseContext.LockcCsddlm}||��d�S)zReturns a recursive lock objectr)�RLockr)rrr)rrrrrrFszBaseContext.RLockNcCsddlm}|||��d�S)zReturns a condition objectr)�	Conditionr)rr r)r�lockr rrrr KszBaseContext.ConditionrcCsddlm}|||��d�S)zReturns a semaphore objectr)�	Semaphorer)rr"r)r�valuer"rrrr"PszBaseContext.SemaphorecCsddlm}|||��d�S)z"Returns a bounded semaphore objectr)�BoundedSemaphorer)rr$r)rr#r$rrrr$UszBaseContext.BoundedSemaphorecCsddlm}||��d�S)zReturns an event objectr)�Eventr)rr%r)rr%rrrr%ZszBaseContext.EventcCs ddlm}|||||��d�S)zReturns a barrier objectr)�Barrierr)rr&r)rZparties�actionZtimeoutr&rrrr&_szBaseContext.BarrierrcCsddlm}|||��d�S)�Returns a queue objectr)�Queuer)�queuesr)r)r�maxsizer)rrrr)dszBaseContext.QueuecCsddlm}|||��d�S)r(r)�
JoinableQueuer)r*r,r)rr+r,rrrr,iszBaseContext.JoinableQueuecCsddlm}||��d�S)r(r)�SimpleQueuer)r*r-r)rr-rrrr-nszBaseContext.SimpleQueuercCs"ddlm}||||||��d�S)zReturns a process pool objectr)�Pool)�context)Zpoolr.r)rZ	processesZinitializerZinitargsZmaxtasksperchildr.rrrr.ss
�zBaseContext.PoolcGsddlm}||f|��S)zReturns a shared objectr)�RawValue)�sharedctypesr0)r�typecode_or_type�argsr0rrrr0zszBaseContext.RawValuecCsddlm}|||�S)zReturns a shared arrayr)�RawArray)r1r4)rr2�size_or_initializerr4rrrr4szBaseContext.RawArray)r!cGs&ddlm}||f|�||��d��S)z$Returns a synchronized shared objectr)�Value�r!r)r1r6r)rr2r!r3r6rrrr6�s�zBaseContext.ValuecCs ddlm}|||||��d�S)z#Returns a synchronized shared arrayr)�Arrayr7)r1r8r)rr2r5r!r8rrrr8�s�zBaseContext.ArraycCs,tjdkr(ttdd�r(ddlm}|�dS)z�Check whether this is a fake forked process in a frozen executable.
        If so then run code specified by commandline and exit.
        �win32�frozenFr)�freeze_supportN)�sys�platform�getattr�spawnr;)rr;rrrr;�szBaseContext.freeze_supportcCsddlm}|�S)zZReturn package logger -- if it does not already exist then
        it is created.
        r)�
get_logger)�utilr@)rr@rrrr@�szBaseContext.get_loggercCsddlm}||�S)z8Turn on logging and add a handler which prints to stderrr)�
log_to_stderr)rArB)r�levelrBrrrrB�szBaseContext.log_to_stderrcCsddlm}dS)zVInstall support for sending connections and sockets
        between processes
        r)rN)�r)rrrrr�allow_connection_pickling�sz%BaseContext.allow_connection_picklingcCsddlm}||�dS)z�Sets the path to a python.exe or pythonw.exe binary used to run
        child processes instead of sys.executable when using the 'spawn'
        start method.  Useful for people embedding Python.
        r)�set_executableN)r?rF)r�
executablerFrrrrF�szBaseContext.set_executablecCsddlm}||�dS)zkSet list of module names to try to load in forkserver process.
        This is really just a hint.
        r)�set_forkserver_preloadN)�
forkserverrH)rZmodule_namesrHrrrrH�sz"BaseContext.set_forkserver_preloadcCsH|dkr|Szt|}Wn"tk
r:td|�d�YnX|��|S)Nzcannot find context for %r)�_concrete_contexts�KeyError�
ValueError�_check_available)r�methodrrrrr�szBaseContext.get_contextFcCs|jS�N)�_name�rZ
allow_nonerrr�get_start_method�szBaseContext.get_start_methodcCstd��dS)Nz+cannot set start method of concrete context)rL�rrNZforcerrr�set_start_method�szBaseContext.set_start_methodcCst��d�S)z_Controls how objects will be reduced to a form that can be
        shared with other processes.r)�globals�get�rrrr�reducer�szBaseContext.reducercCs|t�d<dS)Nr)rU)rrrrrrX�scCsdSrOrrWrrrrM�szBaseContext._check_available)T)N)r)r)NN)r)r)NNrN)N)N)F)F)+rr	r
rrr
r�staticmethodrZcurrent_processZparent_processZactive_childrenrrrrrr r"r$r%r&r)r,r-r.r0r4r6r8r;r@rBrErFrHrrRrT�propertyrX�setterrMrrrrrsR









�







rc@seZdZdZedd��ZdS)�ProcessNcCst��j�|�SrO)�_default_contextrr\�_Popen)�process_objrrrr^�szProcess._Popen�rr	r
Z
_start_methodrYr^rrrrr\�sr\csFeZdZeZdd�Zd
�fdd�	Zddd�Zdd	d
�Zdd�Z�Z	S)�DefaultContextcCs||_d|_dSrO)r]�_actual_context)rr/rrr�__init__�szDefaultContext.__init__Ncs0|dkr |jdkr|j|_|jSt��|�SdSrO)rbr]�superr)rrN��	__class__rrr�s

zDefaultContext.get_contextFcCs<|jdk	r|std��|dkr,|r,d|_dS|�|�|_dS)Nzcontext has already been set)rb�RuntimeErrorrrSrrrrT�szDefaultContext.set_start_methodcCs"|jdkr|rdS|j|_|jjSrO)rbr]rPrQrrrrR�s

zDefaultContext.get_start_methodcCsBtjdkrdgStjdkr"ddgnddg}tjr:|�d�|SdS)Nr9r?�darwin�forkrI)r<r=r�HAVE_SEND_HANDLE�append)r�methodsrrr�get_all_start_methodss

z$DefaultContext.get_all_start_methods)N)F)F)
rr	r
r\rcrrTrRrm�
__classcell__rrrerra�s

rar9c@seZdZdZedd��ZdS)�ForkProcessricCsddlm}||�S�Nr)�Popen)Z
popen_forkrq�r_rqrrrr^szForkProcess._PopenNr`rrrrrosroc@seZdZdZedd��ZdS)�SpawnProcessr?cCsddlm}||�Srp)Zpopen_spawn_posixrqrrrrrr^s�SpawnProcess._PopenNr`rrrrrssrsc@seZdZdZedd��ZdS)�ForkServerProcessrIcCsddlm}||�Srp)Zpopen_forkserverrqrrrrrr^ szForkServerProcess._PopenNr`rrrrrusruc@seZdZdZeZdS)�ForkContextriN)rr	r
rPror\rrrrrv%srvc@seZdZdZeZdS��SpawnContextr?N�rr	r
rPrsr\rrrrrx)srxc@seZdZdZeZdd�ZdS)�ForkServerContextrIcCstjstd��dS)Nz%forkserver start method not available)rrjrLrWrrrrM0sz"ForkServerContext._check_availableN)rr	r
rPrur\rMrrrrrz-srz)rir?rIrhr?ric@seZdZdZedd��ZdS)rsr?cCsddlm}||�Srp)Zpopen_spawn_win32rqrrrrrr^DsrtNr`rrrrrsBsc@seZdZdZeZdSrwryrrrrrxIscCst|t_dSrO)rJr]rb)rNrrr�_force_start_methodVsr{cCsttdd�S)N�spawning_popen)r>�_tlsrrrr�get_spawning_popen_sr~cCs
|t_dSrO)r}r|)�popenrrr�set_spawning_popenbsr�cCs t�dkrtdt|�j��dS)NzF%s objects should only be shared between processes through inheritance)r~rg�typer)�objrrr�assert_spawninges
��r�) rr<Z	threadingrDrr�__all__�	Exceptionrrr
r�objectrZBaseProcessr\rar=rorsrurvrxrzrJr]r{Zlocalr}r~r�r�rrrr�<module>sL?,��PK{��\��A�L�L�#__pycache__/managers.cpython-38.pycnu�[���U

e5d��@sBdddddgZddlZddlZddlZddlZddlZddlZddlZddlmZddl	m
Z
d	d
lmZd	dl
mZmZmZd	dlmZd	d
lmZd	dlmZd	dlmZzd	dlmZdZWnek
r�dZYnXdd�Ze�eje�dd�dD�Zedek	�r.dd�ZeD]Ze�ee��qGdd�de�Zdifdd�Z dd�Z!Gd d!�d!e"�Z#d"d#�Z$d$d%�Z%Gd&d'�d'e�Z&Gd(d)�d)e�Z'ej(ej)fej*ej+fd*�Z,Gd+d�de�Z-Gd,d-�d-e.�Z/Gd.d�de�Z0d/d0�Z1ifd1d2�Z2dld3d4�Z3Gd5d6�d6e�Z4Gd7d8�d8e�Z5dmd9d:�Z6Gd;d<�d<e0�Z7Gd=d>�d>e0�Z8Gd?d@�d@e8�Z9GdAdB�dBe0�Z:GdCdD�dDe0�Z;GdEdF�dFe0�Z<GdGdH�dHe0�Z=e2dIdJ�Z>GdKdL�dLe>�Z?e2dMdN�Z@dOdPie@_Ae2dQdR�ZBe2dSdT�ZCdUdUdUdPdPdV�eC_AGdWdS�dSeC�ZDGdXd�de-�ZEeE�dYejF�eE�dZejF�eE�d[ejGe:�eE�d\ejHe8�eE�d]ejIe8�eE�d^ejJe8�eE�d_ejKe8�eE�d`ejLe9�eE�daejMe;�eE�dbejNeD�eE�dcee?�eE�ddeOe@�eE�d8e5e=�eE�d:e6eB�eE�d6e4e<�eEjdPe7dde�eEjdUddf�e�r>Gdgdh�dh�ZPGdidj�dje&�ZQGdkd�de-�ZRdS)n�BaseManager�SyncManager�	BaseProxy�Token�SharedMemoryManager�N)�getpid)�
format_exc�)�
connection)�	reduction�get_spawning_popen�ProcessError)�pool)�process)�util)�get_context)�
shared_memoryTFcCstj|j|��ffS�N)�array�typecode�tobytes)�a�r�0/usr/lib64/python3.8/multiprocessing/managers.py�reduce_array-srcCsg|]}tti|����qSr)�type�getattr��.0�namerrr�
<listcomp>1sr )�items�keys�valuescCstt|�ffSr)�list��objrrr�rebuild_as_list3sr'c@s4eZdZdZdZdd�Zdd�Zdd�Zd	d
�ZdS)rz3
    Type to uniquely identify a shared object
    ��typeid�address�idcCs||||_|_|_dSrr()�selfr)r*r+rrr�__init__BszToken.__init__cCs|j|j|jfSrr(�r,rrr�__getstate__EszToken.__getstate__cCs|\|_|_|_dSrr(�r,�staterrr�__setstate__HszToken.__setstate__cCsd|jj|j|j|jfS)Nz %s(typeid=%r, address=%r, id=%r))�	__class__�__name__r)r*r+r.rrr�__repr__Ks�zToken.__repr__N)	r4�
__module__�__qualname__�__doc__�	__slots__r-r/r2r5rrrrr<srcCs8|�||||f�|��\}}|dkr*|St||��dS)zL
    Send a message to manager using connection `c` and return response
    �#RETURNN)�send�recv�convert_to_error)�cr+�
methodname�args�kwds�kind�resultrrr�dispatchSs
rDcCsd|dkr|S|dkrRt|t�s4td�||t|����|dkrHtd|�St|�Sntd�|��SdS)N�#ERROR)�
#TRACEBACK�#UNSERIALIZABLEz.Result {0!r} (kind '{1}') type is {2}, not strrGzUnserializable message: %s
zUnrecognized message type {!r})�
isinstance�str�	TypeError�formatr�RemoteError�
ValueError)rBrCrrrr=]s
��
r=c@seZdZdd�ZdS)rLcCsdt|jd�dS)NzM
---------------------------------------------------------------------------
rzK---------------------------------------------------------------------------)rIr@r.rrr�__str__mszRemoteError.__str__N)r4r6r7rNrrrrrLlsrLcCs2g}t|�D] }t||�}t|�r|�|�q|S)z4
    Return a list of names of methods of `obj`
    )�dirr�callable�append)r&�tempr�funcrrr�all_methodsts
rTcCsdd�t|�D�S)zP
    Return a list of names of methods of `obj` which do not start with '_'
    cSsg|]}|ddkr|�qS)r�_rrrrrr �sz"public_methods.<locals>.<listcomp>)rTr%rrr�public_methodssrVc	@s�eZdZdZdddddddd	d
g	Zdd�Zd
d�Zdd�Zdd�Zdd�Z	dd�Z
dd�Zdd�Zeee
d�Z
dd�Zdd�Zd d!�Zd"d#�Zd$d%�Zd&e_d'd(�Zd)d*�Zd+d,�Zd-d.�Zd/S)0�ServerzM
    Server class which runs in a process controlled by a manager object
    �shutdown�create�accept_connection�get_methods�
debug_info�number_of_objects�dummy�incref�decrefcCsxt|t�std�|t|����||_t�|�|_t	|\}}||dd�|_
|j
j|_ddi|_i|_
i|_t��|_dS)Nz&Authkey {0!r} is type {1!s}, not bytes�)r*Zbacklog�0�Nr)rH�bytesrJrKr�registryr�AuthenticationString�authkey�listener_client�listenerr*�	id_to_obj�id_to_refcount�id_to_local_proxy_obj�	threading�Lock�mutex)r,rer*rg�
serializer�Listener�Clientrrrr-�s 
��

zServer.__init__c	Cs�t��|_|t��_zVtj|jd�}d|_|��z|j��sL|j�d�q4Wnttfk
rfYnXW5tjtjkr�t	�
d�tjt_tjt_t�
d�XdS)z(
        Run the server forever
        zresetting stdout, stderrr)�targetTr	N)rm�Event�
stop_eventr�current_process�_manager_server�sys�stdout�
__stdout__r�debug�
__stderr__�stderr�exit�Thread�accepter�daemon�start�is_set�wait�KeyboardInterrupt�
SystemExit)r,r�rrr�
serve_forever�s 




zServer.serve_forevercCsNz|j��}Wntk
r&YqYnXtj|j|fd�}d|_|��qdS)N�rsr@T)riZaccept�OSErrorrmr�handle_requestr�r�)r,r>�trrrr��s
zServer.acceptercCsLd}}}zTt�||j�t�||j�|��}|\}}}}||jksTtd|��t||�}Wntk
r~dt	�f}	Yn>Xz||f|�|�}Wntk
r�dt	�f}	Yn
Xd|f}	z|�
|	�Wnttk
�r>}
zTz|�
dt	�f�Wntk
�rYnXt�d|	�t�d|�t�d|
�W5d}
~
XYnX|�
�dS)z)
        Handle a new connection
        Nz%r unrecognizedrFr:zFailure to send message: %rz ... request was %r� ... exception was %r)r
Zdeliver_challengergZanswer_challenger<�public�AssertionErrorr�	Exceptionrr;r�info�close)r,r>�funcnamerC�request�ignorer@rArS�msg�errrr��s4zServer.handle_requestc
Cs�t�dt��j�|j}|j}|j}|j�	��s�zBd}}|�}|\}}}	}
z||\}}}Wn^t
k
r�}
z@z|j|\}}}Wn&t
k
r�}z|
�W5d}~XYnXW5d}
~
XYnX||kr�td|t
|�|f��t||�}z||	|
�}Wn,tk
�r"}zd|f}W5d}~XYnPX|�o4|�|d�}|�rj|�|||�\}}t||j|�}d||ff}nd|f}Wn�tk
�r�|dk�r�dt�f}nNz,|j|}|||||f|	�|
�}d|f}Wn tk
�r�dt�f}YnXYnPtk
�rt�dt��j�t�d	�Yn tk
�r<dt�f}YnXzDz||�Wn2tk
�r~}z|d
t�f�W5d}~XYnXWq$tk
�r�}z@t�dt��j�t�d|�t�d
|�|��t�d�W5d}~XYq$Xq$dS)zQ
        Handle requests from the proxies in a particular process/thread
        z$starting server thread to service %rNz+method %r of %r object is not in exposed=%rrE�#PROXYr:rFz$got EOF -- exiting thread serving %rrrGzexception in thread serving %rz ... message was %rr�r	)rr{rm�current_threadrr<r;rjrur��KeyErrorrl�AttributeErrorrrr��getrYrr*r�fallback_mapping�EOFErrorrxr~r�r�)r,�connr<r;rjr?r&r��identr@rA�exposed�	gettypeid�keZ	second_keZfunction�resr�r�r)ZridentZrexposed�tokenZ
fallback_funcrCrrr�serve_client�s���(��


����$�zServer.serve_clientcCs|Srr�r,r�r�r&rrr�fallback_getvalue5szServer.fallback_getvaluecCst|�Sr�rIr�rrr�fallback_str8szServer.fallback_strcCst|�Sr)�reprr�rrr�
fallback_repr;szServer.fallback_repr)rNr5�	#GETVALUEcCsdSrr�r,r>rrrr^DszServer.dummyc
Cs�|j�tg}t|j���}|��|D]<}|dkr&|�d||j|t|j|d�dd�f�q&d�|�W5QR�SQRXdS)zO
        Return some info --- useful to spot problems with refcounting
        rbz  %s:       refcount=%s
    %srN�K�
)	ror$rkr"�sortrQrIrj�join)r,r>rCr"r�rrrr\Gs
��zServer.debug_infocCs
t|j�S)z*
        Number of shared objects
        )�lenrkr�rrrr]WszServer.number_of_objectscCsLz:zt�d�|�d�Wnddl}|��YnXW5|j��XdS)z'
        Shutdown this process
        z!manager received shutdown message�r:NrN)ru�setrr{r;�	traceback�	print_exc)r,r>r�rrrrX^s
zServer.shutdownc	Os�t|�dkr|^}}}}n�|s(td��n�d|krDtdt|�d��|�d�}t|�dkr~|^}}}ddl}|jd	tdd
�nFd|kr�tdt|�d��|�d�}|^}}ddl}|jdtdd
�t|�}|j��|j|\}}}}	|dk�r|�st|�dk�rt	d
��|d}
n
|||�}
|dk�r2t
|
�}|dk	�rlt|t��s\td�
|t|����t|�t|�}dt|
�}t�d||�|
t|�|f|j|<||jk�r�d|j|<W5QRX|�||�|t|�fS)z>
        Create a new shared object and return its id
        �z8descriptor 'create' of 'Server' object needs an argumentr)�7create expected at least 2 positional arguments, got %dr	�rNz2Passing 'typeid' as keyword argument is deprecated)�
stacklevelr>z-Passing 'c' as keyword argument is deprecatedz4Without callable, must have one non-keyword argumentz,Method_to_typeid {0!r}: type {1!s}, not dictz%xz&%r callable returned object with id %r)r�rJ�pop�warnings�warn�DeprecationWarning�tuplerorerMrVrH�dictrKrr$r+rr{r�rjrkr_)r@rAr,r>r)r�rPr��method_to_typeid�	proxytyper&r�rrrrYksp

�

�
�
��

�



��z
Server.createz$($self, c, typeid, /, *args, **kwds)cCst|j|jd�S)zL
        Return the methods of the shared object indicated by token
        r	)r�rjr+)r,r>r�rrrr[�szServer.get_methodscCs"|t��_|�d�|�|�dS)z=
        Spawn a new thread to serve this connection
        r�N)rmr�rr;r�)r,r>rrrrrZ�s

zServer.accept_connectioncCs�|j��z|j|d7<Wnhtk
r�}zJ||jkrrd|j|<|j||j|<|j|\}}}t�d|�n|�W5d}~XYnXW5QRXdS)Nr	z&Server re-enabled tracking & INCREF %r)rorkr�rlrjrr{)r,r>r�r�r&r�r�rrrr_�s

�z
Server.increfc	Cs�||jkr$||jkr$t�d|�dS|j�Z|j|dkrXtd�||j||j|���|j|d8<|j|dkr�|j|=W5QRX||jkr�d|j|<t�d|�|j�|j|=W5QRXdS)NzServer DECREF skipping %rrz+Id {0!s} ({1!r}) has refcount {2:n}, not 1+r	)NrNzdisposing of obj with id %r)rkrlrr{ror�rKrj)r,r>r�rrrr`�s,
���

z
Server.decrefN)r4r6r7r8r�r-r�r�r�r�r�r�r�r�r^r\r]rXrY�__text_signature__r[rZr_r`rrrrrW�s<�
"Q�
=rWc@seZdZdgZdZdZdZdS)�State�valuerr	r�N)r4r6r7r9�INITIAL�STARTED�SHUTDOWNrrrrr��sr�)�pickleZ	xmlrpclibc@s�eZdZdZiZeZd"dd�Zdd�Zdd	�Z	d#dd�Z
ed$d
d��Zdd�Z
d%dd�Zdd�Zdd�Zdd�Zdd�Zedd��Zedd��Zed&d d!��ZdS)'rz!
    Base class for managers
    Nr�cCs\|dkrt��j}||_t�|�|_t�|_tj|j_	||_
t|\|_|_
|pTt�|_dSr)rrvrg�_addressrf�_authkeyr��_stater�r��_serializerrhZ	_Listener�_Clientr�_ctx)r,r*rgrpZctxrrrr-s

zBaseManager.__init__cCsf|jjtjkrP|jjtjkr&td��n*|jjtjkr>td��ntd�|jj���t|j	|j
|j|j�S)zX
        Return server object with serve_forever() method and address attribute
        �Already started server�Manager has shut down�Unknown state {!r})
r�r�r�r�r�r
r�rKrW�	_registryr�r�r�r.rrr�
get_servers

�
�zBaseManager.get_servercCs8t|j\}}||j|jd�}t|dd�tj|j_dS)z>
        Connect manager object to the server process
        �rgNr^)	rhr�r�r�rDr�r�r�r�)r,rqrrr�rrr�connectszBaseManager.connectrc	Cs4|jjtjkrP|jjtjkr&td��n*|jjtjkr>td��ntd�|jj���|dk	rht|�sht	d��t
jdd�\}}|jj
t|�j|j|j|j|j|||fd�|_d	�d
d�|jjD��}t|�jd||j_|j��|��|��|_|��tj|j_tj|t|�j|j|j|j|j|jfd
d�|_ dS)z@
        Spawn a server process for this manager object
        r�r�r�Nzinitializer must be a callableF)Zduplexr��:css|]}t|�VqdSrr�)r�irrr�	<genexpr>Asz$BaseManager.start.<locals>.<genexpr>�-r�r@Zexitpriority)!r�r�r�r�r�r
r�rKrPrJr
ZPiper�ZProcessr�_run_serverr�r�r�r��_processr�Z	_identityr4rr�r�r<r�Finalize�_finalize_managerr�rX)r,�initializer�initargs�reader�writerr�rrrr�(sH

���


��zBaseManager.startc	Cs^t�tjtj�|dk	r ||�|�||||�}|�|j�|��t�d|j�|�	�dS)z@
        Create a server, report its address and run it
        Nzmanager serving at %r)
�signal�SIGINT�SIG_IGN�_Serverr;r*r�rr�r�)	�clsrer*rgrpr�r�r��serverrrrr�SszBaseManager._run_servercOsd|jjtjkstd��|j|j|jd�}zt	|dd|f||�\}}W5|��Xt
||j|�|fS)zP
        Create a new shared object; return the token and exposed tuple
        zserver not yet startedr�NrY)r�r�r�r�r�r�r�r�r�rDr)r,r)r@rAr�r+r�rrr�_createjs
zBaseManager._createcCs*|jdk	r&|j�|�|j��s&d|_dS)zC
        Join the manager process (if it has been spawned)
        N)r�r��is_alive�r,�timeoutrrrr�vs

zBaseManager.joincCs2|j|j|jd�}zt|dd�W�S|��XdS)zS
        Return some info about the servers shared objects and connections
        r�Nr\�r�r�r�r�rD�r,r�rrr�_debug_infoszBaseManager._debug_infocCs2|j|j|jd�}zt|dd�W�S|��XdS)z5
        Return the number of shared objects
        r�Nr]r�r�rrr�_number_of_objects�szBaseManager._number_of_objectscCsj|jjtjkr|��|jjtjkrf|jjtjkr<td��n*|jjtjkrTtd��ntd�|jj���|S)NzUnable to start serverr�r�)	r�r�r�r�r�r�r
r�rKr.rrr�	__enter__�s

�zBaseManager.__enter__cCs|��dSr)rX�r,�exc_typeZexc_valZexc_tbrrr�__exit__�szBaseManager.__exit__cCs�|��r�t�d�z,|||d�}zt|dd�W5|��XWntk
rRYnX|jdd�|��r�t�d�t|d�r�t�d	�|��|jd
d�|��r�t�d�t	j
|_ztj
|=Wntk
r�YnXdS)zQ
        Shutdown the manager process; will be registered as a finalizer
        z#sending shutdown message to managerr�NrXg�?)r�zmanager still alive�	terminatez'trying to `terminate()` manager processg�������?z#manager still alive after terminate)r�rr�r�rDr�r��hasattrr�r�r�r�r�_address_to_localr�)rr*rgr1r�r�rrrr��s.




zBaseManager._finalize_managercCs|jSr)r�r.rrrr*�szBaseManager.addressTc
s�d|jkr|j��|_�dkr"t�|p0t�dd�}|p@t�dd�}|r�t|���D]8\}}t|�tksrt	d|��t|�tksRt	d|��qR|||�f|j�<|r‡�fdd�}	�|	_
t|�|	�dS)z9
        Register a typeid with the manager type
        r�N�	_exposed_�_method_to_typeid_z%r is not a stringcs`t�d��|j�f|�|�\}}�||j||j|d�}|j|j|jd�}t|dd|jf�|S)Nz)requesting creation of a shared %r object��managerrgr�r�r`)	rr{r�r�r�r�r*rDr+)r,r@rAr�Zexp�proxyr��r�r)rrrR�s�z"BaseManager.register.<locals>.temp)�__dict__r��copy�	AutoProxyrr$r!rrIr�r4�setattr)
r�r)rPr�r�r��
create_method�keyr�rRrr�r�register�s*

��

zBaseManager.register)NNr�N)Nr)Nr)N)NNNNT)r4r6r7r8r�rWr�r-r�r�r��classmethodr�r�r�r�r�r�r��staticmethodr��propertyr*rrrrrr�s8�
	
+�
	




�c@seZdZdd�Zdd�ZdS)�ProcessLocalSetcCst�|dd��dS)NcSs|��Sr)�clearr%rrr�<lambda>��z*ProcessLocalSet.__init__.<locals>.<lambda>)r�register_after_forkr.rrrr-�szProcessLocalSet.__init__cCst|�dfSrc)rr.rrr�
__reduce__�szProcessLocalSet.__reduce__N)r4r6r7r-rrrrrr	�sr	c@s�eZdZdZiZe��Zddd�Zdd�Z	d	ifd
d�Z
dd
�Zdd�Ze
dd��Zdd�Zdd�Zdd�Zdd�Zdd�ZdS)rz.
    A base for proxies of shared objects
    NTFc		Cs�tj�8tj�|jd�}|dkr:t��t�f}|tj|j<W5QRX|d|_|d|_	||_
|j
j|_||_
||_t|d|_||_|dk	r�t�|�|_n"|j
dk	r�|j
j|_nt��j|_|r�|��t�|tj�dS)Nrr	)r�_mutexr�r�r*rZForkAwareLocalr	�_tls�_idset�_tokenr+�_id�_managerr�rhr��_owned_by_managerrrfr�rvrg�_increfr
�_after_fork)	r,r�rpr�rgr�r_�
manager_ownedZ	tls_idsetrrrr-s*



zBaseProxy.__init__cCsdt�d�t��j}t��jdkr4|dt��j7}|j|jj	|j
d�}t|dd|f�||j_
dS)Nzmaking connection to managerZ
MainThread�|r�rZ)rr{rrvrrmr�r�rr*r�rDrr
)r,rr�rrr�_connect-s

zBaseProxy._connectrcCs�z|jj}Wn6tk
rBt�dt��j�|��|jj}YnX|�	|j
|||f�|��\}}|dkrp|S|dkr�|\}}|jj
|jd}	|jj|_|	||j|j|j|d�}
|j|j|jd�}t|dd|jf�|
St||��dS)	zV
        Try to call a method of the referent and return a copy of the result
        z#thread %r does not own a connectionr:r����r�r�Nr`)rr
r�rr{rmr�rrr;rr<rr�r)rr*r�r�r�rDr+r=)r,r?r@rAr�rBrCr�r�r�r�rrr�_callmethod6s6�
�zBaseProxy._callmethodcCs
|�d�S)z9
        Get a copy of the value of the referent
        r��rr.rrr�	_getvalueTszBaseProxy._getvaluec	Cs�|jrt�d|jj�dS|j|jj|jd�}t|dd|j	f�t�d|jj�|j
�|j	�|joj|jj
}tj|tj|j|j||j|j
|jfdd�|_dS)Nz%owned_by_manager skipped INCREF of %rr�r_z	INCREF %r�
r�)rrr{rr+r�r*r�rDrr�addrr�r�r�_decrefrZ_close)r,r�r1rrrrZs$
��zBaseProxy._increfc
Cs�|�|j�|dks |jtjkr�z2t�d|j�||j|d�}t|dd|jf�Wq�t	k
r�}zt�d|�W5d}~XYq�Xnt�d|j�|s�t
|d�r�t�dt��j
�|j��|`dS)Nz	DECREF %rr�r`z... decref failed %sz%DECREF %r -- manager already shutdownr
z-thread %r has no more proxies so closing conn)�discardr+r�r�r�rr{r*rDr�r�rmr�rr
r�)r�rgr1ZtlsZidsetr�r�r�rrrr!ns �
zBaseProxy._decrefc
CsHd|_z|��Wn0tk
rB}zt�d|�W5d}~XYnXdS)Nzincref failed: %s)rrr�rr�)r,r�rrrr�s
zBaseProxy._after_forkcCs^i}t�dk	r|j|d<t|dd�rB|j|d<tt|j|j|ffStt|�|j|j|ffSdS)Nrg�_isautoFr�)	rr�rr��RebuildProxyrrr�r�r,rArrrr�s


��zBaseProxy.__reduce__cCs|��Sr)r)r,Zmemorrr�__deepcopy__�szBaseProxy.__deepcopy__cCsdt|�j|jjt|�fS)Nz<%s object, typeid %r at %#x>)rr4rr)r+r.rrrr5�s�zBaseProxy.__repr__cCs:z|�d�WStk
r4t|�dd�dYSXdS)zV
        Return representation of the referent (or a fall-back if that fails)
        r5Nrz; '__str__()' failed>)rr�r�r.rrrrN�szBaseProxy.__str__)NNNTF)r4r6r7r8r�rZForkAwareThreadLockrr-rrrrrr!rrr&r5rNrrrrr�s(�
)	

cCs�tt��dd�}|rT|j|jkrTt�d|�d|d<|j|jkrT|j|j|j|j<|�	dd�optt��dd�}|||fd|i|��S)	z5
    Function used for unpickling proxy objects.
    rwNz*Rebuild a proxy owned by manager, token=%rTrr_Z_inheritingF)
rrrvr*rr{r+rlrjr�)rSr�rprAr�r_rrrr$�s
�
�r$cCspt|�}z|||fWStk
r*YnXi}|D]}td||f|�q4t|tf|�}||_||||f<|S)zB
    Return a proxy type whose methods are given by `exposed`
    zOdef %s(self, /, *args, **kwds):
        return self._callmethod(%r, args, kwds))r�r��execrrr�)rr��_cacheZdicZmeth�	ProxyTyperrr�
MakeProxyType�s ��r*c
Cs�t|d}|dkrB||j|d�}zt|dd|f�}W5|��X|dkrX|dk	rX|j}|dkrjt��j}td|j	|�}||||||d�}	d|	_
|	S)z*
    Return an auto-proxy for `token`
    r	Nr�r[z
AutoProxy[%s])r�rgr_T)rhr*r�rDr�rrvrgr*r)r#)
r�rpr�rgr�r_r�r�r)r�rrrr�s 


�rc@seZdZdd�Zdd�ZdS)�	NamespacecKs|j�|�dSr)r��updater%rrrr-�szNamespace.__init__cCsZt|j���}g}|D]$\}}|�d�s|�d||f�q|��d|jjd�|�fS)NrUz%s=%rz%s(%s)z, )	r$r�r!�
startswithrQr�r3r4r�)r,r!rRrr�rrrr5�s
zNamespace.__repr__N)r4r6r7r-r5rrrrr+�sr+c@s8eZdZddd�Zdd�Zdd�Zdd	�Zeee�Zd
S)�ValueTcCs||_||_dSr)�	_typecode�_value)r,rr��lockrrrr-szValue.__init__cCs|jSr�r0r.rrrr�sz	Value.getcCs
||_dSrr2�r,r�rrrr�
sz	Value.setcCsdt|�j|j|jfS)Nz
%s(%r, %r))rr4r/r0r.rrrr5szValue.__repr__N)T)	r4r6r7r-r�r�r5rr�rrrrr.s

r.cCst�||�Sr)r)r�sequencer1rrr�Arraysr5c@s8eZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�ZdS)
�
IteratorProxy)�__next__r;�throwr�cCs|Srrr.rrr�__iter__szIteratorProxy.__iter__cGs|�d|�S)Nr7r�r,r@rrrr7szIteratorProxy.__next__cGs|�d|�S)Nr;rr:rrrr;szIteratorProxy.sendcGs|�d|�S)Nr8rr:rrrr8szIteratorProxy.throwcGs|�d|�S)Nr�rr:rrrr�!szIteratorProxy.closeN)	r4r6r7r�r9r7r;r8r�rrrrr6sr6c@s2eZdZdZddd�Zdd�Zdd	�Zd
d�ZdS)
�
AcquirerProxy)�acquire�releaseTNcCs"|dkr|fn||f}|�d|�S�Nr<r)r,Zblockingr�r@rrrr<'szAcquirerProxy.acquirecCs
|�d�S�Nr=rr.rrrr=*szAcquirerProxy.releasecCs
|�d�Sr>rr.rrrr�,szAcquirerProxy.__enter__cCs
|�d�Sr?rr�rrrr�.szAcquirerProxy.__exit__)TN)r4r6r7r�r<r=r�r�rrrrr;%s

r;c@s6eZdZdZddd�Zd
dd�Zdd	�Zdd
d�ZdS)�ConditionProxy)r<r=r��notify�
notify_allNcCs|�d|f�S�Nr�rr�rrrr�4szConditionProxy.waitr	cCs|�d|f�S)NrAr)r,�nrrrrA6szConditionProxy.notifycCs
|�d�S)NrBrr.rrrrB8szConditionProxy.notify_allcCsd|�}|r|S|dk	r$t��|}nd}d}|s`|dk	rN|t��}|dkrNq`|�|�|�}q,|S)Nr)�time�	monotonicr�)r,Z	predicater�rCZendtimeZwaittimerrr�wait_for:s
zConditionProxy.wait_for)N)r	)N)r4r6r7r�r�rArBrGrrrrr@2s


r@c@s2eZdZdZdd�Zdd�Zdd�Zdd	d
�ZdS)�
EventProxy)r�r�r
r�cCs
|�d�S)Nr�rr.rrrr�OszEventProxy.is_setcCs
|�d�S�Nr�rr.rrrr�QszEventProxy.setcCs
|�d�S)Nr
rr.rrrr
SszEventProxy.clearNcCs|�d|f�SrCrr�rrrr�UszEventProxy.wait)N)r4r6r7r�r�r�r
r�rrrrrHMs
rHc@sNeZdZdZddd�Zdd�Zdd�Zed	d
��Zedd��Z	ed
d��Z
dS)�BarrierProxy)�__getattribute__r��abort�resetNcCs|�d|f�SrCrr�rrrr�[szBarrierProxy.waitcCs
|�d�S)NrLrr.rrrrL]szBarrierProxy.abortcCs
|�d�S)NrMrr.rrrrM_szBarrierProxy.resetcCs|�dd�S)NrK)�partiesrr.rrrrNaszBarrierProxy.partiescCs|�dd�S)NrK)�	n_waitingrr.rrrrOdszBarrierProxy.n_waitingcCs|�dd�S)NrK)�brokenrr.rrrrPgszBarrierProxy.broken)N)r4r6r7r�r�rLrMrrNrOrPrrrrrJYs


rJc@s(eZdZdZdd�Zdd�Zdd�ZdS)	�NamespaceProxy)rK�__setattr__�__delattr__cCs0|ddkrt�||�St�|d�}|d|f�S)NrrUrrK)�objectrK�r,r�
callmethodrrr�__getattr__nszNamespaceProxy.__getattr__cCs4|ddkrt�|||�St�|d�}|d||f�S)NrrUrrR)rTrRrK)r,rr�rVrrrrRsszNamespaceProxy.__setattr__cCs0|ddkrt�||�St�|d�}|d|f�S)NrrUrrS)rTrSrKrUrrrrSxszNamespaceProxy.__delattr__N)r4r6r7r�rWrRrSrrrrrQlsrQc@s*eZdZdZdd�Zdd�Zeee�ZdS)�
ValueProxy)r�r�cCs
|�d�S)Nr�rr.rrrr��szValueProxy.getcCs|�d|f�SrIrr3rrrr��szValueProxy.setN)r4r6r7r�r�r�rr�rrrrrXsrX�
BaseListProxy)�__add__�__contains__�__delitem__�__getitem__�__len__�__mul__�__reversed__�__rmul__�__setitem__rQ�count�extend�index�insertr��remove�reverser��__imul__c@seZdZdd�Zdd�ZdS)�	ListProxycCs|�d|f�|S)Nrdrr3rrr�__iadd__�szListProxy.__iadd__cCs|�d|f�|S)Nrirr3rrrri�szListProxy.__imul__N)r4r6r7rkrirrrrrj�srj�	DictProxy)r[r\r]r9r^rbr
rr�r!r"r��popitem�
setdefaultr,r#r9�Iterator�
ArrayProxy)r^r]rb�	PoolProxy)Zapply�apply_asyncr��imap�imap_unorderedr��map�	map_async�starmap�
starmap_asyncr�ZAsyncResult)rrrvrxrsrtc@seZdZdd�Zdd�ZdS)rqcCs|Srrr.rrrr��szPoolProxy.__enter__cCs|��dSr)r�r�rrrr��szPoolProxy.__exit__N)r4r6r7r�r�rrrrrq�sc@seZdZdZdS)ra(
    Subclass of `BaseManager` which supports a number of shared object types.

    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.

    The `multiprocessing.Manager()` function creates started instances of
    this class.
    N)r4r6r7r8rrrrr�s�QueueZ
JoinableQueuertrn�RLock�	Semaphore�BoundedSemaphore�	Condition�Barrier�Poolr$r�)r�r)rc@sLeZdZdZgfdd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Z	dd�Z
dS)�_SharedMemoryTrackerz+Manages one or more shared memory segments.cCs||_||_dSr�Zshared_memory_context_name�
segment_names)r,rr�rrrr-�sz_SharedMemoryTracker.__init__cCs(t�d|�dt����|j�|�dS)z6Adds the supplied shared memory block name to tracker.zRegister segment � in pid N)rr{rr�rQ�r,�segment_namerrr�register_segment�sz%_SharedMemoryTracker.register_segmentcCsBt�d|�dt����|j�|�t�|�}|��|��dS)z�Calls unlink() on the shared memory block with the supplied name
            and removes it from the list of blocks being tracked.zDestroy segment r�N)	rr{rr�rgr�SharedMemoryr��unlink)r,r�Zsegmentrrr�destroy_segment�s

z$_SharedMemoryTracker.destroy_segmentcCs"|jdd�D]}|�|�qdS)z<Calls destroy_segment() on all tracked shared memory blocks.N)r�r�r�rrrr��sz_SharedMemoryTracker.unlinkcCs(t�d|jj�dt����|��dS)NzCall z.__del__ in )rr{r3r4rr�r.rrr�__del__�sz_SharedMemoryTracker.__del__cCs|j|jfSrr�r.rrrr/�sz!_SharedMemoryTracker.__getstate__cCs|j|�dSr)r-r0rrrr2sz!_SharedMemoryTracker.__setstate__N)r4r6r7r8r-r�r�r�r�r/r2rrrrr��s	r�c@sReZdZejdddgZdd�Zdd�Zde_d	d
�Zdd�Z	d
d�Z
dd�ZdS)�SharedMemoryServer�
track_segment�release_segment�
list_segmentscOsZtj|f|�|�|j}t|t�r,t�|�}td|�dt����|_	t
�dt����dS)NZshm_rUz"SharedMemoryServer started by pid )rWr-r*rHrd�os�fsdecoder�r�shared_memory_contextrr{)r,r@�kwargsr*rrrr-
s

�zSharedMemoryServer.__init__cOstt|�dkr|d}n4d|kr(|d}n"|s6td��ntdt|�d��ttj|dd�rhtj|d	<tj||�S)
z�Create a new distributed-shared object (not backed by a shared
            memory block) and return its id to be used in a Proxy Object.r�r�r)zDdescriptor 'create' of 'SharedMemoryServer' object needs an argumentr�r	rZ_shared_memory_proxyr�)r�rJr�r,rer�rWrY)r@r�Ztypeodr)rrrrYs



�
zSharedMemoryServer.createz&($self, c, typeid, /, *args, **kwargs)cCs|j��t�||�S)zACall unlink() on all tracked shared memory, terminate the Server.)r�r�rWrXr�rrrrX)s
zSharedMemoryServer.shutdowncCs|j�|�dS)z?Adds the supplied shared memory block name to Server's tracker.N)r�r��r,r>r�rrrr�.sz SharedMemoryServer.track_segmentcCs|j�|�dS)z�Calls unlink() on the shared memory block with the supplied name
            and removes it from the tracker instance inside the Server.N)r�r�r�rrrr�2sz"SharedMemoryServer.release_segmentcCs|jjS)zbReturns a list of names of shared memory blocks that the Server
            is currently tracking.)r�r�r�rrrr�7sz SharedMemoryServer.list_segmentsN)r4r6r7rWr�r-rYr�rXr�r�r�rrrrr�s�
r�c@s<eZdZdZeZdd�Zdd�Zdd�Zdd	�Z	d
d�Z
dS)
ra�Like SyncManager but uses SharedMemoryServer instead of Server.

        It provides methods for creating and returning SharedMemory instances
        and for creating a list-like object (ShareableList) backed by shared
        memory.  It also provides methods that create and return Proxy Objects
        that support synchronization across processes (i.e. multi-process-safe
        locks and semaphores).
        cOsNtjdkrddlm}|��tj|f|�|�t�|j	j
�dt����dS)N�posixr	)�resource_trackerz created by pid )r�r�r�Zensure_runningrr-rr{r3r4r)r,r@r�r�rrrr-Is

zSharedMemoryManager.__init__cCst�|jj�dt����dS)Nz.__del__ by pid )rr{r3r4rr.rrrr�UszSharedMemoryManager.__del__cCsh|jjtjkrP|jjtjkr&td��n*|jjtjkr>td��ntd�|jj���|�|j	|j
|j|j�S)z@Better than monkeypatching for now; merge into Server ultimatelyz"Already started SharedMemoryServerz!SharedMemoryManager has shut downr�)
r�r�r�r�r�r
r�rKr�r�r�r�r�r.rrrr�Ys

��zSharedMemoryManager.get_servercCsx|j|j|jd��\}tjdd|d�}zt|dd|jf�Wn.tk
rh}z|��|�W5d}~XYnXW5QRX|S)zoReturns a new SharedMemory instance with the specified size in
            bytes, to be tracked by the manager.r�NT)rY�sizer�)	r�r�r�rr�rDr�
BaseExceptionr�)r,r�r�Zsmsr�rrrr�fs z SharedMemoryManager.SharedMemorycCsv|j|j|jd��Z}t�|�}zt|dd|jjf�Wn0tk
rf}z|j�	�|�W5d}~XYnXW5QRX|S)z�Returns a new ShareableList instance populated with the values
            from the input sequence, to be tracked by the manager.r�Nr�)
r�r�r�r�
ShareableListrDZshmrr�r�)r,r4r�Zslr�rrrr�rs

 z!SharedMemoryManager.ShareableListN)r4r6r7r8r�r�r-r�r�r�r�rrrrr=s	
)NNNT)T)S�__all__rxrmr�rZqueuerEr�rr�rr�r
�contextrrr
rrrrrZ	HAS_SHMEM�ImportErrorrrZ
view_typesr$r'Z	view_typerTrrDr=r�rLrTrVrWr�rqrrZXmlListenerZ	XmlClientrhrr�r	rr$r*rr+r.r5r6r;r@rHrJrQrXrYrjrlr�rpZ
BasePoolProxyrqrryrtrnrzr{r|r}r~rr�r�r�rrrrr�<module>s��


c

�	w
4�


	
	
�

�

�%8PK{��\s=q���__pycache__/heap.cpython-38.pycnu�[���U

e5dj-�@s�ddlZddlmZddlZddlZddlZddlZddlZddlm	Z	m
Z
ddlmZdgZ
ejdkr�ddlZGdd	�d	e�Zn,Gd
d	�d	e�Zdd�Zd
d�Ze	�ee�Gdd�de�ZGdd�de�ZdS)�N)�defaultdict�)�	reduction�assert_spawning)�util�
BufferWrapperZwin32c@s0eZdZdZe��Zdd�Zdd�Zdd�Z	dS)	�ArenazL
        A shared memory area backed by anonymous memory (Windows).
        cCsx||_td�D]B}dt��t|j�f}tjd||d�}t��dkrHqZ|�	�qt
d��||_||_|j|jf|_
dS)N�dz	pym-%d-%s����ZtagnamerzCannot find name for new mmap)�size�range�os�getpid�next�_rand�mmap�_winapiZGetLastError�close�FileExistsError�name�buffer�_state)�selfr�irZbuf�r�,/usr/lib64/python3.8/multiprocessing/heap.py�__init__&s
�Arena.__init__cCst|�|jS�N)rr)rrrr�__getstate__5szArena.__getstate__cCs,|\|_|_|_tjd|j|jd�|_dS)Nr
r)rrrrr)r�staterrr�__setstate__9szArena.__setstate__N)
�__name__�
__module__�__qualname__�__doc__�tempfileZ_RandomNameSequencerrr r"rrrrrs
rc@s8eZdZdZejdkrdgZngZd
dd�Zdd�Zd	S)rzJ
        A shared memory area backed by a temporary file (POSIX).
        Zlinuxz/dev/shmr
cCsx||_||_|dkrbtjdt��|�|�d�\|_}t�|�t�	|tj
|jf�t�|j|�t�|j|j�|_
dS)Nr
zpym-%d-)�prefix�dir)r�fdr'Zmkstemprr�_choose_dir�unlinkr�Finalizer�	ftruncaterr)rrr*rrrrrMs
�
rcCs6|jD]&}t�|�}|j|j|kr|Sqt��Sr)�_dir_candidatesr�statvfs�f_bavail�f_frsizerZget_temp_dir)rr�d�strrrr+[s



zArena._choose_dirN)r
)	r#r$r%r&�sys�platformr/rr+rrrrrCs

cCs(|jdkrtd��t|jt�|j�ffS)Nr
zDArena is unpicklable because forking was enabled when it was created)r*�
ValueError�
rebuild_arenarrZDupFd)�arrr�reduce_arenads
r:cCst||���Sr)r�detach)rZdupfdrrrr8jsr8c@szeZdZdZdZdZejfdd�Ze	dd��Z
dd�Zd	d
�Zdd�Z
d
d�Zdd�Zdd�Zdd�Zdd�Zdd�ZdS)�Heap�i@cCsXt��|_t��|_||_g|_i|_i|_	i|_
tt�|_
g|_g|_d|_d|_dS�Nr)rr�_lastpid�	threadingZLock�_lock�_size�_lengths�_len_to_seq�_start_to_block�_stop_to_blockr�set�_allocated_blocks�_arenas�_pending_free_blocks�
_n_mallocs�_n_frees)rrrrrr{s


z
Heap.__init__cCs|d}|||@S)Nrr)�nZ	alignment�maskrrr�_roundup�sz
Heap._roundupcCsZ|�t|j|�tj�}|j|jkr0|jd9_t�d|�t|�}|j	�
|�|d|fS)N�z"allocating a new mmap of length %dr)rO�maxrBr�PAGESIZE�_DOUBLE_ARENA_SIZE_UNTILr�inforrI�append)rr�length�arenarrr�
_new_arena�szHeap._new_arenacCs�|j}||jkrdS|j�|�}|r(t�|j|df=|j||f=|j�|�|j	|}|�|d|f�|s~|j	|=|j
�|�dSr>)r�_DISCARD_FREE_SPACE_LARGER_THANrH�pop�AssertionErrorrErFrI�removerDrC)rrWrV�blocks�seqrrr�_discard_arena�s

zHeap._discard_arenac	Cs|t�|j|�}|t|j�kr&|�|�S|j|}|j|}|��}|sV|j|=|j|=|\}}}|j||f=|j||f=|Sr)	�bisectZbisect_leftrC�lenrXrDrZrErF)	rrrrVr^�blockrW�start�stoprrr�_malloc�s



zHeap._mallocc	Cs�|\}}}z|j||f}Wntk
r0YnX|�|�\}}z|j||f}Wntk
rfYnX|�|�\}}|||f}||}z|j|�|�Wn.tk
r�|g|j|<t�|j|�YnX||j||f<||j||f<dSr)	rF�KeyError�_absorbrErDrUr`ZinsortrC)	rrbrWrcrdZ
prev_block�_Z
next_blockrVrrr�_add_free_block�s(

zHeap._add_free_blockcCs^|\}}}|j||f=|j||f=||}|j|}|�|�|sV|j|=|j�|�||fSr)rErFrDr\rC)rrbrWrcrdrVr^rrrrg�s


zHeap._absorbcCs4|\}}}|j|}|�||f�|s0|�|�dSr)rHr\r_)rrbrWrcrdr]rrr�_remove_allocated_block�s


zHeap._remove_allocated_blockcCsBz|j��}Wntk
r&Yq>YnX|�|�|�|�qdSr)rJrZ�
IndexErrorrirj�rrbrrr�_free_pending_blockss

zHeap._free_pending_blockscCs~t��|jkr$td�t��|j���|j�d�s>|j�|�n<z.|j
d7_
|��|�|�|�
|�W5|j�	�XdS)Nz$My pid ({0:n}) is not last pid {1:n}Fr)rrr?r7�formatrA�acquirerJrU�releaserLrmrirjrlrrr�frees
��
z	Heap.freec
Cs�|dkrtd�|���tj|kr.td�|���t��|jkrD|��|j	��|j
d7_
|��|�t
|d�|j�}|�|�\}}}||}||kr�|�|||f�|j|�||f�|||fW5QR�SQRXdS)Nr�Size {0:n} out of range�Size {0:n} too larger)r7rnr5�maxsize�
OverflowErrorrrr?rrArKrmrOrQ�
_alignmentrerirH�add)rrrWrcrdZ	real_stoprrr�malloc(s 
zHeap.mallocN)r#r$r%rvrYrSrrRr�staticmethodrOrXr_rerirgrjrmrqrxrrrrr<ss

r<c@s"eZdZe�Zdd�Zdd�ZdS)rcCs^|dkrtd�|���tj|kr.td�|���tj�|�}||f|_t	j
|tjj|fd�dS)Nrrrrs)�args)r7rnr5rtrur�_heaprxrrr-rq)rrrbrrrrFs

zBufferWrapper.__init__cCs&|j\\}}}}t|j�|||�Sr)r�
memoryviewr)rrWrcrdrrrr�create_memoryviewOszBufferWrapper.create_memoryviewN)r#r$r%r<r{rr}rrrrrBs	)r`�collectionsrrrr5r'r@�contextrr�r�__all__r6r�objectrr:r8�registerr<rrrrr�<module>
s&
$!PPK{��\�Wq(q((__pycache__/process.cpython-38.opt-1.pycnu�[���U

e5d�.�@s8ddddgZddlZddlZddlZddlZddlZddlmZzej�	e�
��ZWnek
rldZYnXdd�Z
dd�Zd	d�Zd
d�ZGdd�de�ZGd
d�de�ZGdd�de�ZGdd�de�Zdae�ae�d�ae�a[iZeej� ��D]0\Z!Z"e!dd�dkr�de!kr�de!��ee"<q�e�Z#dS)�BaseProcess�current_process�active_children�parent_process�N)�WeakSetcCstS)z@
    Return process object representing the current process
    )�_current_process�rr�//usr/lib64/python3.8/multiprocessing/process.pyr%scCst�tt�S)zN
    Return list of process objects corresponding to live child processes
    )�_cleanup�list�	_childrenrrrr	r+scCstS)z?
    Return process object representing the parent process
    )�_parent_processrrrr	r3scCs*tt�D]}|j��dk	rt�|�qdS�N)rr�_popen�poll�discard)�prrr	r
=sr
c@s�eZdZdZdd�Zddddifdd�dd�Zd	d
�Zdd�Zd
d�Zdd�Z	dd�Z
d,dd�Zdd�Zdd�Z
edd��Zejdd��Zedd��Zejdd��Zedd ��Zejd!d ��Zed"d#��Zed$d%��ZeZed&d'��Zd(d)�Zd-d*d+�ZdS).rz�
    Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`
    cCst�dSr)�NotImplementedError��selfrrr	�_PopenMszBaseProcess._PopenNr)�daemoncCs�tt�}tj|f|_tj��|_t��|_tj	|_
d|_d|_||_
t|�|_t|�|_|p�t|�jdd�dd�|jD��|_|dk	r�||_t�|�dS)NF�-�:css|]}t|�VqdSr)�str)�.0�irrr	�	<genexpr>^sz'BaseProcess.__init__.<locals>.<genexpr>)�next�_process_counterr�	_identity�_config�copy�os�getpid�_parent_pid�name�_parent_namer�_closed�_target�tuple�_args�dict�_kwargs�type�__name__�join�_namer�	_dangling�add)r�group�targetr&�args�kwargsr�countrrr	�__init__Ps 


�zBaseProcess.__init__cCs|jrtd��dS)Nzprocess object is closed)r(�
ValueErrorrrrr	�
_check_closedcszBaseProcess._check_closedcCs|jr|j|j|j�dS)zQ
        Method to be run in sub-process; can be overridden in sub-class
        N)r)r+r-rrrr	�rungszBaseProcess.runcCs>|��t�|�|�|_|jj|_|`|`|`t	�
|�dS)z%
        Start child process
        N)r;r
rr�sentinel�	_sentinelr)r+r-rr3rrrr	�startns
zBaseProcess.startcCs|��|j��dS)zT
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        N)r;r�	terminaterrrr	r@�szBaseProcess.terminatecCs|��|j��dS)zT
        Terminate process; sends SIGKILL signal or uses TerminateProcess()
        N)r;r�killrrrr	rA�szBaseProcess.killcCs*|��|j�|�}|dk	r&t�|�dS)z5
        Wait until child process terminates
        N)r;r�waitrr)r�timeout�resrrr	r0�szBaseProcess.joincCsJ|��|tkrdS|jdkr"dS|j��}|dkr8dSt�|�dSdS)z1
        Return whether process is alive
        TNF)r;rrrrr)r�
returncoderrr	�is_alive�s


zBaseProcess.is_alivecCsH|jdk	r>|j��dkr td��|j��d|_|`t�|�d|_dS)z�
        Close the Process object.

        This method releases resources held by the Process object.  It is
        an error to call this method if the child process is still running.
        Nz^Cannot close a process while it is still running. You should first call join() or terminate().T)rrr:�closer>rrr(rrrr	rG�s


zBaseProcess.closecCs|jSr�r1rrrr	r&�szBaseProcess.namecCs
||_dSrrH)rr&rrr	r&�scCs|j�dd�S)z4
        Return whether process is a daemon
        rF)r!�getrrrr	r�szBaseProcess.daemoncCs||jd<dS)z1
        Set whether process is a daemon
        rN�r!)rZdaemonicrrr	r�scCs
|jdS)N�authkeyrJrrrr	rK�szBaseProcess.authkeycCst|�|jd<dS)z2
        Set authorization key of process
        rKN)�AuthenticationStringr!)rrKrrr	rK�scCs"|��|jdkr|jS|j��S)zM
        Return exit code of process or `None` if it has yet to stop
        N)r;rrrrrr	�exitcode�s
zBaseProcess.exitcodecCs*|��|tkrt��S|jo$|jjSdS)zU
        Return identifier (PID) of process or `None` if it has yet to start
        N)r;rr#r$r�pidrrrr	�ident�szBaseProcess.identcCs4|��z|jWStk
r.td�d�YnXdS)z{
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        zprocess not startedN)r;r>�AttributeErrorr:rrrr	r=�s
zBaseProcess.sentinelcCs�d}|tkrd}nL|jrd}n@|jt��kr2d}n,|jdkrBd}n|j��}|dk	rZd}nd}t|�jd|j	g}|jdk	r�|�
d|jj�|�
d|j�|�
|�|dk	r�t�
||�}|�
d	|�|jr�|�
d
�dd�|�S)
NZstarted�closed�unknown�initialZstoppedzname=%rzpid=%sz	parent=%szexitcode=%srz<%s>� )rr(r%r#r$rrr.r/r1�appendrN�_exitcode_to_namerIrr0)rrMZstatus�inforrr	�__repr__s0




zBaseProcess.__repr__c
Csvddlm}m}�z>z�|jdk	r,|�|j�t	�
d�at�a
|��t}|at|j|j|�atjrnt����z|j��|��W5~X|�d�z|��d}W5|��XWn�tk
�r}zJ|js�d}n:t|jdt�r�|jd}nt j!�"t#|jd�d�d}W5d}~XYn2d}ddl$}t j!�"d|j%�|�&�YnXW5t��|�d|�|��X|S)N�)�util�contextz process exiting with exitcode %dz child process calling self.run()r�
zProcess %s:
)'�rZr[�	threadingZ	_shutdownrWZ_flush_std_streamsZ
_start_methodZ_force_start_method�	itertoolsr8r�setrZ_close_stdinr�_ParentProcessr'r%r
Z_HAVE_THREAD_NATIVE_IDZmain_threadZ_set_native_idZ_finalizer_registry�clearZ_run_after_forkersZ_exit_functionr<�
SystemExitr6�
isinstance�int�sys�stderr�writer�	tracebackr&�	print_exc)rZparent_sentinelrZr[rMZold_process�erirrr	�
_bootstrap"sR

�


zBaseProcess._bootstrap)N)N)r/�
__module__�__qualname__�__doc__rr9r;r<r?r@rAr0rFrG�propertyr&�setterrrKrMrOrNr=rXrlrrrr	rGsD�







	


c@seZdZdd�ZdS)rLcCs,ddlm}|�dkrtd��tt|�ffS)NrY)�get_spawning_popenzJPickling an AuthenticationString object is disallowed for security reasons)r[rr�	TypeErrorrL�bytes)rrrrrr	�
__reduce__Xs
�zAuthenticationString.__reduce__N)r/rmrnrurrrr	rLWsrLc@s6eZdZdd�Zdd�Zedd��Zd
dd	�ZeZdS)racCs4d|_||_||_d|_d|_d|_||_i|_dS)NrF)r r1�_pidr%rr(r>r!)rr&rNr=rrr	r9hsz_ParentProcess.__init__cCsddlm}||jgdd�S)Nr�rB�rC�Zmultiprocessing.connectionrBr>)rrBrrr	rFrsz_ParentProcess.is_alivecCs|jSr)rvrrrr	rOvsz_ParentProcess.identNcCs ddlm}||jg|d�dS)z6
        Wait until parent process terminates
        rrwrxNry)rrCrBrrr	r0zsz_ParentProcess.join)N)	r/rmrnr9rFrprOr0rNrrrr	rafs


rac@seZdZdd�Zdd�ZdS)�_MainProcesscCs8d|_d|_d|_d|_d|_tt�d��dd�|_dS)NrZMainProcessF� z/mp)rKZ	semprefix)	r r1r%rr(rLr#�urandomr!rrrr	r9�s�z_MainProcess.__init__cCsdSrrrrrr	rG�sz_MainProcess.closeN)r/rmrnr9rGrrrr	rz�srzrY�ZSIG�_r)$�__all__r#rf�signalr_r^Z_weakrefsetr�path�abspath�getcwdZORIGINAL_DIR�OSErrorrrrr
�objectrrtrLrarzr
rr8rr`rrVr�__dict__�itemsr&Zsignumr2rrrr	�<module>
s@�


!
PK{��\�����0__pycache__/resource_sharer.cpython-38.opt-2.pycnu�[���U

e5d��@s�ddlZddlZddlZddlZddlZddlmZddlmZddlm	Z	dgZ
ejdkrxe
dg7Z
Gd	d�de�Z
ne
d
g7Z
Gdd
�d
e�ZGdd
�d
e�Ze�ZejZdS)�N�)�process)�	reduction)�util�stopZwin32�	DupSocketc@seZdZdd�Zdd�ZdS)rcs(|����fdd�}t�|�j�|_dS)Ncs��|�}|�|�dS�N)�shareZ
send_bytes)�conn�pidr	�Znew_sock��7/usr/lib64/python3.8/multiprocessing/resource_sharer.py�sends
z DupSocket.__init__.<locals>.send)�dup�_resource_sharer�register�close�_id)�selfZsockrr
rr�__init__szDupSocket.__init__c
Cs6t�|j�� }|��}t�|�W5QR�SQRXdSr)r�get_connectionrZ
recv_bytes�socketZ	fromshare)rr
r	r
r
r�detach$szDupSocket.detachN��__name__�
__module__�__qualname__rrr
r
r
rrs�DupFdc@seZdZdd�Zdd�ZdS)rcs4t�|���fdd�}�fdd�}t�||�|_dS)Ncst�|�|�dSr)rZsend_handle)r
r�Znew_fdr
rr1szDupFd.__init__.<locals>.sendcst���dSr)�osrr
rr
rr3szDupFd.__init__.<locals>.close)r rrrr)r�fdrrr
rrr/s
zDupFd.__init__c
Cs.t�|j��}t�|�W5QR�SQRXdSr)rrrrZrecv_handle)rr
r
r
rr7szDupFd.detachNrr
r
r
rr-sc@sJeZdZdd�Zdd�Zedd��Zddd	�Zd
d�Zdd
�Z	dd�Z
dS)�_ResourceSharercCs@d|_i|_g|_t��|_d|_d|_d|_t	�
|tj�dS)Nr)
�_key�_cache�
_old_locks�	threading�Lock�_lock�	_listener�_address�_threadrZregister_after_forkr"�
_afterfork)rr
r
rr?s
z_ResourceSharer.__init__c
CsZ|j�J|jdkr|��|jd7_||f|j|j<|j|jfW5QR�SQRXdS)Nr)r(r*�_startr#r$)rrrr
r
rrIs
z_ResourceSharer.registercCs<ddlm}|\}}||t��jd�}|�|t��f�|S)Nr��Client��authkey)�
connectionr/r�current_processr1rr �getpid)Zidentr/�address�key�cr
r
rrRs
z_ResourceSharer.get_connectionNc	Cs�ddlm}|j��|jdk	r�||jt��jd�}|�d�|��|j	�
|�|j	��rdt�
d�|j��d|_	d|_d|_|j��D]\}\}}|�q�|j��W5QRXdS)Nrr.r0z._ResourceSharer thread did not stop when asked)r2r/r(r*rr3r1rrr+�joinZis_aliverZsub_warningr)r$�items�clear)rZtimeoutr/r7r6rrr
r
rr[s$
�



z_ResourceSharer.stopcCsj|j��D]\}\}}|�q
|j��|j�|j�t��|_|jdk	rT|j�	�d|_d|_
d|_dSr)r$r9r:r%�appendr(r&r'r)rr*r+)rr6rrr
r
rr,ps



z_ResourceSharer._afterforkcCsXddlm}t�d�|t��jd�|_|jj|_	t
j|jd�}d|_
|��||_dS)Nr)�Listenerz0starting listener and thread for sending handlesr0)�targetT)r2r<r�debugrr3r1r)r5r*r&ZThread�_serveZdaemon�startr+)rr<�tr
r
rr-~s

z_ResourceSharer._startc	Cs�ttd�rt�tjt���zh|j���T}|��}|dkrHW5QR�Wq�|\}}|j�	|�\}}z|||�W5|�XW5QRXWqt
��s�tj
t���YqXqdS)N�pthread_sigmask)�hasattr�signalrB�	SIG_BLOCK�
valid_signalsr)ZacceptZrecvr$�poprZ
is_exiting�sys�
excepthook�exc_info)rr
�msgr6Zdestination_pidrrr
r
rr?�s
z_ResourceSharer._serve)N)rrrrr�staticmethodrrr,r-r?r
r
r
rr"=s
	

r")r rDrrHr&�r�contextrr�__all__�platform�objectrrr"rrr
r
r
r�<module>s 


`PK{��\�Q��,,&__pycache__/synchronize.cpython-38.pycnu�[���U

e5dZ-�@s,ddddddgZddlZddlZddlZddlZddlZdd	lmZdd
lmZddlm	Z	zddlm
Z
mZWnek
r�ed
��YnXe
ed��\ZZej
jZGdd�de�Z
Gdd�de
�ZGdd�de�ZGdd�de
�ZGdd�de
�ZGdd�de�ZGdd�de�ZGdd�dej�ZdS)�Lock�RLock�	Semaphore�BoundedSemaphore�	Condition�Event�N�)�context)�process)�util)�SemLock�
sem_unlinkz�This platform lacks a functioning sem_open implementation, therefore, the required synchronization primitives needed will not function, see issue 3770.�c@s\eZdZe��Zdd�Zedd��Zdd�Z	dd�Z
d	d
�Zdd�Zd
d�Z
edd��ZdS)rc	Cs�|dkrtj��}|��}tjdkp*|dk}td�D]>}z t�||||�	�|�}|_
Wntk
rlYq4Xq|q4td��t�
d|j�|��tjdkr�dd�}	t�||	�|j
jdk	r�dd	lm}
|
|j
jd
�tj|tj|j
jfdd�dS)
N�win32�fork�dzcannot find name for semaphorezcreated semlock with handle %scSs|j��dS�N)�_semlock�_after_fork)�obj�r�3/usr/lib64/python3.8/multiprocessing/synchronize.pyrGsz%SemLock.__init__.<locals>._after_forkr)�register�	semaphorer)Zexitpriority)r	Z_default_contextZget_contextZget_start_method�sys�platform�range�_multiprocessingr�
_make_namer�FileExistsErrorr�debug�handle�
_make_methodsZregister_after_fork�name�resource_trackerrZFinalize�_cleanup)�self�kind�value�maxvalue�ctxr#Z
unlink_now�i�slrrrrr�__init__2s8
�
�zSemLock.__init__cCs"ddlm}t|�||d�dS)Nr)�
unregisterr)r$r.r
)r#r.rrrr%TszSemLock._cleanupcCs|jj|_|jj|_dSr)r�acquire�release�r&rrrr"Zs
zSemLock._make_methodscCs
|j��Sr)r�	__enter__r1rrrr2^szSemLock.__enter__cGs|jj|�Sr)r�__exit__�r&�argsrrrr3aszSemLock.__exit__cCsDt�|�|j}tjdkr,t���|j�}n|j}||j|j	|j
fS)Nr)r	�assert_spawningrrrZget_spawning_popenZduplicate_for_childr!r'r)r#)r&r,�hrrr�__getstate__ds

zSemLock.__getstate__cCs,tjj|�|_t�d|d�|��dS)Nz recreated blocker with handle %rr)rrZ_rebuildrrr r"�r&�staterrr�__setstate__mszSemLock.__setstate__cCsdt��jdttj�fS)Nz%s-%sZ	semprefix)r
�current_processZ_config�nextr�_randrrrrrrs�zSemLock._make_nameN)�__name__�
__module__�__qualname__�tempfileZ_RandomNameSequencer>r-�staticmethodr%r"r2r3r8r;rrrrrr.s"
	rc@s&eZdZd	dd�Zdd�Zdd�ZdS)
rrcCstj|t|t|d�dS�N�r*)rr-�	SEMAPHORE�
SEM_VALUE_MAX�r&r(r*rrrr-}szSemaphore.__init__cCs
|j��Sr)r�
_get_valuer1rrr�	get_value�szSemaphore.get_valuecCs8z|j��}Wntk
r&d}YnXd|jj|fS)N�unknownz<%s(value=%s)>)rrI�	Exception�	__class__r?�r&r(rrr�__repr__�s

zSemaphore.__repr__N)r)r?r@rAr-rJrOrrrrr{s
c@seZdZddd�Zdd�ZdS)rrcCstj|t|||d�dSrD�rr-rFrHrrrr-�szBoundedSemaphore.__init__cCs>z|j��}Wntk
r&d}YnXd|jj||jjfS)NrKz<%s(value=%s, maxvalue=%s)>)rrIrLrMr?r)rNrrrrO�s
�zBoundedSemaphore.__repr__N)r�r?r@rAr-rOrrrrr�s
c@seZdZdd�Zdd�ZdS)rcCstj|tdd|d�dS�NrrErP�r&r*rrrr-�sz
Lock.__init__cCs�zf|j��r8t��j}t��jdkrd|dt��j7}n,|j��dkrLd}n|j��dkr`d}nd}Wnt	k
r~d}YnXd	|j
j|fS)
N�
MainThread�|r�Noner�SomeOtherThread�SomeOtherProcessrKz<%s(owner=%s)>)r�_is_miner
r<r#�	threading�current_threadrI�_countrLrMr?)r&r#rrrrO�s


z
Lock.__repr__NrQrrrrr�sc@seZdZdd�Zdd�ZdS)rcCstj|tdd|d�dSrR)rr-�RECURSIVE_MUTEXrSrrrr-�szRLock.__init__cCs�z||j��rBt��j}t��jdkr6|dt��j7}|j��}n8|j��dkrZd\}}n |j��dkrrd\}}nd\}}Wnt	k
r�d\}}YnXd	|j
j||fS)
NrTrUr)rVrr)rW�nonzero)rXr^)rKrK�<%s(%s, %s)>)rrYr
r<r#rZr[r\rIrLrMr?)r&r#�countrrrrO�s



zRLock.__repr__NrQrrrrr�sc@sleZdZddd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	ddd�Z
ddd�Zdd�Zddd�Z
dS)rNcCs>|p
|��|_|�d�|_|�d�|_|�d�|_|��dS�Nr)r�_lockr�_sleeping_count�_woken_count�_wait_semaphorer")r&�lockr*rrrr-�s
zCondition.__init__cCst�|�|j|j|j|jfSr)r	r6rbrcrdrer1rrrr8�s

�zCondition.__getstate__cCs |\|_|_|_|_|��dSr)rbrcrdrer"r9rrrr;�s
�
zCondition.__setstate__cCs
|j��Sr)rbr2r1rrrr2�szCondition.__enter__cGs|jj|�Sr)rbr3r4rrrr3�szCondition.__exit__cCs|jj|_|jj|_dSr)rbr/r0r1rrrr"�s
zCondition._make_methodscCsJz|jj��|jj��}Wntk
r4d}YnXd|jj|j|fS)NrKr_)rcrrIrdrLrMr?rb)r&Znum_waitersrrrrO�s

�
zCondition.__repr__c	Cs~|jj��std��|j��|jj��}t|�D]}|j��q2z|j
�	d|�W�S|j��t|�D]}|j�	�qhXdS)Nz,must acquire() condition before using wait()T)rbrrY�AssertionErrorrcr0r\rrdr/re)r&�timeoutr`r+rrr�wait�s�

zCondition.waitrcCs�|jj��std��|j�d�r(td��|j�d�rN|j�d�}|s(td��q(d}||krz|j�d�rz|j��|d7}qR|r�t	|�D]}|j��q�|j�d�r�q�dS)Nzlock is not ownedFz<notify: Should not have been able to acquire _wait_semaphorez>notify: Bug in sleeping_count.acquire- res should not be Falserr)
rbrrYrgrer/rdrcr0r)r&�n�resZsleepersr+rrr�notifys$��

zCondition.notifycCs|jtjd�dS)N)rj)rlr�maxsizer1rrr�
notify_all(szCondition.notify_allcCsd|�}|r|S|dk	r$t��|}nd}d}|s`|dk	rN|t��}|dkrNq`|�|�|�}q,|Sra)�time�	monotonicri)r&Z	predicaterh�resultZendtimeZwaittimerrr�wait_for+s
zCondition.wait_for)N)N)r)N)r?r@rAr-r8r;r2r3r"rOrirlrnrrrrrrr�s


c@s6eZdZdd�Zdd�Zdd�Zdd�Zdd
d�Zd	S)
rcCs |�|���|_|�d�|_dSra)rr�_condr�_flagrSrrrr-CszEvent.__init__c	CsD|j�4|j�d�r,|j��W5QR�dSW5QR�dSQRXdS�NFT)rsrtr/r0r1rrr�is_setGs

zEvent.is_setc	Cs6|j�&|j�d�|j��|j��W5QRXdS�NF)rsrtr/r0rnr1rrr�setNs
z	Event.setc	Cs"|j�|j�d�W5QRXdSrw)rsrtr/r1rrr�clearTszEvent.clearNc	Csh|j�X|j�d�r |j��n|j�|�|j�d�rP|j��W5QR�dSW5QR�dSQRXdSru)rsrtr/r0ri)r&rhrrrriXs
z
Event.wait)N)r?r@rAr-rvrxryrirrrrrAs
c@sZeZdZddd�Zdd�Zdd�Zedd	��Zejd
d	��Zedd��Z	e	jd
d��Z	dS)�BarrierNc	CsRddl}ddlm}||�d�d�}|��}|�|||||f�d|_d|_dS)Nrr)�
BufferWrapperr+r)�struct�heapr{Zcalcsizerr;�_stater\)	r&Zparties�actionrhr*r|r{�wrapperZcondrrrr-jszBarrier.__init__cCs.|\|_|_|_|_|_|j���d�|_dS)Nr+)�_parties�_action�_timeoutrs�_wrapperZcreate_memoryview�cast�_arrayr9rrrr;ss
�zBarrier.__setstate__cCs|j|j|j|j|jfSr)r�r�r�rsr�r1rrrr8xs�zBarrier.__getstate__cCs
|jdSra�r�r1rrrr~|szBarrier._statecCs||jd<dSrar�rNrrrr~�scCs
|jdS�Nrr�r1rrrr\�szBarrier._countcCs||jd<dSr�r�rNrrrr\�s)NN)
r?r@rAr-r;r8�propertyr~�setterr\rrrrrzhs
	


rz)�__all__rZrrBrro�r	r
rrr
�ImportError�listrr]rFrG�objectrrrrrrrzrrrr�<module>s8�	Mo'PK{��\i�+C��1__pycache__/resource_tracker.cpython-38.opt-1.pycnu�[���U

e5d�!�@s�ddlZddlZddlZddlZddlZddlmZddlmZdddgZe	ed�Z
ejejfZ
d	d
d�iZejdkr�ddlZddlZe�ejejd
��Gdd�de�Ze�ZejZejZejZejZdd�ZdS)�N�)�spawn)�util�ensure_running�register�
unregister�pthread_sigmaskZnoopcCsdS�N�r
r
r
�8/usr/lib64/python3.8/multiprocessing/resource_tracker.py�<lambda>!�r�posix)Z	semaphoreZ
shared_memoryc@sLeZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	dd�Z
dS)�ResourceTrackercCst��|_d|_d|_dSr	)�	threadingZLock�_lock�_fd�_pid��selfr
r
r�__init__0s
zResourceTracker.__init__c	CsT|j�D|jdkr W5QR�dSt�|j�d|_t�|jd�d|_W5QRXdS)Nr)rr�os�close�waitpidrrr
r
r�_stop5s
zResourceTracker._stopcCs|��|jSr	)rrrr
r
r�getfdBszResourceTracker.getfdcCst|j��b|jdk	r~|��r*W5QR�dSt�|j�z|jdk	rPt�|jd�Wntk
rfYnXd|_d|_t�	d�g}z|�
tj�
��Wntk
r�YnXd}t��\}}z�zr|�
|�t��}|gt��}|d||g7}z&t�rt�tjt�t�|||�}W5t�r,t�tjt�XWnt�|��YnX||_||_W5t�|�XW5QRXdS)z�Make sure that resource tracker process is running.

        This can be run from any process.  Usually a child process will use
        the resource created by its parent.NrzUresource_tracker: process died unexpectedly, relaunching.  Some resources might leak.z:from multiprocessing.resource_tracker import main;main(%d)z-c)rr�_check_aliverrrr�ChildProcessError�warnings�warn�append�sys�stderr�fileno�	Exception�piperZget_executablerZ_args_from_interpreter_flags�
_HAVE_SIGMASK�signalr�SIG_UNBLOCK�_IGNORED_SIGNALS�	SIG_BLOCKZspawnv_passfds)rZfds_to_pass�cmd�r�wZexe�args�pidr
r
rrFsJ






zResourceTracker.ensure_runningcCs2zt�|jd�Wntk
r(YdSXdSdS)z;Check that the pipe has not been closed by sending a probe.s
PROBE:0:noop
FTN)r�writer�OSErrorrr
r
rr�s
zResourceTracker._check_alivecCs|�d||�dS)z0Register name of resource with resource tracker.�REGISTERN��_send�r�name�rtyper
r
rr�szResourceTracker.registercCs|�d||�dS)z2Unregister name of resource with resource tracker.�
UNREGISTERNr3r5r
r
rr�szResourceTracker.unregistercCsB|��d�|||��d�}t|�dkr0td��t�|j|�}dS)Nz{0}:{1}:{2}
�asciiiz
name too long)r�format�encode�len�
ValueErrorrr0r)rr+r6r7�msg�nbytesr
r
rr4�szResourceTracker._sendN)�__name__�
__module__�__qualname__rrrrrrrr4r
r
r
rr.s
@rc
Cst�tjtj�t�tjtj�tr2t�tjt�tj	tj
fD]&}z|��Wq>tk
rbYq>Xq>dd�t
��D�}z�t|d���}|D]�}z�|���d��d�\}}}t
�|d�}	|	dkr�td	|�d
|����|dkr�||�|�n2|dk�r||�|�n|d
k�rntd|��Wq�tk
�rTztjt���WnYnXYq�Xq�W5QRXW5|��D]�\}}|�r�zt�dt|�|f�Wntk
�r�YnX|D]V}zLzt
||�Wn6tk
�r�}zt�d||f�W5d}~XYnXW5X�q��qnXdS)zRun resource tracker.cSsi|]}|t��qSr
)�set)�.0r7r
r
r�
<dictcomp>�szmain.<locals>.<dictcomp>zQresource_tracker: There appear to be %d leaked %s objects to clean up at shutdownzresource_tracker: %r: %sN�rbr9�:zCannot register z. for automatic cleanup: unknown resource type r2r8ZPROBEzunrecognized command %r)r'�SIGINT�SIG_IGN�SIGTERMr&rr(r)r!�stdin�stdoutrr$�_CLEANUP_FUNCS�keys�itemsrrr<�open�strip�decode�split�getr=�add�remove�RuntimeError�
excepthook�exc_info)
�fd�f�cacher7Zrtype_cacher6�e�liner+Zcleanup_funcr
r
r�main�s^�


�
(r_)rr'r!rr�rr�__all__�hasattrr&rHrJr)rMr6Z_multiprocessingZ_posixshmem�updateZ
sem_unlinkZ
shm_unlink�objectrZ_resource_trackerrrrrr_r
r
r
r�<module>s4

�
�wPK{��\������&__pycache__/spawn.cpython-38.opt-1.pycnu�[���U

e5dP$�@s$ddlZddlZddlZddlZddlmZmZddlmZddlm	Z	ddlm
Z
ddd	d
ddd
gZejdkrzdZ
dZneedd�Z
ej���d�Zer�ej�ejd�anejadd	�Zdd
�Zdd�Zdd�Zdd�Zd&dd�Zdd�Zdd�Zdd�ZgZ dd �Z!d!d"�Z"d#d$�Z#d%d
�Z$dS)'�N�)�get_start_method�set_start_method)�process)�	reduction)�util�_main�freeze_support�set_executable�get_executable�get_preparation_data�get_command_line�import_main_path�win32F�frozenzpythonservice.exez
python.execCs|adS�N��_python_exe)Zexe�r�-/usr/lib64/python3.8/multiprocessing/spawn.pyr
)scCstSrrrrrrr-scCs$t|�dkr|ddkrdSdSdS)z=
    Return whether commandline indicates we are forking
    �r�--multiprocessing-forkTFN)�len)�argvrrr�
is_forking4srcCsdttj�r`i}tjdd�D]0}|�d�\}}|dkr@d||<qt|�||<qtf|�t��dS)zE
    Run code for process object if this in not the main process
    rN�=�None)r�sysr�split�int�
spawn_main�exit)�kwds�arg�name�valuerrrr	>s


cKshttdd�r(tjdgdd�|��D�Sd}|d�dd	�|��D��;}t��}tg|d
|dgSdS)zJ
    Returns prefix of command line used for spawning a child process
    rFrcSsg|]}d|�qS)�%s=%rr��.0�itemrrr�
<listcomp>Tsz$get_command_line.<locals>.<listcomp>z<from multiprocessing.spawn import spawn_main; spawn_main(%s)z, css|]}d|VqdS)r&Nrr'rrr�	<genexpr>Wsz#get_command_line.<locals>.<genexpr>z-cN)�getattrr�
executable�items�joinrZ_args_from_interpreter_flagsr)r"�progZoptsrrrr
Ns�cCs�tjdkr`ddl}ddl}|dk	r:|�|j|jBd|�}nd}tj||d�}|�	|t
j�}|}n"ddlm
}	||	j_|}t
�|�}t||�}
t�|
�dS)z7
    Run code specified by data received over pipe
    rrNF)�source_processr)�resource_tracker)r�platform�msvcrt�_winapiZOpenProcessZSYNCHRONIZEZPROCESS_DUP_HANDLErZ	duplicateZopen_osfhandle�os�O_RDONLY�r2Z_resource_trackerZ_fd�duprr!)Zpipe_handleZ
parent_pidZ
tracker_fdr4r5r1Z
new_handle�fd�parent_sentinelr2Zexitcoderrrr \s*

��

r c	Cs`tj|ddd��@}dt��_z$tj�|�}t|�tj�|�}W5t��`XW5QRX|�	|�S)N�rbT)�closefd)
r6�fdopenr�current_process�_inheritingr�pickle�load�prepare�
_bootstrap)r:r;Zfrom_parentZpreparation_data�selfrrrrxs
cCstt��dd�rtd��dS)Nr@Fa
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.)r,rr?�RuntimeErrorrrrr�_check_not_importing_main�srGcCst�ttjt��jd�}tjdk	r2tj��|d<t	j
��}z|�d�}Wnt
k
r^YnXtj||<|j||t	jtjt��t�d�t	jd}t|jdd�}|dk	r�||d<nft	jd	ks�t�st�st|d
d�}|dk	�rtj
�|��s
tjdk	�r
tj
�tj|�}tj
�|�|d<|S)zM
    Return info about parent needed by child to unpickle process object
    )�
log_to_stderr�authkeyN�	log_levelr8)r$�sys_path�sys_argv�orig_dir�dir�start_method�__main__r$�init_main_from_namer�__file__�init_main_from_path)rG�dictrZ_log_to_stderrrr?rIZ_loggerZgetEffectiveLevelr�path�copy�index�
ValueError�ORIGINAL_DIR�updaterr6�getcwdr�modulesr,�__spec__r3�WINEXE�
WINSERVICE�isabsr/�normpath)r$�drK�i�main_moduleZ
main_mod_name�	main_pathrrrr�sD�


�


�cCs�d|kr|dt��_d|kr,|dt��_d|krD|drDt��d|kr^t���|d�d|krp|dt_	d|kr�|dt_
d|kr�t�|d�d|kr�|dt_
d	|kr�t|d	d
d�d|kr�t|d�nd
|kr�t|d
�dS)zE
    Try to get current process ready to unpickle process object
    r$rIrHrJrKrLrNrMrOT)ZforcerQrSN)rr?r$rIrrHZ
get_loggerZsetLevelrrUrr6�chdirrYr�_fixup_main_from_name�_fixup_main_from_path)�datarrrrC�s,


rCcCs~tjd}|dks|�d�r dSt|jdd�|kr6dSt�|�t�d�}t	j
|ddd�}|j�|�|tjd<tjd<dS)NrPz	.__main__r$�__mp_main__T)�run_nameZ	alter_sys)
rr\�endswithr,r]�old_main_modules�append�types�
ModuleType�runpyZ
run_module�__dict__rZ)Zmod_name�current_mainrd�main_contentrrrrg�s


�rgcCs�tjd}tj�tj�|��d}|dkr.dSt|dd�|krBdSt�|�t	�
d�}tj|dd�}|j
�|�|tjd<tjd<dS)NrPrZipythonrRrj)rk)rr\r6rU�splitext�basenamer,rmrnrorprqZrun_pathrrrZ)rersZ	main_namerdrtrrrrh	s


�rhcCst|�dS)z<
    Set sys.modules['__main__'] to module at main_path
    N)rh)rerrrr%s)NN)%r6rrqror8rrr�contextrr�__all__r3r^r_r,r-�lowerrlrUr/�exec_prefixrr
rrr	r
r rrGrrmrCrgrhrrrrr�<module>sD�


2&PK{��\�;z ��)__pycache__/__init__.cpython-38.opt-2.pycnu�[���U

e5d��@sdddlZddlmZdd�eej�D�Ze��dd�eD��dZd	Z	d
ej
kr`ej
d
ej
d<dS)�N�)�contextcCsg|]}|�d�s|�qS)�_)�
startswith)�.0�x�r�0/usr/lib64/python3.8/multiprocessing/__init__.py�
<listcomp>s
r
ccs|]}|ttj|�fVqdS)N)�getattrr�_default_context)r�namerrr	�	<genexpr>sr���__main__Z__mp_main__)�sys�r�dirr�__all__�globals�updateZSUBDEBUGZ
SUBWARNING�modulesrrrr	�<module>s
PK{��\���,�,__pycache__/util.cpython-38.pycnu�[���U

e5d~6�@s�ddlZddlZddlZddlZddlZddlZddlmZddlm	Z	ddddd	d
ddd
ddddddgZ
dZdZdZ
dZdZdZdZdadadd�Zdd�Zdd�Zdd�Zdd	�Zd@d d
�Zd!d"�Zd#d$�Ze�Zd%d&�Zd'd�Ze��Z e�!�Z"d(d)�Z#d*d�Z$iZ%e�!�Z&Gd+d�de'�Z(dAd,d-�Z)d.d
�Z*da+eee)e	j,e	j-fd/d0�Z.e�/e.�Gd1d�de'�Z0Gd2d�dej1�Z2ze�3d3�Z4Wne5k
�r�d4Z4YnXd5d�Z6d6d7�Z7d8d9�Z8d:d;�Z9d<d=�Z:d>d?�Z;dS)B�N)�_args_from_interpreter_flags�)�process�	sub_debug�debug�info�sub_warning�
get_logger�
log_to_stderr�get_temp_dir�register_after_fork�
is_exiting�Finalize�ForkAwareThreadLock�ForkAwareLocal�close_all_fds_except�SUBDEBUG�
SUBWARNING��
���multiprocessingz+[%(levelname)s/%(processName)s] %(message)sFcGstrtjt|f|��dS�N)�_logger�logr��msg�args�r�,/usr/lib64/python3.8/multiprocessing/util.pyr,scGstrtjt|f|��dSr)rr�DEBUGrrrr r0scGstrtjt|f|��dSr)rr�INFOrrrr r4scGstrtjt|f|��dSr)rrrrrrr r8scCs|ddl}|��z\tsj|�t�adt_ttd�rFt�	t
�t�t
�n$tj�
t
dif�tj�t
dif�W5|��XtS)z0
    Returns logger used by multiprocessing
    rN�
unregisterr)�loggingZ_acquireLockZ_releaseLockrZ	getLogger�LOGGER_NAMEZ	propagate�hasattr�atexitr#�_exit_function�registerZ
_exithandlers�remove�append)r$rrr r	<s



cCsJddl}t�}|�t�}|��}|�|�|�|�|rB|�|�dat	S)zB
    Turn on logging and add a handler which prints to stderr
    rNT)
r$r	Z	Formatter�DEFAULT_LOGGING_FORMATZ
StreamHandlerZsetFormatterZ
addHandlerZsetLevel�_log_to_stderrr)�levelr$ZloggerZ	formatterZhandlerrrr r
Ws



cCs tjdkrdSttd�rdSdS)NZlinuxTZgetandroidapilevelF)�sys�platformr&rrrr �#_platform_supports_abstract_socketsls


r1cCs@|sdSt|t�r|ddkSt|t�r4|ddkStd��dS)NFr�z(address type of {address!r} unrecognized)�
isinstance�bytes�str�	TypeError)Zaddressrrr �is_abstract_socket_namespacets

r7cCs&||�t��}|dk	r"d|jd<dS)N�tempdir)r�current_process�_config)�rmtreer8r9rrr �_remove_temp_dir�sr<cCsft��j�d�}|dkrbddl}ddl}|jdd�}td|�tdt	|j
|fdd�|t��jd<|S)Nr8rzpymp-)�prefixzcreated temp directory %si����)r�exitpriority)rr9r:�get�shutil�tempfileZmkdtemprrr<r;)r8r@rArrr r�s
�cCsftt���}|��|D]H\\}}}}z||�Wqtk
r^}ztd|�W5d}~XYqXqdS)Nz after forker raised exception %s)�list�_afterfork_registry�items�sort�	Exceptionr)rD�indexZident�func�obj�errr �_run_after_forkers�srKcCs|ttt�t|�|f<dSr)rC�next�_afterfork_counter�id)rIrHrrr r�sc@sFeZdZdZddd�Zdeeejfdd�Z	dd	�Z
d
d�Zdd
�ZdS)rzA
    Class which supports object finalization using weakrefs
    rNcCs�|dk	r&t|t�s&td�|t|����|dk	r>t�||�|_n|dkrNtd��||_	||_
|p`i|_|tt
�f|_t��|_|t|j<dS)Nz3Exitpriority ({0!r}) must be None or int, not {1!s}z+Without object, exitpriority cannot be None)r3�intr6�format�type�weakref�ref�_weakref�
ValueError�	_callback�_args�_kwargsrL�_finalizer_counter�_key�os�getpid�_pid�_finalizer_registry)�selfrI�callbackr�kwargsr>rrr �__init__�s"��

zFinalize.__init__cCs�z||j=Wntk
r(|d�YnbX|j|�krD|d�d}n$|d|j|j|j�|j|j|j�}d|_|_|_|_|_|SdS)zQ
        Run the callback unless it has already been called or cancelled
        zfinalizer no longer registeredz+finalizer ignored because different processNz/finalizer calling %s with args %s and kwargs %s)rZ�KeyErrorr]rVrWrXrT)r_Zwrr^rr\�resrrr �__call__�s$��zFinalize.__call__cCsDzt|j=Wntk
r Yn Xd|_|_|_|_|_dS)z3
        Cancel finalization of the object
        N)r^rZrcrTrVrWrX�r_rrr �cancel�s��zFinalize.cancelcCs
|jtkS)zS
        Return whether this finalizer is still waiting to invoke callback
        )rZr^rfrrr �still_active�szFinalize.still_activec	Cs�z|��}Wnttfk
r(d}YnX|dkr>d|jjSd|jjt|jd|j�f}|jrr|dt|j�7}|j	r�|dt|j	�7}|j
ddk	r�|dt|j
d�7}|dS)	Nz<%s object, dead>z<%s object, callback=%s�__name__z, args=z	, kwargs=rz, exitpriority=�>)rT�AttributeErrorr6�	__class__ri�getattrrVrWr5rXrZ)r_rI�xrrr �__repr__�s"
�zFinalize.__repr__)rNN)
ri�
__module__�__qualname__�__doc__rbr^rr[r\rergrhrorrrr r�s
�
c	s�tdkrdS�dkrdd��n�fdd���fdd�tt�D�}|jdd�|D]P}t�|�}|dk	rPtd	|�z
|�WqPtk
r�d
dl}|��YqPXqP�dkr�t��dS)z�
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    NcSs|ddk	S�Nrr��prrr �<lambda>�z!_run_finalizers.<locals>.<lambda>cs|ddk	o|d�kSrsrrt)�minpriorityrr rvrwcsg|]}�|�r|�qSrr)�.0�key)�frr �
<listcomp>#sz#_run_finalizers.<locals>.<listcomp>T)�reversez
calling %sr)	r^rBrEr?rrF�	traceback�	print_exc�clear)rx�keysrz�	finalizerr~r)r{rxr �_run_finalizerss$



r�cCstp
tdkS)z6
    Returns true if the process is shutting down
    N)�_exitingrrrr r
8scCs�ts�da|d�|d�|d�|�dk	rr|�D] }|jr0|d|j�|j��q0|�D]}|d|j�|��qX|d�|�dS)NTzprocess shutting downz2running all "atexit" finalizers with priority >= 0rz!calling terminate() for daemon %szcalling join() for process %sz)running the remaining "atexit" finalizers)r�Zdaemon�nameZ_popenZ	terminate�join)rrr��active_childrenr9rurrr r(@s	



r(c@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
rcCs|��t|tj�dSr)�_resetrrrfrrr rbqszForkAwareThreadLock.__init__cCs"t��|_|jj|_|jj|_dSr)�	threadingZLock�_lock�acquire�releaserfrrr r�us

zForkAwareThreadLock._resetcCs
|j��Sr)r��	__enter__rfrrr r�zszForkAwareThreadLock.__enter__cGs|jj|�Sr)r��__exit__)r_rrrr r�}szForkAwareThreadLock.__exit__N)rirprqrbr�r�r�rrrr rpsc@seZdZdd�Zdd�ZdS)rcCst|dd��dS)NcSs
|j��Sr)�__dict__r�)rIrrr rv�rwz)ForkAwareLocal.__init__.<locals>.<lambda>)rrfrrr rb�szForkAwareLocal.__init__cCst|�dfS)Nr)rQrfrrr �
__reduce__�szForkAwareLocal.__reduce__N)rirprqrbr�rrrr r�s�SC_OPEN_MAX�cCsbt|�dtg}|��|dtks,td��tt|�d�D] }t�||d||d�q<dS)N���zfd too larger)rB�MAXFDrE�AssertionError�range�lenr[�
closerange)�fds�irrr r�s
c	Cs�tjdkrdSztj��Wnttfk
r4YnXz@t�tjtj�}zt|dd�t_Wnt�|��YnXWnttfk
r�YnXdS)NF)�closefd)	r/�stdin�close�OSErrorrUr[�open�devnull�O_RDONLY)�fdrrr �_close_stdin�s

r�c	CsTztj��Wnttfk
r&YnXztj��Wnttfk
rNYnXdSr)r/�stdout�flushrkrU�stderrrrrr �_flush_std_streams�sr�cCsxddl}tttt|���}t��\}}z6|�|t�	|�gd|dddddddd||ddd�W�St�|�t�|�XdS)NrTr�F)
�_posixsubprocess�tuple�sorted�maprOr[�piper�Z	fork_exec�fsencode)�pathrZpassfdsr�Zerrpipe_readZ
errpipe_writerrr �spawnv_passfds�s2
�
r�cGs|D]}t�|�qdS)z/Close each file descriptor given as an argumentN)r[r�)r�r�rrr �	close_fds�sr�cCsZddlm}t��ddlm}|j��ddlm}|j	��t
�|��|��dS)zKCleanup multiprocessing resources when multiprocessing tests
    completed.r)�support)�
forkserver)�resource_trackerN)
Ztestr�rZ_cleanuprr�Z_forkserverZ_stopr�Z_resource_trackerr�Z
gc_collectZ
reap_children)r�r�r�rrr �_cleanup_tests�s

r�)N)N)<r[�	itertoolsr/rRr'r��
subprocessr�r�__all__ZNOTSETrr!r"rr%r,rr-rrrrr	r
r1r7Zabstract_sockets_supportedr<rZWeakValueDictionaryrC�countrMrKrr^rY�objectrr�r
r�r�r9r(r)rZlocalr�sysconfr�rFrr�r�r�r�r�rrrr �<module>
s��

		V
,�
*



PK{��\#���'__pycache__/sharedctypes.cpython-38.pycnu�[���U

e5d��@sBddlZddlZddlmZddlmZddlmZmZejZ	dddd	d
dgZ
ejejej
ejejejejejejejejejejejd�Zd
d�Zdd�Zdd�Zddd�dd�Zddd�dd	�Zdd
�Zd&dd�Z dd�Z!dd�Z"dd�Z#dZ$iZ%e�&�Z'Gdd�de(�Z)Gd d!�d!e)�Z*Gd"d#�d#e)�Z+Gd$d%�d%e+�Z,dS)'�N�)�heap)�get_context)�	reduction�assert_spawning�RawValue�RawArray�Value�Array�copy�synchronized)�c�u�b�B�h�H�i�I�l�L�q�Q�f�dcCs t�|�}t�|�}t||d�S�N)�ctypes�sizeofrZ
BufferWrapper�
rebuild_ctype)�type_�size�wrapper�r"�4/usr/lib64/python3.8/multiprocessing/sharedctypes.py�
_new_value's

r$cGs<t�||�}t|�}t�t�|�dt�|��|j|�|S)z>
    Returns a ctypes object allocated from shared memory
    r)�typecode_to_type�getr$r�memset�	addressofr�__init__)�typecode_or_type�argsr�objr"r"r#r,s

cCsjt�||�}t|t�rD||}t|�}t�t�|�dt�|��|S|t	|�}t|�}|j
|�|SdS)z=
    Returns a ctypes array allocated from shared memory
    rN)r%r&�
isinstance�intr$rr'r(r�lenr))r*�size_or_initializerrr,�resultr"r"r#r6s

T)�lock�ctxcGsXt|f|��}|dkr|S|dkr4|p*t�}|��}t|d�sJtd|��t|||d�S)z6
    Return a synchronization wrapper for a Value
    F�TN�acquire�%r has no method 'acquire'�r3)rr�RLock�hasattr�AttributeErrorr)r*r2r3r+r,r"r"r#r	Fs

cCsTt||�}|dkr|S|dkr0|p&t�}|��}t|d�sFtd|��t|||d�S)z9
    Return a synchronization wrapper for a RawArray
    Fr4r5r6r7)rrr8r9r:r)r*r0r2r3r,r"r"r#r
Ts


cCstt|��}|t�|�d<|S)Nr)r$�typerZpointer)r,Znew_objr"r"r#rbscCs�t|t�rtd��|pt�}t|tj�r4t|||�St|tj�rd|jtj	krXt
|||�St|||�St|�}zt
|}WnRtk
r�dd�|jD�}dd�|D�}d|j}t|tf|�}t
|<YnX||||�SdS)Nzobject already synchronizedcSsg|]}|d�qS)rr")�.0Zfieldr"r"r#�
<listcomp>vsz synchronized.<locals>.<listcomp>cSsi|]}|t|��qSr")�
make_property)r<�namer"r"r#�
<dictcomp>wsz synchronized.<locals>.<dictcomp>�Synchronized)r-�SynchronizedBase�AssertionErrorrrZ_SimpleCDatarAr
�_type_�c_char�SynchronizedString�SynchronizedArrayr;�class_cache�KeyErrorZ_fields_�__name__)r,r2r3�clsZscls�namesrZ	classnamer"r"r#rgs"

cCs@t|�t|tj�r(t|j|j|jffStt|�|jdffSdSr)	rr-rr
rrD�_wrapperZ_length_r;)r,r"r"r#�reduce_ctype�srNcCs8|dk	r||}t�|t�|��}|�|�}||_|Sr)�_ForkingPickler�registerrNZcreate_memoryviewZfrom_bufferrM)rr!ZlengthZbufr,r"r"r#r�s
rcCsPz
t|WStk
rJi}tt|fd|�||t|<||YSXdS)N�)�
prop_cacherI�exec�template)r?rr"r"r#r>�s
r>z�
def get%s(self):
    self.acquire()
    try:
        return self._obj.%s
    finally:
        self.release()
def set%s(self, value):
    self.acquire()
    try:
        self._obj.%s = value
    finally:
        self.release()
%s = property(get%s, set%s)
c@sFeZdZddd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	dS)rBNcCsB||_|r||_n|ptdd�}|��|_|jj|_|jj|_dS)NT)Zforce)�_obj�_lockrr8r5�release)�selfr,r2r3r"r"r#r)�s

zSynchronizedBase.__init__cCs
|j��Sr)rV�	__enter__�rXr"r"r#rY�szSynchronizedBase.__enter__cGs|jj|�Sr)rV�__exit__)rXr+r"r"r#r[�szSynchronizedBase.__exit__cCst|�t|j|jffSr)rrrUrVrZr"r"r#�
__reduce__�szSynchronizedBase.__reduce__cCs|jSr�rUrZr"r"r#�get_obj�szSynchronizedBase.get_objcCs|jSr)rVrZr"r"r#�get_lock�szSynchronizedBase.get_lockcCsdt|�j|jfS)Nz<%s wrapper for %s>)r;rJrUrZr"r"r#�__repr__�szSynchronizedBase.__repr__)NN)
rJ�
__module__�__qualname__r)rYr[r\r^r_r`r"r"r"r#rB�s

rBc@seZdZed�ZdS)rA�valueN)rJrarbr>rcr"r"r"r#rA�srAc@s4eZdZdd�Zdd�Zdd�Zdd�Zd	d
�ZdS)rGcCs
t|j�Sr)r/rUrZr"r"r#�__len__�szSynchronizedArray.__len__c
Cs&|�|j|W5QR�SQRXdSrr])rXrr"r"r#�__getitem__�szSynchronizedArray.__getitem__c	Cs|�||j|<W5QRXdSrr])rXrrcr"r"r#�__setitem__�szSynchronizedArray.__setitem__c
Cs*|�|j||�W5QR�SQRXdSrr])rX�start�stopr"r"r#�__getslice__�szSynchronizedArray.__getslice__c	Cs"|�||j||�<W5QRXdSrr])rXrgrh�valuesr"r"r#�__setslice__�szSynchronizedArray.__setslice__N)rJrarbrdrerfrirkr"r"r"r#rG�s
rGc@seZdZed�Zed�ZdS)rFrc�rawN)rJrarbr>rcrlr"r"r"r#rF�srF)NN)-r�weakref�rr�contextrrZForkingPicklerrO�__all__rEZc_wcharZc_byteZc_ubyteZc_shortZc_ushortZc_intZc_uintZc_longZc_ulongZ
c_longlongZc_ulonglongZc_floatZc_doubler%r$rrr	r
rrrNrr>rTrR�WeakKeyDictionaryrH�objectrBrArGrFr"r"r"r#�<module>
sL�


	 PK{��\��>�OO0__pycache__/resource_sharer.cpython-38.opt-1.pycnu�[���U

e5d��@s�ddlZddlZddlZddlZddlZddlmZddlmZddlm	Z	dgZ
ejdkrxe
dg7Z
Gd	d�de�Z
ne
d
g7Z
Gdd
�d
e�ZGdd
�d
e�Ze�ZejZdS)�N�)�process)�	reduction)�util�stopZwin32�	DupSocketc@s eZdZdZdd�Zdd�ZdS)rzPicklable wrapper for a socket.cs(|����fdd�}t�|�j�|_dS)Ncs��|�}|�|�dS�N)�shareZ
send_bytes)�conn�pidr	�Znew_sock��7/usr/lib64/python3.8/multiprocessing/resource_sharer.py�sends
z DupSocket.__init__.<locals>.send)�dup�_resource_sharer�register�close�_id)�selfZsockrr
rr�__init__szDupSocket.__init__c
Cs6t�|j�� }|��}t�|�W5QR�SQRXdS)z1Get the socket.  This should only be called once.N)r�get_connectionrZ
recv_bytes�socketZ	fromshare)rr
r	r
r
r�detach$szDupSocket.detachN��__name__�
__module__�__qualname__�__doc__rrr
r
r
rrs�DupFdc@s eZdZdZdd�Zdd�ZdS)rz-Wrapper for fd which can be used at any time.cs4t�|���fdd�}�fdd�}t�||�|_dS)Ncst�|�|�dSr)rZsend_handle)r
r�Znew_fdr
rr1szDupFd.__init__.<locals>.sendcst���dSr)�osrr
r r
rr3szDupFd.__init__.<locals>.close)r!rrrr)r�fdrrr
r rr/s
zDupFd.__init__c
Cs.t�|j��}t�|�W5QR�SQRXdS)z-Get the fd.  This should only be called once.N)rrrrZrecv_handle)rr
r
r
rr7szDupFd.detachNrr
r
r
rr-sc@sNeZdZdZdd�Zdd�Zedd��Zdd	d
�Zdd�Z	d
d�Z
dd�ZdS)�_ResourceSharerz.Manager for resources using background thread.cCs@d|_i|_g|_t��|_d|_d|_d|_t	�
|tj�dS)Nr)
�_key�_cache�
_old_locks�	threading�Lock�_lock�	_listener�_address�_threadrZregister_after_forkr#�
_afterfork)rr
r
rr?s
z_ResourceSharer.__init__c
CsZ|j�J|jdkr|��|jd7_||f|j|j<|j|jfW5QR�SQRXdS)z+Register resource, returning an identifier.Nr)r)r+�_startr$r%)rrrr
r
rrIs
z_ResourceSharer.registercCs<ddlm}|\}}||t��jd�}|�|t��f�|S)z<Return connection from which to receive identified resource.r��Client��authkey)�
connectionr0r�current_processr2rr!�getpid)Zidentr0�address�key�cr
r
rrRs
z_ResourceSharer.get_connectionNc	Cs�ddlm}|j��|jdk	r�||jt��jd�}|�d�|��|j	�
|�|j	��rdt�
d�|j��d|_	d|_d|_|j��D]\}\}}|�q�|j��W5QRXdS)z:Stop the background thread and clear registered resources.rr/Nr1z._ResourceSharer thread did not stop when asked)r3r0r)r+rr4r2rrr,�joinZis_aliverZsub_warningr*r%�items�clear)rZtimeoutr0r8r7rrr
r
rr[s$
�



z_ResourceSharer.stopcCsj|j��D]\}\}}|�q
|j��|j�|j�t��|_|jdk	rT|j�	�d|_d|_
d|_dSr)r%r:r;r&�appendr)r'r(r*rr+r,)rr7rrr
r
rr-ps



z_ResourceSharer._afterforkcCsXddlm}t�d�|t��jd�|_|jj|_	t
j|jd�}d|_
|��||_dS)Nr)�Listenerz0starting listener and thread for sending handlesr1)�targetT)r3r=r�debugrr4r2r*r6r+r'ZThread�_serveZdaemon�startr,)rr=�tr
r
rr.~s

z_ResourceSharer._startc	Cs�ttd�rt�tjt���zh|j���T}|��}|dkrHW5QR�Wq�|\}}|j�	|�\}}z|||�W5|�XW5QRXWqt
��s�tj
t���YqXqdS)N�pthread_sigmask)�hasattr�signalrC�	SIG_BLOCK�
valid_signalsr*ZacceptZrecvr%�poprZ
is_exiting�sys�
excepthook�exc_info)rr
�msgr7Zdestination_pidrrr
r
rr@�s
z_ResourceSharer._serve)N)rrrrrr�staticmethodrrr-r.r@r
r
r
rr#=s
	

r#)r!rErrIr'�r�contextrr�__all__�platform�objectrrr#rrr
r
r
r�<module>s 


`PK{��\�Y0%8888(__pycache__/shared_memory.cpython-38.pycnu�[���U

e5dD�@s�dZddgZddlmZddlZddlZddlZddlZddlZej	dkrXddl
Z
dZnddlZdZej
ejBZd	Zer~d
ZndZdd
�ZGdd�d�ZdZGdd�d�ZdS)z�Provides shared memory for direct access across processes.

The API of this package is currently provisional. Refer to the
documentation for details.
�SharedMemory�
ShareableList�)�partialN�ntFT�z/psm_Zwnsm_cCsBttt�d}|dks td��tt�|�}t|�tks>t�|S)z6Create a random filename for the shared memory object.�z_SHM_NAME_PREFIX too long)�_SHM_SAFE_NAME_LENGTH�len�_SHM_NAME_PREFIX�AssertionError�secretsZ	token_hex)�nbytes�name�r�5/usr/lib64/python3.8/multiprocessing/shared_memory.py�_make_filename&s
rc@s�eZdZdZdZdZdZdZej	Z
dZer.dndZ
ddd	�Zd
d�Zdd
�Zdd�Zedd��Zedd��Zedd��Zdd�Zdd�ZdS)ra�Creates a new shared memory block or attaches to an existing
    shared memory block.

    Every shared memory block is assigned a unique name.  This enables
    one process to create a shared memory block with a particular name
    so that a different process can attach to that same shared memory
    block using that same name.

    As a resource for sharing data across processes, shared memory blocks
    may outlive the original process that created them.  When one process
    no longer needs access to a shared memory block that might still be
    needed by other processes, the close() method should be called.
    When a shared memory block is no longer needed by any process, the
    unlink() method should be called to ensure proper cleanup.N���i�TFrc
	Csl|dkstd��|r0ttjB|_|dkr0td��|dkrL|jtj@sLtd��t�rH|dkr�t�}ztj	||j|j
d�|_Wntk
r�YqZYnX||_
q�qZn.|jr�d|n|}tj	||j|j
d�|_||_
z<|r�|r�t�|j|�t�|j�}|j}t�|j|�|_Wn tk
�r*|���YnXddlm}||j
d	��n|�r�|dk�r^t�n|}t�tjtjtj|d
?d@|d@|�}zXt��}|tjk�r�|dk	�r�tt j!t�"t j!�|tj��nW��qNtjd||d
�|_W5t�|�X||_
�qV�qNnX||_
t�#tj$d|�}zt�%|tj$ddd�}	W5t�|�Xt�&|	�}tjd||d
�|_||_'t(|j�|_)dS)Nrz!'size' must be a positive integerz4'size' must be a positive number different from zeroz&'name' can only be None if create=True)�mode�/�)�register�
shared_memory� l��r)ZtagnameF)*�
ValueError�_O_CREX�os�O_RDWR�_flags�O_EXCL�
_USE_POSIXr�_posixshmemZshm_open�_mode�_fd�FileExistsError�_name�_prepend_leading_slash�	ftruncate�fstat�st_size�mmap�_mmap�OSError�unlink�resource_trackerr�_winapiZCreateFileMappingZINVALID_HANDLE_VALUEZNULLZPAGE_READWRITEZCloseHandleZGetLastErrorZERROR_ALREADY_EXISTS�errnoZEEXIST�strerrorZOpenFileMappingZ
FILE_MAP_READZ
MapViewOfFileZVirtualQuerySize�_size�
memoryview�_buf)
�selfr�create�sizeZstatsrZ	temp_nameZh_mapZlast_error_codeZp_bufrrr�__init__Is��
�
�

�
��
zSharedMemory.__init__cCs&z|��Wntk
r YnXdS�N)�closer+�r4rrr�__del__�szSharedMemory.__del__cCs|j|jd|jffS)NF)�	__class__rr6r:rrr�
__reduce__�s��zSharedMemory.__reduce__cCs|jj�d|j�d|j�d�S)N�(z, size=�))r<�__name__rr6r:rrr�__repr__�szSharedMemory.__repr__cCs|jS)z4A memoryview of contents of the shared memory block.)r3r:rrr�buf�szSharedMemory.bufcCs.|j}tr*|jr*|j�d�r*|jdd�}|S)z4Unique name that identifies the shared memory block.rrN)r$rr%�
startswith)r4Z
reported_namerrrr�s

zSharedMemory.namecCs|jS)zSize in bytes.)r1r:rrrr6�szSharedMemory.sizecCsX|jdk	r|j��d|_|jdk	r4|j��d|_trT|jdkrTt�|j�d|_dS)zkCloses access to the shared memory from this instance but does
        not destroy the shared memory block.Nrr)r3�releaser*r9rr"rr:rrrr9�s



zSharedMemory.closecCs2tr.|jr.ddlm}t�|j�||jd�dS)z�Requests that the underlying shared memory block be destroyed.

        In order to ensure proper cleanup of resources, unlink should be
        called once (and only once) across all processes which have access
        to the shared memory block.r)�
unregisterrN)rr$r-rEr Z
shm_unlink)r4rErrrr,�s
zSharedMemory.unlink)NFr)r@�
__module__�__qualname__�__doc__r$r"r*r3rrrr!rr%r7r;r=rA�propertyrBrr6r9r,rrrrr0s(
l




�utf8c@seZdZdZedededededdj	diZ
dZd	d
�dd
�dd
�d
d
�d�Ze
dd��Zd6dd�dd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zd d!�Zd"d#�Zed$d%��Zed&d'��Zed(d)��Zed*d+��Zed,d-��Zed.d/��Zed0d1��Zd2d3�Z d4d5�Z!dS)7ra�Pattern for a mutable list-like object shareable via a shared
    memory block.  It differs from the built-in list type in that these
    lists can not change their overall length (i.e. no append, insert,
    etc.)

    Because values are packed into a memoryview as bytes, the struct
    packing format for any storable value must require no more than 8
    characters to describe its format.�q�dzxxxxxxx?z%dsNzxxxxxx?x�cCs|Sr8r��valuerrr�<lambda>
�zShareableList.<lambda>cCs|�d��t�S�N�)�rstrip�decode�	_encodingrNrrrrPrQcCs
|�d�SrR)rTrNrrrrPrQcCsdSr8r)Z_valuerrrrP
rQ)rrr�cCs:t|ttdjf�sdSt|t�r$dSt|t�r2dSdSdS)z�Used in concert with _back_transforms_mapping to convert values
        into the appropriate Python objects when retrieving them from
        the list as well as when storing them.NrrrrW)�
isinstance�str�bytesr<rNrrr�_extract_recreation_codes

z&ShareableList._extract_recreation_code�rcs�|dk	r��fdd�|D�}t|��_tdd�|D���jks@t�t�fdd�|D���_�fdd�|D�}t�d�jd�	|��j
�j�}nd	}|dk	r�|dkr�t|��_
nt|d
|d��_
|dk	�rjt�tjd�j�j
jd�jf�j��tjd�	|��j
j�jf�fd
d�|D���tj�j
�j
j�jf�fdd�|D���tj�j�j
j�jf|��n t���_t��j�j
jd	��_dS)NcsPg|]H}t|ttf�s$�jt|�n&�jt|��jt|��jdf�qS)r)rXrYrZ�_types_mapping�type�
_alignmentr	��.0�itemr:rr�
<listcomp> s���z*ShareableList.__init__.<locals>.<listcomp>css|]}t|�dkVqdS)rMN)r	�ra�fmtrrr�	<genexpr>)sz)ShareableList.__init__.<locals>.<genexpr>c3s0|](}|ddkr�jnt|dd��VqdS)r�sN)r_�intrdr:rrrf*s�csg|]}��|��qSr)r[r`r:rrrc.srK�rMT)r5r6rc3s&|]}t|t�r|���n|VqdSr8)rXrY�encode�ra�v��_encrrrfMsc3s|]}|���VqdSr8)rjrkrmrrrfSs)r	�	_list_len�sumr�tuple�_allocated_bytes�structZcalcsize�_format_size_metainfo�join�_format_packing_metainfo�_format_back_transform_codesr�shmrV�	pack_intorB�_offset_data_start�_offset_packing_formats�_offset_back_transform_codes�unpack_from)r4ZsequencerZ_formatsZ_recreation_codesZrequested_sizer)rnr4rr7s|
�
�

�����
��������
�zShareableList.__init__cCsj|dkr|n||j}||jks*|jdkr2td��t�d|jj|j|d�d}|�d�}|�t	�}|S)z>Gets the packing format for a single value stored in the list.r� Requested position out of range.�8srMrS)
ro�
IndexErrorrsr}rxrBr{rTrUrV)r4�positionrlre�
fmt_as_strrrr�_get_packing_formatds��

z!ShareableList._get_packing_formatcCs\|dkr|n||j}||jks*|jdkr2td��t�d|jj|j|�d}|j|}|S)z9Gets the back transformation function for a single value.rr~�b)ror�rsr}rxrBr|�_back_transforms_mapping)r4r��transform_codeZtransform_functionrrr�_get_back_transformts��
z!ShareableList._get_back_transformcCs~|dkr|n||j}||jks*|jdkr2td��t�d|jj|j|d|�t��|�	|�}t�d|jj|j
||�dS)zvSets the packing format and back transformation code for a
        single value in the list at the specified position.rr~rrMr�N)ror�rsryrxrBr{rjrVr[r|)r4r�r�rOr�rrr�!_set_packing_format_and_transform�s �
�z/ShareableList._set_packing_format_and_transformcCsjz6|jt|jd|��}t�|�|�|jj|�\}Wntk
rRtd��YnX|�	|�}||�}|S)Nzindex out of range)
rzrprrrsr}r�rxrBr�r�)r4r��offsetrlZback_transformrrr�__getitem__�s��

zShareableList.__getitem__cCs�z&|jt|jd|��}|�|�}Wntk
rBtd��YnXt|ttf�sf|jt	|�}|}nZt|t�rz|�
t�n|}t|�|j|kr�t
d��|ddkr�|}n|jt|j|f}|�|||�t�||jj||�dS)Nzassignment index out of rangez(bytes/str item exceeds available storagerrg)rzrprrr�r�rXrYrZr]r^rjrVr	rr�rsryrxrB)r4r�rOr�Zcurrent_formatZ
new_formatZ
encoded_valuerrr�__setitem__�s6�����zShareableList.__setitem__cCst|j|jjd�dfS)Nr\r)rr<rxrr:rrrr=�szShareableList.__reduce__cCst�d|jjd�dS)NrKr)rsr}rxrBr:rrr�__len__�szShareableList.__len__cCs"|jj�dt|��d|jj�d�S)Nr>z, name=r?)r<r@�listrxrr:rrrrA�szShareableList.__repr__csd��fdd�t�j�D��S)z>The struct packing format used by all currently stored values.ric3s|]}��|�VqdSr8)r�)ra�ir:rrrf�sz'ShareableList.format.<locals>.<genexpr>)ru�rangeror:rr:r�format�s�zShareableList.formatcCs|j�d�S)z=The struct packing format used for metainfo on storage sizes.rK�ror:rrrrt�sz#ShareableList._format_size_metainfocCs
d|jS)z?The struct packing format used for the values' packing formats.rr�r:rrrrv�sz&ShareableList._format_packing_metainfocCs
d|jS)z?The struct packing format used for the values' back transforms.r�r�r:rrrrw�sz*ShareableList._format_back_transform_codescCs|jddS)NrrMr�r:rrrrz�sz ShareableList._offset_data_startcCs|jt|j�Sr8)rzrprrr:rrrr{�sz%ShareableList._offset_packing_formatscCs|j|jdS)NrM)r{ror:rrrr|�sz*ShareableList._offset_back_transform_codescst�fdd�|D��S)zCL.count(value) -> integer -- return number of occurrences of value.c3s|]}�|kVqdSr8r)ra�entryrNrrrf�sz&ShareableList.count.<locals>.<genexpr>)rp)r4rOrrNr�count�szShareableList.countcCs4t|�D]\}}||kr|Sqt|�d���dS)zpL.index(value) -> integer -- return first index of value.
        Raises ValueError if the value is not present.z not in this containerN)�	enumerater)r4rOr�r�rrr�index�s
zShareableList.index)N)"r@rFrGrHrh�float�boolrYrZr<r]r_r��staticmethodr[r7r�r�r�r�r�r=r�rArIr�rtrvrwrzr{r|r�r�rrrrr�s^
��

F






)rH�__all__�	functoolsrr)rr/rsrrr.rr �O_CREATrrrr
rrrVrrrrr�<module>s,

EPK{��\�z��{ { +__pycache__/forkserver.cpython-38.opt-1.pycnu�[���U

e5d�0�@s�ddlZddlZddlZddlZddlZddlZddlZddlZddlZddl	m
Z
ddl	mZddlm
Z
ddl	mZddl	mZddl	mZd	d
ddgZd
Ze�d�ZGdd�de�Zddd�Zdd�Zdd�Zdd�Ze�ZejZejZejZejZdS)�N�)�
connection)�process)�	reduction)�resource_tracker)�spawn)�util�ensure_running�get_inherited_fds�connect_to_new_process�set_forkserver_preload��qc@sDeZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	dS)�
ForkServercCs.d|_d|_d|_d|_t��|_dg|_dS)N�__main__)�_forkserver_address�_forkserver_alive_fd�_forkserver_pid�_inherited_fds�	threadingZLock�_lock�_preload_modules��self�r�2/usr/lib64/python3.8/multiprocessing/forkserver.py�__init__"s
zForkServer.__init__c	Cs|j�|��W5QRXdS�N)r�_stop_unlockedrrrr�_stop*szForkServer._stopcCsV|jdkrdSt�|j�d|_t�|jd�d|_t�|j�sLt�|j�d|_dS)Nr)	r�os�closer�waitpidr�is_abstract_socket_namespacer�unlinkrrrrr/s
zForkServer._stop_unlockedcCs&tdd�|jD��std��||_dS)z>Set list of module names to try to load in forkserver process.css|]}t|�tkVqdSr)�type�str)�.0�modrrr�	<genexpr>@sz4ForkServer.set_forkserver_preload.<locals>.<genexpr>z&module_names must be a list of stringsN)�allr�	TypeError)rZ
modules_namesrrrr>sz!ForkServer.set_forkserver_preloadcCs|jS)z�Return list of fds inherited from parent process.

        This returns None if the current process was not started by fork
        server.
        )rrrrrr
DszForkServer.get_inherited_fdsc
Cs�|��t|�dtkr td��t�tj���}|�|j�t�	�\}}t�	�\}}|||j
t��g}||7}zNz&t�||�||fWW�4W5QR�St�
|�t�
|��YnXW5t�
|�t�
|�XW5QRXdS)a;Request forkserver to create a child process.

        Returns a pair of fds (status_r, data_w).  The calling process can read
        the child process's pid and (eventually) its returncode from status_r.
        The calling process should write to data_w the pickled preparation and
        process data.
        �ztoo many fdsN)r	�len�MAXFDS_TO_SEND�
ValueError�socket�AF_UNIXZconnectrr �piperrZgetfdr!rZsendfds)r�fdsZclientZparent_r�child_w�child_rZparent_wZallfdsrrrrLs(�


z!ForkServer.connect_to_new_processcs�|j��~t��|jdk	r`t�|jtj�\}}|sBW5QR�dSt�|j�d|_	d|_d|_d}|j
r�ddh�t�d�}�fdd�|�
�D�}ni}t�tj���}t�d�}|�|�t�|�s�t�|d	�|��t��\}}ztzV|��|g}	||��||j
|f;}t��}
|
gt��}|d
|g7}t�|
||	�}Wnt�|��YnXW5t�|�X||_	||_||_W5QRXW5QRXdS)z�Make sure that a fork server is running.

        This can be called from any process.  Note that usually a child
        process will just reuse the forkserver started by its parent, so
        ensure_running() will do nothing.
        NzCfrom multiprocessing.forkserver import main; main(%d, %d, %r, **%r)�	main_path�sys_path�ignorecsi|]\}}|�kr||�qSrr)r'�x�y�Zdesired_keysrr�
<dictcomp>�sz-ForkServer.ensure_running.<locals>.<dictcomp>r1i�z-c)rrr	rr r"�WNOHANGr!rrrrZget_preparation_data�itemsr0r1rZarbitrary_addressZbindrr#�chmodZlistenr2�filenoZget_executableZ_args_from_interpreter_flagsZspawnv_passfds)r�pidZstatus�cmd�data�listenerZaddress�alive_rZalive_wZfds_to_passZexe�argsrr;rr	isN





�
zForkServer.ensure_runningN)
�__name__�
__module__�__qualname__rrrrr
rr	rrrrr srcCs�|rdd|kr8|dk	r8dt��_zt�|�W5t��`X|D]&}zt|�Wq<tk
r`Yq<Xq<t��t	�
�\}}t	�|d�t	�|d�dd�}tj
|tjtji}	dd�|	��D�}
t�|�i}tjtj|d	����}t�����}
|��t_|
�|tj�|
�|tj�|
�|tj��znd
d�|
��D�}|�r"�qB�q"||k�rPt�||k�rBt	�|d�zt	�d
t	j�\}}Wnt k
�r�Y�qBYnX|dk�r��qB|�!|d�}|dk	�r0t	�"|��r�t	�#|�}n&t	�$|��s�t%d�&||���t	�'|�}zt(||�Wnt)k
�r"YnXt	�*|�nt+�,d|��qf||k�r�|�-�d��,}t.�/|t0d�}t1|�t0k�r�t2d�&t1|����|^}}}|�*�t	�3�}|dk�r4d}zpz<|�*�|
�*�||||g}|�5|�6��t7||||
�}Wn.t8k
�r t9j:t9�;��t9j<�=�YnXW5t	�4|�XnNzt(||�Wnt)k
�rXYnX|||<t	�*|�|D]}t	�*|��qpW5QRXWn4t>k
�r�}z|j?t?j@k�r��W5d}~XYnX�qW5QRXW5QRXdS)zRun forkserver.rNTFcWsdSrr)Z_unusedrrr�sigchld_handler�szmain.<locals>.sigchld_handlercSsi|]\}}|t�||��qSr)�signal)r'�sig�valrrrr<�s�zmain.<locals>.<dictcomp>)r@cSsg|]\}}|j�qSr)Zfileobj)r'�keyZeventsrrr�
<listcomp>�szmain.<locals>.<listcomp>i���rzChild {0:n} status is {1:n}z.forkserver: waitpid returned unexpected pid %drzToo many ({0:n}) fds to send)ArZcurrent_processZ_inheritingrZimport_main_path�
__import__�ImportErrorrZ_close_stdinr r2�set_blockingrK�SIGCHLD�SIGINT�SIG_IGNr>�
set_wakeup_fdr0r1�	selectorsZDefaultSelectorZgetsockname�_forkserverr�registerZ
EVENT_READZselect�
SystemExit�readr"r=�ChildProcessError�pop�WIFSIGNALED�WTERMSIG�	WIFEXITED�AssertionError�format�WEXITSTATUS�write_signed�BrokenPipeErrorr!�warnings�warnZacceptrZrecvfdsr.r-�RuntimeError�fork�_exit�extend�values�
_serve_one�	Exception�sys�
excepthook�exc_info�stderr�flush�OSError�errnoZECONNABORTED)Zlistener_fdrEZpreloadr6r7�modnameZsig_rZsig_wrJ�handlersZold_handlersZ	pid_to_fdrDZselectorZrfdsrA�stsr4�
returncode�sr3r5�code�
unused_fds�fd�errr�main�s�

��
�




��
�

��

�
r�c	Csht�d�|��D]\}}t�||�q|D]}t�|�q,|^t_tj_	t_
t�|�}t�
||�}|S)NrP)rKrWr>r r!rYrrZ_resource_trackerZ_fdr�duprZ_main)	r5r3r}rxrLrMr~Zparent_sentinelr|rrrrn1s
�
rncCsNd}tj}t|�|kr@t�||t|��}|s6td��||7}q
t�|�dS)N�zunexpected EOFr)�
SIGNED_STRUCT�sizer-r r\�EOFErrorZunpack)r~rCZlengthr{rrr�read_signedHs
r�cCs<t�|�}|r8t�||�}|dkr*td��||d�}q
dS)Nrzshould not get here)r�Zpackr �writeri)r~�n�msg�nbytesrrrreRs
re)NN) rvr rXrKr0Zstructrprrg�rr�contextrrrr�__all__r.ZStructr��objectrr�rnr�rerYr	r
rrrrrr�<module>s>�


PK{��\�1�� � %__pycache__/forkserver.cpython-38.pycnu�[���U

e5d�0�@s�ddlZddlZddlZddlZddlZddlZddlZddlZddlZddl	m
Z
ddl	mZddlm
Z
ddl	mZddl	mZddl	mZd	d
ddgZd
Ze�d�ZGdd�de�Zddd�Zdd�Zdd�Zdd�Ze�ZejZejZejZejZdS)�N�)�
connection)�process)�	reduction)�resource_tracker)�spawn)�util�ensure_running�get_inherited_fds�connect_to_new_process�set_forkserver_preload��qc@sDeZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	dS)�
ForkServercCs.d|_d|_d|_d|_t��|_dg|_dS)N�__main__)�_forkserver_address�_forkserver_alive_fd�_forkserver_pid�_inherited_fds�	threadingZLock�_lock�_preload_modules��self�r�2/usr/lib64/python3.8/multiprocessing/forkserver.py�__init__"s
zForkServer.__init__c	Cs|j�|��W5QRXdS�N)r�_stop_unlockedrrrr�_stop*szForkServer._stopcCsV|jdkrdSt�|j�d|_t�|jd�d|_t�|j�sLt�|j�d|_dS)Nr)	r�os�closer�waitpidr�is_abstract_socket_namespacer�unlinkrrrrr/s
zForkServer._stop_unlockedcCs&tdd�|jD��std��||_dS)z>Set list of module names to try to load in forkserver process.css|]}t|�tkVqdSr)�type�str)�.0�modrrr�	<genexpr>@sz4ForkServer.set_forkserver_preload.<locals>.<genexpr>z&module_names must be a list of stringsN)�allr�	TypeError)rZ
modules_namesrrrr>sz!ForkServer.set_forkserver_preloadcCs|jS)z�Return list of fds inherited from parent process.

        This returns None if the current process was not started by fork
        server.
        )rrrrrr
DszForkServer.get_inherited_fdsc
Cs�|��t|�dtkr td��t�tj���}|�|j�t�	�\}}t�	�\}}|||j
t��g}||7}zNz&t�||�||fWW�4W5QR�St�
|�t�
|��YnXW5t�
|�t�
|�XW5QRXdS)a;Request forkserver to create a child process.

        Returns a pair of fds (status_r, data_w).  The calling process can read
        the child process's pid and (eventually) its returncode from status_r.
        The calling process should write to data_w the pickled preparation and
        process data.
        �ztoo many fdsN)r	�len�MAXFDS_TO_SEND�
ValueError�socket�AF_UNIXZconnectrr �piperrZgetfdr!rZsendfds)r�fdsZclientZparent_r�child_w�child_rZparent_wZallfdsrrrrLs(�


z!ForkServer.connect_to_new_processcs�|j��~t��|jdk	r`t�|jtj�\}}|sBW5QR�dSt�|j�d|_	d|_d|_d}|j
r�ddh�t�d�}�fdd�|�
�D�}ni}t�tj���}t�d�}|�|�t�|�s�t�|d	�|��t��\}}ztzV|��|g}	||��||j
|f;}t��}
|
gt��}|d
|g7}t�|
||	�}Wnt�|��YnXW5t�|�X||_	||_||_W5QRXW5QRXdS)z�Make sure that a fork server is running.

        This can be called from any process.  Note that usually a child
        process will just reuse the forkserver started by its parent, so
        ensure_running() will do nothing.
        NzCfrom multiprocessing.forkserver import main; main(%d, %d, %r, **%r)�	main_path�sys_path�ignorecsi|]\}}|�kr||�qSrr)r'�x�y�Zdesired_keysrr�
<dictcomp>�sz-ForkServer.ensure_running.<locals>.<dictcomp>r1i�z-c)rrr	rr r"�WNOHANGr!rrrrZget_preparation_data�itemsr0r1rZarbitrary_addressZbindrr#�chmodZlistenr2�filenoZget_executableZ_args_from_interpreter_flagsZspawnv_passfds)r�pidZstatus�cmd�data�listenerZaddress�alive_rZalive_wZfds_to_passZexe�argsrr;rr	isN





�
zForkServer.ensure_runningN)
�__name__�
__module__�__qualname__rrrrr
rr	rrrrr srcCs�|rdd|kr8|dk	r8dt��_zt�|�W5t��`X|D]&}zt|�Wq<tk
r`Yq<Xq<t��t	�
�\}}t	�|d�t	�|d�dd�}tj
|tjtji}	dd�|	��D�}
t�|�i}tjtj|d	���}t�����}
|��t_|
�|tj�|
�|tj�|
�|tj��z�d
d�|
��D�}|�r"�qB�q"||k�rjt	�|d�d
k�sftd��t�||k�r\t	�|d�zt	�dt	j �\}}Wnt!k
�r�Y�q\YnX|dk�rq\|�"|d�}|dk	�rJt	�#|��r�t	�$|�}n&t	�%|��std�&||���t	�'|�}zt(||�Wnt)k
�r<YnXt	�*|�nt+�,d|��q�||k�r�|�-�d��,}t.�/|t0d�}t1|�t0k�r�t2d�&t1|����|^}}}|�*�t	�3�}|dk�rNd}zpz<|�*�|
�*�||||g}|�5|�6��t7||||
�}Wn.t8k
�r:t9j:t9�;��t9j<�=�YnXW5t	�4|�XnNzt(||�Wnt)k
�rrYnX|||<t	�*|�|D]}t	�*|��q�W5QRXWn4t>k
�r�}z|j?t?j@k�r̂W5d}~XYnX�qW5QRXW5QRXdS)zRun forkserver.rNTFcWsdSrr)Z_unusedrrr�sigchld_handler�szmain.<locals>.sigchld_handlercSsi|]\}}|t�||��qSr)�signal)r'�sig�valrrrr<�s�zmain.<locals>.<dictcomp>)r@cSsg|]\}}|j�qSr)Zfileobj)r'�keyZeventsrrr�
<listcomp>�szmain.<locals>.<listcomp>r�zNot at EOF?i���rzChild {0:n} status is {1:n}z.forkserver: waitpid returned unexpected pid %dzToo many ({0:n}) fds to send)ArZcurrent_processZ_inheritingrZimport_main_path�
__import__�ImportErrorrZ_close_stdinr r2�set_blockingrK�SIGCHLD�SIGINT�SIG_IGNr>�
set_wakeup_fdr0r1�	selectorsZDefaultSelectorZgetsockname�_forkserverr�registerZ
EVENT_READZselect�read�AssertionError�
SystemExitr"r=�ChildProcessError�pop�WIFSIGNALED�WTERMSIG�	WIFEXITED�format�WEXITSTATUS�write_signed�BrokenPipeErrorr!�warnings�warnZacceptrZrecvfdsr.r-�RuntimeError�fork�_exit�extend�values�
_serve_one�	Exception�sys�
excepthook�exc_info�stderr�flush�OSError�errnoZECONNABORTED)Zlistener_fdrEZpreloadr6r7�modnameZsig_rZsig_wrJ�handlersZold_handlersZ	pid_to_fdrDZselectorZrfdsrA�stsr4�
returncode�sr3r5�code�
unused_fds�fd�errr�main�s�

��
�




��
�

��

�
r�c	Csht�d�|��D]\}}t�||�q|D]}t�|�q,|^t_tj_	t_
t�|�}t�
||�}|S)NrQ)rKrXr>r r!rZrrZ_resource_trackerZ_fdr�duprZ_main)	r5r3r~ryrLrMrZparent_sentinelr}rrrro1s
�
rocCsNd}tj}t|�|kr@t�||t|��}|s6td��||7}q
t�|�dS)NrPzunexpected EOFr)�
SIGNED_STRUCT�sizer-r r\�EOFErrorZunpack)rrCZlengthr|rrr�read_signedHs
r�cCs<t�|�}|r8t�||�}|dkr*td��||d�}q
dS)Nrzshould not get here)r�Zpackr �writerj)r�n�msg�nbytesrrrrfRs
rf)NN) rwr rYrKr0Zstructrqrrh�rr�contextrrrr�__all__r.ZStructr��objectrr�ror�rfrZr	r
rrrrrr�<module>s>�


PK{��\_��w��,__pycache__/popen_spawn_posix.cpython-38.pycnu�[���U

e5d��@spddlZddlZddlmZmZddlmZddlmZddlmZdgZ	Gdd	�d	e
�ZGd
d�dej�ZdS)�N�)�	reduction�set_spawning_popen)�
popen_fork)�spawn)�util�Popenc@seZdZdd�Zdd�ZdS)�_DupFdcCs
||_dS�N��fd��selfr�r�9/usr/lib64/python3.8/multiprocessing/popen_spawn_posix.py�__init__sz_DupFd.__init__cCs|jSr
r)rrrr�detachsz
_DupFd.detachN)�__name__�
__module__�__qualname__rrrrrrr	sr	cs4eZdZdZeZ�fdd�Zdd�Zdd�Z�Z	S)rrcsg|_t��|�dSr
)�_fds�superr)r�process_obj��	__class__rrrszPopen.__init__cCs|j�|�|Sr
)r�appendr
rrr�duplicate_for_child"szPopen.duplicate_for_childcCsXddlm}|��}|j�|�t�|j�}t�	�}t
|�zt�||�t�||�W5t
d�Xd}}}}	z~t��\}}t��\}}	tj||d�}|j�||g�t
�t��||j�|_||_t|	ddd��}
|
�|���W5QRXW5g}
||	fD]}|dk	�r|
�|��qt
�|t
j|
�|_||fD]}|dk	�r6t�|��q6XdS)Nr)�resource_tracker)�
tracker_fdZpipe_handle�wbF)�closefd)�rZgetfdrrrZget_preparation_data�_name�io�BytesIOrr�dumprZFinalizeZ	close_fds�	finalizer�os�close�pipeZget_command_line�extendZspawnv_passfdsZget_executable�pid�sentinel�open�write�	getbuffer)rrrrZ	prep_data�fpZparent_rZchild_wZchild_rZparent_wZfds_to_closer�cmd�frrr�_launch&sB
�
�

z
Popen._launch)
rrr�methodr	ZDupFdrrr3�
__classcell__rrrrrs
)
r#r'�contextrrr!rrr�__all__�objectr	rrrrr�<module>s
PK{��\@hr+CC+__pycache__/resource_tracker.cpython-38.pycnu�[���U

e5d�!�@s�ddlZddlZddlZddlZddlZddlmZddlmZdddgZe	ed�Z
ejejfZ
d	d
d�iZejdkr�ddlZddlZe�ejejd
��Gdd�de�Ze�ZejZejZejZejZdd�ZdS)�N�)�spawn)�util�ensure_running�register�
unregister�pthread_sigmaskZnoopcCsdS�N�r
r
r
�8/usr/lib64/python3.8/multiprocessing/resource_tracker.py�<lambda>!�r�posix)Z	semaphoreZ
shared_memoryc@sLeZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	dd�Z
dS)�ResourceTrackercCst��|_d|_d|_dSr	)�	threadingZLock�_lock�_fd�_pid��selfr
r
r�__init__0s
zResourceTracker.__init__c	CsT|j�D|jdkr W5QR�dSt�|j�d|_t�|jd�d|_W5QRXdS)Nr)rr�os�close�waitpidrrr
r
r�_stop5s
zResourceTracker._stopcCs|��|jSr	)rrrr
r
r�getfdBszResourceTracker.getfdcCst|j��b|jdk	r~|��r*W5QR�dSt�|j�z|jdk	rPt�|jd�Wntk
rfYnXd|_d|_t�	d�g}z|�
tj�
��Wntk
r�YnXd}t��\}}z�zr|�
|�t��}|gt��}|d||g7}z&t�rt�tjt�t�|||�}W5t�r,t�tjt�XWnt�|��YnX||_||_W5t�|�XW5QRXdS)z�Make sure that resource tracker process is running.

        This can be run from any process.  Usually a child process will use
        the resource created by its parent.NrzUresource_tracker: process died unexpectedly, relaunching.  Some resources might leak.z:from multiprocessing.resource_tracker import main;main(%d)z-c)rr�_check_aliverrrr�ChildProcessError�warnings�warn�append�sys�stderr�fileno�	Exception�piperZget_executablerZ_args_from_interpreter_flags�
_HAVE_SIGMASK�signalr�SIG_UNBLOCK�_IGNORED_SIGNALS�	SIG_BLOCKZspawnv_passfds)rZfds_to_pass�cmd�r�wZexe�args�pidr
r
rrFsJ






zResourceTracker.ensure_runningcCs2zt�|jd�Wntk
r(YdSXdSdS)z;Check that the pipe has not been closed by sending a probe.s
PROBE:0:noop
FTN)r�writer�OSErrorrr
r
rr�s
zResourceTracker._check_alivecCs|�d||�dS)z0Register name of resource with resource tracker.�REGISTERN��_send�r�name�rtyper
r
rr�szResourceTracker.registercCs|�d||�dS)z2Unregister name of resource with resource tracker.�
UNREGISTERNr3r5r
r
rr�szResourceTracker.unregistercCsb|��d�|||��d�}t|�dkr0td��t�|j|�}|t|�ks^td�|t|����dS)Nz{0}:{1}:{2}
�asciiiz
name too longznbytes {0:n} but len(msg) {1:n})	r�format�encode�len�
ValueErrorrr0r�AssertionError)rr+r6r7�msg�nbytesr
r
rr4�s�zResourceTracker._sendN)�__name__�
__module__�__qualname__rrrrrrrr4r
r
r
rr.s
@rc
Cst�tjtj�t�tjtj�tr2t�tjt�tj	tj
fD]&}z|��Wq>tk
rbYq>Xq>dd�t
��D�}z�t|d���}|D]�}z�|���d��d�\}}}t
�|d�}	|	dkr�td	|�d
|����|dkr�||�|�n2|dk�r||�|�n|d
k�rntd|��Wq�tk
�rTztjt���WnYnXYq�Xq�W5QRXW5|��D]�\}}|�r�zt�dt|�|f�Wntk
�r�YnX|D]V}zLzt
||�Wn6tk
�r�}zt�d||f�W5d}~XYnXW5X�q��qnXdS)zRun resource tracker.cSsi|]}|t��qSr
)�set)�.0r7r
r
r�
<dictcomp>�szmain.<locals>.<dictcomp>zQresource_tracker: There appear to be %d leaked %s objects to clean up at shutdownzresource_tracker: %r: %sN�rbr9�:zCannot register z. for automatic cleanup: unknown resource type r2r8ZPROBEzunrecognized command %r)r'�SIGINT�SIG_IGN�SIGTERMr&rr(r)r!�stdin�stdoutrr$�_CLEANUP_FUNCS�keys�itemsrrr<�open�strip�decode�split�getr=�add�remove�RuntimeError�
excepthook�exc_info)
�fd�f�cacher7Zrtype_cacher6�e�liner+Zcleanup_funcr
r
r�main�s^�


�
(r`)rr'r!rr�rr�__all__�hasattrr&rIrKr)rNr6Z_multiprocessingZ_posixshmem�updateZ
sem_unlinkZ
shm_unlink�objectrZ_resource_trackerrrrrr`r
r
r
r�<module>s4

�
�wPK{��\���Fy�y�)__pycache__/managers.cpython-38.opt-1.pycnu�[���U

e5d��@sBdddddgZddlZddlZddlZddlZddlZddlZddlZddlmZddl	m
Z
d	d
lmZd	dl
mZmZmZd	dlmZd	d
lmZd	dlmZd	dlmZzd	dlmZdZWnek
r�dZYnXdd�Ze�eje�dd�dD�Zedek	�r.dd�ZeD]Ze�ee��qGdd�de�Zdifdd�Z dd�Z!Gd d!�d!e"�Z#d"d#�Z$d$d%�Z%Gd&d'�d'e�Z&Gd(d)�d)e�Z'ej(ej)fej*ej+fd*�Z,Gd+d�de�Z-Gd,d-�d-e.�Z/Gd.d�de�Z0d/d0�Z1ifd1d2�Z2dld3d4�Z3Gd5d6�d6e�Z4Gd7d8�d8e�Z5dmd9d:�Z6Gd;d<�d<e0�Z7Gd=d>�d>e0�Z8Gd?d@�d@e8�Z9GdAdB�dBe0�Z:GdCdD�dDe0�Z;GdEdF�dFe0�Z<GdGdH�dHe0�Z=e2dIdJ�Z>GdKdL�dLe>�Z?e2dMdN�Z@dOdPie@_Ae2dQdR�ZBe2dSdT�ZCdUdUdUdPdPdV�eC_AGdWdS�dSeC�ZDGdXd�de-�ZEeE�dYejF�eE�dZejF�eE�d[ejGe:�eE�d\ejHe8�eE�d]ejIe8�eE�d^ejJe8�eE�d_ejKe8�eE�d`ejLe9�eE�daejMe;�eE�dbejNeD�eE�dcee?�eE�ddeOe@�eE�d8e5e=�eE�d:e6eB�eE�d6e4e<�eEjdPe7dde�eEjdUddf�e�r>Gdgdh�dh�ZPGdidj�dje&�ZQGdkd�de-�ZRdS)n�BaseManager�SyncManager�	BaseProxy�Token�SharedMemoryManager�N)�getpid)�
format_exc�)�
connection)�	reduction�get_spawning_popen�ProcessError)�pool)�process)�util)�get_context)�
shared_memoryTFcCstj|j|��ffS�N)�array�typecode�tobytes)�a�r�0/usr/lib64/python3.8/multiprocessing/managers.py�reduce_array-srcCsg|]}tti|����qSr)�type�getattr��.0�namerrr�
<listcomp>1sr )�items�keys�valuescCstt|�ffSr)�list��objrrr�rebuild_as_list3sr'c@s4eZdZdZdZdd�Zdd�Zdd�Zd	d
�ZdS)rz3
    Type to uniquely identify a shared object
    ��typeid�address�idcCs||||_|_|_dSrr()�selfr)r*r+rrr�__init__BszToken.__init__cCs|j|j|jfSrr(�r,rrr�__getstate__EszToken.__getstate__cCs|\|_|_|_dSrr(�r,�staterrr�__setstate__HszToken.__setstate__cCsd|jj|j|j|jfS)Nz %s(typeid=%r, address=%r, id=%r))�	__class__�__name__r)r*r+r.rrr�__repr__Ks�zToken.__repr__N)	r4�
__module__�__qualname__�__doc__�	__slots__r-r/r2r5rrrrr<srcCs8|�||||f�|��\}}|dkr*|St||��dS)zL
    Send a message to manager using connection `c` and return response
    �#RETURNN)�send�recv�convert_to_error)�cr+�
methodname�args�kwds�kind�resultrrr�dispatchSs
rDcCsd|dkr|S|dkrRt|t�s4td�||t|����|dkrHtd|�St|�Sntd�|��SdS)N�#ERROR)�
#TRACEBACK�#UNSERIALIZABLEz.Result {0!r} (kind '{1}') type is {2}, not strrGzUnserializable message: %s
zUnrecognized message type {!r})�
isinstance�str�	TypeError�formatr�RemoteError�
ValueError)rBrCrrrr=]s
��
r=c@seZdZdd�ZdS)rLcCsdt|jd�dS)NzM
---------------------------------------------------------------------------
rzK---------------------------------------------------------------------------)rIr@r.rrr�__str__mszRemoteError.__str__N)r4r6r7rNrrrrrLlsrLcCs2g}t|�D] }t||�}t|�r|�|�q|S)z4
    Return a list of names of methods of `obj`
    )�dirr�callable�append)r&�tempr�funcrrr�all_methodsts
rTcCsdd�t|�D�S)zP
    Return a list of names of methods of `obj` which do not start with '_'
    cSsg|]}|ddkr|�qS)r�_rrrrrr �sz"public_methods.<locals>.<listcomp>)rTr%rrr�public_methodssrVc	@s�eZdZdZdddddddd	d
g	Zdd�Zd
d�Zdd�Zdd�Zdd�Z	dd�Z
dd�Zdd�Zeee
d�Z
dd�Zdd�Zd d!�Zd"d#�Zd$d%�Zd&e_d'd(�Zd)d*�Zd+d,�Zd-d.�Zd/S)0�ServerzM
    Server class which runs in a process controlled by a manager object
    �shutdown�create�accept_connection�get_methods�
debug_info�number_of_objects�dummy�incref�decrefcCsxt|t�std�|t|����||_t�|�|_t	|\}}||dd�|_
|j
j|_ddi|_i|_
i|_t��|_dS)Nz&Authkey {0!r} is type {1!s}, not bytes�)r*Zbacklog�0�Nr)rH�bytesrJrKr�registryr�AuthenticationString�authkey�listener_client�listenerr*�	id_to_obj�id_to_refcount�id_to_local_proxy_obj�	threading�Lock�mutex)r,rer*rg�
serializer�Listener�Clientrrrr-�s 
��

zServer.__init__c	Cs�t��|_|t��_zVtj|jd�}d|_|��z|j��sL|j�d�q4Wnttfk
rfYnXW5tjtjkr�t	�
d�tjt_tjt_t�
d�XdS)z(
        Run the server forever
        zresetting stdout, stderrr)�targetTr	N)rm�Event�
stop_eventr�current_process�_manager_server�sys�stdout�
__stdout__r�debug�
__stderr__�stderr�exit�Thread�accepter�daemon�start�is_set�wait�KeyboardInterrupt�
SystemExit)r,r�rrr�
serve_forever�s 




zServer.serve_forevercCsNz|j��}Wntk
r&YqYnXtj|j|fd�}d|_|��qdS)N�rsr@T)riZaccept�OSErrorrmr�handle_requestr�r�)r,r>�trrrr��s
zServer.acceptercCs4d}}}z>t�||j�t�||j�|��}|\}}}}t||�}Wntk
rhdt�f}	Yn>Xz||f|�|�}Wntk
r�dt�f}	Yn
Xd|f}	z|�|	�Wnrtk
�r&}
zRz|�dt�f�Wntk
r�YnXt	�
d|	�t	�
d|�t	�
d|
�W5d}
~
XYnX|��dS)z)
        Handle a new connection
        NrFr:zFailure to send message: %rz ... request was %r� ... exception was %r)r
Zdeliver_challengergZanswer_challenger<r�	Exceptionrr;r�info�close)r,r>�funcnamerC�request�ignorer@rArS�msg�errrr��s2zServer.handle_requestc
Cs�t�dt��j�|j}|j}|j}|j�	��s�zBd}}|�}|\}}}	}
z||\}}}Wn^t
k
r�}
z@z|j|\}}}Wn&t
k
r�}z|
�W5d}~XYnXW5d}
~
XYnX||kr�td|t
|�|f��t||�}z||	|
�}Wn,tk
�r"}zd|f}W5d}~XYnPX|�o4|�|d�}|�rj|�|||�\}}t||j|�}d||ff}nd|f}Wn�tk
�r�|dk�r�dt�f}nNz,|j|}|||||f|	�|
�}d|f}Wn tk
�r�dt�f}YnXYnPtk
�rt�dt��j�t�d	�Yn tk
�r<dt�f}YnXzDz||�Wn2tk
�r~}z|d
t�f�W5d}~XYnXWq$tk
�r�}z@t�dt��j�t�d|�t�d
|�|��t�d�W5d}~XYq$Xq$dS)zQ
        Handle requests from the proxies in a particular process/thread
        z$starting server thread to service %rNz+method %r of %r object is not in exposed=%rrE�#PROXYr:rFz$got EOF -- exiting thread serving %rrrGzexception in thread serving %rz ... message was %rr�r	)rr{rm�current_threadrr<r;rjrur��KeyErrorrl�AttributeErrorrrr��getrYrr*r�fallback_mapping�EOFErrorrxr~r�r�)r,�connr<r;rjr?r&r��identr@rA�exposed�	gettypeid�keZ	second_keZfunction�resr�r�r)ZridentZrexposed�tokenZ
fallback_funcrCrrr�serve_client�s���(��


����$�zServer.serve_clientcCs|Srr�r,r�r�r&rrr�fallback_getvalue5szServer.fallback_getvaluecCst|�Sr�rIr�rrr�fallback_str8szServer.fallback_strcCst|�Sr)�reprr�rrr�
fallback_repr;szServer.fallback_repr)rNr5�	#GETVALUEcCsdSrr�r,r>rrrr^DszServer.dummyc
Cs�|j�tg}t|j���}|��|D]<}|dkr&|�d||j|t|j|d�dd�f�q&d�|�W5QR�SQRXdS)zO
        Return some info --- useful to spot problems with refcounting
        rbz  %s:       refcount=%s
    %srN�K�
)	ror$rkr"�sortrQrIrj�join)r,r>rCr"r�rrrr\Gs
��zServer.debug_infocCs
t|j�S)z*
        Number of shared objects
        )�lenrkr�rrrr]WszServer.number_of_objectscCsLz:zt�d�|�d�Wnddl}|��YnXW5|j��XdS)z'
        Shutdown this process
        z!manager received shutdown message�r:NrN)ru�setrr{r;�	traceback�	print_exc)r,r>r�rrrrX^s
zServer.shutdownc	Os�t|�dkr|^}}}}n�|s(td��n�d|krDtdt|�d��|�d�}t|�dkr~|^}}}ddl}|jd	tdd
�nFd|kr�tdt|�d��|�d�}|^}}ddl}|jdtdd
�t|�}|j��|j|\}}}}	|dk�r|�st|�dk�rt	d
��|d}
n
|||�}
|dk�r2t
|
�}|dk	�rlt|t��s\td�
|t|����t|�t|�}dt|
�}t�d||�|
t|�|f|j|<||jk�r�d|j|<W5QRX|�||�|t|�fS)z>
        Create a new shared object and return its id
        �z8descriptor 'create' of 'Server' object needs an argumentr)�7create expected at least 2 positional arguments, got %dr	�rNz2Passing 'typeid' as keyword argument is deprecated)�
stacklevelr>z-Passing 'c' as keyword argument is deprecatedz4Without callable, must have one non-keyword argumentz,Method_to_typeid {0!r}: type {1!s}, not dictz%xz&%r callable returned object with id %r)r�rJ�pop�warnings�warn�DeprecationWarning�tuplerorerMrVrH�dictrKrr$r+rr{r�rjrkr_)r@rAr,r>r)r�rPr��method_to_typeid�	proxytyper&r�rrrrYksp

�

�
�
��

�



��z
Server.createz$($self, c, typeid, /, *args, **kwds)cCst|j|jd�S)zL
        Return the methods of the shared object indicated by token
        r	)r�rjr+)r,r>r�rrrr[�szServer.get_methodscCs"|t��_|�d�|�|�dS)z=
        Spawn a new thread to serve this connection
        r�N)rmr�rr;r�)r,r>rrrrrZ�s

zServer.accept_connectioncCs�|j��z|j|d7<Wnhtk
r�}zJ||jkrrd|j|<|j||j|<|j|\}}}t�d|�n|�W5d}~XYnXW5QRXdS)Nr	z&Server re-enabled tracking & INCREF %r)rorkr�rlrjrr{)r,r>r�r�r&r�r�rrrr_�s

�z
Server.increfc	Cs�||jkr$||jkr$t�d|�dS|j�Z|j|dkrXtd�||j||j|���|j|d8<|j|dkr�|j|=W5QRX||jkr�d|j|<t�d|�|j�|j|=W5QRXdS)NzServer DECREF skipping %rrz+Id {0!s} ({1!r}) has refcount {2:n}, not 1+r	)NrNzdisposing of obj with id %r)rkrlrr{ro�AssertionErrorrKrj)r,r>r�rrrr`�s,
���

z
Server.decrefN)r4r6r7r8�publicr-r�r�r�r�r�r�r�r�r^r\r]rXrY�__text_signature__r[rZr_r`rrrrrW�s<�
"Q�
=rWc@seZdZdgZdZdZdZdS)�State�valuerr	r�N)r4r6r7r9�INITIAL�STARTED�SHUTDOWNrrrrr��sr�)�pickleZ	xmlrpclibc@s�eZdZdZiZeZd"dd�Zdd�Zdd	�Z	d#dd�Z
ed$d
d��Zdd�Z
d%dd�Zdd�Zdd�Zdd�Zdd�Zedd��Zedd��Zed&d d!��ZdS)'rz!
    Base class for managers
    Nr�cCs\|dkrt��j}||_t�|�|_t�|_tj|j_	||_
t|\|_|_
|pTt�|_dSr)rrvrg�_addressrf�_authkeyr��_stater�r��_serializerrhZ	_Listener�_Clientr�_ctx)r,r*rgrpZctxrrrr-s

zBaseManager.__init__cCsf|jjtjkrP|jjtjkr&td��n*|jjtjkr>td��ntd�|jj���t|j	|j
|j|j�S)zX
        Return server object with serve_forever() method and address attribute
        �Already started server�Manager has shut down�Unknown state {!r})
r�r�r�r�r�r
r�rKrW�	_registryr�r�r�r.rrr�
get_servers

�
�zBaseManager.get_servercCs8t|j\}}||j|jd�}t|dd�tj|j_dS)z>
        Connect manager object to the server process
        �rgNr^)	rhr�r�r�rDr�r�r�r�)r,rqrrr�rrr�connectszBaseManager.connectrc	Cs4|jjtjkrP|jjtjkr&td��n*|jjtjkr>td��ntd�|jj���|dk	rht|�sht	d��t
jdd�\}}|jj
t|�j|j|j|j|j|||fd�|_d	�d
d�|jjD��}t|�jd||j_|j��|��|��|_|��tj|j_tj|t|�j|j|j|j|j|jfd
d�|_ dS)z@
        Spawn a server process for this manager object
        r�r�r�Nzinitializer must be a callableF)Zduplexr��:css|]}t|�VqdSrr�)r�irrr�	<genexpr>Asz$BaseManager.start.<locals>.<genexpr>�-r�r@Zexitpriority)!r�r�r�r�r�r
r�rKrPrJr
ZPiper�ZProcessr�_run_serverr�r�r�r��_processr�Z	_identityr4rr�r�r<r�Finalize�_finalize_managerr�rX)r,�initializer�initargs�reader�writerr�rrrr�(sH

���


��zBaseManager.startc	Cs^t�tjtj�|dk	r ||�|�||||�}|�|j�|��t�d|j�|�	�dS)z@
        Create a server, report its address and run it
        Nzmanager serving at %r)
�signal�SIGINT�SIG_IGN�_Serverr;r*r�rr�r�)	�clsrer*rgrpr�r�r��serverrrrr�SszBaseManager._run_servercOsN|j|j|jd�}zt|dd|f||�\}}W5|��Xt||j|�|fS)zP
        Create a new shared object; return the token and exposed tuple
        r�NrY)r�r�r�r�rDr)r,r)r@rAr�r+r�rrr�_createjs

zBaseManager._createcCs*|jdk	r&|j�|�|j��s&d|_dS)zC
        Join the manager process (if it has been spawned)
        N)r�r��is_alive�r,�timeoutrrrr�vs

zBaseManager.joincCs2|j|j|jd�}zt|dd�W�S|��XdS)zS
        Return some info about the servers shared objects and connections
        r�Nr\�r�r�r�r�rD�r,r�rrr�_debug_infoszBaseManager._debug_infocCs2|j|j|jd�}zt|dd�W�S|��XdS)z5
        Return the number of shared objects
        r�Nr]r�r�rrr�_number_of_objects�szBaseManager._number_of_objectscCsj|jjtjkr|��|jjtjkrf|jjtjkr<td��n*|jjtjkrTtd��ntd�|jj���|S)NzUnable to start serverr�r�)	r�r�r�r�r�r�r
r�rKr.rrr�	__enter__�s

�zBaseManager.__enter__cCs|��dSr)rX�r,�exc_typeZexc_valZexc_tbrrr�__exit__�szBaseManager.__exit__cCs�|��r�t�d�z,|||d�}zt|dd�W5|��XWntk
rRYnX|jdd�|��r�t�d�t|d�r�t�d	�|��|jd
d�|��r�t�d�t	j
|_ztj
|=Wntk
r�YnXdS)zQ
        Shutdown the manager process; will be registered as a finalizer
        z#sending shutdown message to managerr�NrXg�?)r�zmanager still alive�	terminatez'trying to `terminate()` manager processg�������?z#manager still alive after terminate)r�rr�r�rDr�r��hasattrr�r�r�r�r�_address_to_localr�)rr*rgr1r�r�rrrr��s.




zBaseManager._finalize_managercCs|jSr)r�r.rrrr*�szBaseManager.addressTc
s�d|jkr|j��|_�dkr"t�|p0t�dd�}|p@t�dd�}|r\t|���D]\}}qR|||�f|j�<|r���fdd�}	�|	_t|�|	�dS)z9
        Register a typeid with the manager type
        r�N�	_exposed_�_method_to_typeid_cs`t�d��|j�f|�|�\}}�||j||j|d�}|j|j|jd�}t|dd|jf�|S)Nz)requesting creation of a shared %r object��managerrgr�r�r`)	rr{r�r�r�r�r*rDr+)r,r@rAr�Zexp�proxyr��r�r)rrrR�s�z"BaseManager.register.<locals>.temp)	�__dict__r��copy�	AutoProxyrr$r!r4�setattr)
r�r)rPr�r�r��
create_method�keyr�rRrr�r�register�s(

��

zBaseManager.register)NNr�N)Nr)Nr)N)NNNNT)r4r6r7r8r�rWr�r-r�r�r��classmethodr�r�r�r�r�r�r��staticmethodr��propertyr*rrrrrr�s8�
	
+�
	




�c@seZdZdd�Zdd�ZdS)�ProcessLocalSetcCst�|dd��dS)NcSs|��Sr)�clearr%rrr�<lambda>��z*ProcessLocalSet.__init__.<locals>.<lambda>)r�register_after_forkr.rrrr-�szProcessLocalSet.__init__cCst|�dfSrc)rr.rrr�
__reduce__�szProcessLocalSet.__reduce__N)r4r6r7r-rrrrrr	�sr	c@s�eZdZdZiZe��Zddd�Zdd�Z	d	ifd
d�Z
dd
�Zdd�Ze
dd��Zdd�Zdd�Zdd�Zdd�Zdd�ZdS)rz.
    A base for proxies of shared objects
    NTFc		Cs�tj�8tj�|jd�}|dkr:t��t�f}|tj|j<W5QRX|d|_|d|_	||_
|j
j|_||_
||_t|d|_||_|dk	r�t�|�|_n"|j
dk	r�|j
j|_nt��j|_|r�|��t�|tj�dS)Nrr	)r�_mutexr�r�r*rZForkAwareLocalr	�_tls�_idset�_tokenr+�_id�_managerr�rhr��_owned_by_managerrrfr�rvrg�_increfr
�_after_fork)	r,r�rpr�rgr�r_�
manager_ownedZ	tls_idsetrrrr-s*



zBaseProxy.__init__cCsdt�d�t��j}t��jdkr4|dt��j7}|j|jj	|j
d�}t|dd|f�||j_
dS)Nzmaking connection to managerZ
MainThread�|r�rZ)rr{rrvrrmr�r�rr*r�rDrr
)r,rr�rrr�_connect-s

zBaseProxy._connectrcCs�z|jj}Wn6tk
rBt�dt��j�|��|jj}YnX|�	|j
|||f�|��\}}|dkrp|S|dkr�|\}}|jj
|jd}	|jj|_|	||j|j|j|d�}
|j|j|jd�}t|dd|jf�|
St||��dS)	zV
        Try to call a method of the referent and return a copy of the result
        z#thread %r does not own a connectionr:r����r�r�Nr`)rr
r�rr{rmr�rrr;rr<rr�r)rr*r�r�r�rDr+r=)r,r?r@rAr�rBrCr�r�r�r�rrr�_callmethod6s6�
�zBaseProxy._callmethodcCs
|�d�S)z9
        Get a copy of the value of the referent
        r��rr.rrr�	_getvalueTszBaseProxy._getvaluec	Cs�|jrt�d|jj�dS|j|jj|jd�}t|dd|j	f�t�d|jj�|j
�|j	�|joj|jj
}tj|tj|j|j||j|j
|jfdd�|_dS)Nz%owned_by_manager skipped INCREF of %rr�r_z	INCREF %r�
r�)rrr{rr+r�r*r�rDrr�addrr�r�r�_decrefrZ_close)r,r�r1rrrrZs$
��zBaseProxy._increfc
Cs�|�|j�|dks |jtjkr�z2t�d|j�||j|d�}t|dd|jf�Wq�t	k
r�}zt�d|�W5d}~XYq�Xnt�d|j�|s�t
|d�r�t�dt��j
�|j��|`dS)Nz	DECREF %rr�r`z... decref failed %sz%DECREF %r -- manager already shutdownr
z-thread %r has no more proxies so closing conn)�discardr+r�r�r�rr{r*rDr�r�rmr�rr
r�)r�rgr1ZtlsZidsetr�r�r�rrrr!ns �
zBaseProxy._decrefc
CsHd|_z|��Wn0tk
rB}zt�d|�W5d}~XYnXdS)Nzincref failed: %s)rrr�rr�)r,r�rrrr�s
zBaseProxy._after_forkcCs^i}t�dk	r|j|d<t|dd�rB|j|d<tt|j|j|ffStt|�|j|j|ffSdS)Nrg�_isautoFr�)	rr�rr��RebuildProxyrrr�r�r,rArrrr�s


��zBaseProxy.__reduce__cCs|��Sr)r)r,Zmemorrr�__deepcopy__�szBaseProxy.__deepcopy__cCsdt|�j|jjt|�fS)Nz<%s object, typeid %r at %#x>)rr4rr)r+r.rrrr5�s�zBaseProxy.__repr__cCs:z|�d�WStk
r4t|�dd�dYSXdS)zV
        Return representation of the referent (or a fall-back if that fails)
        r5Nrz; '__str__()' failed>)rr�r�r.rrrrN�szBaseProxy.__str__)NNNTF)r4r6r7r8r�rZForkAwareThreadLockrr-rrrrrr!rrr&r5rNrrrrr�s(�
)	

cCs�tt��dd�}|rT|j|jkrTt�d|�d|d<|j|jkrT|j|j|j|j<|�	dd�optt��dd�}|||fd|i|��S)	z5
    Function used for unpickling proxy objects.
    rwNz*Rebuild a proxy owned by manager, token=%rTrr_Z_inheritingF)
rrrvr*rr{r+rlrjr�)rSr�rprAr�r_rrrr$�s
�
�r$cCspt|�}z|||fWStk
r*YnXi}|D]}td||f|�q4t|tf|�}||_||||f<|S)zB
    Return a proxy type whose methods are given by `exposed`
    zOdef %s(self, /, *args, **kwds):
        return self._callmethod(%r, args, kwds))r�r��execrrr�)rr��_cacheZdicZmeth�	ProxyTyperrr�
MakeProxyType�s ��r*c
Cs�t|d}|dkrB||j|d�}zt|dd|f�}W5|��X|dkrX|dk	rX|j}|dkrjt��j}td|j	|�}||||||d�}	d|	_
|	S)z*
    Return an auto-proxy for `token`
    r	Nr�r[z
AutoProxy[%s])r�rgr_T)rhr*r�rDr�rrvrgr*r)r#)
r�rpr�rgr�r_r�r�r)r�rrrr�s 


�rc@seZdZdd�Zdd�ZdS)�	NamespacecKs|j�|�dSr)r��updater%rrrr-�szNamespace.__init__cCsZt|j���}g}|D]$\}}|�d�s|�d||f�q|��d|jjd�|�fS)NrUz%s=%rz%s(%s)z, )	r$r�r!�
startswithrQr�r3r4r�)r,r!rRrr�rrrr5�s
zNamespace.__repr__N)r4r6r7r-r5rrrrr+�sr+c@s8eZdZddd�Zdd�Zdd�Zdd	�Zeee�Zd
S)�ValueTcCs||_||_dSr)�	_typecode�_value)r,rr��lockrrrr-szValue.__init__cCs|jSr�r0r.rrrr�sz	Value.getcCs
||_dSrr2�r,r�rrrr�
sz	Value.setcCsdt|�j|j|jfS)Nz
%s(%r, %r))rr4r/r0r.rrrr5szValue.__repr__N)T)	r4r6r7r-r�r�r5rr�rrrrr.s

r.cCst�||�Sr)r)r�sequencer1rrr�Arraysr5c@s8eZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�ZdS)
�
IteratorProxy)�__next__r;�throwr�cCs|Srrr.rrr�__iter__szIteratorProxy.__iter__cGs|�d|�S)Nr7r�r,r@rrrr7szIteratorProxy.__next__cGs|�d|�S)Nr;rr:rrrr;szIteratorProxy.sendcGs|�d|�S)Nr8rr:rrrr8szIteratorProxy.throwcGs|�d|�S)Nr�rr:rrrr�!szIteratorProxy.closeN)	r4r6r7r�r9r7r;r8r�rrrrr6sr6c@s2eZdZdZddd�Zdd�Zdd	�Zd
d�ZdS)
�
AcquirerProxy)�acquire�releaseTNcCs"|dkr|fn||f}|�d|�S�Nr<r)r,Zblockingr�r@rrrr<'szAcquirerProxy.acquirecCs
|�d�S�Nr=rr.rrrr=*szAcquirerProxy.releasecCs
|�d�Sr>rr.rrrr�,szAcquirerProxy.__enter__cCs
|�d�Sr?rr�rrrr�.szAcquirerProxy.__exit__)TN)r4r6r7r�r<r=r�r�rrrrr;%s

r;c@s6eZdZdZddd�Zd
dd�Zdd	�Zdd
d�ZdS)�ConditionProxy)r<r=r��notify�
notify_allNcCs|�d|f�S�Nr�rr�rrrr�4szConditionProxy.waitr	cCs|�d|f�S)NrAr)r,�nrrrrA6szConditionProxy.notifycCs
|�d�S)NrBrr.rrrrB8szConditionProxy.notify_allcCsd|�}|r|S|dk	r$t��|}nd}d}|s`|dk	rN|t��}|dkrNq`|�|�|�}q,|S)Nr)�time�	monotonicr�)r,Z	predicater�rCZendtimeZwaittimerrr�wait_for:s
zConditionProxy.wait_for)N)r	)N)r4r6r7r�r�rArBrGrrrrr@2s


r@c@s2eZdZdZdd�Zdd�Zdd�Zdd	d
�ZdS)�
EventProxy)r�r�r
r�cCs
|�d�S)Nr�rr.rrrr�OszEventProxy.is_setcCs
|�d�S�Nr�rr.rrrr�QszEventProxy.setcCs
|�d�S)Nr
rr.rrrr
SszEventProxy.clearNcCs|�d|f�SrCrr�rrrr�UszEventProxy.wait)N)r4r6r7r�r�r�r
r�rrrrrHMs
rHc@sNeZdZdZddd�Zdd�Zdd�Zed	d
��Zedd��Z	ed
d��Z
dS)�BarrierProxy)�__getattribute__r��abort�resetNcCs|�d|f�SrCrr�rrrr�[szBarrierProxy.waitcCs
|�d�S)NrLrr.rrrrL]szBarrierProxy.abortcCs
|�d�S)NrMrr.rrrrM_szBarrierProxy.resetcCs|�dd�S)NrK)�partiesrr.rrrrNaszBarrierProxy.partiescCs|�dd�S)NrK)�	n_waitingrr.rrrrOdszBarrierProxy.n_waitingcCs|�dd�S)NrK)�brokenrr.rrrrPgszBarrierProxy.broken)N)r4r6r7r�r�rLrMrrNrOrPrrrrrJYs


rJc@s(eZdZdZdd�Zdd�Zdd�ZdS)	�NamespaceProxy)rK�__setattr__�__delattr__cCs0|ddkrt�||�St�|d�}|d|f�S)NrrUrrK)�objectrK�r,r�
callmethodrrr�__getattr__nszNamespaceProxy.__getattr__cCs4|ddkrt�|||�St�|d�}|d||f�S)NrrUrrR)rTrRrK)r,rr�rVrrrrRsszNamespaceProxy.__setattr__cCs0|ddkrt�||�St�|d�}|d|f�S)NrrUrrS)rTrSrKrUrrrrSxszNamespaceProxy.__delattr__N)r4r6r7r�rWrRrSrrrrrQlsrQc@s*eZdZdZdd�Zdd�Zeee�ZdS)�
ValueProxy)r�r�cCs
|�d�S)Nr�rr.rrrr��szValueProxy.getcCs|�d|f�SrIrr3rrrr��szValueProxy.setN)r4r6r7r�r�r�rr�rrrrrXsrX�
BaseListProxy)�__add__�__contains__�__delitem__�__getitem__�__len__�__mul__�__reversed__�__rmul__�__setitem__rQ�count�extend�index�insertr��remove�reverser��__imul__c@seZdZdd�Zdd�ZdS)�	ListProxycCs|�d|f�|S)Nrdrr3rrr�__iadd__�szListProxy.__iadd__cCs|�d|f�|S)Nrirr3rrrri�szListProxy.__imul__N)r4r6r7rkrirrrrrj�srj�	DictProxy)r[r\r]r9r^rbr
rr�r!r"r��popitem�
setdefaultr,r#r9�Iterator�
ArrayProxy)r^r]rb�	PoolProxy)Zapply�apply_asyncr��imap�imap_unorderedr��map�	map_async�starmap�
starmap_asyncr�ZAsyncResult)rrrvrxrsrtc@seZdZdd�Zdd�ZdS)rqcCs|Srrr.rrrr��szPoolProxy.__enter__cCs|��dSr)r�r�rrrr��szPoolProxy.__exit__N)r4r6r7r�r�rrrrrq�sc@seZdZdZdS)ra(
    Subclass of `BaseManager` which supports a number of shared object types.

    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.

    The `multiprocessing.Manager()` function creates started instances of
    this class.
    N)r4r6r7r8rrrrr�s�QueueZ
JoinableQueuertrn�RLock�	Semaphore�BoundedSemaphore�	Condition�Barrier�Poolr$r�)r�r)rc@sLeZdZdZgfdd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Z	dd�Z
dS)�_SharedMemoryTrackerz+Manages one or more shared memory segments.cCs||_||_dSr�Zshared_memory_context_name�
segment_names)r,rr�rrrr-�sz_SharedMemoryTracker.__init__cCs(t�d|�dt����|j�|�dS)z6Adds the supplied shared memory block name to tracker.zRegister segment � in pid N)rr{rr�rQ�r,�segment_namerrr�register_segment�sz%_SharedMemoryTracker.register_segmentcCsBt�d|�dt����|j�|�t�|�}|��|��dS)z�Calls unlink() on the shared memory block with the supplied name
            and removes it from the list of blocks being tracked.zDestroy segment r�N)	rr{rr�rgr�SharedMemoryr��unlink)r,r�Zsegmentrrr�destroy_segment�s

z$_SharedMemoryTracker.destroy_segmentcCs"|jdd�D]}|�|�qdS)z<Calls destroy_segment() on all tracked shared memory blocks.N)r�r�r�rrrr��sz_SharedMemoryTracker.unlinkcCs(t�d|jj�dt����|��dS)NzCall z.__del__ in )rr{r3r4rr�r.rrr�__del__�sz_SharedMemoryTracker.__del__cCs|j|jfSrr�r.rrrr/�sz!_SharedMemoryTracker.__getstate__cCs|j|�dSr)r-r0rrrr2sz!_SharedMemoryTracker.__setstate__N)r4r6r7r8r-r�r�r�r�r/r2rrrrr��s	r�c@sReZdZejdddgZdd�Zdd�Zde_d	d
�Zdd�Z	d
d�Z
dd�ZdS)�SharedMemoryServer�
track_segment�release_segment�
list_segmentscOsZtj|f|�|�|j}t|t�r,t�|�}td|�dt����|_	t
�dt����dS)NZshm_rUz"SharedMemoryServer started by pid )rWr-r*rHrd�os�fsdecoder�r�shared_memory_contextrr{)r,r@�kwargsr*rrrr-
s

�zSharedMemoryServer.__init__cOstt|�dkr|d}n4d|kr(|d}n"|s6td��ntdt|�d��ttj|dd�rhtj|d	<tj||�S)
z�Create a new distributed-shared object (not backed by a shared
            memory block) and return its id to be used in a Proxy Object.r�r�r)zDdescriptor 'create' of 'SharedMemoryServer' object needs an argumentr�r	rZ_shared_memory_proxyr�)r�rJr�r,rer�rWrY)r@r�Ztypeodr)rrrrYs



�
zSharedMemoryServer.createz&($self, c, typeid, /, *args, **kwargs)cCs|j��t�||�S)zACall unlink() on all tracked shared memory, terminate the Server.)r�r�rWrXr�rrrrX)s
zSharedMemoryServer.shutdowncCs|j�|�dS)z?Adds the supplied shared memory block name to Server's tracker.N)r�r��r,r>r�rrrr�.sz SharedMemoryServer.track_segmentcCs|j�|�dS)z�Calls unlink() on the shared memory block with the supplied name
            and removes it from the tracker instance inside the Server.N)r�r�r�rrrr�2sz"SharedMemoryServer.release_segmentcCs|jjS)zbReturns a list of names of shared memory blocks that the Server
            is currently tracking.)r�r�r�rrrr�7sz SharedMemoryServer.list_segmentsN)r4r6r7rWr�r-rYr�rXr�r�r�rrrrr�s�
r�c@s<eZdZdZeZdd�Zdd�Zdd�Zdd	�Z	d
d�Z
dS)
ra�Like SyncManager but uses SharedMemoryServer instead of Server.

        It provides methods for creating and returning SharedMemory instances
        and for creating a list-like object (ShareableList) backed by shared
        memory.  It also provides methods that create and return Proxy Objects
        that support synchronization across processes (i.e. multi-process-safe
        locks and semaphores).
        cOsNtjdkrddlm}|��tj|f|�|�t�|j	j
�dt����dS)N�posixr	)�resource_trackerz created by pid )r�r�r�Zensure_runningrr-rr{r3r4r)r,r@r�r�rrrr-Is

zSharedMemoryManager.__init__cCst�|jj�dt����dS)Nz.__del__ by pid )rr{r3r4rr.rrrr�UszSharedMemoryManager.__del__cCsh|jjtjkrP|jjtjkr&td��n*|jjtjkr>td��ntd�|jj���|�|j	|j
|j|j�S)z@Better than monkeypatching for now; merge into Server ultimatelyz"Already started SharedMemoryServerz!SharedMemoryManager has shut downr�)
r�r�r�r�r�r
r�rKr�r�r�r�r�r.rrrr�Ys

��zSharedMemoryManager.get_servercCsx|j|j|jd��\}tjdd|d�}zt|dd|jf�Wn.tk
rh}z|��|�W5d}~XYnXW5QRX|S)zoReturns a new SharedMemory instance with the specified size in
            bytes, to be tracked by the manager.r�NT)rY�sizer�)	r�r�r�rr�rDr�
BaseExceptionr�)r,r�r�Zsmsr�rrrr�fs z SharedMemoryManager.SharedMemorycCsv|j|j|jd��Z}t�|�}zt|dd|jjf�Wn0tk
rf}z|j�	�|�W5d}~XYnXW5QRX|S)z�Returns a new ShareableList instance populated with the values
            from the input sequence, to be tracked by the manager.r�Nr�)
r�r�r�r�
ShareableListrDZshmrr�r�)r,r4r�Zslr�rrrr�rs

 z!SharedMemoryManager.ShareableListN)r4r6r7r8r�r�r-r�r�r�r�rrrrr=s	
)NNNT)T)S�__all__rxrmr�rZqueuerEr�rr�rr�r
�contextrrr
rrrrrZ	HAS_SHMEM�ImportErrorrrZ
view_typesr$r'Z	view_typerTrrDr=r�rLrTrVrWr�rqrrZXmlListenerZ	XmlClientrhrr�r	rr$r*rr+r.r5r6r;r@rHrJrQrXrYrjrlr�rpZ
BasePoolProxyrqrryrtrnrzr{r|r}r~rr�r�r�rrrrr�<module>s��


c

�	w
4�


	
	
�

�

�%8PK{��\�Uxb
b
2__pycache__/popen_spawn_win32.cpython-38.opt-1.pycnu�[���U

e5d��@s�ddlZddlZddlZddlZddlZddlmZmZmZddl	m
Z
ddl	mZdgZdZ
ejdkoreed	d
�Zej���d�Zdd
�Zeejej�Zdd�ZGdd�de�ZdS)�N�)�	reduction�get_spawning_popen�set_spawning_popen)�spawn)�util�PopeniZwin32�frozenFzpythonservice.execCs ||kptj�|�tj�|�kS�N)�os�path�normcase)Zp1Zp2�r�9/usr/lib64/python3.8/multiprocessing/popen_spawn_win32.py�_path_eqsrcGs|D]}t�|�qdSr
)�_winapi�CloseHandle)Zhandles�handlerrr�_close_handlessrc@sJeZdZdZdZdd�Zdd�Zddd	�Zd
d�Zdd
�Z	e	Z
dd�ZdS)rz@
    Start a subprocess to run the code of a process object
    rcCsTt�|j�}t�dd�\}}t�|d�}tjt�	�|d�}d�
dd�|D��}t��}tr�t
|tj�r�tj}tj��}tj|d<nd}t|ddd	���}	z0t�||ddd
d|dd�	\}
}}}
t�|�Wnt�|��YnX||_d|_|
|_t|
�|_t�|t|jt|�f�|_t|�zt �!||	�t �!||	�W5td�XW5QRXdS)Nr)Z
parent_pidZpipe_handle� css|]}d|VqdS)z"%s"Nr)�.0�xrrr�	<genexpr>9sz!Popen.__init__.<locals>.<genexpr>�__PYVENV_LAUNCHER__�wbT)�closefdF)"rZget_preparation_data�_namerZ
CreatePipe�msvcrtZopen_osfhandleZget_command_liner�getpid�joinZget_executable�WINENVr�sys�
executable�_base_executable�environ�copy�openZ
CreateProcessr�pid�
returncode�_handle�int�sentinelrZFinalizer�	finalizerrr�dump)�selfZprocess_objZ	prep_dataZrhandleZwhandleZwfd�cmdZ
python_exe�envZto_childZhpZhtr'�tidrrr�__init__,sT
�
�

�zPopen.__init__cCst�||j�Sr
)rZ	duplicater+)r.rrrr�duplicate_for_childaszPopen.duplicate_for_childNcCst|jdkrn|dkrtj}ntdt|dd��}t�t|j�|�}|tjkrnt�|j�}|t	krht
j}||_|jS)Nri�g�?)r(rZINFINITE�maxr*ZWaitForSingleObjectr)Z
WAIT_OBJECT_0ZGetExitCodeProcess�	TERMINATE�signal�SIGTERM)r.�timeoutZmsecs�res�coderrr�waites

z
Popen.waitcCs|jdd�S)Nr�r8)r;�r.rrr�pollusz
Popen.pollcCsL|jdkrHzt�t|j�t�Wn&tk
rF|jdd�dkrB�YnXdS)Ng�?r<)r(rZTerminateProcessr*r)r5�OSErrorr;r=rrr�	terminatexs
zPopen.terminatecCs|��dSr
)r,r=rrr�close�szPopen.close)N)�__name__�
__module__�__qualname__�__doc__�methodr2r3r;r>r@�killrArrrrr&s5
)rrr6r!r�contextrrr�rr�__all__r5�platform�getattrZWINEXEr"�lower�endswithZ
WINSERVICErr#r r�objectrrrrr�<module>s
PK{��\{�ٿ��%__pycache__/heap.cpython-38.opt-1.pycnu�[���U

e5dj-�@s�ddlZddlmZddlZddlZddlZddlZddlZddlm	Z	m
Z
ddlmZdgZ
ejdkr�ddlZGdd	�d	e�Zn,Gd
d	�d	e�Zdd�Zd
d�Ze	�ee�Gdd�de�ZGdd�de�ZdS)�N)�defaultdict�)�	reduction�assert_spawning)�util�
BufferWrapperZwin32c@s0eZdZdZe��Zdd�Zdd�Zdd�Z	dS)	�ArenazL
        A shared memory area backed by anonymous memory (Windows).
        cCsx||_td�D]B}dt��t|j�f}tjd||d�}t��dkrHqZ|�	�qt
d��||_||_|j|jf|_
dS)N�dz	pym-%d-%s����ZtagnamerzCannot find name for new mmap)�size�range�os�getpid�next�_rand�mmap�_winapiZGetLastError�close�FileExistsError�name�buffer�_state)�selfr�irZbuf�r�,/usr/lib64/python3.8/multiprocessing/heap.py�__init__&s
�Arena.__init__cCst|�|jS�N)rr)rrrr�__getstate__5szArena.__getstate__cCs,|\|_|_|_tjd|j|jd�|_dS)Nr
r)rrrrr)r�staterrr�__setstate__9szArena.__setstate__N)
�__name__�
__module__�__qualname__�__doc__�tempfileZ_RandomNameSequencerrr r"rrrrrs
rc@s8eZdZdZejdkrdgZngZd
dd�Zdd�Zd	S)rzJ
        A shared memory area backed by a temporary file (POSIX).
        Zlinuxz/dev/shmr
cCsx||_||_|dkrbtjdt��|�|�d�\|_}t�|�t�	|tj
|jf�t�|j|�t�|j|j�|_
dS)Nr
zpym-%d-)�prefix�dir)r�fdr'Zmkstemprr�_choose_dir�unlinkr�Finalizer�	ftruncaterr)rrr*rrrrrMs
�
rcCs6|jD]&}t�|�}|j|j|kr|Sqt��Sr)�_dir_candidatesr�statvfs�f_bavail�f_frsizerZget_temp_dir)rr�d�strrrr+[s



zArena._choose_dirN)r
)	r#r$r%r&�sys�platformr/rr+rrrrrCs

cCs(|jdkrtd��t|jt�|j�ffS)Nr
zDArena is unpicklable because forking was enabled when it was created)r*�
ValueError�
rebuild_arenarrZDupFd)�arrr�reduce_arenads
r:cCst||���Sr)r�detach)rZdupfdrrrr8jsr8c@szeZdZdZdZdZejfdd�Ze	dd��Z
dd�Zd	d
�Zdd�Z
d
d�Zdd�Zdd�Zdd�Zdd�Zdd�ZdS)�Heap�i@cCsXt��|_t��|_||_g|_i|_i|_	i|_
tt�|_
g|_g|_d|_d|_dS�Nr)rr�_lastpid�	threadingZLock�_lock�_size�_lengths�_len_to_seq�_start_to_block�_stop_to_blockr�set�_allocated_blocks�_arenas�_pending_free_blocks�
_n_mallocs�_n_frees)rrrrrr{s


z
Heap.__init__cCs|d}|||@S)Nrr)�nZ	alignment�maskrrr�_roundup�sz
Heap._roundupcCsZ|�t|j|�tj�}|j|jkr0|jd9_t�d|�t|�}|j	�
|�|d|fS)N�z"allocating a new mmap of length %dr)rO�maxrBr�PAGESIZE�_DOUBLE_ARENA_SIZE_UNTILr�inforrI�append)rr�length�arenarrr�
_new_arena�szHeap._new_arenacCsz|j}||jkrdS|j�|�}|j|df=|j||f=|j�|�|j|}|�|d|f�|sv|j|=|j	�|�dSr>)
r�_DISCARD_FREE_SPACE_LARGER_THANrH�poprErFrI�removerDrC)rrWrV�blocks�seqrrr�_discard_arena�s

zHeap._discard_arenac	Cs|t�|j|�}|t|j�kr&|�|�S|j|}|j|}|��}|sV|j|=|j|=|\}}}|j||f=|j||f=|Sr)	�bisectZbisect_leftrC�lenrXrDrZrErF)	rrrrVr]�blockrW�start�stoprrr�_malloc�s



zHeap._mallocc	Cs�|\}}}z|j||f}Wntk
r0YnX|�|�\}}z|j||f}Wntk
rfYnX|�|�\}}|||f}||}z|j|�|�Wn.tk
r�|g|j|<t�|j|�YnX||j||f<||j||f<dSr)	rF�KeyError�_absorbrErDrUr_ZinsortrC)	rrarWrbrcZ
prev_block�_Z
next_blockrVrrr�_add_free_block�s(

zHeap._add_free_blockcCs^|\}}}|j||f=|j||f=||}|j|}|�|�|sV|j|=|j�|�||fSr)rErFrDr[rC)rrarWrbrcrVr]rrrrf�s


zHeap._absorbcCs4|\}}}|j|}|�||f�|s0|�|�dSr)rHr[r^)rrarWrbrcr\rrr�_remove_allocated_block�s


zHeap._remove_allocated_blockcCsBz|j��}Wntk
r&Yq>YnX|�|�|�|�qdSr)rJrZ�
IndexErrorrhri�rrarrr�_free_pending_blockss

zHeap._free_pending_blockscCs~t��|jkr$td�t��|j���|j�d�s>|j�|�n<z.|j
d7_
|��|�|�|�
|�W5|j�	�XdS)Nz$My pid ({0:n}) is not last pid {1:n}Fr)rrr?r7�formatrA�acquirerJrU�releaserLrlrhrirkrrr�frees
��
z	Heap.freec
Cs�|dkrtd�|���tj|kr.td�|���t��|jkrD|��|j	��|j
d7_
|��|�t
|d�|j�}|�|�\}}}||}||kr�|�|||f�|j|�||f�|||fW5QR�SQRXdS)Nr�Size {0:n} out of range�Size {0:n} too larger)r7rmr5�maxsize�
OverflowErrorrrr?rrArKrlrOrQ�
_alignmentrdrhrH�add)rrrWrbrcZ	real_stoprrr�malloc(s 
zHeap.mallocN)r#r$r%rurYrSrrRr�staticmethodrOrXr^rdrhrfrirlrprwrrrrr<ss

r<c@s"eZdZe�Zdd�Zdd�ZdS)rcCs^|dkrtd�|���tj|kr.td�|���tj�|�}||f|_t	j
|tjj|fd�dS)Nrrqrr)�args)r7rmr5rsrtr�_heaprwrrr-rp)rrrarrrrFs

zBufferWrapper.__init__cCs&|j\\}}}}t|j�|||�Sr)r�
memoryviewr)rrWrbrcrrrr�create_memoryviewOszBufferWrapper.create_memoryviewN)r#r$r%r<rzrr|rrrrrBs	)r_�collectionsrrrr5r'r@�contextrr�r�__all__r6r�objectrr:r8�registerr<rrrrr�<module>
s&
$!PPK{��\��O�** __pycache__/spawn.cpython-38.pycnu�[���U

e5dP$�@s$ddlZddlZddlZddlZddlmZmZddlmZddlm	Z	ddlm
Z
ddd	d
ddd
gZejdkrzdZ
dZneedd�Z
ej���d�Zer�ej�ejd�anejadd	�Zdd
�Zdd�Zdd�Zdd�Zd&dd�Zdd�Zdd�Zdd�ZgZ dd �Z!d!d"�Z"d#d$�Z#d%d
�Z$dS)'�N�)�get_start_method�set_start_method)�process)�	reduction)�util�_main�freeze_support�set_executable�get_executable�get_preparation_data�get_command_line�import_main_path�win32F�frozenzpythonservice.exez
python.execCs|adS�N��_python_exe)Zexe�r�-/usr/lib64/python3.8/multiprocessing/spawn.pyr
)scCstSrrrrrrr-scCs$t|�dkr|ddkrdSdSdS)z=
    Return whether commandline indicates we are forking
    �r�--multiprocessing-forkTFN)�len)�argvrrr�
is_forking4srcCsdttj�r`i}tjdd�D]0}|�d�\}}|dkr@d||<qt|�||<qtf|�t��dS)zE
    Run code for process object if this in not the main process
    rN�=�None)r�sysr�split�int�
spawn_main�exit)�kwds�arg�name�valuerrrr	>s


cKshttdd�r(tjdgdd�|��D�Sd}|d�dd	�|��D��;}t��}tg|d
|dgSdS)zJ
    Returns prefix of command line used for spawning a child process
    rFrcSsg|]}d|�qS)�%s=%rr��.0�itemrrr�
<listcomp>Tsz$get_command_line.<locals>.<listcomp>z<from multiprocessing.spawn import spawn_main; spawn_main(%s)z, css|]}d|VqdS)r&Nrr'rrr�	<genexpr>Wsz#get_command_line.<locals>.<genexpr>z-cN)�getattrr�
executable�items�joinrZ_args_from_interpreter_flagsr)r"�progZoptsrrrr
Ns�cCs�ttj�std��tjdkrrddl}ddl}|dk	rL|�|j|j	Bd|�}nd}t
j||d�}|�|t
j�}|}n"ddlm}	||	j_|}t
�|�}t||�}
t�|
�dS)	z7
    Run code specified by data received over pipe
    zNot forkingrrNF)�source_processr)�resource_tracker)rrr�AssertionError�platform�msvcrt�_winapiZOpenProcessZSYNCHRONIZEZPROCESS_DUP_HANDLErZ	duplicateZopen_osfhandle�os�O_RDONLY�r2Z_resource_trackerZ_fd�duprr!)Zpipe_handleZ
parent_pidZ
tracker_fdr5r6r1Z
new_handle�fd�parent_sentinelr2Zexitcoderrrr \s,

��

r c	Cs`tj|ddd��@}dt��_z$tj�|�}t|�tj�|�}W5t��`XW5QRX|�	|�S)N�rbT)�closefd)
r7�fdopenr�current_process�_inheritingr�pickle�load�prepare�
_bootstrap)r;r<Zfrom_parentZpreparation_data�selfrrrrxs
cCstt��dd�rtd��dS)NrAFa
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.)r,rr@�RuntimeErrorrrrr�_check_not_importing_main�srHcCst�ttjt��jd�}tjdk	r2tj��|d<t	j
��}z|�d�}Wnt
k
r^YnXtj||<|j||t	jtjt��t�d�t	jd}t|jdd�}|dk	r�||d<nft	jd	ks�t�st�st|d
d�}|dk	�rtj
�|��s
tjdk	�r
tj
�tj|�}tj
�|�|d<|S)zM
    Return info about parent needed by child to unpickle process object
    )�
log_to_stderr�authkeyN�	log_levelr9)r$�sys_path�sys_argv�orig_dir�dir�start_method�__main__r$�init_main_from_namer�__file__�init_main_from_path)rH�dictrZ_log_to_stderrrr@rJZ_loggerZgetEffectiveLevelr�path�copy�index�
ValueError�ORIGINAL_DIR�updaterr7�getcwdr�modulesr,�__spec__r4�WINEXE�
WINSERVICE�isabsr/�normpath)r$�drL�i�main_moduleZ
main_mod_name�	main_pathrrrr�sD�


�


�cCs�d|kr|dt��_d|kr,|dt��_d|krD|drDt��d|kr^t���|d�d|krp|dt_	d|kr�|dt_
d|kr�t�|d�d|kr�|dt_
d	|kr�t|d	d
d�d|kr�t|d�nd
|kr�t|d
�dS)zE
    Try to get current process ready to unpickle process object
    r$rJrIrKrLrMrOrNrPT)ZforcerRrTN)rr@r$rJrrIZ
get_loggerZsetLevelrrVrr7�chdirrZr�_fixup_main_from_name�_fixup_main_from_path)�datarrrrD�s,


rDcCs~tjd}|dks|�d�r dSt|jdd�|kr6dSt�|�t�d�}t	j
|ddd�}|j�|�|tjd<tjd<dS)NrQz	.__main__r$�__mp_main__T)�run_nameZ	alter_sys)
rr]�endswithr,r^�old_main_modules�append�types�
ModuleType�runpyZ
run_module�__dict__r[)Zmod_name�current_mainre�main_contentrrrrh�s


�rhcCs�tjd}tj�tj�|��d}|dkr.dSt|dd�|krBdSt�|�t	�
d�}tj|dd�}|j
�|�|tjd<tjd<dS)NrQrZipythonrSrk)rl)rr]r7rV�splitext�basenamer,rnrorprqrrZrun_pathrsr[)rfrtZ	main_namererurrrri	s


�ricCst|�dS)z<
    Set sys.modules['__main__'] to module at main_path
    N)ri)rfrrrr%s)NN)%r7rrrrpr9rrr�contextrr�__all__r4r_r`r,r-�lowerrmrVr/�exec_prefixrr
rrr	r
r rrHrrnrDrhrirrrrr�<module>sD�


2&PK{��\��hh+__pycache__/forkserver.cpython-38.opt-2.pycnu�[���U

e5d�0�@s�ddlZddlZddlZddlZddlZddlZddlZddlZddlZddl	m
Z
ddl	mZddlm
Z
ddl	mZddl	mZddl	mZd	d
ddgZd
Ze�d�ZGdd�de�Zddd�Zdd�Zdd�Zdd�Ze�ZejZejZejZejZdS)�N�)�
connection)�process)�	reduction)�resource_tracker)�spawn)�util�ensure_running�get_inherited_fds�connect_to_new_process�set_forkserver_preload��qc@sDeZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	dS)�
ForkServercCs.d|_d|_d|_d|_t��|_dg|_dS)N�__main__)�_forkserver_address�_forkserver_alive_fd�_forkserver_pid�_inherited_fds�	threadingZLock�_lock�_preload_modules��self�r�2/usr/lib64/python3.8/multiprocessing/forkserver.py�__init__"s
zForkServer.__init__c	Cs|j�|��W5QRXdS�N)r�_stop_unlockedrrrr�_stop*szForkServer._stopcCsV|jdkrdSt�|j�d|_t�|jd�d|_t�|j�sLt�|j�d|_dS)Nr)	r�os�closer�waitpidr�is_abstract_socket_namespacer�unlinkrrrrr/s
zForkServer._stop_unlockedcCs&tdd�|jD��std��||_dS)Ncss|]}t|�tkVqdSr)�type�str)�.0�modrrr�	<genexpr>@sz4ForkServer.set_forkserver_preload.<locals>.<genexpr>z&module_names must be a list of strings)�allr�	TypeError)rZ
modules_namesrrrr>sz!ForkServer.set_forkserver_preloadcCs|jSr)rrrrrr
DszForkServer.get_inherited_fdsc
Cs�|��t|�dtkr td��t�tj���}|�|j�t�	�\}}t�	�\}}|||j
t��g}||7}zNz&t�||�||fWW�4W5QR�St�
|�t�
|��YnXW5t�
|�t�
|�XW5QRXdS)N�ztoo many fds)r	�len�MAXFDS_TO_SEND�
ValueError�socket�AF_UNIXZconnectrr �piperrZgetfdr!rZsendfds)r�fdsZclientZparent_r�child_w�child_rZparent_wZallfdsrrrrLs(�


z!ForkServer.connect_to_new_processcs�|j��~t��|jdk	r`t�|jtj�\}}|sBW5QR�dSt�|j�d|_	d|_d|_d}|j
r�ddh�t�d�}�fdd�|�
�D�}ni}t�tj���}t�d�}|�|�t�|�s�t�|d�|��t��\}}ztzV|��|g}	||��||j
|f;}t��}
|
gt��}|d	|g7}t�|
||	�}Wnt�|��YnXW5t�|�X||_	||_||_W5QRXW5QRXdS)
NzCfrom multiprocessing.forkserver import main; main(%d, %d, %r, **%r)�	main_path�sys_path�ignorecsi|]\}}|�kr||�qSrr)r'�x�y�Zdesired_keysrr�
<dictcomp>�sz-ForkServer.ensure_running.<locals>.<dictcomp>r1i�z-c)rrr	rr r"�WNOHANGr!rrrrZget_preparation_data�itemsr0r1rZarbitrary_addressZbindrr#�chmodZlistenr2�filenoZget_executableZ_args_from_interpreter_flagsZspawnv_passfds)r�pidZstatus�cmd�data�listenerZaddress�alive_rZalive_wZfds_to_passZexe�argsrr;rr	isN





�
zForkServer.ensure_runningN)
�__name__�
__module__�__qualname__rrrrr
rr	rrrrr srcCs�|rdd|kr8|dk	r8dt��_zt�|�W5t��`X|D]&}zt|�Wq<tk
r`Yq<Xq<t��t	�
�\}}t	�|d�t	�|d�dd�}tj
|tjtji}	dd�|	��D�}
t�|�i}tjtj|d����}t�����}
|��t_|
�|tj�|
�|tj�|
�|tj��znd	d
�|
��D�}|�r"�qB�q"||k�rPt�||k�rBt	�|d�zt	�dt	j�\}}Wnt k
�r�Y�qBYnX|d
k�r��qB|�!|d�}|dk	�r0t	�"|��r�t	�#|�}n&t	�$|��s�t%d�&||���t	�'|�}zt(||�Wnt)k
�r"YnXt	�*|�nt+�,d|��qf||k�r�|�-�d
��,}t.�/|t0d�}t1|�t0k�r�t2d�&t1|����|^}}}|�*�t	�3�}|d
k�r4d}zpz<|�*�|
�*�||||g}|�5|�6��t7||||
�}Wn.t8k
�r t9j:t9�;��t9j<�=�YnXW5t	�4|�XnNzt(||�Wnt)k
�rXYnX|||<t	�*|�|D]}t	�*|��qpW5QRXWn4t>k
�r�}z|j?t?j@k�r��W5d}~XYnX�qW5QRXW5QRXdS)NrTFcWsdSrr)Z_unusedrrr�sigchld_handler�szmain.<locals>.sigchld_handlercSsi|]\}}|t�||��qSr)�signal)r'�sig�valrrrr<�s�zmain.<locals>.<dictcomp>)r@cSsg|]\}}|j�qSr)Zfileobj)r'�keyZeventsrrr�
<listcomp>�szmain.<locals>.<listcomp>i���rzChild {0:n} status is {1:n}z.forkserver: waitpid returned unexpected pid %drzToo many ({0:n}) fds to send)ArZcurrent_processZ_inheritingrZimport_main_path�
__import__�ImportErrorrZ_close_stdinr r2�set_blockingrK�SIGCHLD�SIGINT�SIG_IGNr>�
set_wakeup_fdr0r1�	selectorsZDefaultSelectorZgetsockname�_forkserverr�registerZ
EVENT_READZselect�
SystemExit�readr"r=�ChildProcessError�pop�WIFSIGNALED�WTERMSIG�	WIFEXITED�AssertionError�format�WEXITSTATUS�write_signed�BrokenPipeErrorr!�warnings�warnZacceptrZrecvfdsr.r-�RuntimeError�fork�_exit�extend�values�
_serve_one�	Exception�sys�
excepthook�exc_info�stderr�flush�OSError�errnoZECONNABORTED)Zlistener_fdrEZpreloadr6r7�modnameZsig_rZsig_wrJ�handlersZold_handlersZ	pid_to_fdrDZselectorZrfdsrA�stsr4�
returncode�sr3r5�code�
unused_fds�fd�errr�main�s�

��
�




��
�

��

�
r�c	Csht�d�|��D]\}}t�||�q|D]}t�|�q,|^t_tj_	t_
t�|�}t�
||�}|S)NrP)rKrWr>r r!rYrrZ_resource_trackerZ_fdr�duprZ_main)	r5r3r}rxrLrMr~Zparent_sentinelr|rrrrn1s
�
rncCsNd}tj}t|�|kr@t�||t|��}|s6td��||7}q
t�|�dS)N�zunexpected EOFr)�
SIGNED_STRUCT�sizer-r r\�EOFErrorZunpack)r~rCZlengthr{rrr�read_signedHs
r�cCs<t�|�}|r8t�||�}|dkr*td��||d�}q
dS)Nrzshould not get here)r�Zpackr �writeri)r~�n�msg�nbytesrrrreRs
re)NN) rvr rXrKr0Zstructrprrg�rr�contextrrrr�__all__r.ZStructr��objectrr�rnr�rerYr	r
rrrrrr�<module>s>�


PK{��\W�	%	%'__pycache__/queues.cpython-38.opt-1.pycnu�[���U

e5d�-�@s�dddgZddlZddlZddlZddlZddlZddlZddlZddlm	Z	m
Z
ddlZddlm
Z
ddlmZejjZdd	lmZmZmZmZmZGd
d�de�Ze�ZGdd�de�ZGdd�de�ZdS)
�Queue�SimpleQueue�
JoinableQueue�N)�Empty�Full�)�
connection)�context)�debug�info�Finalize�register_after_fork�
is_exitingc@s�eZdZd*dd�Zdd�Zdd�Zdd	�Zd+dd
�Zd,dd�Zdd�Z	dd�Z
dd�Zdd�Zdd�Z
dd�Zdd�Zdd�Zd d!�Zed"d#��Zed$d%��Zed&d'��Zed(d)��ZdS)-rrcCs�|dkrddlm}||_tjdd�\|_|_|��|_t	�
�|_tj
dkrTd|_n
|��|_|�|�|_d|_|��tj
dkr�t|tj�dS)Nrr)�
SEM_VALUE_MAXF�Zduplex�win32)Zsynchronizer�_maxsizer�Pipe�_reader�_writer�Lock�_rlock�os�getpid�_opid�sys�platform�_wlockZBoundedSemaphore�_sem�
_ignore_epipe�_after_forkr
r��self�maxsize�ctx�r%�./usr/lib64/python3.8/multiprocessing/queues.py�__init__$s




zQueue.__init__cCs.t�|�|j|j|j|j|j|j|j|j	fS�N)
r	�assert_spawningrrrrrrrr�r"r%r%r&�__getstate__9s
�zQueue.__getstate__c	Cs0|\|_|_|_|_|_|_|_|_|��dSr()	rrrrrrrrr �r"�stater%r%r&�__setstate__>s�zQueue.__setstate__cCsbtd�t�t���|_t��|_d|_d|_	d|_
d|_d|_|j
j|_|jj|_|jj|_dS)NzQueue._after_fork()F)r
�	threading�	Conditionr�	_notempty�collections�deque�_buffer�_thread�_jointhread�_joincancelled�_closed�_closer�
send_bytes�_send_bytesr�
recv_bytes�_recv_bytes�poll�_pollr*r%r%r&r Cs


zQueue._after_forkTNc	Csf|jrtd|�d���|j�||�s(t�|j�.|jdkrB|��|j�	|�|j�
�W5QRXdS�NzQueue z
 is closed)r8�
ValueErrorr�acquirerr1r5�
_start_threadr4�append�notify�r"�obj�block�timeoutr%r%r&�putPs
z	Queue.putc	Cs�|jrtd|�d���|rH|dkrH|j�|��}W5QRX|j��nr|rXt��|}|j�||�sjt	�zB|r�|t��}|�
|�s�t	�n|�
�s�t	�|��}|j��W5|j��Xt�|�Sr@)
r8rArr=r�release�time�	monotonicrBrr?�_ForkingPickler�loads)r"rHrI�resZdeadliner%r%r&�get\s*
z	Queue.getcCs|j|jj��Sr()rr�_semlockZ
_get_valuer*r%r%r&�qsizevszQueue.qsizecCs
|��Sr(�r?r*r%r%r&�emptyzszQueue.emptycCs|jj��Sr()rrR�_is_zeror*r%r%r&�full}sz
Queue.fullcCs
|�d�S�NF)rQr*r%r%r&�
get_nowait�szQueue.get_nowaitcCs|�|d�SrX)rJ�r"rGr%r%r&�
put_nowait�szQueue.put_nowaitcCs2d|_z|j��W5|j}|r,d|_|�XdS)NT)r8r9r�close)r"r\r%r%r&r\�szQueue.closecCstd�|jr|��dS)NzQueue.join_thread())r
r6r*r%r%r&�join_thread�szQueue.join_threadcCs6td�d|_z|j��Wntk
r0YnXdS)NzQueue.cancel_join_thread()T)r
r7r6Zcancel�AttributeErrorr*r%r%r&�cancel_join_thread�szQueue.cancel_join_threadc
Cs�td�|j��tjtj|j|j|j|j	|j
j|j|j
|jfdd�|_d|j_td�|j��td�|js�t|jtjt�|j�gdd�|_t|tj|j|jgd	d�|_dS)
NzQueue._start_thread()ZQueueFeederThread)�target�args�nameTzdoing self._thread.start()z... done self._thread.start()���)Zexitpriority�
)r
r4�clearr/ZThreadr�_feedr1r;rrr\r�_on_queue_feeder_errorrr5Zdaemon�startr7r�_finalize_join�weakref�refr6�_finalize_closer9r*r%r%r&rC�s<
��
�
�zQueue._start_threadcCs4td�|�}|dk	r(|��td�ntd�dS)Nzjoining queue threadz... queue thread joinedz... queue thread already dead)r
�join)Ztwr�threadr%r%r&ri�s
zQueue._finalize_joinc	Cs.td�|�|�t�|��W5QRXdS)Nztelling queue thread to quit)r
rD�	_sentinelrE)�buffer�notemptyr%r%r&rl�s
zQueue._finalize_closec
CsXtd�|j}|j}	|j}
|j}t}tjdkr<|j}
|j}nd}
z�|�z|sT|
�W5|	�Xzb|�}||kr�td�|�WWdSt�	|�}|
dkr�||�qb|
�z||�W5|�XqbWnt
k
r�YnXWq@tk
�rP}zV|�rt|dd�t
jk�rWY�6dSt��r.td|�WY�dS|��|||�W5d}~XYq@Xq@dS)Nz$starting thread to feed data to piperz%feeder thread got sentinel -- exiting�errnorzerror in queue thread: %s)r
rBrK�wait�popleftrorrrN�dumps�
IndexError�	Exception�getattrrrZEPIPErr)rprqr:Z	writelockr\Zignore_epipe�onerrorZ	queue_semZnacquireZnreleaseZnwaitZbpopleft�sentinelZwacquireZwreleaserG�er%r%r&rf�sN







zQueue._feedcCsddl}|��dS)z�
        Private API hook called when feeding data in the background thread
        raises an exception.  For overriding by concurrent.futures.
        rN)�	traceback�	print_exc)r{rGr|r%r%r&rg
szQueue._on_queue_feeder_error)r)TN)TN)�__name__�
__module__�__qualname__r'r+r.r rJrQrSrUrWrYr[r\r]r_rC�staticmethodrirlrfrgr%r%r%r&r"s.



 
	

=c@s@eZdZddd�Zdd�Zdd�Zdd
d�Zdd
�Zdd�Zd	S)rrcCs*tj|||d�|�d�|_|��|_dS)N)r$r)rr'Z	Semaphore�_unfinished_tasksr0�_condr!r%r%r&r'#szJoinableQueue.__init__cCst�|�|j|jfSr()rr+r�r�r*r%r%r&r+(szJoinableQueue.__getstate__cCs,t�||dd��|dd�\|_|_dS)N���)rr.r�r�r,r%r%r&r.+szJoinableQueue.__setstate__TNc
Cs�|jrtd|�d���|j�||�s(t�|j�J|j�8|jdkrJ|��|j	�
|�|j��|j�
�W5QRXW5QRXdSr@)r8rArrBrr1r�r5rCr4rDr�rKrErFr%r%r&rJ/s

zJoinableQueue.putc	Cs@|j�0|j�d�std��|jj��r2|j��W5QRXdS)NFz!task_done() called too many times)r�r�rBrArRrVZ
notify_allr*r%r%r&�	task_done<s
zJoinableQueue.task_donec	Cs,|j�|jj��s|j��W5QRXdSr()r�r�rRrVrsr*r%r%r&rmCszJoinableQueue.join)r)TN)	r~rr�r'r+r.rJr�rmr%r%r%r&r!s


c@s<eZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
S)rcCsHtjdd�\|_|_|��|_|jj|_tj	dkr:d|_
n
|��|_
dS)NFrr)rrrrrrr>r?rrr)r"r$r%r%r&r'Ns


zSimpleQueue.__init__cCs
|��Sr(rTr*r%r%r&rUWszSimpleQueue.emptycCst�|�|j|j|j|jfSr()r	r)rrrrr*r%r%r&r+Zs
zSimpleQueue.__getstate__cCs"|\|_|_|_|_|jj|_dSr()rrrrr>r?r,r%r%r&r.^szSimpleQueue.__setstate__c	Cs&|j�|j��}W5QRXt�|�Sr()rrr<rNrO)r"rPr%r%r&rQbszSimpleQueue.getc	CsDt�|�}|jdkr"|j�|�n|j�|j�|�W5QRXdSr()rNrurrr:rZr%r%r&rJhs


zSimpleQueue.putN)	r~rr�r'rUr+r.rQrJr%r%r%r&rLs	)�__all__rrr/r2rLrjrrZqueuerrZ_multiprocessing�rr	Z	reductionZForkingPicklerrN�utilr
rrr
r�objectrrorrr%r%r%r&�<module>
s$
v
+PK{��\��0

+__pycache__/popen_fork.cpython-38.opt-1.pycnu�[���U

e5d
�@s6ddlZddlZddlmZdgZGdd�de�ZdS)�N�)�util�Popenc@s`eZdZdZdd�Zdd�Zejfdd�Zdd	d
�Z	dd�Z
d
d�Zdd�Zdd�Z
dd�ZdS)r�forkcCs"t��d|_d|_|�|�dS�N)rZ_flush_std_streams�
returncode�	finalizer�_launch)�self�process_obj�r�2/usr/lib64/python3.8/multiprocessing/popen_fork.py�__init__szPopen.__init__cCs|Srr)r
�fdrrr
�duplicate_for_childszPopen.duplicate_for_childc
Cs�|jdkrzzt�|j|�\}}Wn(tk
rH}z
WY�dSd}~XYnX||jkrzt�|�rnt�|�|_nt�|�|_|jSr)r�os�waitpid�pid�OSError�WIFSIGNALED�WTERMSIG�WEXITSTATUS)r
�flagr�sts�errr
�polls


z
Popen.pollNcCsN|jdkrH|dk	r0ddlm}||jg|�s0dS|�|dkrBtjnd�S|jS)Nr)�waitg)rZmultiprocessing.connectionr�sentinelrr�WNOHANG)r
�timeoutrrrr
r(s
z
Popen.waitcCsZ|jdkrVzt�|j|�Wn8tk
r0Yn&tk
rT|jdd�dkrP�YnXdS)Ng�������?)r)rr�killr�ProcessLookupErrorrr)r
Zsigrrr
�_send_signal2s
zPopen._send_signalcCs|�tj�dSr)r"�signal�SIGTERM�r
rrr
�	terminate<szPopen.terminatecCs|�tj�dSr)r"r#�SIGKILLr%rrr
r ?sz
Popen.killc	Cs�d}t��\}}t��\}}t��|_|jdkrdz$t�|�t�|�|j|d�}W5t�|�Xn0t�|�t�|�t�|tj	||f�|_
||_dS)Nrr)Zparent_sentinel)r�piperr�_exit�close�
_bootstraprZFinalizeZ	close_fdsrr)r
r�codeZparent_rZchild_wZchild_rZparent_wrrr
r	Bs 






�z
Popen._launchcCs|jdk	r|��dSr)rr%rrr
r*Us
zPopen.close)N)�__name__�
__module__�__qualname__�methodrrrrrrr"r&r r	r*rrrr
rs


)rr#�r�__all__�objectrrrrr
�<module>sPK{��\�9�s	s	+__pycache__/popen_forkserver.cpython-38.pycnu�[���U

e5d��@s�ddlZddlZddlmZmZejs.ed��ddlmZddlm	Z	ddlm
Z
ddlmZd	gZGd
d�de
�ZGdd	�d	e	j�ZdS)
�N�)�	reduction�set_spawning_popenz,No support for sending fds between processes)�
forkserver)�
popen_fork)�spawn)�util�Popenc@seZdZdd�Zdd�ZdS)�_DupFdcCs
||_dS�N)�ind)�selfr�r�8/usr/lib64/python3.8/multiprocessing/popen_forkserver.py�__init__sz_DupFd.__init__cCst��|jSr)rZget_inherited_fdsr)r
rrr�detachsz
_DupFd.detachN)�__name__�
__module__�__qualname__rrrrrrr
sr
csBeZdZdZeZ�fdd�Zdd�Zdd�Ze	j
fdd	�Z�ZS)
r	rcsg|_t��|�dSr)�_fds�superr)r
�process_obj��	__class__rrr!szPopen.__init__cCs|j�|�t|j�dS)Nr)r�append�len)r
�fdrrr�duplicate_for_child%szPopen.duplicate_for_childc	Cs�t�|j�}t��}t|�zt�||�t�||�W5td�Xt�	|j
�\|_}t�
|�}t�|tj||jf�|_t|ddd��}|�|���W5QRXt�|j�|_dS)N�wbT)�closefd)rZget_preparation_data�_name�io�BytesIOrr�dumprZconnect_to_new_processr�sentinel�os�duprZFinalizeZ	close_fds�	finalizer�open�write�	getbuffer�read_signed�pid)r
rZ	prep_dataZbuf�wZ	_parent_w�frrr�_launch)s


�z
Popen._launchc	Csr|jdkrlddlm}|tjkr$dnd}||jg|�s:dSzt�|j�|_Wntt	fk
rjd|_YnX|jS)Nr)�wait�)
�
returncodeZmultiprocessing.connectionr0r%�WNOHANGr$rr+�OSError�EOFError)r
�flagr0Ztimeoutrrr�poll=s
z
Popen.poll)
rrr�methodr
ZDupFdrrr/r%r3r7�
__classcell__rrrrr	s)r!r%�contextrrZHAVE_SEND_HANDLE�ImportError�rrrr�__all__�objectr
r	rrrr�<module>s
PK{��\����JJ-__pycache__/sharedctypes.cpython-38.opt-1.pycnu�[���U

e5d��@sBddlZddlZddlmZddlmZddlmZmZejZ	dddd	d
dgZ
ejejej
ejejejejejejejejejejejd�Zd
d�Zdd�Zdd�Zddd�dd�Zddd�dd	�Zdd
�Zd&dd�Z dd�Z!dd�Z"dd�Z#dZ$iZ%e�&�Z'Gdd�de(�Z)Gd d!�d!e)�Z*Gd"d#�d#e)�Z+Gd$d%�d%e+�Z,dS)'�N�)�heap)�get_context)�	reduction�assert_spawning�RawValue�RawArray�Value�Array�copy�synchronized)�c�u�b�B�h�H�i�I�l�L�q�Q�f�dcCs t�|�}t�|�}t||d�S�N)�ctypes�sizeofrZ
BufferWrapper�
rebuild_ctype)�type_�size�wrapper�r"�4/usr/lib64/python3.8/multiprocessing/sharedctypes.py�
_new_value's

r$cGs<t�||�}t|�}t�t�|�dt�|��|j|�|S)z>
    Returns a ctypes object allocated from shared memory
    r)�typecode_to_type�getr$r�memset�	addressofr�__init__)�typecode_or_type�argsr�objr"r"r#r,s

cCsjt�||�}t|t�rD||}t|�}t�t�|�dt�|��|S|t	|�}t|�}|j
|�|SdS)z=
    Returns a ctypes array allocated from shared memory
    rN)r%r&�
isinstance�intr$rr'r(r�lenr))r*�size_or_initializerrr,�resultr"r"r#r6s

T)�lock�ctxcGsXt|f|��}|dkr|S|dkr4|p*t�}|��}t|d�sJtd|��t|||d�S)z6
    Return a synchronization wrapper for a Value
    F�TN�acquire�%r has no method 'acquire'�r3)rr�RLock�hasattr�AttributeErrorr)r*r2r3r+r,r"r"r#r	Fs

cCsTt||�}|dkr|S|dkr0|p&t�}|��}t|d�sFtd|��t|||d�S)z9
    Return a synchronization wrapper for a RawArray
    Fr4r5r6r7)rrr8r9r:r)r*r0r2r3r,r"r"r#r
Ts


cCstt|��}|t�|�d<|S)Nr)r$�typerZpointer)r,Znew_objr"r"r#rbscCs�|pt�}t|tj�r"t|||�St|tj�rR|jtjkrFt|||�St	|||�St
|�}zt|}WnRtk
r�dd�|j
D�}dd�|D�}d|j}t
|tf|�}t|<YnX||||�SdS)NcSsg|]}|d�qS)rr")�.0Zfieldr"r"r#�
<listcomp>vsz synchronized.<locals>.<listcomp>cSsi|]}|t|��qSr")�
make_property)r<�namer"r"r#�
<dictcomp>wsz synchronized.<locals>.<dictcomp>�Synchronized)rr-rZ_SimpleCDatarAr
�_type_�c_char�SynchronizedString�SynchronizedArrayr;�class_cache�KeyErrorZ_fields_�__name__�SynchronizedBase)r,r2r3�clsZscls�namesrZ	classnamer"r"r#rgs 

cCs@t|�t|tj�r(t|j|j|jffStt|�|jdffSdSr)	rr-rr
rrB�_wrapperZ_length_r;)r,r"r"r#�reduce_ctype�srMcCs8|dk	r||}t�|t�|��}|�|�}||_|Sr)�_ForkingPickler�registerrMZcreate_memoryviewZfrom_bufferrL)rr!ZlengthZbufr,r"r"r#r�s
rcCsPz
t|WStk
rJi}tt|fd|�||t|<||YSXdS)N�)�
prop_cacherG�exec�template)r?rr"r"r#r>�s
r>z�
def get%s(self):
    self.acquire()
    try:
        return self._obj.%s
    finally:
        self.release()
def set%s(self, value):
    self.acquire()
    try:
        self._obj.%s = value
    finally:
        self.release()
%s = property(get%s, set%s)
c@sFeZdZddd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	dS)rINcCsB||_|r||_n|ptdd�}|��|_|jj|_|jj|_dS)NT)Zforce)�_obj�_lockrr8r5�release)�selfr,r2r3r"r"r#r)�s

zSynchronizedBase.__init__cCs
|j��Sr)rU�	__enter__�rWr"r"r#rX�szSynchronizedBase.__enter__cGs|jj|�Sr)rU�__exit__)rWr+r"r"r#rZ�szSynchronizedBase.__exit__cCst|�t|j|jffSr)rrrTrUrYr"r"r#�
__reduce__�szSynchronizedBase.__reduce__cCs|jSr�rTrYr"r"r#�get_obj�szSynchronizedBase.get_objcCs|jSr)rUrYr"r"r#�get_lock�szSynchronizedBase.get_lockcCsdt|�j|jfS)Nz<%s wrapper for %s>)r;rHrTrYr"r"r#�__repr__�szSynchronizedBase.__repr__)NN)
rH�
__module__�__qualname__r)rXrZr[r]r^r_r"r"r"r#rI�s

rIc@seZdZed�ZdS)rA�valueN)rHr`rar>rbr"r"r"r#rA�srAc@s4eZdZdd�Zdd�Zdd�Zdd�Zd	d
�ZdS)rEcCs
t|j�Sr)r/rTrYr"r"r#�__len__�szSynchronizedArray.__len__c
Cs&|�|j|W5QR�SQRXdSrr\)rWrr"r"r#�__getitem__�szSynchronizedArray.__getitem__c	Cs|�||j|<W5QRXdSrr\)rWrrbr"r"r#�__setitem__�szSynchronizedArray.__setitem__c
Cs*|�|j||�W5QR�SQRXdSrr\)rW�start�stopr"r"r#�__getslice__�szSynchronizedArray.__getslice__c	Cs"|�||j||�<W5QRXdSrr\)rWrfrg�valuesr"r"r#�__setslice__�szSynchronizedArray.__setslice__N)rHr`rarcrdrerhrjr"r"r"r#rE�s
rEc@seZdZed�Zed�ZdS)rDrb�rawN)rHr`rar>rbrkr"r"r"r#rD�srD)NN)-r�weakref�rr�contextrrZForkingPicklerrN�__all__rCZc_wcharZc_byteZc_ubyteZc_shortZc_ushortZc_intZc_uintZc_longZc_ulongZ
c_longlongZc_ulonglongZc_floatZc_doubler%r$rrr	r
rrrMrr>rSrQ�WeakKeyDictionaryrF�objectrIrArErDr"r"r"r#�<module>
sL�


	 PK{��\��=(-(-(__pycache__/context.cpython-38.opt-2.pycnu�[���U

e5d�+�@s�ddlZddlZddlZddlmZddlmZdZGdd�de�ZGdd	�d	e�Z	Gd
d�de�Z
Gdd
�d
e�ZGdd�de�Z
Gdd�dej�ZGdd�de
�Zejdk�rRGdd�dej�ZGdd�dej�ZGdd�dej�ZGdd�de
�ZGdd�de
�ZGdd �d e
�Ze�e�e�d!�Zejd"k�rDeed#�Zneed$�Zn8Gd%d�dej�ZGd&d�de
�Zd#e�iZeed#�Zd'd(�Ze��Zd)d*�Zd+d,�Zd-d.�ZdS)/�N�)�process)�	reduction�c@seZdZdS)�ProcessErrorN��__name__�
__module__�__qualname__rrr�//usr/lib64/python3.8/multiprocessing/context.pyrsrc@seZdZdS)�BufferTooShortNrrrrrrsrc@seZdZdS)�TimeoutErrorNrrrrrr
sr
c@seZdZdS)�AuthenticationErrorNrrrrrrsrc@sXeZdZeZeZeZeZeej	�Z	eej
�Z
eej�Zdd�Zdd�Z
dCdd�Zdd	�Zd
d�ZdDd
d�ZdEdd�ZdFdd�Zdd�ZdGdd�ZdHdd�ZdIdd�Zdd�ZdJd d!�Zd"d#�Zd$d%�Zdd&�d'd(�Zdd&�d)d*�Zd+d,�Zd-d.�ZdKd/d0�Z d1d2�Z!d3d4�Z"d5d6�Z#dLd7d8�Z$dMd:d;�Z%dNd<d=�Z&e'd>d?��Z(e(j)d@d?��Z(dAdB�Z*dS)O�BaseContextcCs"t��}|dkrtd��n|SdS)Nzcannot determine number of cpus)�os�	cpu_count�NotImplementedError)�selfZnumrrrr)s
zBaseContext.cpu_countcCs&ddlm}||��d�}|��|S)Nr)�SyncManager��ctx)Zmanagersr�get_context�start)rr�mrrr�Manager1szBaseContext.ManagerTcCsddlm}||�S)Nr)�Pipe)�
connectionr)rZduplexrrrrr<szBaseContext.PipecCsddlm}||��d�S)Nr)�Lockr)�synchronizerr)rrrrrrAszBaseContext.LockcCsddlm}||��d�S)Nr)�RLockr)rrr)rrrrrrFszBaseContext.RLockNcCsddlm}|||��d�S)Nr)�	Conditionr)rr r)r�lockr rrrr KszBaseContext.ConditionrcCsddlm}|||��d�S)Nr)�	Semaphorer)rr"r)r�valuer"rrrr"PszBaseContext.SemaphorecCsddlm}|||��d�S)Nr)�BoundedSemaphorer)rr$r)rr#r$rrrr$UszBaseContext.BoundedSemaphorecCsddlm}||��d�S)Nr)�Eventr)rr%r)rr%rrrr%ZszBaseContext.EventcCs ddlm}|||||��d�S)Nr)�Barrierr)rr&r)rZparties�actionZtimeoutr&rrrr&_szBaseContext.BarrierrcCsddlm}|||��d�S)Nr)�Queuer)�queuesr(r)r�maxsizer(rrrr(dszBaseContext.QueuecCsddlm}|||��d�S)Nr)�
JoinableQueuer)r)r+r)rr*r+rrrr+iszBaseContext.JoinableQueuecCsddlm}||��d�S)Nr)�SimpleQueuer)r)r,r)rr,rrrr,nszBaseContext.SimpleQueuercCs"ddlm}||||||��d�S)Nr)�Pool)�context)Zpoolr-r)rZ	processesZinitializerZinitargsZmaxtasksperchildr-rrrr-ss
�zBaseContext.PoolcGsddlm}||f|��S)Nr)�RawValue)�sharedctypesr/)r�typecode_or_type�argsr/rrrr/zszBaseContext.RawValuecCsddlm}|||�S)Nr)�RawArray)r0r3)rr1�size_or_initializerr3rrrr3szBaseContext.RawArray)r!cGs&ddlm}||f|�||��d��S)Nr)�Value�r!r)r0r5r)rr1r!r2r5rrrr5�s�zBaseContext.ValuecCs ddlm}|||||��d�S)Nr)�Arrayr6)r0r7r)rr1r4r!r7rrrr7�s�zBaseContext.ArraycCs,tjdkr(ttdd�r(ddlm}|�dS)N�win32�frozenFr)�freeze_support)�sys�platform�getattr�spawnr:)rr:rrrr:�szBaseContext.freeze_supportcCsddlm}|�S)Nr)�
get_logger)�utilr?)rr?rrrr?�szBaseContext.get_loggercCsddlm}||�S)Nr)�
log_to_stderr)r@rA)r�levelrArrrrA�szBaseContext.log_to_stderrcCsddlm}dS)Nr)r)�r)rrrrr�allow_connection_pickling�sz%BaseContext.allow_connection_picklingcCsddlm}||�dS)Nr)�set_executable)r>rE)r�
executablerErrrrE�szBaseContext.set_executablecCsddlm}||�dS)Nr)�set_forkserver_preload)�
forkserverrG)rZmodule_namesrGrrrrG�sz"BaseContext.set_forkserver_preloadcCsH|dkr|Szt|}Wn"tk
r:td|�d�YnX|��|S)Nzcannot find context for %r)�_concrete_contexts�KeyError�
ValueError�_check_available)r�methodrrrrr�szBaseContext.get_contextFcCs|jS�N)�_name�rZ
allow_nonerrr�get_start_method�szBaseContext.get_start_methodcCstd��dS)Nz+cannot set start method of concrete context)rK�rrMZforcerrr�set_start_method�szBaseContext.set_start_methodcCst��d�S�Nr)�globals�get�rrrr�reducer�szBaseContext.reducercCs|t�d<dSrT)rU)rrrrrrX�scCsdSrNrrWrrrrL�szBaseContext._check_available)T)N)r)r)NN)r)r)NNrN)N)N)F)F)+rr	r
rrr
r�staticmethodrZcurrent_processZparent_processZactive_childrenrrrrrr r"r$r%r&r(r+r,r-r/r3r5r7r:r?rArDrErGrrQrS�propertyrX�setterrLrrrrrsR









�







rc@seZdZdZedd��ZdS)�ProcessNcCst��j�|�SrN)�_default_contextrr\�_Popen)�process_objrrrr^�szProcess._Popen�rr	r
Z
_start_methodrYr^rrrrr\�sr\csFeZdZeZdd�Zd
�fdd�	Zddd�Zdd	d
�Zdd�Z�Z	S)�DefaultContextcCs||_d|_dSrN)r]�_actual_context)rr.rrr�__init__�szDefaultContext.__init__Ncs0|dkr |jdkr|j|_|jSt��|�SdSrN)rbr]�superr)rrM��	__class__rrr�s

zDefaultContext.get_contextFcCs<|jdk	r|std��|dkr,|r,d|_dS|�|�|_dS)Nzcontext has already been set)rb�RuntimeErrorrrRrrrrS�szDefaultContext.set_start_methodcCs"|jdkr|rdS|j|_|jjSrN)rbr]rOrPrrrrQ�s

zDefaultContext.get_start_methodcCsBtjdkrdgStjdkr"ddgnddg}tjr:|�d�|SdS)Nr8r>�darwin�forkrH)r;r<r�HAVE_SEND_HANDLE�append)r�methodsrrr�get_all_start_methodss

z$DefaultContext.get_all_start_methods)N)F)F)
rr	r
r\rcrrSrQrm�
__classcell__rrrerra�s

rar8c@seZdZdZedd��ZdS)�ForkProcessricCsddlm}||�S�Nr)�Popen)Z
popen_forkrq�r_rqrrrr^szForkProcess._PopenNr`rrrrrosroc@seZdZdZedd��ZdS)�SpawnProcessr>cCsddlm}||�Srp)Zpopen_spawn_posixrqrrrrrr^s�SpawnProcess._PopenNr`rrrrrssrsc@seZdZdZedd��ZdS)�ForkServerProcessrHcCsddlm}||�Srp)Zpopen_forkserverrqrrrrrr^ szForkServerProcess._PopenNr`rrrrrusruc@seZdZdZeZdS)�ForkContextriN)rr	r
rOror\rrrrrv%srvc@seZdZdZeZdS��SpawnContextr>N�rr	r
rOrsr\rrrrrx)srxc@seZdZdZeZdd�ZdS)�ForkServerContextrHcCstjstd��dS)Nz%forkserver start method not available)rrjrKrWrrrrL0sz"ForkServerContext._check_availableN)rr	r
rOrur\rLrrrrrz-srz)rir>rHrhr>ric@seZdZdZedd��ZdS)rsr>cCsddlm}||�Srp)Zpopen_spawn_win32rqrrrrrr^DsrtNr`rrrrrsBsc@seZdZdZeZdSrwryrrrrrxIscCst|t_dSrN)rIr]rb)rMrrr�_force_start_methodVsr{cCsttdd�S)N�spawning_popen)r=�_tlsrrrr�get_spawning_popen_sr~cCs
|t_dSrN)r}r|)�popenrrr�set_spawning_popenbsr�cCs t�dkrtdt|�j��dS)NzF%s objects should only be shared between processes through inheritance)r~rg�typer)�objrrr�assert_spawninges
��r�) rr;Z	threadingrCrr�__all__�	Exceptionrrr
r�objectrZBaseProcessr\rar<rorsrurvrxrzrIr]r{Zlocalr}r~r�r�rrrr�<module>sL?,��PK{��\_��w��2__pycache__/popen_spawn_posix.cpython-38.opt-2.pycnu�[���U

e5d��@spddlZddlZddlmZmZddlmZddlmZddlmZdgZ	Gdd	�d	e
�ZGd
d�dej�ZdS)�N�)�	reduction�set_spawning_popen)�
popen_fork)�spawn)�util�Popenc@seZdZdd�Zdd�ZdS)�_DupFdcCs
||_dS�N��fd��selfr�r�9/usr/lib64/python3.8/multiprocessing/popen_spawn_posix.py�__init__sz_DupFd.__init__cCs|jSr
r)rrrr�detachsz
_DupFd.detachN)�__name__�
__module__�__qualname__rrrrrrr	sr	cs4eZdZdZeZ�fdd�Zdd�Zdd�Z�Z	S)rrcsg|_t��|�dSr
)�_fds�superr)r�process_obj��	__class__rrrszPopen.__init__cCs|j�|�|Sr
)r�appendr
rrr�duplicate_for_child"szPopen.duplicate_for_childcCsXddlm}|��}|j�|�t�|j�}t�	�}t
|�zt�||�t�||�W5t
d�Xd}}}}	z~t��\}}t��\}}	tj||d�}|j�||g�t
�t��||j�|_||_t|	ddd��}
|
�|���W5QRXW5g}
||	fD]}|dk	�r|
�|��qt
�|t
j|
�|_||fD]}|dk	�r6t�|��q6XdS)Nr)�resource_tracker)�
tracker_fdZpipe_handle�wbF)�closefd)�rZgetfdrrrZget_preparation_data�_name�io�BytesIOrr�dumprZFinalizeZ	close_fds�	finalizer�os�close�pipeZget_command_line�extendZspawnv_passfdsZget_executable�pid�sentinel�open�write�	getbuffer)rrrrZ	prep_data�fpZparent_rZchild_wZchild_rZparent_wZfds_to_closer�cmd�frrr�_launch&sB
�
�

z
Popen._launch)
rrr�methodr	ZDupFdrrr3�
__classcell__rrrrrs
)
r#r'�contextrrr!rrr�__all__�objectr	rrrrr�<module>s
PK{��\�w�&__pycache__/spawn.cpython-38.opt-2.pycnu�[���U

e5dP$�@s$ddlZddlZddlZddlZddlmZmZddlmZddlm	Z	ddlm
Z
ddd	d
ddd
gZejdkrzdZ
dZneedd�Z
ej���d�Zer�ej�ejd�anejadd	�Zdd
�Zdd�Zdd�Zdd�Zd&dd�Zdd�Zdd�Zdd�ZgZ dd �Z!d!d"�Z"d#d$�Z#d%d
�Z$dS)'�N�)�get_start_method�set_start_method)�process)�	reduction)�util�_main�freeze_support�set_executable�get_executable�get_preparation_data�get_command_line�import_main_path�win32F�frozenzpythonservice.exez
python.execCs|adS�N��_python_exe)Zexe�r�-/usr/lib64/python3.8/multiprocessing/spawn.pyr
)scCstSrrrrrrr-scCs$t|�dkr|ddkrdSdSdS)N�r�--multiprocessing-forkTF)�len)�argvrrr�
is_forking4srcCsdttj�r`i}tjdd�D]0}|�d�\}}|dkr@d||<qt|�||<qtf|�t��dS)Nr�=�None)r�sysr�split�int�
spawn_main�exit)�kwds�arg�name�valuerrrr	>s


cKshttdd�r(tjdgdd�|��D�Sd}|d�dd	�|��D��;}t��}tg|d
|dgSdS)NrFrcSsg|]}d|�qS)�%s=%rr��.0�itemrrr�
<listcomp>Tsz$get_command_line.<locals>.<listcomp>z<from multiprocessing.spawn import spawn_main; spawn_main(%s)z, css|]}d|VqdS)r&Nrr'rrr�	<genexpr>Wsz#get_command_line.<locals>.<genexpr>z-c)�getattrr�
executable�items�joinrZ_args_from_interpreter_flagsr)r"�progZoptsrrrr
Ns�cCs�tjdkr`ddl}ddl}|dk	r:|�|j|jBd|�}nd}tj||d�}|�	|t
j�}|}n"ddlm
}	||	j_|}t
�|�}t||�}
t�|
�dS)NrrF)�source_processr)�resource_tracker)r�platform�msvcrt�_winapiZOpenProcessZSYNCHRONIZEZPROCESS_DUP_HANDLErZ	duplicateZopen_osfhandle�os�O_RDONLY�r2Z_resource_trackerZ_fd�duprr!)Zpipe_handleZ
parent_pidZ
tracker_fdr4r5r1Z
new_handle�fd�parent_sentinelr2Zexitcoderrrr \s*

��

r c	Cs`tj|ddd��@}dt��_z$tj�|�}t|�tj�|�}W5t��`XW5QRX|�	|�S)N�rbT)�closefd)
r6�fdopenr�current_process�_inheritingr�pickle�load�prepare�
_bootstrap)r:r;Zfrom_parentZpreparation_data�selfrrrrxs
cCstt��dd�rtd��dS)Nr@Fa
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.)r,rr?�RuntimeErrorrrrr�_check_not_importing_main�srGcCst�ttjt��jd�}tjdk	r2tj��|d<t	j
��}z|�d�}Wnt
k
r^YnXtj||<|j||t	jtjt��t�d�t	jd}t|jdd�}|dk	r�||d<nft	jdks�t�st�st|d	d�}|dk	�rtj
�|��s
tjdk	�r
tj
�tj|�}tj
�|�|d
<|S)N)�
log_to_stderr�authkey�	log_levelr8)r$�sys_path�sys_argv�orig_dir�dir�start_method�__main__r$�init_main_from_namer�__file__�init_main_from_path)rG�dictrZ_log_to_stderrrr?rIZ_loggerZgetEffectiveLevelr�path�copy�index�
ValueError�ORIGINAL_DIR�updaterr6�getcwdr�modulesr,�__spec__r3�WINEXE�
WINSERVICE�isabsr/�normpath)r$�drK�i�main_moduleZ
main_mod_name�	main_pathrrrr�sD�


�


�cCs�d|kr|dt��_d|kr,|dt��_d|krD|drDt��d|kr^t���|d�d|krp|dt_	d|kr�|dt_
d|kr�t�|d�d|kr�|dt_
d	|kr�t|d	d
d�d|kr�t|d�nd
|kr�t|d
�dS)Nr$rIrHrJrKrLrNrMrOT)ZforcerQrS)rr?r$rIrrHZ
get_loggerZsetLevelrrUrr6�chdirrYr�_fixup_main_from_name�_fixup_main_from_path)�datarrrrC�s,


rCcCs~tjd}|dks|�d�r dSt|jdd�|kr6dSt�|�t�d�}t	j
|ddd�}|j�|�|tjd<tjd<dS)NrPz	.__main__r$�__mp_main__T)�run_nameZ	alter_sys)
rr\�endswithr,r]�old_main_modules�append�types�
ModuleType�runpyZ
run_module�__dict__rZ)Zmod_name�current_mainrd�main_contentrrrrg�s


�rgcCs�tjd}tj�tj�|��d}|dkr.dSt|dd�|krBdSt�|�t	�
d�}tj|dd�}|j
�|�|tjd<tjd<dS)NrPrZipythonrRrj)rk)rr\r6rU�splitext�basenamer,rmrnrorprqZrun_pathrrrZ)rersZ	main_namerdrtrrrrh	s


�rhcCst|�dSr)rh)rerrrr%s)NN)%r6rrqror8rrr�contextrr�__all__r3r^r_r,r-�lowerrlrUr/�exec_prefixrr
rrr	r
r rrGrrmrCrgrhrrrrr�<module>sD�


2&PK{��\�c"		%__pycache__/heap.cpython-38.opt-2.pycnu�[���U

e5dj-�@s�ddlZddlmZddlZddlZddlZddlZddlZddlm	Z	m
Z
ddlmZdgZ
ejdkr�ddlZGdd	�d	e�Zn,Gd
d	�d	e�Zdd�Zd
d�Ze	�ee�Gdd�de�ZGdd�de�ZdS)�N)�defaultdict�)�	reduction�assert_spawning)�util�
BufferWrapperZwin32c@s,eZdZe��Zdd�Zdd�Zdd�ZdS)�ArenacCsx||_td�D]B}dt��t|j�f}tjd||d�}t��dkrHqZ|�	�qt
d��||_||_|j|jf|_
dS)N�dz	pym-%d-%s����ZtagnamerzCannot find name for new mmap)�size�range�os�getpid�next�_rand�mmap�_winapiZGetLastError�close�FileExistsError�name�buffer�_state)�selfr�irZbuf�r�,/usr/lib64/python3.8/multiprocessing/heap.py�__init__&s
�Arena.__init__cCst|�|jS�N)rr)rrrr�__getstate__5szArena.__getstate__cCs,|\|_|_|_tjd|j|jd�|_dS)Nr
r)rrrrr)r�staterrr�__setstate__9szArena.__setstate__N)	�__name__�
__module__�__qualname__�tempfileZ_RandomNameSequencerrr r"rrrrrsrc@s4eZdZejdkrdgZngZd	dd�Zdd�ZdS)
rZlinuxz/dev/shmr
cCsx||_||_|dkrbtjdt��|�|�d�\|_}t�|�t�	|tj
|jf�t�|j|�t�|j|j�|_
dS)Nr
zpym-%d-)�prefix�dir)r�fdr&Zmkstemprr�_choose_dir�unlinkr�Finalizer�	ftruncaterr)rrr)rrrrrMs
�
rcCs6|jD]&}t�|�}|j|j|kr|Sqt��Sr)�_dir_candidatesr�statvfs�f_bavail�f_frsizerZget_temp_dir)rr�d�strrrr*[s



zArena._choose_dirN)r
)r#r$r%�sys�platformr.rr*rrrrrCs


cCs(|jdkrtd��t|jt�|j�ffS)Nr
zDArena is unpicklable because forking was enabled when it was created)r)�
ValueError�
rebuild_arenarrZDupFd)�arrr�reduce_arenads
r9cCst||���Sr)r�detach)rZdupfdrrrr7jsr7c@szeZdZdZdZdZejfdd�Ze	dd��Z
dd�Zd	d
�Zdd�Z
d
d�Zdd�Zdd�Zdd�Zdd�Zdd�ZdS)�Heap�i@cCsXt��|_t��|_||_g|_i|_i|_	i|_
tt�|_
g|_g|_d|_d|_dS�Nr)rr�_lastpid�	threadingZLock�_lock�_size�_lengths�_len_to_seq�_start_to_block�_stop_to_blockr�set�_allocated_blocks�_arenas�_pending_free_blocks�
_n_mallocs�_n_frees)rrrrrr{s


z
Heap.__init__cCs|d}|||@S)Nrr)�nZ	alignment�maskrrr�_roundup�sz
Heap._roundupcCsZ|�t|j|�tj�}|j|jkr0|jd9_t�d|�t|�}|j	�
|�|d|fS)N�z"allocating a new mmap of length %dr)rN�maxrAr�PAGESIZE�_DOUBLE_ARENA_SIZE_UNTILr�inforrH�append)rr�length�arenarrr�
_new_arena�szHeap._new_arenacCsz|j}||jkrdS|j�|�}|j|df=|j||f=|j�|�|j|}|�|d|f�|sv|j|=|j	�|�dSr=)
r�_DISCARD_FREE_SPACE_LARGER_THANrG�poprDrErH�removerCrB)rrVrU�blocks�seqrrr�_discard_arena�s

zHeap._discard_arenac	Cs|t�|j|�}|t|j�kr&|�|�S|j|}|j|}|��}|sV|j|=|j|=|\}}}|j||f=|j||f=|Sr)	�bisectZbisect_leftrB�lenrWrCrYrDrE)	rrrrUr\�blockrV�start�stoprrr�_malloc�s



zHeap._mallocc	Cs�|\}}}z|j||f}Wntk
r0YnX|�|�\}}z|j||f}Wntk
rfYnX|�|�\}}|||f}||}z|j|�|�Wn.tk
r�|g|j|<t�|j|�YnX||j||f<||j||f<dSr)	rE�KeyError�_absorbrDrCrTr^ZinsortrB)	rr`rVrarbZ
prev_block�_Z
next_blockrUrrr�_add_free_block�s(

zHeap._add_free_blockcCs^|\}}}|j||f=|j||f=||}|j|}|�|�|sV|j|=|j�|�||fSr)rDrErCrZrB)rr`rVrarbrUr\rrrre�s


zHeap._absorbcCs4|\}}}|j|}|�||f�|s0|�|�dSr)rGrZr])rr`rVrarbr[rrr�_remove_allocated_block�s


zHeap._remove_allocated_blockcCsBz|j��}Wntk
r&Yq>YnX|�|�|�|�qdSr)rIrY�
IndexErrorrgrh�rr`rrr�_free_pending_blockss

zHeap._free_pending_blockscCs~t��|jkr$td�t��|j���|j�d�s>|j�|�n<z.|j
d7_
|��|�|�|�
|�W5|j�	�XdS)Nz$My pid ({0:n}) is not last pid {1:n}Fr)rrr>r6�formatr@�acquirerIrT�releaserKrkrgrhrjrrr�frees
��
z	Heap.freec
Cs�|dkrtd�|���tj|kr.td�|���t��|jkrD|��|j	��|j
d7_
|��|�t
|d�|j�}|�|�\}}}||}||kr�|�|||f�|j|�||f�|||fW5QR�SQRXdS)Nr�Size {0:n} out of range�Size {0:n} too larger)r6rlr4�maxsize�
OverflowErrorrrr>rr@rJrkrNrP�
_alignmentrcrgrG�add)rrrVrarbZ	real_stoprrr�malloc(s 
zHeap.mallocN)r#r$r%rtrXrRrrQr�staticmethodrNrWr]rcrgrerhrkrorvrrrrr;ss

r;c@s"eZdZe�Zdd�Zdd�ZdS)rcCs^|dkrtd�|���tj|kr.td�|���tj�|�}||f|_t	j
|tjj|fd�dS)Nrrprq)�args)r6rlr4rrrsr�_heaprvrrr,ro)rrr`rrrrFs

zBufferWrapper.__init__cCs&|j\\}}}}t|j�|||�Sr)r�
memoryviewr)rrVrarbrrrr�create_memoryviewOszBufferWrapper.create_memoryviewN)r#r$r%r;ryrr{rrrrrBs	)r^�collectionsrrrr4r&r?�contextrr�r�__all__r5r�objectrr9r7�registerr;rrrrr�<module>
s&
$!PPK{��\��t�EZEZ%__pycache__/pool.cpython-38.opt-2.pycnu�[���U

e5d�~�@sdddgZddlZddlZddlZddlZddlZddlZddlZddlZddlm	Z	ddl
mZddl
mZm
Z
ddlmZd	Zd
ZdZdZe��Zd
d�Zdd�ZGdd�de�ZGdd�d�Zdd�ZGdd�de�Zd+dd�Zdd�ZGdd �d e�Z Gd!d�de!�Z"Gd"d#�d#e!�Z#e#Z$Gd$d%�d%e#�Z%Gd&d'�d'e!�Z&Gd(d)�d)e&�Z'Gd*d�de"�Z(dS),�Pool�
ThreadPool�N)�Empty�)�util)�get_context�TimeoutError)�wait�INIT�RUN�CLOSE�	TERMINATEcCstt|��S�N)�list�map��args�r�,/usr/lib64/python3.8/multiprocessing/pool.py�mapstar/srcCstt�|d|d��S)Nrr)r�	itertools�starmaprrrr�starmapstar2src@seZdZdd�Zdd�ZdS)�RemoteTracebackcCs
||_dSr��tb)�selfrrrr�__init__:szRemoteTraceback.__init__cCs|jSrr�rrrr�__str__<szRemoteTraceback.__str__N)�__name__�
__module__�__qualname__rrrrrrr9src@seZdZdd�Zdd�ZdS)�ExceptionWithTracebackcCs0t�t|�||�}d�|�}||_d||_dS)N�z

"""
%s""")�	traceback�format_exception�type�join�excr)rr)rrrrr@s
zExceptionWithTraceback.__init__cCst|j|jffSr)�rebuild_excr)rrrrr�
__reduce__Esz!ExceptionWithTraceback.__reduce__N)r r!r"rr+rrrrr#?sr#cCst|�|_|Sr)r�	__cause__)r)rrrrr*Hs
r*cs,eZdZ�fdd�Zdd�Zdd�Z�ZS)�MaybeEncodingErrorcs.t|�|_t|�|_tt|��|j|j�dSr)�reprr)�value�superr-r)rr)r/��	__class__rrrTs

zMaybeEncodingError.__init__cCsd|j|jfS)Nz(Error sending result: '%s'. Reason: '%s')r/r)rrrrrYs�zMaybeEncodingError.__str__cCsd|jj|fS)Nz<%s: %s>)r2r rrrr�__repr__]szMaybeEncodingError.__repr__)r r!r"rrr3�
__classcell__rrr1rr-Psr-rFc
Cs�|dk	r(t|t�r|dks(td�|���|j}|j}t|d�rR|j��|j	��|dk	rb||�d}|dks~|�r�||k�r�z
|�}	Wn(t
tfk
r�t�
d�Y�q�YnX|	dkr�t�
d��q�|	\}
}}}
}zd||
|�f}WnHtk
�r0}z(|�r|tk	�rt||j�}d|f}W5d}~XYnXz||
||f�WnRtk
�r�}z2t||d�}t�
d	|�||
|d|ff�W5d}~XYnXd}	}
}}}
}|d7}qft�
d
|�dS)NrzMaxtasks {!r} is not valid�_writerrz)worker got EOFError or OSError -- exitingzworker got sentinel -- exitingTFz0Possible encoding error while sending result: %szworker exiting after %d tasks)�
isinstance�int�AssertionError�format�put�get�hasattrr5�close�_reader�EOFError�OSErrorr�debug�	Exception�_helper_reraises_exceptionr#�
__traceback__r-)�inqueue�outqueue�initializer�initargsZmaxtasks�wrap_exceptionr:r;Z	completed�task�job�i�funcr�kwds�result�e�wrappedrrr�workerasN�





�$
rRcCs|�dSrr)ZexrrrrC�srCcs.eZdZdd��fdd�
Z�fdd�Z�ZS)�
_PoolCacheN��notifiercs||_t�j||�dSr)rUr0r)rrUrrNr1rrr�sz_PoolCache.__init__cs t��|�|s|j�d�dSr)r0�__delitem__rUr:)r�itemr1rrrV�sz_PoolCache.__delitem__)r r!r"rrVr4rrr1rrS�srSc@s�eZdZdZedd��ZdKdd�Zeje	fdd	�Z
d
d�Zdd
�Zedd��Z
edd��Zdd�Zedd��Zedd��Zdd�Zdd�Zdifdd�ZdLdd�ZdMd d!�ZdNd"d#�Zd$d%�ZdOd'd(�ZdPd)d*�Zdiddfd+d,�ZdQd-d.�ZdRd/d0�ZedSd1d2��Zed3d4��Z ed5d6��Z!ed7d8��Z"ed9d:��Z#d;d<�Z$d=d>�Z%d?d@�Z&dAdB�Z'edCdD��Z(edEdF��Z)dGdH�Z*dIdJ�Z+dS)TrTcOs|j||�Sr��Process)�ctxrrNrrrrY�szPool.ProcessNrcCsg|_t|_|pt�|_|��t��|_|j��|_	t
|j	d�|_||_||_
||_|dkrjt��phd}|dkrztd��|dk	r�t|�s�td��||_z|��WnHtk
r�|jD]}|jdkr�|��q�|jD]}|��q؂YnX|��}tjtj|j|j|j|j|j|j|j |j!|j
|j|j|j"||j	fd�|_#d|j#_$t%|j#_|j#�&�tjtj'|j|j(|j!|j|jfd�|_)d|j)_$t%|j)_|j)�&�tjtj*|j!|j+|jfd�|_,d|j,_$t%|j,_|j,�&�t-j.||j/|j|j |j!|j|j	|j#|j)|j,|jf	dd�|_0t%|_dS)	NrTrz&Number of processes must be at least 1zinitializer must be a callable��targetrT�)rZexitpriority)1�_poolr
�_stater�_ctx�
_setup_queues�queue�SimpleQueue�
_taskqueue�_change_notifierrS�_cache�_maxtasksperchild�_initializer�	_initargs�os�	cpu_count�
ValueError�callable�	TypeError�
_processes�_repopulate_poolrB�exitcode�	terminater(�_get_sentinels�	threadingZThreadr�_handle_workersrY�_inqueue�	_outqueue�_wrap_exception�_worker_handler�daemonr�start�
_handle_tasks�
_quick_put�
_task_handler�_handle_results�
_quick_get�_result_handlerrZFinalize�_terminate_pool�
_terminate)r�	processesrGrH�maxtasksperchild�context�p�	sentinelsrrrr�s�





��
��
�
��z
Pool.__init__cCs>|j|kr:|d|��t|d�t|dd�dk	r:|j�d�dS)Nz&unclosed running multiprocessing pool )�sourcere)r_�ResourceWarning�getattrrer:)rZ_warnrrrr�__del__s

�zPool.__del__c	Cs0|j}d|j�d|j�d|j�dt|j��d�	S)N�<�.z state=z pool_size=�>)r2r!r"r_�lenr^)r�clsrrrr3sz
Pool.__repr__cCs|jjg}|jjg}||�Sr)rwr>re)rZtask_queue_sentinelsZself_notifier_sentinelsrrrrss

zPool._get_sentinelscCsdd�|D�S)NcSsg|]}t|d�r|j�qS)�sentinel)r<r�)�.0rRrrr�
<listcomp>s
�z.Pool._get_worker_sentinels.<locals>.<listcomp>r�Zworkersrrr�_get_worker_sentinelss�zPool._get_worker_sentinelscCsPd}ttt|���D]6}||}|jdk	rt�d|�|��d}||=q|S)NF�cleaning up worker %dT)�reversed�ranger�rqrrAr()�poolZcleanedrLrRrrr�_join_exited_workerss
zPool._join_exited_workerscCs0|�|j|j|j|j|j|j|j|j|j	|j
�
Sr)�_repopulate_pool_staticr`rYror^rvrwrhrirgrxrrrrrp.s�zPool._repopulate_poolc

Csft|t|��D]P}
||t||||||	fd�}|j�dd�|_d|_|��|�|�t�	d�qdS)Nr[rYZ
PoolWorkerTzadded worker)
r�r�rR�name�replacerzr{�appendrrA)rZrYr�r�rErFrGrHr�rIrL�wrrrr�7s��
zPool._repopulate_pool_staticc

Cs*t�|�r&t�||||||||||	�
dSr)rr�r�)
rZrYr�r�rErFrGrHr�rIrrr�_maintain_poolJs
�zPool._maintain_poolcCs4|j��|_|j��|_|jjj|_|jjj|_	dSr)
r`rcrvrwr5�sendr}r>�recvr�rrrrraVszPool._setup_queuescCs|jtkrtd��dS)NzPool not running)r_rrlrrrr�_check_running\s
zPool._check_runningcCs|�|||���Sr)�apply_asyncr;)rrMrrNrrr�apply`sz
Pool.applycCs|�||t|���Sr)�
_map_asyncrr;�rrM�iterable�	chunksizerrrrgszPool.mapcCs|�||t|���Sr)r�rr;r�rrrrnszPool.starmapcCs|�||t|||�Sr)r�r�rrMr�r��callback�error_callbackrrr�
starmap_asyncvs�zPool.starmap_asyncc
csjz,d}t|�D]\}}||||fifVqWn8tk
rd}z||dt|fifVW5d}~XYnXdS)N���r)�	enumeraterBrC)rZ
result_jobrMr�rL�xrPrrr�_guarded_task_generation~szPool._guarded_task_generationrcCs�|��|dkr:t|�}|j�|�|j||�|jf�|S|dkrPtd�|���t	�
|||�}t|�}|j�|�|jt|�|jf�dd�|D�SdS)NrzChunksize must be 1+, not {0:n}css|]}|D]
}|Vq
qdSrr�r��chunkrWrrr�	<genexpr>�szPool.imap.<locals>.<genexpr>)r��IMapIteratorrdr:r��_job�_set_lengthrlr9r�
_get_tasksr�rrMr�r�rO�task_batchesrrr�imap�s4�������z	Pool.imapcCs�|��|dkr:t|�}|j�|�|j||�|jf�|S|dkrPtd�|���t	�
|||�}t|�}|j�|�|jt|�|jf�dd�|D�SdS)NrzChunksize must be 1+, not {0!r}css|]}|D]
}|Vq
qdSrrr�rrrr��sz&Pool.imap_unordered.<locals>.<genexpr>)r��IMapUnorderedIteratorrdr:r�r�r�rlr9rr�rr�rrr�imap_unordered�s0������zPool.imap_unorderedcCs6|��t|||�}|j�|jd|||fgdf�|S�Nr)r��ApplyResultrdr:r�)rrMrrNr�r�rOrrrr��szPool.apply_asynccCs|�||t|||�Sr)r�rr�rrr�	map_async�s�zPool.map_asyncc
Cs�|��t|d�st|�}|dkrJtt|�t|j�d�\}}|rJ|d7}t|�dkrZd}t�|||�}t||t|�||d�}	|j	�
|�|	j||�df�|	S)N�__len__�rr�r�)
r�r<r�divmodr�r^rr��	MapResultrdr:r�r�)
rrMr�Zmapperr�r�r�Zextrar�rOrrrr��s,
����zPool._map_asynccCs"t||d�|��s|��qdS)N)�timeout)r	�emptyr;)r��change_notifierr�rrr�_wait_for_updates�szPool._wait_for_updatescCspt��}|jtks |rX|jtkrX|�|||||||	|
||�
|�|�|
�}|�||�q|�d�t	�
d�dS)Nzworker handler exiting)rt�current_threadr_rr
r�r�r�r:rrA)r��cache�	taskqueuerZrYr�r�rErFrGrHr�rIr�r��threadZcurrent_sentinelsrrrru�s�
zPool._handle_workersc

Cspt��}t|jd�D]�\}}d}z�|D]�}|jtkrBt�d�q�z||�Wq&tk
r�}
zB|dd�\}	}z||	�	|d|
f�Wnt
k
r�YnXW5d}
~
XYq&Xq&|r�t�d�|r�|dnd}||d�W�qW�
�q
W5d}}}	Xqt�d�z6t�d�|�d�t�d	�|D]}|d��q.Wn tk
�r`t�d
�YnXt�d�dS)Nz'task handler found thread._state != RUN�Fzdoing set_length()rr�ztask handler got sentinelz/task handler sending sentinel to result handlerz(task handler sending sentinel to workersz/task handler got OSError when sending sentinelsztask handler exiting)
rtr��iterr;r_rrrArB�_set�KeyErrorr:r@)
r�r:rFr�r�r�ZtaskseqZ
set_lengthrJrKrP�idxr�rrrr|sB






zPool._handle_tasksc	Cs�t��}z
|�}Wn$ttfk
r6t�d�YdSX|jtkrNt�d�q�|dkrbt�d�q�|\}}}z||�||�Wnt	k
r�YnXd}}}q|�r@|jt
k�r@z
|�}Wn$ttfk
r�t�d�YdSX|dk�r�t�d�q�|\}}}z||�||�Wnt	k
�r0YnXd}}}q�t|d��r�t�d�z,td�D]}|j
���sv�q�|��q`Wnttfk
�r�YnXt�dt|�|j�dS)	Nz.result handler got EOFError/OSError -- exitingz,result handler found thread._state=TERMINATEzresult handler got sentinelz&result handler ignoring extra sentinelr>z"ensuring that outqueue is not full�
z7result handler exiting: len(cache)=%s, thread._state=%s)rtr�r@r?rrAr_rr�r�r
r<r�r>�pollr�)rFr;r�r�rJrKrL�objrrrr:s\











�zPool._handle_resultsccs0t|�}tt�||��}|s dS||fVqdSr)r��tupler�islice)rM�it�sizer�rrrr�vs
zPool._get_taskscCstd��dS)Nz:pool objects cannot be passed between processes or pickled)�NotImplementedErrorrrrrr+s�zPool.__reduce__cCs2t�d�|jtkr.t|_t|j_|j�d�dS)Nzclosing pool)rrAr_rrryrer:rrrrr=�s


z
Pool.closecCst�d�t|_|��dS)Nzterminating pool)rrAr
r_r�rrrrrr�s
zPool.terminatecCsjt�d�|jtkrtd��n|jttfkr4td��|j��|j	��|j
��|jD]}|��qXdS)Nzjoining poolzPool is still runningzIn unknown state)rrAr_rrlrr
ryr(r~r�r^)rr�rrrr(�s






z	Pool.joincCs@t�d�|j��|��r<|j��r<|j��t�	d�qdS)Nz7removing tasks from inqueue until task handler finishedr)
rrAZ_rlock�acquire�is_aliver>r�r��time�sleep)rE�task_handlerr�rrr�_help_stuff_finish�s



zPool._help_stuff_finishc
CsXt�d�t|_|�d�t|_t�d�|�||t|��|��sXt|	�dkrXtd��t|_|�d�|�d�t�d�t	�
�|k	r�|��|r�t|dd�r�t�d�|D]}
|
j
dkr�|
��q�t�d�t	�
�|k	r�|��t�d	�t	�
�|k	�r|��|�rTt|dd��rTt�d
�|D](}
|
���r*t�d|
j�|
���q*dS)Nzfinalizing poolz&helping task handler/workers to finishrz.Cannot have cache with result_hander not alivezjoining worker handlerrrzterminating workerszjoining task handlerzjoining result handlerzjoining pool workersr�)rrAr
r_r:r�r�r�r8rtr�r(r<rqrr�pid)r�r�rErFr�r�Zworker_handlerr�Zresult_handlerr�r�rrrr��sB


�









zPool._terminate_poolcCs|��|Sr)r�rrrr�	__enter__�szPool.__enter__cCs|��dSr)rr)r�exc_typeZexc_valZexc_tbrrr�__exit__�sz
Pool.__exit__)NNrNN)N)N)NNN)r)r)NNN)NNN)N),r r!r"rx�staticmethodrYr�warnings�warnrr�r3rsr�r�rpr�r�rar�r�rrr�r�r�r�r�r�r�r��classmethodrur|rr�r+r=rrr(r�r�r�r�rrrrr�sv
�
P

	



�


�

�
�


-
;


5c@s@eZdZdd�Zdd�Zdd�Zddd	�Zdd
d�Zdd
�ZdS)r�cCs>||_t��|_tt�|_|j|_||_||_	||j|j<dSr)
r^rtZEvent�_event�next�job_counterr�rf�	_callback�_error_callback)rr�r�r�rrrr�s

zApplyResult.__init__cCs
|j��Sr)r�Zis_setrrrr�ready�szApplyResult.readycCs|��std�|���|jS)Nz{0!r} not ready)r�rlr9�_successrrrr�
successful�szApplyResult.successfulNcCs|j�|�dSr)r�r	�rr�rrrr	�szApplyResult.waitcCs,|�|�|��st�|jr"|jS|j�dSr)r	r�rr��_valuer�rrrr;�s
zApplyResult.getcCsZ|\|_|_|jr$|jr$|�|j�|jr<|js<|�|j�|j��|j|j=d|_dSr)	r�r�r�r�r��setrfr�r^�rrLr�rrrr�s

zApplyResult._set)N)N)	r r!r"rr�r�r	r;r�rrrrr��s	

	r�c@seZdZdd�Zdd�ZdS)r�cCshtj||||d�d|_dg||_||_|dkrNd|_|j��|j|j	=n||t
||�|_dS)Nr�Tr)r�rr�r��
_chunksize�_number_leftr�r�rfr��bool)rr�r��lengthr�r�rrrrs
�
zMapResult.__init__cCs�|jd8_|\}}|rv|jrv||j||j|d|j�<|jdkr�|jrZ|�|j�|j|j=|j��d|_	nL|s�|jr�d|_||_|jdkr�|j
r�|�
|j�|j|j=|j��d|_	dS)NrrF)r�r�r�r�r�rfr�r�r�r^r�)rrLZsuccess_result�successrOrrrr�$s&







zMapResult._setN)r r!r"rr�rrrrr�s
r�c@s:eZdZdd�Zdd�Zddd�ZeZdd	�Zd
d�ZdS)
r�cCsT||_t�t���|_tt�|_|j|_t	�
�|_d|_d|_
i|_||j|j<dSr�)r^rtZ	ConditionZLock�_condr�r�r�rf�collections�deque�_items�_index�_length�	_unsorted)rr�rrrrBs

zIMapIterator.__init__cCs|Srrrrrr�__iter__MszIMapIterator.__iter__NcCs�|j��z|j��}Wnztk
r�|j|jkr>d|_td�|j�|�z|j��}Wn2tk
r�|j|jkr�d|_td�t	d�YnXYnXW5QRX|\}}|r�|S|�dSr)
r�r��popleft�
IndexErrorr�rr^�
StopIterationr	r)rr�rWr�r/rrrr�Ps&zIMapIterator.nextc	Cs�|j��|j|krn|j�|�|jd7_|j|jkrb|j�|j�}|j�|�|jd7_q,|j��n
||j|<|j|jkr�|j|j	=d|_
W5QRXdS�Nr)r�r�r�r�r�pop�notifyrrfr�r^r�rrrr�hs


zIMapIterator._setc	CsB|j�2||_|j|jkr4|j��|j|j=d|_W5QRXdSr)r�rr�rrfr�r^)rr�rrrr�ys

zIMapIterator._set_length)N)	r r!r"rrr��__next__r�r�rrrrr�@s
r�c@seZdZdd�ZdS)r�c	CsV|j�F|j�|�|jd7_|j��|j|jkrH|j|j=d|_W5QRXdSr)	r�r�r�r�rrrfr�r^r�rrrr��s

zIMapUnorderedIterator._setN)r r!r"r�rrrrr��sr�c@sVeZdZdZedd��Zddd�Zdd	�Zd
d�Zedd
��Z	edd��Z
dd�ZdS)rFcOsddlm}|||�S)NrrX)ZdummyrY)rZrrNrYrrrrY�szThreadPool.ProcessNrcCst�||||�dSr)rr)rr�rGrHrrrr�szThreadPool.__init__cCs,t��|_t��|_|jj|_|jj|_dSr)rbrcrvrwr:r}r;r�rrrrra�s


zThreadPool._setup_queuescCs
|jjgSr)rer>rrrrrs�szThreadPool._get_sentinelscCsgSrrr�rrrr��sz ThreadPool._get_worker_sentinelscCsFz|jdd�qWntjk
r(YnXt|�D]}|�d�q2dS)NF)�block)r;rbrr�r:)rEr�r�rLrrrr��szThreadPool._help_stuff_finishcCst�|�dSr)r�r�)rr�r�r�rrrr��szThreadPool._wait_for_updates)NNr)r r!r"rxr�rYrrarsr�r�r�rrrrr�s




)NrNF))�__all__r�rrjrbrtr�r%r�rr$rrrZ
connectionr	r
rrr
�countr�rrrBrr#r*r-rRrC�dictrS�objectrr�ZAsyncResultr�r�r�rrrrr�<module>
sN	�
-=)+EPK{��\^kL|�b�b%__pycache__/connection.cpython-38.pycnu�[���U

&�.ep|�@sddddgZddlZddlZddlZddlZddlZddlZddlZddlZddl	Z	ddl
mZddl
mZm
Z
dd	lmZejZz$ddlZdd
lmZmZmZmZWn$ek
r�ejdkr‚dZYnXdZd
ZdZe��ZdZdgZe ed��rdZedg7Zejdk�rdZedg7Zefdd�Z!dd�Z"dd�Z#dd�Z$dd�Z%Gdd�d�Z&e�rnGdd�de&�Z'Gd d!�d!e&�Z(Gd"d�de)�Z*dPd#d�Z+ejdk�r�dQd%d�Z,n
dRd&d�Z,Gd'd(�d(e)�Z-d)d*�Z.ejdk�r�Gd+d,�d,e)�Z/d-d.�Z0d/Z1d0Z2d1Z3d2Z4d3d4�Z5d5d6�Z6Gd7d8�d8e)�Z7d9d:�Z8d;d<�Z9Gd=d>�d>e*�Z:d?d@�Z;ejdk�rzdAdB�Z<ej=ej>hZ?dSdCd�Z@n,ddlAZAe eAdD��r�eAjBZCneAjDZCdTdEd�Z@ejdk�r�dFdG�ZEdHdI�ZFe�Ge(eE�dJdK�ZHdLdM�ZIe�Ge'eH�ndNdG�ZEdOdI�ZFe�Ge(eE�dS)U�Client�Listener�Pipe�wait�N�)�util)�AuthenticationError�BufferTooShort)�	reduction)�
WAIT_OBJECT_0�WAIT_ABANDONED_0�WAIT_TIMEOUT�INFINITE�win32i g4@Zsha256�AF_INET�AF_UNIX�AF_PIPEcCst��|S�N��time�	monotonic)�timeout�r�2/usr/lib64/python3.8/multiprocessing/connection.py�
_init_timeout?srcCst��|kSrr)�trrr�_check_timeoutBsrcCsX|dkrdS|dkr&tjdt��d�S|dkrLtjdt��tt�fdd�Std	��d
S)z?
    Return an arbitrary free address for the given family
    r)Z	localhostrrz	listener-)�prefix�dirrz\\.\pipe\pyc-%d-%d-�zunrecognized familyN)	�tempfileZmktemprZget_temp_dir�os�getpid�next�
_mmap_counter�
ValueError��familyrrr�arbitrary_addressIs��r(cCsJtjdkr|dkrtd|��tjdkrF|dkrFtt|�sFtd|��dS)zD
    Checks if the family is valid for the current environment.
    rrzFamily %s is not recognized.rN)�sys�platformr%�hasattr�socketr&rrr�_validate_familyWs

r-cCsTt|�tkrdSt|�tkr*|�d�r*dSt|�tks@t�|�rDdStd|��dS)z]
    Return the types of the address

    This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
    rz\\rrzaddress type of %r unrecognizedN)�type�tuple�str�
startswithr�is_abstract_socket_namespacer%)�addressrrr�address_typecsr4c@s�eZdZdZd+dd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	e
dd��Ze
dd��Ze
dd��Z
dd�Zdd�Zd,dd�Zdd�Zd-dd�Zd.d d!�Zd"d#�Zd/d%d&�Zd'd(�Zd)d*�ZdS)0�_ConnectionBaseNTcCs>|��}|dkrtd��|s(|s(td��||_||_||_dS)Nrzinvalid handlez6at least one of `readable` and `writable` must be True)�	__index__r%�_handle�	_readable�	_writable)�self�handle�readable�writablerrr�__init__ys�z_ConnectionBase.__init__cCs|jdk	r|��dSr�r7�_close�r:rrr�__del__�s
z_ConnectionBase.__del__cCs|jdkrtd��dS)Nzhandle is closed)r7�OSErrorrArrr�
_check_closed�s
z_ConnectionBase._check_closedcCs|jstd��dS)Nzconnection is write-only)r8rCrArrr�_check_readable�sz_ConnectionBase._check_readablecCs|jstd��dS)Nzconnection is read-only)r9rCrArrr�_check_writable�sz_ConnectionBase._check_writablecCs"|jrd|_n|��td��dS)NFzbad message length)r9r8�closerCrArrr�_bad_message_length�sz#_ConnectionBase._bad_message_lengthcCs
|jdkS)z True if the connection is closedN�r7rArrr�closed�sz_ConnectionBase.closedcCs|jS)z"True if the connection is readable)r8rArrrr<�sz_ConnectionBase.readablecCs|jS)z"True if the connection is writable)r9rArrrr=�sz_ConnectionBase.writablecCs|��|jS)z+File descriptor or handle of the connection)rDr7rArrr�fileno�sz_ConnectionBase.filenocCs$|jdk	r z|��W5d|_XdS)zClose the connectionNr?rArrrrG�s
z_ConnectionBase.closercCs�|��|��t|�}|jdkr.tt|��}t|�}|dkrFtd��||krVtd��|dkrh||}n&|dkrztd��n|||kr�td��|�||||��dS)z,Send the bytes data from a bytes-like objectrrzoffset is negativezbuffer length < offsetNzsize is negativezbuffer length < offset + size)rDrF�
memoryview�itemsize�bytes�lenr%�_send_bytes)r:�buf�offset�size�m�nrrr�
send_bytes�s"


z_ConnectionBase.send_bytescCs$|��|��|�t�|��dS)zSend a (picklable) objectN)rDrFrP�_ForkingPickler�dumps�r:�objrrr�send�sz_ConnectionBase.sendcCsJ|��|��|dk	r(|dkr(td��|�|�}|dkrB|��|��S)z7
        Receive bytes data as a bytes object.
        Nrznegative maxlength)rDrEr%�_recv_bytesrH�getvalue)r:Z	maxlengthrQrrr�
recv_bytes�s
z_ConnectionBase.recv_bytesc
Cs�|��|��t|���}|j}|t|�}|dkr>td��n||krNtd��|��}|��}|||krvt|�	���|�
d�|�||||||��|W5QR�SQRXdS)zq
        Receive bytes data into a writeable bytes-like object.
        Return the number of bytes read.
        rznegative offsetzoffset too largeN)rDrErLrMrOr%r\�tellr	r]�seek�readinto)r:rQrRrTrMZbytesize�resultrSrrr�recv_bytes_into�s$



�z_ConnectionBase.recv_bytes_intocCs&|��|��|��}t�|���S)zReceive a (picklable) object)rDrEr\rW�loads�	getbuffer)r:rQrrr�recv�sz_ConnectionBase.recv�cCs|��|��|�|�S)z/Whether there is any input available to be read)rDrE�_poll�r:rrrr�pollsz_ConnectionBase.pollcCs|SrrrArrr�	__enter__sz_ConnectionBase.__enter__cCs|��dSr�rG�r:�exc_type�	exc_valueZexc_tbrrr�__exit__
sz_ConnectionBase.__exit__)TT)rN)N)r)rg)�__name__�
__module__�__qualname__r7r>rBrDrErFrH�propertyrJr<r=rKrGrVr[r^rcrfrjrkrprrrrr5vs.








r5c@sDeZdZdZdZejfdd�Zdd�Zddd	�Z	d
d�Z
dd
�ZdS)�PipeConnectionz�
        Connection class based on a Windows named pipe.
        Overlapped I/O is used, so the handles must have been created
        with FILE_FLAG_OVERLAPPED.
        FcCs||j�dSrrI)r:Z_CloseHandlerrrr@szPipeConnection._closec	Cs�tj|j|dd�\}}zHz,|tjkrBt�|jgdt�}|tksBt	�Wn|�
��YnXW5|�d�\}}X|dks|t	�|t|�ks�t	�dS)NT��
overlappedFr)�_winapiZ	WriteFiler7�GetOverlappedResult�ERROR_IO_PENDING�WaitForMultipleObjects�eventrr�AssertionError�cancelrO)r:rQ�ov�errZnwritten�waitresrrrrPs
�zPipeConnection._send_bytesNc	Cs2|jrd|_t��S|dkr dnt|d�}z�tj|j|dd�\}}dzHz,|tjkrpt�
|jgdt�}|tkspt�Wn|���YnXW5|�d�\}}|dkr�t��}|�|�	��|�WS|tj
kr�|�||��WSXWn:tk
�r$}z|jtjk�rt�n�W5d}~XYnXtd��dS)NF�Trvrz.shouldn't get here; expected KeyboardInterrupt)�_got_empty_message�io�BytesIO�minrx�ReadFiler7ry�writereZERROR_MORE_DATA�_get_more_datarzr{r|rrr}r~rC�winerror�ERROR_BROKEN_PIPE�EOFError�RuntimeError)	r:�maxsizeZbsizerr�Znread�fr��errrr\*s>
�

�
zPipeConnection._recv_bytescCs.|jst�|j�ddkrdStt|g|��S)NrT)r�rx�
PeekNamedPiper7�boolrrirrrrhJs
�zPipeConnection._pollcCs�|��}t��}|�|�t�|j�d}|dks6t�|dk	rVt|�||krV|�	�tj
|j|dd�\}}|�d�\}}|dks�t�||ks�t�|�|���|S)NrrTrv)rer�r�r�rxr�r7r}rOrHr�ry)r:rr�rQr��leftr�Zrbytesrrrr�Ps
zPipeConnection._get_more_data)N)rqrrrs�__doc__r�rx�CloseHandler@rPr\rhr�rrrrrus
 ruc@s|eZdZdZer,ejfdd�ZejZ	ej
Znej
fdd�ZejZ	ejZe	fdd�Zefdd�Zd	d
�Zddd
�Zdd�ZdS)�
Connectionzo
    Connection class based on an arbitrary file descriptor (Unix only), or
    a socket handle (Windows).
    cCs||j�dSrrI�r:r@rrrr@gszConnection._closecCs||j�dSrrIr�rrrr@lscCs8t|�}||j|�}||8}|dkr&q4||d�}qdS�Nr)rOr7)r:rQr��	remainingrUrrr�_sendqszConnection._sendcCsbt��}|j}|}|dkr^|||�}t|�}|dkrJ||krBt�ntd��|�|�||8}q|S)Nrzgot end of file during message)r�r�r7rOr�rCr�)r:rS�readrQr;r��chunkrUrrr�_recvzs


zConnection._recvcCs�t|�}|dkrHt�dd�}t�d|�}|�|�|�|�|�|�n8t�d|�}|dkrr|�|�|�|�n|�||�dS)Ni����!i����!Qi@)rO�structZpackr�)r:rQrUZ
pre_header�headerrrrrP�s


zConnection._send_bytesNcCs^|�d�}t�d|���\}|dkr@|�d�}t�d|���\}|dk	rT||krTdS|�|�S)N�r�r��r�)r�r�Zunpackr])r:r�rQrSrrrr\�s

zConnection._recv_bytescCst|g|�}t|�Sr)rr�)r:r�rrrrrh�szConnection._poll)N)rqrrrsr�rx�_multiprocessingZclosesocketr@r[Z_writerfZ_readr!rGr�r�r�r�rPr\rhrrrrr�`s	

r�c@sReZdZdZddd�Zdd�Zdd	�Zed
d��Zedd
��Z	dd�Z
dd�ZdS)rz�
    Returns a listener object.

    This is a wrapper for a bound socket which is 'listening' for
    connections, or for a Windows named pipe.
    NrcCsp|p|rt|�pt}|pt|�}t|�|dkr>t||�|_nt|||�|_|dk	rft|t�sft	d��||_
dS)Nr�authkey should be a byte string)r4�default_familyr(r-�PipeListener�	_listener�SocketListener�
isinstancerN�	TypeError�_authkey)r:r3r'�backlog�authkeyrrrr>�s�zListener.__init__cCs>|jdkrtd��|j��}|jr:t||j�t||j�|S)zz
        Accept a connection on the bound socket or named pipe of `self`.

        Returns a `Connection` object.
        Nzlistener is closed)r�rC�acceptr��deliver_challenge�answer_challenge)r:�crrrr��s

zListener.acceptcCs |j}|dk	rd|_|��dS)zA
        Close the bound socket or named pipe of `self`.
        N)r�rG)r:ZlistenerrrrrG�szListener.closecCs|jjSr)r��_addressrArrrr3�szListener.addresscCs|jjSr)r��_last_acceptedrArrr�
last_accepted�szListener.last_acceptedcCs|SrrrArrrrk�szListener.__enter__cCs|��dSrrlrmrrrrp�szListener.__exit__)NNrN)rqrrrsr�r>r�rGrtr3r�rkrprrrrr�s
	

cCsh|p
t|�}t|�|dkr&t|�}nt|�}|dk	rHt|t�sHtd��|dk	rdt||�t||�|S)z=
    Returns a connection to the address of a `Listener`
    rNr�)	r4r-�
PipeClient�SocketClientr�rNr�r�r�)r3r'r�r�rrrr�s


TcCsj|r>t��\}}|�d�|�d�t|���}t|���}n$t��\}}t|dd�}t|dd�}||fS)�L
        Returns pair of connection objects at either end of a pipe
        TF�r=�r<)r,Z
socketpair�setblockingr��detachr!�pipe)�duplex�s1�s2�c1�c2Zfd1Zfd2rrrrs

c

Cs�td�}|r*tj}tjtjB}tt}}ntj}tj}dt}}t�||tjBtj	Btj
tjBtjBd||tj
tj�}t�||dtjtjtjtj�}t�|tjdd�tj|dd�}|�d�\}	}
|
dks�t�t||d�}t||d�}||fS)	r�rrrNTrvr�r�)r(rx�PIPE_ACCESS_DUPLEX�GENERIC_READ�
GENERIC_WRITE�BUFSIZEZPIPE_ACCESS_INBOUND�CreateNamedPipe�FILE_FLAG_OVERLAPPED�FILE_FLAG_FIRST_PIPE_INSTANCE�PIPE_TYPE_MESSAGE�PIPE_READMODE_MESSAGE�	PIPE_WAIT�NMPWAIT_WAIT_FOREVER�NULL�
CreateFile�
OPEN_EXISTING�SetNamedPipeHandleState�ConnectNamedPiperyr}ru)
r�r3Zopenmode�accessZobsizeZibsizeZh1Zh2rw�_r�r�r�rrrrsV
�
��	��c@s*eZdZdZd
dd�Zdd�Zdd�Zd	S)r�zO
    Representation of a socket which is bound to an address and listening
    rcCs�t�tt|��|_zRtjdkr2|j�tjtjd�|j�d�|j�	|�|j�
|�|j��|_Wn t
k
r�|j���YnX||_d|_|dkr�t�|�s�tj|tj|fdd�|_nd|_dS)N�posixrTrr��argsZexitpriority)r,�getattr�_socketr!�nameZ
setsockoptZ
SOL_SOCKETZSO_REUSEADDRr�ZbindZlistenZgetsocknamer�rCrGZ_familyr�rr2�Finalize�unlink�_unlink)r:r3r'r�rrrr>Ks0

�
�
zSocketListener.__init__cCs&|j��\}|_|�d�t|���S)NT)r�r�r�r�r�r��r:�srrrr�ds
zSocketListener.acceptcCs0z|j��W5|j}|dk	r*d|_|�XdSr)r�r�rG)r:r�rrrrGiszSocketListener.closeN)r)rqrrrsr�r>r�rGrrrrr�Gs
r�c
CsPt|�}t�tt|���.}|�d�|�|�t|���W5QR�SQRXdS)zO
    Return a connection object connected to the socket given by `address`
    TN)r4r,r�r�Zconnectr�r�)r3r'r�rrrr�ss


r�c@s8eZdZdZddd�Zd
dd�Zdd	�Zed
d��ZdS)r�z0
        Representation of a named pipe
        NcCsL||_|jdd�g|_d|_t�d|j�tj|tj|j|jfdd�|_	dS)NT)�firstz listener created with address=%rrr�)
r��_new_handle�
_handle_queuer�r�	sub_debugr�r��_finalize_pipe_listenerrG)r:r3r�rrrr>�s
�zPipeListener.__init__Fc
CsHtjtjB}|r|tjO}t�|j|tjtjBtjBtj	t
t
tjtj�Sr)
rxr�r�r�r�r�r�r�r�ZPIPE_UNLIMITED_INSTANCESr�r�r�)r:r��flagsrrrr��s

��zPipeListener._new_handlec
Cs�|j�|���|j�d�}ztj|dd�}Wn0tk
r^}z|jtjkrN�W5d}~XYn\Xz<zt�|jgdt
�}Wn |��t�|��YnXW5|�	d�\}}|dks�t
�Xt|�S)NrTrvF)r��appendr��poprxr�rCr�Z
ERROR_NO_DATAryr}r{r|rr~r�ru)r:r;rr�r�r��resrrrr��s(�
zPipeListener.acceptcCs$t�d|�|D]}t�|�qdS)Nz closing listener with address=%r)rr�rxr�)Zqueuer3r;rrrr��sz$PipeListener._finalize_pipe_listener)N)F)	rqrrrsr�r>r�r��staticmethodr�rrrrr��s

r�c
Cs�t�}z6t�|d�t�|tjtjBdtjtjtjtj�}Wq�t	k
rz}z |j
tjtjfksht
|�rj�W5d}~XYqXq�q�t�|tjdd�t|�S)zU
        Return a connection object connected to the pipe given by `address`
        ��rN)rrxZ
WaitNamedPiper�r�r�r�r�r�rCr�ZERROR_SEM_TIMEOUTZERROR_PIPE_BUSYrr�r�ru)r3r�hr�rrrr��s8
����r��s#CHALLENGE#s	#WELCOME#s	#FAILURE#cCs�ddl}t|t�s$td�t|����t�t�}|�	t
|�|�||t��
�}|�d�}||krl|�	t�n|�	t�td��dS)Nr� Authkey must be bytes, not {0!s}�zdigest received was wrong)�hmacr�rNr%�formatr.r!�urandom�MESSAGE_LENGTHrV�	CHALLENGE�new�HMAC_DIGEST_NAME�digestr^�WELCOME�FAILUREr�Z
connectionr�r��messager�Zresponserrrr��s
�


r�cCs�ddl}t|t�s$td�t|����|�d�}|dtt��tksNt	d|��|tt�d�}|�
||t���}|�
|�|�d�}|tkr�td��dS)Nrr�r�zmessage = %rzdigest sent was rejected)r�r�rNr%r�r.r^rOr�r}r�r�r�rVr�rr�rrrr��s
�
 

r�c@s$eZdZdd�Zdd�Zdd�ZdS)�ConnectionWrappercCs6||_||_||_dD]}t||�}t|||�qdS)N)rKrGrjr^rV)�_conn�_dumps�_loadsr��setattr)r:�connrXrd�attrrZrrrr>s
zConnectionWrapper.__init__cCs|�|�}|j�|�dSr)r�r�rV)r:rZr�rrrr[	s
zConnectionWrapper.sendcCs|j��}|�|�Sr)r�r^rr�rrrrfs
zConnectionWrapper.recvN)rqrrrsr>r[rfrrrrr�sr�cCst�|fdddd��d�S)Nr�utf-8)�	xmlrpclibrX�encode)rZrrr�
_xml_dumpssrcCst�|�d��\\}}|S)Nr)rrd�decode)r�rZ�methodrrr�
_xml_loadssr
c@seZdZdd�ZdS)�XmlListenercCs"ddlmat�|�}t|tt�Sr�)�
xmlrpc.client�clientrrr�r�rr
rYrrrr�s
zXmlListener.acceptN)rqrrrsr�rrrrrsrcOsddlmatt||�tt�Sr�)rr
rr�rrr
)r��kwdsrrr�	XmlClientsrcCs�t|�}g}|r�t�|d|�}|tkr*q�n\t|krFtt|�krTnn
|t8}n2t|krptt|�kr~nn
|t8}ntd��|�||�||dd�}d}q|S)NFzShould not get hererr)	�listrxr{r
rrOrr�r�)Zhandlesr�L�readyr�rrr�_exhaustive_wait)s 
 
rc
s^|dkrt}n|dkrd}nt|dd�}t|�}i�g}t��t�}�z@|D�]&}zt|d�}	Wn tk
r�|�|��<YqPXzt	�|	�dd�\}}Wn8tk
r�}zd|j}}|tkrƂW5d}~XYnX|t	jkr�|�|�|�|j<qP|�rjt��dd�d	k�rjz|�d
�\}}Wn*tk
�rP}z
|j}W5d}~XYnX|�sjt
|d��rjd|_��|�d}qPt���|�}W5|D]}|���q�|D]�}z|�d�\}}Wn6tk
�r�}z|j}|tk�r�W5d}~XYnX|t	j
k�r��|j}��|�|dk�r�t
|d��r�d|_�q�X���fdd�|D���fd
d�|D�S)��
        Wait till an object in object_list is ready/readable.

        Returns list of those objects in object_list which are ready/readable.
        Nrr�g�?Tr�rK�)�rFc3s|]}�|VqdSrr)�.0r�)�waithandle_to_objrr�	<genexpr>�szwait.<locals>.<genexpr>csg|]}|�kr|�qSrr)r�o)�
ready_objectsrr�
<listcomp>�s�wait.<locals>.<listcomp>)r�intr�setr~ryrCr��
_ready_errorsrxZERROR_OPERATION_ABORTEDr|�addr+r�r��AttributeErrorr6r�rzr�r)Zgetwindowsversionr�keys�update)
�object_listrZov_listZ
ready_handlesrr�r�r�rrKr)rrrr?sh







�PollSelectorc
Cs�t���}|D]}|�|tj�q|dk	r4t��|}|�|�}|r\dd�|D�W5QR�S|dk	r4|t��}|dkr4|W5QR�Sq4W5QRXdS)rNcSsg|]\}}|j�qSr)Zfileobj)r�keyZeventsrrrr�srr)�
_WaitSelector�register�	selectorsZ
EVENT_READrrZselect)r%rZselectorrZZdeadlinerrrrr�s
c
CsZ|��}t�|tjtj��6}ddlm}|�|�}t||j	|j
ffW5QR�SQRXdS)Nr)�resource_sharer)rKr,ZfromfdrZSOCK_STREAMrr+Z	DupSocket�rebuild_connectionr<r=)rr;r�r+�dsrrr�reduce_connection�s

r.cCs|��}t|��||�Sr�r�r�)r-r<r=Zsockrrrr,�sr,cCsB|jrtjnd|jrtjndB}t�|��|�}t||j|jffSr�)	r<rxZFILE_GENERIC_READr=ZFILE_GENERIC_WRITEr
Z	DupHandlerK�rebuild_pipe_connection)rr��dhrrr�reduce_pipe_connection�s
�r2cCs|��}t|||�Sr)r�ru)r1r<r=r;rrrr0�sr0cCs t�|���}t||j|jffSr)r
ZDupFdrKr,r<r=)r�dfrrrr.�scCs|��}t|||�Srr/)r3r<r=�fdrrrr,�s)NN)T)T)N)N)J�__all__r�r!r)r,r�rr �	itertoolsr�rrrr	�contextr
ZForkingPicklerrWrxrrr
r�ImportErrorr*r�ZCONNECTION_TIMEOUTr��countr$r�Zfamiliesr+rrr(r-r4r5rur��objectrrrr�r�r�r�r�r�r�r�r�r�r�rr
rrrr�ZERROR_NETNAME_DELETEDr rr*r&r(ZSelectSelectorr.r,r)r2r0rrrr�<module>
s�



PT=

,,8	P
PK{��\g�=�Y�Y+__pycache__/connection.cpython-38.opt-2.pycnu�[���U

&�.ep|�@sddddgZddlZddlZddlZddlZddlZddlZddlZddlZddl	Z	ddl
mZddl
mZm
Z
dd	lmZejZz$ddlZdd
lmZmZmZmZWn$ek
r�ejdkr‚dZYnXdZd
ZdZe��ZdZdgZe ed��rdZedg7Zejdk�rdZedg7Zefdd�Z!dd�Z"dd�Z#dd�Z$dd�Z%Gdd�d�Z&e�rnGdd�de&�Z'Gd d!�d!e&�Z(Gd"d�de)�Z*dPd#d�Z+ejdk�r�dQd%d�Z,n
dRd&d�Z,Gd'd(�d(e)�Z-d)d*�Z.ejdk�r�Gd+d,�d,e)�Z/d-d.�Z0d/Z1d0Z2d1Z3d2Z4d3d4�Z5d5d6�Z6Gd7d8�d8e)�Z7d9d:�Z8d;d<�Z9Gd=d>�d>e*�Z:d?d@�Z;ejdk�rzdAdB�Z<ej=ej>hZ?dSdCd�Z@n,ddlAZAe eAdD��r�eAjBZCneAjDZCdTdEd�Z@ejdk�r�dFdG�ZEdHdI�ZFe�Ge(eE�dJdK�ZHdLdM�ZIe�Ge'eH�ndNdG�ZEdOdI�ZFe�Ge(eE�dS)U�Client�Listener�Pipe�wait�N�)�util)�AuthenticationError�BufferTooShort)�	reduction)�
WAIT_OBJECT_0�WAIT_ABANDONED_0�WAIT_TIMEOUT�INFINITE�win32i g4@Zsha256�AF_INET�AF_UNIX�AF_PIPEcCst��|S�N��time�	monotonic)�timeout�r�2/usr/lib64/python3.8/multiprocessing/connection.py�
_init_timeout?srcCst��|kSrr)�trrr�_check_timeoutBsrcCsX|dkrdS|dkr&tjdt��d�S|dkrLtjdt��tt�fdd�Std	��dS)
Nr)Z	localhostrrz	listener-)�prefix�dirrz\\.\pipe\pyc-%d-%d-�zunrecognized family)	�tempfileZmktemprZget_temp_dir�os�getpid�next�
_mmap_counter�
ValueError��familyrrr�arbitrary_addressIs��r(cCsJtjdkr|dkrtd|��tjdkrF|dkrFtt|�sFtd|��dS)NrrzFamily %s is not recognized.r)�sys�platformr%�hasattr�socketr&rrr�_validate_familyWs

r-cCsTt|�tkrdSt|�tkr*|�d�r*dSt|�tks@t�|�rDdStd|��dS)Nrz\\rrzaddress type of %r unrecognized)�type�tuple�str�
startswithr�is_abstract_socket_namespacer%)�addressrrr�address_typecsr4c@s�eZdZdZd+dd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	e
dd��Ze
dd��Ze
dd��Z
dd�Zdd�Zd,dd�Zdd�Zd-dd�Zd.d d!�Zd"d#�Zd/d%d&�Zd'd(�Zd)d*�ZdS)0�_ConnectionBaseNTcCs>|��}|dkrtd��|s(|s(td��||_||_||_dS)Nrzinvalid handlez6at least one of `readable` and `writable` must be True)�	__index__r%�_handle�	_readable�	_writable)�self�handle�readable�writablerrr�__init__ys�z_ConnectionBase.__init__cCs|jdk	r|��dSr�r7�_close�r:rrr�__del__�s
z_ConnectionBase.__del__cCs|jdkrtd��dS)Nzhandle is closed)r7�OSErrorrArrr�
_check_closed�s
z_ConnectionBase._check_closedcCs|jstd��dS)Nzconnection is write-only)r8rCrArrr�_check_readable�sz_ConnectionBase._check_readablecCs|jstd��dS)Nzconnection is read-only)r9rCrArrr�_check_writable�sz_ConnectionBase._check_writablecCs"|jrd|_n|��td��dS)NFzbad message length)r9r8�closerCrArrr�_bad_message_length�sz#_ConnectionBase._bad_message_lengthcCs
|jdkSr�r7rArrr�closed�sz_ConnectionBase.closedcCs|jSr)r8rArrrr<�sz_ConnectionBase.readablecCs|jSr)r9rArrrr=�sz_ConnectionBase.writablecCs|��|jSr)rDr7rArrr�fileno�sz_ConnectionBase.filenocCs$|jdk	r z|��W5d|_XdSrr?rArrrrG�s
z_ConnectionBase.closercCs�|��|��t|�}|jdkr.tt|��}t|�}|dkrFtd��||krVtd��|dkrh||}n&|dkrztd��n|||kr�td��|�||||��dS)Nrrzoffset is negativezbuffer length < offsetzsize is negativezbuffer length < offset + size)rDrF�
memoryview�itemsize�bytes�lenr%�_send_bytes)r:�buf�offset�size�m�nrrr�
send_bytes�s"


z_ConnectionBase.send_bytescCs$|��|��|�t�|��dSr)rDrFrP�_ForkingPickler�dumps�r:�objrrr�send�sz_ConnectionBase.sendcCsJ|��|��|dk	r(|dkr(td��|�|�}|dkrB|��|��S)Nrznegative maxlength)rDrEr%�_recv_bytesrH�getvalue)r:Z	maxlengthrQrrr�
recv_bytes�s
z_ConnectionBase.recv_bytesc
Cs�|��|��t|���}|j}|t|�}|dkr>td��n||krNtd��|��}|��}|||krvt|�	���|�
d�|�||||||��|W5QR�SQRXdS)Nrznegative offsetzoffset too large)rDrErLrMrOr%r\�tellr	r]�seek�readinto)r:rQrRrTrMZbytesize�resultrSrrr�recv_bytes_into�s$



�z_ConnectionBase.recv_bytes_intocCs&|��|��|��}t�|���Sr)rDrEr\rW�loads�	getbuffer)r:rQrrr�recv�sz_ConnectionBase.recv�cCs|��|��|�|�Sr)rDrE�_poll�r:rrrr�pollsz_ConnectionBase.pollcCs|SrrrArrr�	__enter__sz_ConnectionBase.__enter__cCs|��dSr�rG�r:�exc_type�	exc_valueZexc_tbrrr�__exit__
sz_ConnectionBase.__exit__)TT)rN)N)r)rg)�__name__�
__module__�__qualname__r7r>rBrDrErFrH�propertyrJr<r=rKrGrVr[r^rcrfrjrkrprrrrr5vs.








r5c@s@eZdZdZejfdd�Zdd�Zd
dd�Zd	d
�Z	dd�Z
dS)�PipeConnectionFcCs||j�dSrrI)r:Z_CloseHandlerrrr@szPipeConnection._closec	Cshtj|j|dd�\}}z<z |tjkr6t�|jgdt�}Wn|���YnXW5|�d�\}}XdS)NT��
overlappedF)	�_winapiZ	WriteFiler7�GetOverlappedResult�ERROR_IO_PENDING�WaitForMultipleObjects�eventr�cancel)r:rQ�ov�errZnwritten�waitresrrrrPs
�zPipeConnection._send_bytesNc	Cs&|jrd|_t��S|dkr dnt|d�}z�tj|j|dd�\}}dz<z |tjkrdt�
|jgdt�}Wn|���YnXW5|�d�\}}|dkr�t��}|�|�	��|�WS|tj
kr�|�||��WSXWn:tk
�r}z|jtjk�rt�n�W5d}~XYnXtd��dS)NF�Trvrz.shouldn't get here; expected KeyboardInterrupt)�_got_empty_message�io�BytesIO�minrx�ReadFiler7ry�writereZERROR_MORE_DATA�_get_more_datarzr{r|rr}rC�winerror�ERROR_BROKEN_PIPE�EOFError�RuntimeError)	r:�maxsizeZbsizer~rZnread�fr��errrr\*s>
�

�
zPipeConnection._recv_bytescCs.|jst�|j�ddkrdStt|g|��S)NrT)r�rx�
PeekNamedPiper7�boolrrirrrrhJs
�zPipeConnection._pollcCs�|��}t��}|�|�t�|j�d}|dk	rJt|�||krJ|��tj	|j|dd�\}}|�
d�\}}|�|���|S)NrTrv)rer�r�r�rxr�r7rOrHr�ry)r:r~r�rQr��leftrZrbytesrrrr�Ps
zPipeConnection._get_more_data)N)rqrrrsr�rx�CloseHandler@rPr\rhr�rrrrrus
 ruc@sxeZdZer(ejfdd�ZejZej	Z
nejfdd�Zej
ZejZ
efdd�Ze
fdd�Zdd	�Zddd�Zd
d�Zd
S)�
ConnectioncCs||j�dSrrI�r:r@rrrr@gszConnection._closecCs||j�dSrrIr�rrrr@lscCs8t|�}||j|�}||8}|dkr&q4||d�}qdS�Nr)rOr7)r:rQr��	remainingrUrrr�_sendqszConnection._sendcCsbt��}|j}|}|dkr^|||�}t|�}|dkrJ||krBt�ntd��|�|�||8}q|S)Nrzgot end of file during message)r�r�r7rOr�rCr�)r:rS�readrQr;r��chunkrUrrr�_recvzs


zConnection._recvcCs�t|�}|dkrHt�dd�}t�d|�}|�|�|�|�|�|�n8t�d|�}|dkrr|�|�|�|�n|�||�dS)Ni����!i����!Qi@)rO�structZpackr�)r:rQrUZ
pre_header�headerrrrrP�s


zConnection._send_bytesNcCs^|�d�}t�d|���\}|dkr@|�d�}t�d|���\}|dk	rT||krTdS|�|�S)N�r�r��r�)r�r�Zunpackr])r:r�rQrSrrrr\�s

zConnection._recv_bytescCst|g|�}t|�Sr)rr�)r:r�rrrrrh�szConnection._poll)N)rqrrrsrx�_multiprocessingZclosesocketr@r[Z_writerfZ_readr!rGr�r�r�r�rPr\rhrrrrr�`s	

r�c@sNeZdZddd�Zdd�Zdd�Zed	d
��Zedd��Zd
d�Z	dd�Z
dS)rNrcCsp|p|rt|�pt}|pt|�}t|�|dkr>t||�|_nt|||�|_|dk	rft|t�sft	d��||_
dS�Nrzauthkey should be a byte string)r4�default_familyr(r-�PipeListener�	_listener�SocketListener�
isinstancerN�	TypeError�_authkey)r:r3r'�backlog�authkeyrrrr>�s�zListener.__init__cCs>|jdkrtd��|j��}|jr:t||j�t||j�|S)Nzlistener is closed)r�rC�acceptr��deliver_challenge�answer_challenge)r:�crrrr��s

zListener.acceptcCs |j}|dk	rd|_|��dSr)r�rG)r:ZlistenerrrrrG�szListener.closecCs|jjSr)r��_addressrArrrr3�szListener.addresscCs|jjSr)r��_last_acceptedrArrr�
last_accepted�szListener.last_acceptedcCs|SrrrArrrrk�szListener.__enter__cCs|��dSrrlrmrrrrp�szListener.__exit__)NNrN)rqrrrsr>r�rGrtr3r�rkrprrrrr�s
	

cCsh|p
t|�}t|�|dkr&t|�}nt|�}|dk	rHt|t�sHtd��|dk	rdt||�t||�|Sr�)	r4r-�
PipeClient�SocketClientr�rNr�r�r�)r3r'r�r�rrrr�s


TcCsj|r>t��\}}|�d�|�d�t|���}t|���}n$t��\}}t|dd�}t|dd�}||fS)NTF�r=�r<)r,Z
socketpair�setblockingr��detachr!�pipe)�duplex�s1�s2�c1�c2Zfd1Zfd2rrrrs

c

Cs�td�}|r*tj}tjtjB}tt}}ntj}tj}dt}}t�||tjBtj	Btj
tjBtjBd||tj
tj�}t�||dtjtjtjtj�}t�|tjdd�tj|dd�}|�d�\}	}
t||d�}t||d�}||fS)NrrrTrvr�r�)r(rx�PIPE_ACCESS_DUPLEX�GENERIC_READ�
GENERIC_WRITE�BUFSIZEZPIPE_ACCESS_INBOUND�CreateNamedPipe�FILE_FLAG_OVERLAPPED�FILE_FLAG_FIRST_PIPE_INSTANCE�PIPE_TYPE_MESSAGE�PIPE_READMODE_MESSAGE�	PIPE_WAIT�NMPWAIT_WAIT_FOREVER�NULL�
CreateFile�
OPEN_EXISTING�SetNamedPipeHandleState�ConnectNamedPiperyru)
r�r3Zopenmode�accessZobsizeZibsizeZh1Zh2rw�_rr�r�rrrrsT
�
��	��c@s&eZdZd	dd�Zdd�Zdd�ZdS)
r�rcCs�t�tt|��|_zRtjdkr2|j�tjtjd�|j�d�|j�	|�|j�
|�|j��|_Wn t
k
r�|j���YnX||_d|_|dkr�t�|�s�tj|tj|fdd�|_nd|_dS)N�posixrTrr��argsZexitpriority)r,�getattr�_socketr!�nameZ
setsockoptZ
SOL_SOCKETZSO_REUSEADDRr�ZbindZlistenZgetsocknamer�rCrGZ_familyr�rr2�Finalize�unlink�_unlink)r:r3r'r�rrrr>Ks0

�
�
zSocketListener.__init__cCs&|j��\}|_|�d�t|���S�NT)r�r�r�r�r�r��r:�srrrr�ds
zSocketListener.acceptcCs0z|j��W5|j}|dk	r*d|_|�XdSr)r�r�rG)r:r�rrrrGiszSocketListener.closeN)r)rqrrrsr>r�rGrrrrr�Gs
r�c
CsPt|�}t�tt|���.}|�d�|�|�t|���W5QR�SQRXdSr�)r4r,r�r�Zconnectr�r�)r3r'r�rrrr�ss


r�c@s4eZdZddd�Zddd�Zdd�Zed	d
��ZdS)
r�NcCsL||_|jdd�g|_d|_t�d|j�tj|tj|j|jfdd�|_	dS)NT)�firstz listener created with address=%rrr�)
r��_new_handle�
_handle_queuer�r�	sub_debugr�r��_finalize_pipe_listenerrG)r:r3r�rrrr>�s
�zPipeListener.__init__Fc
CsHtjtjB}|r|tjO}t�|j|tjtjBtjBtj	t
t
tjtj�Sr)
rxr�r�r�r�r�r�r�r�ZPIPE_UNLIMITED_INSTANCESr�r�r�)r:r��flagsrrrr��s

��zPipeListener._new_handlec
Cs�|j�|���|j�d�}ztj|dd�}Wn0tk
r^}z|jtjkrN�W5d}~XYnPXz<zt�
|jgdt�}Wn |�
�t�|��YnXW5|�	d�\}}Xt|�S)NrTrvF)r��appendr��poprxr�rCr�Z
ERROR_NO_DATAryr{r|rr}r�ru)r:r;r~r�r�r�resrrrr��s(�
zPipeListener.acceptcCs$t�d|�|D]}t�|�qdS)Nz closing listener with address=%r)rr�rxr�)Zqueuer3r;rrrr��sz$PipeListener._finalize_pipe_listener)N)F)rqrrrsr>r�r��staticmethodr�rrrrr��s


r�c
Cs�t�}z6t�|d�t�|tjtjBdtjtjtjtj�}Wq�t	k
rz}z |j
tjtjfksht
|�rj�W5d}~XYqXq�q�t�|tjdd�t|�S)N��r)rrxZ
WaitNamedPiper�r�r�r�r�r�rCr�ZERROR_SEM_TIMEOUTZERROR_PIPE_BUSYrr�r�ru)r3r�hr�rrrr��s8
����r��s#CHALLENGE#s	#WELCOME#s	#FAILURE#cCs�ddl}t|t�s$td�t|����t�t�}|�	t
|�|�||t��
�}|�d�}||krl|�	t�n|�	t�td��dS)Nr� Authkey must be bytes, not {0!s}�zdigest received was wrong)�hmacr�rNr%�formatr.r!�urandom�MESSAGE_LENGTHrV�	CHALLENGE�new�HMAC_DIGEST_NAME�digestr^�WELCOME�FAILUREr�Z
connectionr�r��messager�Zresponserrrr��s
�


r�cCsxddl}t|t�s$td�t|����|�d�}|tt�d�}|�	||t
���}|�|�|�d�}|t
krttd��dS)Nrr�r�zdigest sent was rejected)r�r�rNr%r�r.r^rOr�r�r�r�rVr�rr�rrrr��s
�


r�c@s$eZdZdd�Zdd�Zdd�ZdS)�ConnectionWrappercCs6||_||_||_dD]}t||�}t|||�qdS)N)rKrGrjr^rV)�_conn�_dumps�_loadsr��setattr)r:�connrXrd�attrrZrrrr>s
zConnectionWrapper.__init__cCs|�|�}|j�|�dSr)r�r�rV)r:rZr�rrrr[	s
zConnectionWrapper.sendcCs|j��}|�|�Sr)r�r^r�r�rrrrfs
zConnectionWrapper.recvN)rqrrrsr>r[rfrrrrr�sr�cCst�|fdddd��d�S)Nr�utf-8)�	xmlrpclibrX�encode)rZrrr�
_xml_dumpssrcCst�|�d��\\}}|S)Nr)rrd�decode)r�rZ�methodrrr�
_xml_loadssrc@seZdZdd�ZdS)�XmlListenercCs"ddlmat�|�}t|tt�Sr�)�
xmlrpc.client�clientrrr�r�rrrYrrrr�s
zXmlListener.acceptN)rqrrrsr�rrrrr	sr	cOsddlmatt||�tt�Sr�)r
rrr�rrr)r��kwdsrrr�	XmlClientsr
cCs�t|�}g}|r�t�|d|�}|tkr*q�n\t|krFtt|�krTnn
|t8}n2t|krptt|�kr~nn
|t8}ntd��|�||�||dd�}d}q|S)NFzShould not get hererr)	�listrxr{r
rrOrr�r�)Zhandlesr�L�readyr�rrr�_exhaustive_wait)s 
 
rc
s^|dkrt}n|dkrd}nt|dd�}t|�}i�g}t��t�}�z@|D�]&}zt|d�}	Wn tk
r�|�|��<YqPXzt	�|	�dd�\}}Wn8tk
r�}zd|j}}|tkrƂW5d}~XYnX|t	jkr�|�|�|�|j<qP|�rjt��dd�dk�rjz|�d	�\}}Wn*tk
�rP}z
|j}W5d}~XYnX|�sjt
|d��rjd|_��|�d}qPt���|�}W5|D]}|���q�|D]�}z|�d�\}}Wn6tk
�r�}z|j}|tk�r�W5d}~XYnX|t	j
k�r��|j}��|�|dk�r�t
|d��r�d|_�q�X���fd
d�|D���fdd
�|D�S)Nrr�g�?Tr�rK�)�rFc3s|]}�|VqdSrr)�.0r�)�waithandle_to_objrr�	<genexpr>�szwait.<locals>.<genexpr>csg|]}|�kr|�qSrr)r�o)�
ready_objectsrr�
<listcomp>�s�wait.<locals>.<listcomp>)r�intr�setr}ryrCr��
_ready_errorsrxZERROR_OPERATION_ABORTEDr|�addr+r�r��AttributeErrorr6r�rzr�r)Zgetwindowsversionr�keys�update)
�object_listrZov_listZ
ready_handlesr~r�rr�rrKr)rrrr?sh







�PollSelectorc
Cs�t���}|D]}|�|tj�q|dk	r4t��|}|�|�}|r\dd�|D�W5QR�S|dk	r4|t��}|dkr4|W5QR�Sq4W5QRXdS)NcSsg|]\}}|j�qSr)Zfileobj)r�keyZeventsrrrr�srr)�
_WaitSelector�register�	selectorsZ
EVENT_READrrZselect)r"rZselectorrZZdeadlinerrrrr�s
c
CsZ|��}t�|tjtj��6}ddlm}|�|�}t||j	|j
ffW5QR�SQRXdS)Nr)�resource_sharer)rKr,ZfromfdrZSOCK_STREAMrr(Z	DupSocket�rebuild_connectionr<r=)rr;r�r(�dsrrr�reduce_connection�s

r+cCs|��}t|��||�Sr�r�r�)r*r<r=Zsockrrrr)�sr)cCsB|jrtjnd|jrtjndB}t�|��|�}t||j|jffSr�)	r<rxZFILE_GENERIC_READr=ZFILE_GENERIC_WRITEr
Z	DupHandlerK�rebuild_pipe_connection)rr��dhrrr�reduce_pipe_connection�s
�r/cCs|��}t|||�Sr)r�ru)r.r<r=r;rrrr-�sr-cCs t�|���}t||j|jffSr)r
ZDupFdrKr)r<r=)r�dfrrrr+�scCs|��}t|||�Srr,)r0r<r=�fdrrrr)�s)NN)T)T)N)N)J�__all__r�r!r)r,r�rr �	itertoolsr�rrrr	�contextr
ZForkingPicklerrWrxrrr
r�ImportErrorr*r�ZCONNECTION_TIMEOUTr��countr$r�Zfamiliesr+rrr(r-r4r5rur��objectrrrr�r�r�r�r�r�r�r�r�r�r�rrr	r
rr�ZERROR_NETNAME_DELETEDrrr'r#r%ZSelectSelectorr+r)r&r/r-rrrr�<module>
s�



PT=

,,8	P
PK{��\C_�

2__pycache__/popen_spawn_win32.cpython-38.opt-2.pycnu�[���U

e5d��@s�ddlZddlZddlZddlZddlZddlmZmZmZddl	m
Z
ddl	mZdgZdZ
ejdkoreed	d
�Zej���d�Zdd
�Zeejej�Zdd�ZGdd�de�ZdS)�N�)�	reduction�get_spawning_popen�set_spawning_popen)�spawn)�util�PopeniZwin32�frozenFzpythonservice.execCs ||kptj�|�tj�|�kS�N)�os�path�normcase)Zp1Zp2�r�9/usr/lib64/python3.8/multiprocessing/popen_spawn_win32.py�_path_eqsrcGs|D]}t�|�qdSr
)�_winapi�CloseHandle)Zhandles�handlerrr�_close_handlessrc@sFeZdZdZdd�Zdd�Zddd�Zd	d
�Zdd�ZeZ	d
d�Z
dS)rrcCsTt�|j�}t�dd�\}}t�|d�}tjt�	�|d�}d�
dd�|D��}t��}tr�t
|tj�r�tj}tj��}tj|d<nd}t|ddd	���}	z0t�||ddd
d|dd�	\}
}}}
t�|�Wnt�|��YnX||_d|_|
|_t|
�|_t�|t|jt|�f�|_t|�zt �!||	�t �!||	�W5td�XW5QRXdS)Nr)Z
parent_pidZpipe_handle� css|]}d|VqdS)z"%s"Nr)�.0�xrrr�	<genexpr>9sz!Popen.__init__.<locals>.<genexpr>�__PYVENV_LAUNCHER__�wbT)�closefdF)"rZget_preparation_data�_namerZ
CreatePipe�msvcrtZopen_osfhandleZget_command_liner�getpid�joinZget_executable�WINENVr�sys�
executable�_base_executable�environ�copy�openZ
CreateProcessr�pid�
returncode�_handle�int�sentinelrZFinalizer�	finalizerrr�dump)�selfZprocess_objZ	prep_dataZrhandleZwhandleZwfd�cmdZ
python_exe�envZto_childZhpZhtr'�tidrrr�__init__,sT
�
�

�zPopen.__init__cCst�||j�Sr
)rZ	duplicater+)r.rrrr�duplicate_for_childaszPopen.duplicate_for_childNcCst|jdkrn|dkrtj}ntdt|dd��}t�t|j�|�}|tjkrnt�|j�}|t	krht
j}||_|jS)Nri�g�?)r(rZINFINITE�maxr*ZWaitForSingleObjectr)Z
WAIT_OBJECT_0ZGetExitCodeProcess�	TERMINATE�signal�SIGTERM)r.�timeoutZmsecs�res�coderrr�waites

z
Popen.waitcCs|jdd�S)Nr�r8)r;�r.rrr�pollusz
Popen.pollcCsL|jdkrHzt�t|j�t�Wn&tk
rF|jdd�dkrB�YnXdS)Ng�?r<)r(rZTerminateProcessr*r)r5�OSErrorr;r=rrr�	terminatexs
zPopen.terminatecCs|��dSr
)r,r=rrr�close�szPopen.close)N)�__name__�
__module__�__qualname__�methodr2r3r;r>r@�killrArrrrr&s5
)rrr6r!r�contextrrr�rr�__all__r5�platform�getattrZWINEXEr"�lower�endswithZ
WINSERVICErr#r r�objectrrrrr�<module>s
PK{��\��N�a�a+__pycache__/connection.cpython-38.opt-1.pycnu�[���U

&�.ep|�@sddddgZddlZddlZddlZddlZddlZddlZddlZddlZddl	Z	ddl
mZddl
mZm
Z
dd	lmZejZz$ddlZdd
lmZmZmZmZWn$ek
r�ejdkr‚dZYnXdZd
ZdZe��ZdZdgZe ed��rdZedg7Zejdk�rdZedg7Zefdd�Z!dd�Z"dd�Z#dd�Z$dd�Z%Gdd�d�Z&e�rnGdd�de&�Z'Gd d!�d!e&�Z(Gd"d�de)�Z*dPd#d�Z+ejdk�r�dQd%d�Z,n
dRd&d�Z,Gd'd(�d(e)�Z-d)d*�Z.ejdk�r�Gd+d,�d,e)�Z/d-d.�Z0d/Z1d0Z2d1Z3d2Z4d3d4�Z5d5d6�Z6Gd7d8�d8e)�Z7d9d:�Z8d;d<�Z9Gd=d>�d>e*�Z:d?d@�Z;ejdk�rzdAdB�Z<ej=ej>hZ?dSdCd�Z@n,ddlAZAe eAdD��r�eAjBZCneAjDZCdTdEd�Z@ejdk�r�dFdG�ZEdHdI�ZFe�Ge(eE�dJdK�ZHdLdM�ZIe�Ge'eH�ndNdG�ZEdOdI�ZFe�Ge(eE�dS)U�Client�Listener�Pipe�wait�N�)�util)�AuthenticationError�BufferTooShort)�	reduction)�
WAIT_OBJECT_0�WAIT_ABANDONED_0�WAIT_TIMEOUT�INFINITE�win32i g4@Zsha256�AF_INET�AF_UNIX�AF_PIPEcCst��|S�N��time�	monotonic)�timeout�r�2/usr/lib64/python3.8/multiprocessing/connection.py�
_init_timeout?srcCst��|kSrr)�trrr�_check_timeoutBsrcCsX|dkrdS|dkr&tjdt��d�S|dkrLtjdt��tt�fdd�Std	��d
S)z?
    Return an arbitrary free address for the given family
    r)Z	localhostrrz	listener-)�prefix�dirrz\\.\pipe\pyc-%d-%d-�zunrecognized familyN)	�tempfileZmktemprZget_temp_dir�os�getpid�next�
_mmap_counter�
ValueError��familyrrr�arbitrary_addressIs��r(cCsJtjdkr|dkrtd|��tjdkrF|dkrFtt|�sFtd|��dS)zD
    Checks if the family is valid for the current environment.
    rrzFamily %s is not recognized.rN)�sys�platformr%�hasattr�socketr&rrr�_validate_familyWs

r-cCsTt|�tkrdSt|�tkr*|�d�r*dSt|�tks@t�|�rDdStd|��dS)z]
    Return the types of the address

    This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
    rz\\rrzaddress type of %r unrecognizedN)�type�tuple�str�
startswithr�is_abstract_socket_namespacer%)�addressrrr�address_typecsr4c@s�eZdZdZd+dd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	e
dd��Ze
dd��Ze
dd��Z
dd�Zdd�Zd,dd�Zdd�Zd-dd�Zd.d d!�Zd"d#�Zd/d%d&�Zd'd(�Zd)d*�ZdS)0�_ConnectionBaseNTcCs>|��}|dkrtd��|s(|s(td��||_||_||_dS)Nrzinvalid handlez6at least one of `readable` and `writable` must be True)�	__index__r%�_handle�	_readable�	_writable)�self�handle�readable�writablerrr�__init__ys�z_ConnectionBase.__init__cCs|jdk	r|��dSr�r7�_close�r:rrr�__del__�s
z_ConnectionBase.__del__cCs|jdkrtd��dS)Nzhandle is closed)r7�OSErrorrArrr�
_check_closed�s
z_ConnectionBase._check_closedcCs|jstd��dS)Nzconnection is write-only)r8rCrArrr�_check_readable�sz_ConnectionBase._check_readablecCs|jstd��dS)Nzconnection is read-only)r9rCrArrr�_check_writable�sz_ConnectionBase._check_writablecCs"|jrd|_n|��td��dS)NFzbad message length)r9r8�closerCrArrr�_bad_message_length�sz#_ConnectionBase._bad_message_lengthcCs
|jdkS)z True if the connection is closedN�r7rArrr�closed�sz_ConnectionBase.closedcCs|jS)z"True if the connection is readable)r8rArrrr<�sz_ConnectionBase.readablecCs|jS)z"True if the connection is writable)r9rArrrr=�sz_ConnectionBase.writablecCs|��|jS)z+File descriptor or handle of the connection)rDr7rArrr�fileno�sz_ConnectionBase.filenocCs$|jdk	r z|��W5d|_XdS)zClose the connectionNr?rArrrrG�s
z_ConnectionBase.closercCs�|��|��t|�}|jdkr.tt|��}t|�}|dkrFtd��||krVtd��|dkrh||}n&|dkrztd��n|||kr�td��|�||||��dS)z,Send the bytes data from a bytes-like objectrrzoffset is negativezbuffer length < offsetNzsize is negativezbuffer length < offset + size)rDrF�
memoryview�itemsize�bytes�lenr%�_send_bytes)r:�buf�offset�size�m�nrrr�
send_bytes�s"


z_ConnectionBase.send_bytescCs$|��|��|�t�|��dS)zSend a (picklable) objectN)rDrFrP�_ForkingPickler�dumps�r:�objrrr�send�sz_ConnectionBase.sendcCsJ|��|��|dk	r(|dkr(td��|�|�}|dkrB|��|��S)z7
        Receive bytes data as a bytes object.
        Nrznegative maxlength)rDrEr%�_recv_bytesrH�getvalue)r:Z	maxlengthrQrrr�
recv_bytes�s
z_ConnectionBase.recv_bytesc
Cs�|��|��t|���}|j}|t|�}|dkr>td��n||krNtd��|��}|��}|||krvt|�	���|�
d�|�||||||��|W5QR�SQRXdS)zq
        Receive bytes data into a writeable bytes-like object.
        Return the number of bytes read.
        rznegative offsetzoffset too largeN)rDrErLrMrOr%r\�tellr	r]�seek�readinto)r:rQrRrTrMZbytesize�resultrSrrr�recv_bytes_into�s$



�z_ConnectionBase.recv_bytes_intocCs&|��|��|��}t�|���S)zReceive a (picklable) object)rDrEr\rW�loads�	getbuffer)r:rQrrr�recv�sz_ConnectionBase.recv�cCs|��|��|�|�S)z/Whether there is any input available to be read)rDrE�_poll�r:rrrr�pollsz_ConnectionBase.pollcCs|SrrrArrr�	__enter__sz_ConnectionBase.__enter__cCs|��dSr�rG�r:�exc_type�	exc_valueZexc_tbrrr�__exit__
sz_ConnectionBase.__exit__)TT)rN)N)r)rg)�__name__�
__module__�__qualname__r7r>rBrDrErFrH�propertyrJr<r=rKrGrVr[r^rcrfrjrkrprrrrr5vs.








r5c@sDeZdZdZdZejfdd�Zdd�Zddd	�Z	d
d�Z
dd
�ZdS)�PipeConnectionz�
        Connection class based on a Windows named pipe.
        Overlapped I/O is used, so the handles must have been created
        with FILE_FLAG_OVERLAPPED.
        FcCs||j�dSrrI)r:Z_CloseHandlerrrr@szPipeConnection._closec	Cshtj|j|dd�\}}z<z |tjkr6t�|jgdt�}Wn|���YnXW5|�d�\}}XdS)NT��
overlappedF)	�_winapiZ	WriteFiler7�GetOverlappedResult�ERROR_IO_PENDING�WaitForMultipleObjects�eventr�cancel)r:rQ�ov�errZnwritten�waitresrrrrPs
�zPipeConnection._send_bytesNc	Cs&|jrd|_t��S|dkr dnt|d�}z�tj|j|dd�\}}dz<z |tjkrdt�
|jgdt�}Wn|���YnXW5|�d�\}}|dkr�t��}|�|�	��|�WS|tj
kr�|�||��WSXWn:tk
�r}z|jtjk�rt�n�W5d}~XYnXtd��dS)NF�Trvrz.shouldn't get here; expected KeyboardInterrupt)�_got_empty_message�io�BytesIO�minrx�ReadFiler7ry�writereZERROR_MORE_DATA�_get_more_datarzr{r|rr}rC�winerror�ERROR_BROKEN_PIPE�EOFError�RuntimeError)	r:�maxsizeZbsizer~rZnread�fr��errrr\*s>
�

�
zPipeConnection._recv_bytescCs.|jst�|j�ddkrdStt|g|��S)NrT)r�rx�
PeekNamedPiper7�boolrrirrrrhJs
�zPipeConnection._pollcCs�|��}t��}|�|�t�|j�d}|dk	rJt|�||krJ|��tj	|j|dd�\}}|�
d�\}}|�|���|S)NrTrv)rer�r�r�rxr�r7rOrHr�ry)r:r~r�rQr��leftrZrbytesrrrr�Ps
zPipeConnection._get_more_data)N)rqrrrs�__doc__r�rx�CloseHandler@rPr\rhr�rrrrrus
 ruc@s|eZdZdZer,ejfdd�ZejZ	ej
Znej
fdd�ZejZ	ejZe	fdd�Zefdd�Zd	d
�Zddd
�Zdd�ZdS)�
Connectionzo
    Connection class based on an arbitrary file descriptor (Unix only), or
    a socket handle (Windows).
    cCs||j�dSrrI�r:r@rrrr@gszConnection._closecCs||j�dSrrIr�rrrr@lscCs8t|�}||j|�}||8}|dkr&q4||d�}qdS�Nr)rOr7)r:rQr��	remainingrUrrr�_sendqszConnection._sendcCsbt��}|j}|}|dkr^|||�}t|�}|dkrJ||krBt�ntd��|�|�||8}q|S)Nrzgot end of file during message)r�r�r7rOr�rCr�)r:rS�readrQr;r��chunkrUrrr�_recvzs


zConnection._recvcCs�t|�}|dkrHt�dd�}t�d|�}|�|�|�|�|�|�n8t�d|�}|dkrr|�|�|�|�n|�||�dS)Ni����!i����!Qi@)rO�structZpackr�)r:rQrUZ
pre_header�headerrrrrP�s


zConnection._send_bytesNcCs^|�d�}t�d|���\}|dkr@|�d�}t�d|���\}|dk	rT||krTdS|�|�S)N�r�r��r�)r�r�Zunpackr])r:r�rQrSrrrr\�s

zConnection._recv_bytescCst|g|�}t|�Sr)rr�)r:r�rrrrrh�szConnection._poll)N)rqrrrsr�rx�_multiprocessingZclosesocketr@r[Z_writerfZ_readr!rGr�r�r�r�rPr\rhrrrrr�`s	

r�c@sReZdZdZddd�Zdd�Zdd	�Zed
d��Zedd
��Z	dd�Z
dd�ZdS)rz�
    Returns a listener object.

    This is a wrapper for a bound socket which is 'listening' for
    connections, or for a Windows named pipe.
    NrcCsp|p|rt|�pt}|pt|�}t|�|dkr>t||�|_nt|||�|_|dk	rft|t�sft	d��||_
dS)Nr�authkey should be a byte string)r4�default_familyr(r-�PipeListener�	_listener�SocketListener�
isinstancerN�	TypeError�_authkey)r:r3r'�backlog�authkeyrrrr>�s�zListener.__init__cCs>|jdkrtd��|j��}|jr:t||j�t||j�|S)zz
        Accept a connection on the bound socket or named pipe of `self`.

        Returns a `Connection` object.
        Nzlistener is closed)r�rC�acceptr��deliver_challenge�answer_challenge)r:�crrrr��s

zListener.acceptcCs |j}|dk	rd|_|��dS)zA
        Close the bound socket or named pipe of `self`.
        N)r�rG)r:ZlistenerrrrrG�szListener.closecCs|jjSr)r��_addressrArrrr3�szListener.addresscCs|jjSr)r��_last_acceptedrArrr�
last_accepted�szListener.last_acceptedcCs|SrrrArrrrk�szListener.__enter__cCs|��dSrrlrmrrrrp�szListener.__exit__)NNrN)rqrrrsr�r>r�rGrtr3r�rkrprrrrr�s
	

cCsh|p
t|�}t|�|dkr&t|�}nt|�}|dk	rHt|t�sHtd��|dk	rdt||�t||�|S)z=
    Returns a connection to the address of a `Listener`
    rNr�)	r4r-�
PipeClient�SocketClientr�rNr�r�r�)r3r'r�r�rrrr�s


TcCsj|r>t��\}}|�d�|�d�t|���}t|���}n$t��\}}t|dd�}t|dd�}||fS)�L
        Returns pair of connection objects at either end of a pipe
        TF�r=�r<)r,Z
socketpair�setblockingr��detachr!�pipe)�duplex�s1�s2�c1�c2Zfd1Zfd2rrrrs

c

Cs�td�}|r*tj}tjtjB}tt}}ntj}tj}dt}}t�||tjBtj	Btj
tjBtjBd||tj
tj�}t�||dtjtjtjtj�}t�|tjdd�tj|dd�}|�d�\}	}
t||d�}t||d�}||fS)	r�rrrNTrvr�r�)r(rx�PIPE_ACCESS_DUPLEX�GENERIC_READ�
GENERIC_WRITE�BUFSIZEZPIPE_ACCESS_INBOUND�CreateNamedPipe�FILE_FLAG_OVERLAPPED�FILE_FLAG_FIRST_PIPE_INSTANCE�PIPE_TYPE_MESSAGE�PIPE_READMODE_MESSAGE�	PIPE_WAIT�NMPWAIT_WAIT_FOREVER�NULL�
CreateFile�
OPEN_EXISTING�SetNamedPipeHandleState�ConnectNamedPiperyru)
r�r3Zopenmode�accessZobsizeZibsizeZh1Zh2rw�_rr�r�rrrrsT
�
��	��c@s*eZdZdZd
dd�Zdd�Zdd�Zd	S)r�zO
    Representation of a socket which is bound to an address and listening
    rcCs�t�tt|��|_zRtjdkr2|j�tjtjd�|j�d�|j�	|�|j�
|�|j��|_Wn t
k
r�|j���YnX||_d|_|dkr�t�|�s�tj|tj|fdd�|_nd|_dS)N�posixrTrr��argsZexitpriority)r,�getattr�_socketr!�nameZ
setsockoptZ
SOL_SOCKETZSO_REUSEADDRr�ZbindZlistenZgetsocknamer�rCrGZ_familyr�rr2�Finalize�unlink�_unlink)r:r3r'r�rrrr>Ks0

�
�
zSocketListener.__init__cCs&|j��\}|_|�d�t|���S)NT)r�r�r�r�r�r��r:�srrrr�ds
zSocketListener.acceptcCs0z|j��W5|j}|dk	r*d|_|�XdSr)r�r�rG)r:r�rrrrGiszSocketListener.closeN)r)rqrrrsr�r>r�rGrrrrr�Gs
r�c
CsPt|�}t�tt|���.}|�d�|�|�t|���W5QR�SQRXdS)zO
    Return a connection object connected to the socket given by `address`
    TN)r4r,r�r�Zconnectr�r�)r3r'r�rrrr�ss


r�c@s8eZdZdZddd�Zd
dd�Zdd	�Zed
d��ZdS)r�z0
        Representation of a named pipe
        NcCsL||_|jdd�g|_d|_t�d|j�tj|tj|j|jfdd�|_	dS)NT)�firstz listener created with address=%rrr�)
r��_new_handle�
_handle_queuer�r�	sub_debugr�r��_finalize_pipe_listenerrG)r:r3r�rrrr>�s
�zPipeListener.__init__Fc
CsHtjtjB}|r|tjO}t�|j|tjtjBtjBtj	t
t
tjtj�Sr)
rxr�r�r�r�r�r�r�r�ZPIPE_UNLIMITED_INSTANCESr�r�r�)r:r��flagsrrrr��s

��zPipeListener._new_handlec
Cs�|j�|���|j�d�}ztj|dd�}Wn0tk
r^}z|jtjkrN�W5d}~XYnPXz<zt�
|jgdt�}Wn |�
�t�|��YnXW5|�	d�\}}Xt|�S)NrTrvF)r��appendr��poprxr�rCr�Z
ERROR_NO_DATAryr{r|rr}r�ru)r:r;r~r�r�r�resrrrr��s(�
zPipeListener.acceptcCs$t�d|�|D]}t�|�qdS)Nz closing listener with address=%r)rr�rxr�)Zqueuer3r;rrrr��sz$PipeListener._finalize_pipe_listener)N)F)	rqrrrsr�r>r�r��staticmethodr�rrrrr��s

r�c
Cs�t�}z6t�|d�t�|tjtjBdtjtjtjtj�}Wq�t	k
rz}z |j
tjtjfksht
|�rj�W5d}~XYqXq�q�t�|tjdd�t|�S)zU
        Return a connection object connected to the pipe given by `address`
        ��rN)rrxZ
WaitNamedPiper�r�r�r�r�r�rCr�ZERROR_SEM_TIMEOUTZERROR_PIPE_BUSYrr�r�ru)r3r�hr�rrrr��s8
����r��s#CHALLENGE#s	#WELCOME#s	#FAILURE#cCs�ddl}t|t�s$td�t|����t�t�}|�	t
|�|�||t��
�}|�d�}||krl|�	t�n|�	t�td��dS)Nr� Authkey must be bytes, not {0!s}�zdigest received was wrong)�hmacr�rNr%�formatr.r!�urandom�MESSAGE_LENGTHrV�	CHALLENGE�new�HMAC_DIGEST_NAME�digestr^�WELCOME�FAILUREr�Z
connectionr�r��messager�Zresponserrrr��s
�


r�cCsxddl}t|t�s$td�t|����|�d�}|tt�d�}|�	||t
���}|�|�|�d�}|t
krttd��dS)Nrr�r�zdigest sent was rejected)r�r�rNr%r�r.r^rOr�r�r�r�rVr�rr�rrrr��s
�


r�c@s$eZdZdd�Zdd�Zdd�ZdS)�ConnectionWrappercCs6||_||_||_dD]}t||�}t|||�qdS)N)rKrGrjr^rV)�_conn�_dumps�_loadsr��setattr)r:�connrXrd�attrrZrrrr>s
zConnectionWrapper.__init__cCs|�|�}|j�|�dSr)r�r�rV)r:rZr�rrrr[	s
zConnectionWrapper.sendcCs|j��}|�|�Sr)r�r^r�r�rrrrfs
zConnectionWrapper.recvN)rqrrrsr>r[rfrrrrr�sr�cCst�|fdddd��d�S)Nr�utf-8)�	xmlrpclibrX�encode)rZrrr�
_xml_dumpssrcCst�|�d��\\}}|S)Nr)rrd�decode)r�rZ�methodrrr�
_xml_loadssr	c@seZdZdd�ZdS)�XmlListenercCs"ddlmat�|�}t|tt�Sr�)�
xmlrpc.client�clientrrr�r�rr	rYrrrr�s
zXmlListener.acceptN)rqrrrsr�rrrrr
sr
cOsddlmatt||�tt�Sr�)rrrr�rrr	)r��kwdsrrr�	XmlClientsrcCs�t|�}g}|r�t�|d|�}|tkr*q�n\t|krFtt|�krTnn
|t8}n2t|krptt|�kr~nn
|t8}ntd��|�||�||dd�}d}q|S)NFzShould not get hererr)	�listrxr{r
rrOrr�r�)Zhandlesr�L�readyr�rrr�_exhaustive_wait)s 
 
rc
s^|dkrt}n|dkrd}nt|dd�}t|�}i�g}t��t�}�z@|D�]&}zt|d�}	Wn tk
r�|�|��<YqPXzt	�|	�dd�\}}Wn8tk
r�}zd|j}}|tkrƂW5d}~XYnX|t	jkr�|�|�|�|j<qP|�rjt��dd�d	k�rjz|�d
�\}}Wn*tk
�rP}z
|j}W5d}~XYnX|�sjt
|d��rjd|_��|�d}qPt���|�}W5|D]}|���q�|D]�}z|�d�\}}Wn6tk
�r�}z|j}|tk�r�W5d}~XYnX|t	j
k�r��|j}��|�|dk�r�t
|d��r�d|_�q�X���fdd�|D���fd
d�|D�S)��
        Wait till an object in object_list is ready/readable.

        Returns list of those objects in object_list which are ready/readable.
        Nrr�g�?Tr�rK�)�rFc3s|]}�|VqdSrr)�.0r�)�waithandle_to_objrr�	<genexpr>�szwait.<locals>.<genexpr>csg|]}|�kr|�qSrr)r�o)�
ready_objectsrr�
<listcomp>�s�wait.<locals>.<listcomp>)r�intr�setr}ryrCr��
_ready_errorsrxZERROR_OPERATION_ABORTEDr|�addr+r�r��AttributeErrorr6r�rzr�r)Zgetwindowsversionr�keys�update)
�object_listrZov_listZ
ready_handlesr~r�rr�rrKr)rrrr?sh







�PollSelectorc
Cs�t���}|D]}|�|tj�q|dk	r4t��|}|�|�}|r\dd�|D�W5QR�S|dk	r4|t��}|dkr4|W5QR�Sq4W5QRXdS)rNcSsg|]\}}|j�qSr)Zfileobj)r�keyZeventsrrrr�srr)�
_WaitSelector�register�	selectorsZ
EVENT_READrrZselect)r$rZselectorrZZdeadlinerrrrr�s
c
CsZ|��}t�|tjtj��6}ddlm}|�|�}t||j	|j
ffW5QR�SQRXdS)Nr)�resource_sharer)rKr,ZfromfdrZSOCK_STREAMrr*Z	DupSocket�rebuild_connectionr<r=)rr;r�r*�dsrrr�reduce_connection�s

r-cCs|��}t|��||�Sr�r�r�)r,r<r=Zsockrrrr+�sr+cCsB|jrtjnd|jrtjndB}t�|��|�}t||j|jffSr�)	r<rxZFILE_GENERIC_READr=ZFILE_GENERIC_WRITEr
Z	DupHandlerK�rebuild_pipe_connection)rr��dhrrr�reduce_pipe_connection�s
�r1cCs|��}t|||�Sr)r�ru)r0r<r=r;rrrr/�sr/cCs t�|���}t||j|jffSr)r
ZDupFdrKr+r<r=)r�dfrrrr-�scCs|��}t|||�Srr.)r2r<r=�fdrrrr+�s)NN)T)T)N)N)J�__all__r�r!r)r,r�rr �	itertoolsr�rrrr	�contextr
ZForkingPicklerrWrxrrr
r�ImportErrorr*r�ZCONNECTION_TIMEOUTr��countr$r�Zfamiliesr+rrr(r-r4r5rur��objectrrrr�r�r�r�r�r�r�r�r�r�r�rr	r
rrr�ZERROR_NETNAME_DELETEDrrr)r%r'ZSelectSelectorr-r+r(r1r/rrrr�<module>
s�



PT=

,,8	P
PK{��\�;z ��#__pycache__/__init__.cpython-38.pycnu�[���U

e5d��@sdddlZddlmZdd�eej�D�Ze��dd�eD��dZd	Z	d
ej
kr`ej
d
ej
d<dS)�N�)�contextcCsg|]}|�d�s|�qS)�_)�
startswith)�.0�x�r�0/usr/lib64/python3.8/multiprocessing/__init__.py�
<listcomp>s
r
ccs|]}|ttj|�fVqdS)N)�getattrr�_default_context)r�namerrr	�	<genexpr>sr���__main__Z__mp_main__)�sys�r�dirr�__all__�globals�updateZSUBDEBUGZ
SUBWARNING�modulesrrrr	�<module>s
PK{��\���h�*�*,__pycache__/synchronize.cpython-38.opt-2.pycnu�[���U

e5dZ-�@s,ddddddgZddlZddlZddlZddlZddlZdd	lmZdd
lmZddlm	Z	zddlm
Z
mZWnek
r�ed
��YnXe
ed��\ZZej
jZGdd�de�Z
Gdd�de
�ZGdd�de�ZGdd�de
�ZGdd�de
�ZGdd�de�ZGdd�de�ZGdd�dej�ZdS)�Lock�RLock�	Semaphore�BoundedSemaphore�	Condition�Event�N�)�context)�process)�util)�SemLock�
sem_unlinkz�This platform lacks a functioning sem_open implementation, therefore, the required synchronization primitives needed will not function, see issue 3770.�c@s\eZdZe��Zdd�Zedd��Zdd�Z	dd�Z
d	d
�Zdd�Zd
d�Z
edd��ZdS)rc	Cs�|dkrtj��}|��}tjdkp*|dk}td�D]>}z t�||||�	�|�}|_
Wntk
rlYq4Xq|q4td��t�
d|j�|��tjdkr�dd�}	t�||	�|j
jdk	r�dd	lm}
|
|j
jd
�tj|tj|j
jfdd�dS)
N�win32�fork�dzcannot find name for semaphorezcreated semlock with handle %scSs|j��dS�N)�_semlock�_after_fork)�obj�r�3/usr/lib64/python3.8/multiprocessing/synchronize.pyrGsz%SemLock.__init__.<locals>._after_forkr)�register�	semaphorer)Zexitpriority)r	Z_default_contextZget_contextZget_start_method�sys�platform�range�_multiprocessingr�
_make_namer�FileExistsErrorr�debug�handle�
_make_methodsZregister_after_fork�name�resource_trackerrZFinalize�_cleanup)�self�kind�value�maxvalue�ctxr#Z
unlink_now�i�slrrrrr�__init__2s8
�
�zSemLock.__init__cCs"ddlm}t|�||d�dS)Nr)�
unregisterr)r$r.r
)r#r.rrrr%TszSemLock._cleanupcCs|jj|_|jj|_dSr)r�acquire�release�r&rrrr"Zs
zSemLock._make_methodscCs
|j��Sr)r�	__enter__r1rrrr2^szSemLock.__enter__cGs|jj|�Sr)r�__exit__�r&�argsrrrr3aszSemLock.__exit__cCsDt�|�|j}tjdkr,t���|j�}n|j}||j|j	|j
fS)Nr)r	�assert_spawningrrrZget_spawning_popenZduplicate_for_childr!r'r)r#)r&r,�hrrr�__getstate__ds

zSemLock.__getstate__cCs,tjj|�|_t�d|d�|��dS)Nz recreated blocker with handle %rr)rrZ_rebuildrrr r"�r&�staterrr�__setstate__mszSemLock.__setstate__cCsdt��jdttj�fS)Nz%s-%sZ	semprefix)r
�current_processZ_config�nextr�_randrrrrrrs�zSemLock._make_nameN)�__name__�
__module__�__qualname__�tempfileZ_RandomNameSequencer>r-�staticmethodr%r"r2r3r8r;rrrrrr.s"
	rc@s&eZdZd	dd�Zdd�Zdd�ZdS)
rrcCstj|t|t|d�dS�N�r*)rr-�	SEMAPHORE�
SEM_VALUE_MAX�r&r(r*rrrr-}szSemaphore.__init__cCs
|j��Sr)r�
_get_valuer1rrr�	get_value�szSemaphore.get_valuecCs8z|j��}Wntk
r&d}YnXd|jj|fS)N�unknownz<%s(value=%s)>)rrI�	Exception�	__class__r?�r&r(rrr�__repr__�s

zSemaphore.__repr__N)r)r?r@rAr-rJrOrrrrr{s
c@seZdZddd�Zdd�ZdS)rrcCstj|t|||d�dSrD�rr-rFrHrrrr-�szBoundedSemaphore.__init__cCs>z|j��}Wntk
r&d}YnXd|jj||jjfS)NrKz<%s(value=%s, maxvalue=%s)>)rrIrLrMr?r)rNrrrrO�s
�zBoundedSemaphore.__repr__N)r�r?r@rAr-rOrrrrr�s
c@seZdZdd�Zdd�ZdS)rcCstj|tdd|d�dS�NrrErP�r&r*rrrr-�sz
Lock.__init__cCs�zf|j��r8t��j}t��jdkrd|dt��j7}n,|j��dkrLd}n|j��dkr`d}nd}Wnt	k
r~d}YnXd	|j
j|fS)
N�
MainThread�|r�Noner�SomeOtherThread�SomeOtherProcessrKz<%s(owner=%s)>)r�_is_miner
r<r#�	threading�current_threadrI�_countrLrMr?)r&r#rrrrO�s


z
Lock.__repr__NrQrrrrr�sc@seZdZdd�Zdd�ZdS)rcCstj|tdd|d�dSrR)rr-�RECURSIVE_MUTEXrSrrrr-�szRLock.__init__cCs�z||j��rBt��j}t��jdkr6|dt��j7}|j��}n8|j��dkrZd\}}n |j��dkrrd\}}nd\}}Wnt	k
r�d\}}YnXd	|j
j||fS)
NrTrUr)rVrr)rW�nonzero)rXr^)rKrK�<%s(%s, %s)>)rrYr
r<r#rZr[r\rIrLrMr?)r&r#�countrrrrO�s



zRLock.__repr__NrQrrrrr�sc@sleZdZddd�Zdd�Zdd�Zdd	�Zd
d�Zdd
�Zdd�Z	ddd�Z
ddd�Zdd�Zddd�Z
dS)rNcCs>|p
|��|_|�d�|_|�d�|_|�d�|_|��dS�Nr)r�_lockr�_sleeping_count�_woken_count�_wait_semaphorer")r&�lockr*rrrr-�s
zCondition.__init__cCst�|�|j|j|j|jfSr)r	r6rbrcrdrer1rrrr8�s

�zCondition.__getstate__cCs |\|_|_|_|_|��dSr)rbrcrdrer"r9rrrr;�s
�
zCondition.__setstate__cCs
|j��Sr)rbr2r1rrrr2�szCondition.__enter__cGs|jj|�Sr)rbr3r4rrrr3�szCondition.__exit__cCs|jj|_|jj|_dSr)rbr/r0r1rrrr"�s
zCondition._make_methodscCsJz|jj��|jj��}Wntk
r4d}YnXd|jj|j|fS)NrKr_)rcrrIrdrLrMr?rb)r&Znum_waitersrrrrO�s

�
zCondition.__repr__c	Csj|j��|jj��}t|�D]}|j��qz|j�d|�W�S|j��t|�D]}|j��qTXdS)NT)	rcr0rbrr\rrdr/re)r&�timeoutr`r+rrr�wait�s

zCondition.waitrcCst|j�d�r|j�d�}qd}||krF|j�d�rF|j��|d7}q|rpt|�D]}|j��qR|j�d�rpqbdS)NFrr)rdr/rcrer0r)r&�n�resZsleepersr+rrr�notifys

zCondition.notifycCs|jtjd�dS)N)ri)rkr�maxsizer1rrr�
notify_all(szCondition.notify_allcCsd|�}|r|S|dk	r$t��|}nd}d}|s`|dk	rN|t��}|dkrNq`|�|�|�}q,|Sra)�time�	monotonicrh)r&Z	predicaterg�resultZendtimeZwaittimerrr�wait_for+s
zCondition.wait_for)N)N)r)N)r?r@rAr-r8r;r2r3r"rOrhrkrmrqrrrrr�s


c@s6eZdZdd�Zdd�Zdd�Zdd�Zdd
d�Zd	S)
rcCs |�|���|_|�d�|_dSra)rr�_condr�_flagrSrrrr-CszEvent.__init__c	CsD|j�4|j�d�r,|j��W5QR�dSW5QR�dSQRXdS�NFT)rrrsr/r0r1rrr�is_setGs

zEvent.is_setc	Cs6|j�&|j�d�|j��|j��W5QRXdS�NF)rrrsr/r0rmr1rrr�setNs
z	Event.setc	Cs"|j�|j�d�W5QRXdSrv)rrrsr/r1rrr�clearTszEvent.clearNc	Csh|j�X|j�d�r |j��n|j�|�|j�d�rP|j��W5QR�dSW5QR�dSQRXdSrt)rrrsr/r0rh)r&rgrrrrhXs
z
Event.wait)N)r?r@rAr-rurwrxrhrrrrrAs
c@sZeZdZddd�Zdd�Zdd�Zedd	��Zejd
d	��Zedd��Z	e	jd
d��Z	dS)�BarrierNc	CsRddl}ddlm}||�d�d�}|��}|�|||||f�d|_d|_dS)Nrr)�
BufferWrapperr+r)�struct�heaprzZcalcsizerr;�_stater\)	r&Zparties�actionrgr*r{rz�wrapperZcondrrrr-jszBarrier.__init__cCs.|\|_|_|_|_|_|j���d�|_dS)Nr+)�_parties�_action�_timeoutrr�_wrapperZcreate_memoryview�cast�_arrayr9rrrr;ss
�zBarrier.__setstate__cCs|j|j|j|j|jfSr)r�r�r�rrr�r1rrrr8xs�zBarrier.__getstate__cCs
|jdSra�r�r1rrrr}|szBarrier._statecCs||jd<dSrar�rNrrrr}�scCs
|jdS�Nrr�r1rrrr\�szBarrier._countcCs||jd<dSr�r�rNrrrr\�s)NN)
r?r@rAr-r;r8�propertyr}�setterr\rrrrryhs
	


ry)�__all__rZrrBrrn�r	r
rrr
�ImportError�listrr]rFrG�objectrrrrrrryrrrr�<module>s8�	Mo'PK{��\�9�s	s	1__pycache__/popen_forkserver.cpython-38.opt-2.pycnu�[���U

e5d��@s�ddlZddlZddlmZmZejs.ed��ddlmZddlm	Z	ddlm
Z
ddlmZd	gZGd
d�de
�ZGdd	�d	e	j�ZdS)
�N�)�	reduction�set_spawning_popenz,No support for sending fds between processes)�
forkserver)�
popen_fork)�spawn)�util�Popenc@seZdZdd�Zdd�ZdS)�_DupFdcCs
||_dS�N)�ind)�selfr�r�8/usr/lib64/python3.8/multiprocessing/popen_forkserver.py�__init__sz_DupFd.__init__cCst��|jSr)rZget_inherited_fdsr)r
rrr�detachsz
_DupFd.detachN)�__name__�
__module__�__qualname__rrrrrrr
sr
csBeZdZdZeZ�fdd�Zdd�Zdd�Ze	j
fdd	�Z�ZS)
r	rcsg|_t��|�dSr)�_fds�superr)r
�process_obj��	__class__rrr!szPopen.__init__cCs|j�|�t|j�dS)Nr)r�append�len)r
�fdrrr�duplicate_for_child%szPopen.duplicate_for_childc	Cs�t�|j�}t��}t|�zt�||�t�||�W5td�Xt�	|j
�\|_}t�
|�}t�|tj||jf�|_t|ddd��}|�|���W5QRXt�|j�|_dS)N�wbT)�closefd)rZget_preparation_data�_name�io�BytesIOrr�dumprZconnect_to_new_processr�sentinel�os�duprZFinalizeZ	close_fds�	finalizer�open�write�	getbuffer�read_signed�pid)r
rZ	prep_dataZbuf�wZ	_parent_w�frrr�_launch)s


�z
Popen._launchc	Csr|jdkrlddlm}|tjkr$dnd}||jg|�s:dSzt�|j�|_Wntt	fk
rjd|_YnX|jS)Nr)�wait�)
�
returncodeZmultiprocessing.connectionr0r%�WNOHANGr$rr+�OSError�EOFError)r
�flagr0Ztimeoutrrr�poll=s
z
Popen.poll)
rrr�methodr
ZDupFdrrr/r%r3r7�
__classcell__rrrrr	s)r!r%�contextrrZHAVE_SEND_HANDLE�ImportError�rrrr�__all__�objectr
r	rrrr�<module>s
PK{��\�	de�,�,.__pycache__/shared_memory.cpython-38.opt-2.pycnu�[���U

e5dD�@s�ddgZddlmZddlZddlZddlZddlZddlZejdkrTddl	Z	dZ
nddlZdZ
ejej
BZdZe
rzd	Znd
Zdd�ZGd
d�d�ZdZGdd�d�ZdS)�SharedMemory�
ShareableList�)�partialN�ntFT�z/psm_Zwnsm_cCs"ttt�d}tt�|�}|S)N�)�_SHM_SAFE_NAME_LENGTH�len�_SHM_NAME_PREFIX�secretsZ	token_hex)�nbytes�name�r�5/usr/lib64/python3.8/multiprocessing/shared_memory.py�_make_filename&src@s�eZdZdZdZdZdZejZ	dZ
er*dndZddd�Z
d	d
�Zdd�Zd
d�Zedd��Zedd��Zedd��Zdd�Zdd�ZdS)rN���i�TFrc
	Csl|dkstd��|r0ttjB|_|dkr0td��|dkrL|jtj@sLtd��t�rH|dkr�t�}ztj	||j|j
d�|_Wntk
r�YqZYnX||_
q�qZn.|jr�d|n|}tj	||j|j
d�|_||_
z<|r�|r�t�|j|�t�|j�}|j}t�|j|�|_Wn tk
�r*|���YnXddlm}||j
d	��n|�r�|dk�r^t�n|}t�tjtjtj|d
?d@|d@|�}zXt��}|tjk�r�|dk	�r�tt j!t�"t j!�|tj��nW��qNtjd||d
�|_W5t�|�X||_
�qV�qNnX||_
t�#tj$d|�}zt�%|tj$ddd�}	W5t�|�Xt�&|	�}tjd||d
�|_||_'t(|j�|_)dS)Nrz!'size' must be a positive integerz4'size' must be a positive number different from zeroz&'name' can only be None if create=True)�mode�/�)�register�
shared_memory� l��r)ZtagnameF)*�
ValueError�_O_CREX�os�O_RDWR�_flags�O_EXCL�
_USE_POSIXr�_posixshmemZshm_open�_mode�_fd�FileExistsError�_name�_prepend_leading_slash�	ftruncate�fstat�st_size�mmap�_mmap�OSError�unlink�resource_trackerr�_winapiZCreateFileMappingZINVALID_HANDLE_VALUEZNULLZPAGE_READWRITEZCloseHandleZGetLastErrorZERROR_ALREADY_EXISTS�errnoZEEXIST�strerrorZOpenFileMappingZ
FILE_MAP_READZ
MapViewOfFileZVirtualQuerySize�_size�
memoryview�_buf)
�selfr
�create�sizeZstatsrZ	temp_nameZh_mapZlast_error_codeZp_bufrrr�__init__Is��
�
�

�
��
zSharedMemory.__init__cCs&z|��Wntk
r YnXdS�N)�closer*�r3rrr�__del__�szSharedMemory.__del__cCs|j|jd|jffS)NF)�	__class__r
r5r9rrr�
__reduce__�s��zSharedMemory.__reduce__cCs|jj�d|j�d|j�d�S)N�(z, size=�))r;�__name__r
r5r9rrr�__repr__�szSharedMemory.__repr__cCs|jSr7)r2r9rrr�buf�szSharedMemory.bufcCs.|j}tr*|jr*|j�d�r*|jdd�}|S)Nrr)r#rr$�
startswith)r3Z
reported_namerrrr
�s

zSharedMemory.namecCs|jSr7)r0r9rrrr5�szSharedMemory.sizecCsX|jdk	r|j��d|_|jdk	r4|j��d|_trT|jdkrTt�|j�d|_dS)Nrr)r2�releaser)r8rr!rr9rrrr8�s



zSharedMemory.closecCs2tr.|jr.ddlm}t�|j�||jd�dS)Nr)�
unregisterr)rr#r,rDrZ
shm_unlink)r3rDrrrr+�s
zSharedMemory.unlink)NFr)r?�
__module__�__qualname__r#r!r)r2rrrr rr$r6r:r<r@�propertyrAr
r5r8r+rrrrr0s&
l




�utf8c@seZdZedededededdjdiZ	dZ
dd	�d
d	�dd	�dd	�d
�Zedd��Z
d5dd�dd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd �Zd!d"�Zed#d$��Zed%d&��Zed'd(��Zed)d*��Zed+d,��Zed-d.��Zed/d0��Zd1d2�Zd3d4�Z dS)6r�q�dzxxxxxxx?z%dsNzxxxxxx?x�cCs|Sr7r��valuerrr�<lambda>
�zShareableList.<lambda>cCs|�d��t�S�N�)�rstrip�decode�	_encodingrLrrrrNrOcCs
|�d�SrP)rRrLrrrrNrOcCsdSr7r)Z_valuerrrrN
rO)rrr�cCs:t|ttdjf�sdSt|t�r$dSt|t�r2dSdSdS)NrrrrU)�
isinstance�str�bytesr;rLrrr�_extract_recreation_codes

z&ShareableList._extract_recreation_code�r
csr|dk	rv�fdd�|D�}t|��_t�fdd�|D���_�fdd�|D�}t�d�jd�|��j�j	�}nd}|dk	r�|dkr�t
|��_nt
|d	|d
��_|dk	�rNt�tj
d�j�jjd�jf�j��tj
d�|��jj�jf�fdd�|D���tj
�j�jj�jf�fd
d�|D���tj
�j	�jj�jf|��n t���_t��j�jjd��_dS)NcsPg|]H}t|ttf�s$�jt|�n&�jt|��jt|��jdf�qS)r)rVrWrX�_types_mapping�type�
_alignmentr	��.0�itemr9rr�
<listcomp> s���z*ShareableList.__init__.<locals>.<listcomp>c3s0|](}|ddkr�jnt|dd��VqdS)r�sN)r]�int)r_�fmtr9rr�	<genexpr>*s�z)ShareableList.__init__.<locals>.<genexpr>csg|]}��|��qSr)rYr^r9rrra.srI�rKT)r4r5rc3s&|]}t|t�r|���n|VqdSr7)rVrW�encode�r_�v��_encrrreMsc3s|]}|���VqdSr7)rgrhrjrrreSs)r	�	_list_len�tuple�_allocated_bytes�structZcalcsize�_format_size_metainfo�join�_format_packing_metainfo�_format_back_transform_codesr�shmrT�	pack_intorA�_offset_data_start�_offset_packing_formats�_offset_back_transform_codes�unpack_from)r3Zsequencer
Z_formatsZ_recreation_codesZrequested_sizer)rkr3rr6sz
�
�

�����
��������
�zShareableList.__init__cCsj|dkr|n||j}||jks*|jdkr2td��t�d|jj|j|d�d}|�d�}|�t	�}|S)Nr� Requested position out of range.�8srKrQ)
rl�
IndexErrorroryrtrArwrRrSrT)r3�positionrird�
fmt_as_strrrr�_get_packing_formatds��

z!ShareableList._get_packing_formatcCs\|dkr|n||j}||jks*|jdkr2td��t�d|jj|j|�d}|j|}|S)Nrrz�b)rlr|roryrtrArx�_back_transforms_mapping)r3r}�transform_codeZtransform_functionrrr�_get_back_transformts��
z!ShareableList._get_back_transformcCs~|dkr|n||j}||jks*|jdkr2td��t�d|jj|j|d|�t��|�	|�}t�d|jj|j
||�dS)Nrrzr{rKr�)rlr|rorurtrArwrgrTrYrx)r3r}r~rMr�rrr�!_set_packing_format_and_transform�s �
�z/ShareableList._set_packing_format_and_transformcCsjz6|jt|jd|��}t�|�|�|jj|�\}Wntk
rRtd��YnX|�	|�}||�}|S)Nzindex out of range)
rv�sumrnroryrrtrAr|r�)r3r}�offsetriZback_transformrrr�__getitem__�s��

zShareableList.__getitem__cCs�z&|jt|jd|��}|�|�}Wntk
rBtd��YnXt|ttf�sf|jt	|�}|}nZt|t�rz|�
t�n|}t|�|j|kr�t
d��|ddkr�|}n|jt|j|f}|�|||�t�||jj||�dS)Nzassignment index out of rangez(bytes/str item exceeds available storagerrb)rvr�rnrr|rVrWrXr[r\rgrTr	rr�rorurtrA)r3r}rMr�Zcurrent_formatZ
new_formatZ
encoded_valuerrr�__setitem__�s6�����zShareableList.__setitem__cCst|j|jjd�dfS)NrZr)rr;rtr
r9rrrr<�szShareableList.__reduce__cCst�d|jjd�dS)NrIr)roryrtrAr9rrr�__len__�szShareableList.__len__cCs"|jj�dt|��d|jj�d�S)Nr=z, name=r>)r;r?�listrtr
r9rrrr@�szShareableList.__repr__csd��fdd�t�j�D��S)Nrfc3s|]}��|�VqdSr7)r)r_�ir9rrre�sz'ShareableList.format.<locals>.<genexpr>)rq�rangerlr9rr9r�format�s�zShareableList.formatcCs|j�d�S)NrI�rlr9rrrrp�sz#ShareableList._format_size_metainfocCs
d|jS)Nr{r�r9rrrrr�sz&ShareableList._format_packing_metainfocCs
d|jS)Nr�r�r9rrrrs�sz*ShareableList._format_back_transform_codescCs|jddS)NrrKr�r9rrrrv�sz ShareableList._offset_data_startcCs|jt|j�Sr7)rvr�rnr9rrrrw�sz%ShareableList._offset_packing_formatscCs|j|jdS)NrK)rwrlr9rrrrx�sz*ShareableList._offset_back_transform_codescst�fdd�|D��S)Nc3s|]}�|kVqdSr7r)r_�entryrLrrre�sz&ShareableList.count.<locals>.<genexpr>)r�)r3rMrrLr�count�szShareableList.countcCs4t|�D]\}}||kr|Sqt|�d���dS)Nz not in this container)�	enumerater)r3rMr}r�rrr�index�s
zShareableList.index)N)!r?rErFrc�float�boolrWrXr;r[r]r��staticmethodrYr6rr�r�r�r�r<r�r@rGr�rprrrsrvrwrxr�r�rrrrr�s\��

F






)�__all__�	functoolsrr(rr.rorr
r-rr�O_CREATrrrr
rrrTrrrrr�<module>s*

EPK{��\{�2
 
 *__pycache__/reduction.cpython-38.opt-1.pycnu�[���U

e5d(%�@sddlmZddlZddlZddlZddlZddlZddlZddlZddl	m
Z
ddddd	gZejd
kp�e
ed�o�e
ed�o�e
ejd
�ZGdd�dej�ZejZd6dd	�Zejd
k�redddg7ZddlZd7dd�dd�Zdd�Zdd�Zdd�ZGdd�de�ZnHedddg7ZddlZejdkZdd�Zdd�Zd d�Zd!d�Zd"d�Zd#d$�ZGd%d&�d&�Z ee!e �j"�e�d'd(�Z#ee!e$j%�e#�ee!e&j'�e#�d)d*�Z(d+d,�Z)eej*e(�ejd
k�r�d-d.�Z+d/d0�Z,eeje+�nd1d.�Z+d2d0�Z,eeje+�Gd3d4�d4ed5�Z-dS)8�)�ABCMetaN�)�context�send_handle�recv_handle�ForkingPickler�register�dump�win32ZCMSG_LEN�
SCM_RIGHTS�sendmsgcsJeZdZdZiZejZ�fdd�Ze	dd��Z
e	d	dd��Zej
Z
�ZS)
rz)Pickler subclass used by multiprocessing.cs*t�j|�|j��|_|j�|j�dS�N)�super�__init__�_copyreg_dispatch_table�copy�dispatch_table�update�_extra_reducers��self�args��	__class__��1/usr/lib64/python3.8/multiprocessing/reduction.pyr&szForkingPickler.__init__cCs||j|<dS)z&Register a reduce function for a type.N)r)�cls�type�reducerrrr+szForkingPickler.registerNcCs t��}|||��|�|��Sr
)�io�BytesIOr	�	getbuffer)r�obj�protocolZbufrrr�dumps0szForkingPickler.dumps)N)�__name__�
__module__�__qualname__�__doc__r�copyregrrr�classmethodrr$�pickle�loads�
__classcell__rrrrr!s
cCst||��|�dS)z3Replacement for pickle.dump() using ForkingPickler.N)rr	)r"�filer#rrrr	:s�	DupHandle�	duplicate�steal_handleF)�source_processcCs6t��}|dkr|}|dkr |}t�|||d|tj�S)z<Duplicate a handle.  (target_process is a handle not a pid!)Nr)�_winapi�GetCurrentProcess�DuplicateHandle�DUPLICATE_SAME_ACCESS)�handleZtarget_processZinheritabler2Zcurrent_processrrrr0Gs�c	CsFt�tjd|�}z$t�||t��ddtjtjB�W�St�|�XdS)z5Steal a handle from process identified by source_pid.FrN)r3�OpenProcess�PROCESS_DUP_HANDLE�CloseHandler5r4r6�DUPLICATE_CLOSE_SOURCE)Z
source_pidr7Zsource_process_handlerrrr1Ss�
�cCst|tj|�}|�|�dS�z&Send a handle over a local connection.N)r/r3r6�send)�connr7�destination_pidZdhrrrr_scCs|����S)�)Receive a handle over a local connection.)�recv�detach)r>rrrrdsc@s"eZdZdZddd�Zdd�ZdS)r/zPicklable wrapper for a handle.Nc	Cs\|dkrt��}t�tjd|�}zt�t��|||dd�|_W5t�|�X||_	||_
dS)NFr)�os�getpidr3r8r9r:r5r4�_handle�_access�_pid)rr7�access�pid�procrrrrjs�
zDupHandle.__init__c	CsZ|jt��kr|jSt�tjd|j�}z"t�||jt�	�|j
dtj�W�St�|�XdS)z1Get the handle.  This should only be called once.FN)rGrCrDrEr3r8r9r:r5r4rFr;)rrJrrrrBys
��zDupHandle.detach)N)r%r&r'r(rrBrrrrr/hs
�DupFd�sendfds�recvfds�darwincCsVt�d|�}tt|�dg�}|�|gtjtj|fg�trR|�d�dkrRt	d��dS)z,Send an array of fds over an AF_UNIX socket.�i�r�Az%did not receive acknowledgement of fdN)
�array�bytes�lenr�socket�
SOL_SOCKETr�ACKNOWLEDGErA�RuntimeError)�sockZfds�msgrrrrL�s
c	Cst�d�}|j|}|�dt�|��\}}}}|s:|s:t�z�trJ|�d�t|�dkrft	dt|���|d\}}	}
|tj
kr�|	tjkr�t|
�|jdkr�t�|�
|
�t|�d|dkr�td�t|�|d���t|�WSWnttfk
r�YnXt	d��d	S)
z/Receive an array of fds over an AF_UNIX socket.rOrrQzreceived %d items of ancdatarrPz Len is {0:n} but msg[0] is {1!r}zInvalid data receivedN)rR�itemsizeZrecvmsgrUZ
CMSG_SPACE�EOFErrorrWr=rTrXrVr�
ValueErrorZ	frombytes�AssertionError�format�list�
IndexError)rY�size�aZ
bytes_sizerZZancdata�flagsZaddrZ
cmsg_levelZ	cmsg_typeZ	cmsg_datarrrrM�s<


�
�
��c	Cs2t�|��tjtj��}t||g�W5QRXdSr<)rU�fromfd�fileno�AF_UNIX�SOCK_STREAMrL)r>r7r?�srrrr�sc
Cs<t�|��tjtj��}t|d�dW5QR�SQRXdS)r@rrN)rUrerfrgrhrM)r>rirrrr�scCsFt��}|dk	r |�|�|��Str:ddlm}|�|�Std��dS)zReturn a wrapper for an fd.Nr)�resource_sharerz&SCM_RIGHTS appears not to be available)rZget_spawning_popenrKZduplicate_for_child�HAVE_SEND_HANDLE�rjr])�fdZ	popen_objrjrrrrK�s
cCs2|jdkrt|j|jjffSt|j|jjffSdSr
)�__self__�getattrr�__func__r%��mrrr�_reduce_method�s
rsc@seZdZdd�ZdS)�_CcCsdSr
r)rrrr�f�sz_C.fN)r%r&r'rurrrrrt�srtcCst|j|jffSr
)ro�__objclass__r%rqrrr�_reduce_method_descriptor�srwcCst|j|j|jpiffSr
)�_rebuild_partial�funcr�keywords)�prrr�_reduce_partial�sr|cCstj|f|�|�Sr
)�	functools�partial)ryrrzrrrrx�srxcCsddlm}t||�ffS)Nr)�	DupSocket)rjr�_rebuild_socket)rirrrr�_reduce_socket�sr�cCs|��Sr
)rB)Zdsrrrr��sr�cCs"t|���}t||j|j|jffSr
)rKrfr��familyr�proto)ri�dfrrrr��scCs|��}tj||||d�S)N)rf)rBrU)r�r�rr�rmrrrr��sc@sdeZdZdZeZeZeZeZeZe	j
dkr8eZeZe
Z
neZeZeZeZeZeZeZeZdd�ZdS)�AbstractReducerz�Abstract base class for use in implementing a Reduction class
    suitable for use in replacing the standard reduction mechanism
    used in multiprocessing.r
cGsNttt�j�t�tttj�t�tttj	�t�tt
jt�tt
j
t�dSr
)rrrtrursr`�appendrw�int�__add__r}r~r|rUr�rrrrrs
zAbstractReducer.__init__N)r%r&r'r(rrr	rr�sys�platformr1r0r/rLrMrKrsrwrxr�r�rrrrrr��s&
r�)�	metaclass)N)NF).�abcrr)r}rrCr+rUr�rlr�__all__r��hasattrrkZPicklerrrr	r3r0r1rr�objectr/rRrWrLrMrKrsrtrrurwr`r�r�r�r|rxr~r�r�r�rrrr�<module>
sj

�
�	
�#
PK{��\�TS���1__pycache__/resource_tracker.cpython-38.opt-2.pycnu�[���U

e5d�!�@s�ddlZddlZddlZddlZddlZddlmZddlmZdddgZe	ed�Z
ejejfZ
d	d
d�iZejdkr�ddlZddlZe�ejejd
��Gdd�de�Ze�ZejZejZejZejZdd�ZdS)�N�)�spawn)�util�ensure_running�register�
unregister�pthread_sigmaskZnoopcCsdS�N�r
r
r
�8/usr/lib64/python3.8/multiprocessing/resource_tracker.py�<lambda>!�r�posix)Z	semaphoreZ
shared_memoryc@sLeZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	dd�Z
dS)�ResourceTrackercCst��|_d|_d|_dSr	)�	threadingZLock�_lock�_fd�_pid��selfr
r
r�__init__0s
zResourceTracker.__init__c	CsT|j�D|jdkr W5QR�dSt�|j�d|_t�|jd�d|_W5QRXdS)Nr)rr�os�close�waitpidrrr
r
r�_stop5s
zResourceTracker._stopcCs|��|jSr	)rrrr
r
r�getfdBszResourceTracker.getfdcCst|j��b|jdk	r~|��r*W5QR�dSt�|j�z|jdk	rPt�|jd�Wntk
rfYnXd|_d|_t�	d�g}z|�
tj�
��Wntk
r�YnXd}t��\}}z�zr|�
|�t��}|gt��}|d||g7}z&t�rt�tjt�t�|||�}W5t�r,t�tjt�XWnt�|��YnX||_||_W5t�|�XW5QRXdS)NrzUresource_tracker: process died unexpectedly, relaunching.  Some resources might leak.z:from multiprocessing.resource_tracker import main;main(%d)z-c)rr�_check_aliverrrr�ChildProcessError�warnings�warn�append�sys�stderr�fileno�	Exception�piperZget_executablerZ_args_from_interpreter_flags�
_HAVE_SIGMASK�signalr�SIG_UNBLOCK�_IGNORED_SIGNALS�	SIG_BLOCKZspawnv_passfds)rZfds_to_pass�cmd�r�wZexe�args�pidr
r
rrFsJ






zResourceTracker.ensure_runningcCs2zt�|jd�Wntk
r(YdSXdSdS)Ns
PROBE:0:noop
FT)r�writer�OSErrorrr
r
rr�s
zResourceTracker._check_alivecCs|�d||�dS)N�REGISTER��_send�r�name�rtyper
r
rr�szResourceTracker.registercCs|�d||�dS)N�
UNREGISTERr3r5r
r
rr�szResourceTracker.unregistercCsB|��d�|||��d�}t|�dkr0td��t�|j|�}dS)Nz{0}:{1}:{2}
�asciiiz
name too long)r�format�encode�len�
ValueErrorrr0r)rr+r6r7�msg�nbytesr
r
rr4�szResourceTracker._sendN)�__name__�
__module__�__qualname__rrrrrrrr4r
r
r
rr.s
@rc
Cst�tjtj�t�tjtj�tr2t�tjt�tj	tj
fD]&}z|��Wq>tk
rbYq>Xq>dd�t
��D�}z�t|d���}|D]�}z�|���d��d�\}}}t
�|d�}	|	dkr�td|�d	|����|d
kr�||�|�n2|dk�r||�|�n|dk�rntd
|��Wq�tk
�rTztjt���WnYnXYq�Xq�W5QRXW5|��D]�\}}|�r�zt�dt|�|f�Wntk
�r�YnX|D]V}zLzt
||�Wn6tk
�r�}zt�d||f�W5d}~XYnXW5X�q��qnXdS)NcSsi|]}|t��qSr
)�set)�.0r7r
r
r�
<dictcomp>�szmain.<locals>.<dictcomp>zQresource_tracker: There appear to be %d leaked %s objects to clean up at shutdownzresource_tracker: %r: %s�rbr9�:zCannot register z. for automatic cleanup: unknown resource type r2r8ZPROBEzunrecognized command %r)r'�SIGINT�SIG_IGN�SIGTERMr&rr(r)r!�stdin�stdoutrr$�_CLEANUP_FUNCS�keys�itemsrrr<�open�strip�decode�split�getr=�add�remove�RuntimeError�
excepthook�exc_info)
�fd�f�cacher7Zrtype_cacher6�e�liner+Zcleanup_funcr
r
r�main�s^�


�
(r_)rr'r!rr�rr�__all__�hasattrr&rHrJr)rMr6Z_multiprocessingZ_posixshmem�updateZ
sem_unlinkZ
shm_unlink�objectrZ_resource_trackerrrrrr_r
r
r
r�<module>s4

�
�wPK{��\ɴ�
�
,__pycache__/popen_spawn_win32.cpython-38.pycnu�[���U

e5d��@s�ddlZddlZddlZddlZddlZddlmZmZmZddl	m
Z
ddl	mZdgZdZ
ejdkoreed	d
�Zej���d�Zdd
�Zeejej�Zdd�ZGdd�de�ZdS)�N�)�	reduction�get_spawning_popen�set_spawning_popen)�spawn)�util�PopeniZwin32�frozenFzpythonservice.execCs ||kptj�|�tj�|�kS�N)�os�path�normcase)Zp1Zp2�r�9/usr/lib64/python3.8/multiprocessing/popen_spawn_win32.py�_path_eqsrcGs|D]}t�|�qdSr
)�_winapi�CloseHandle)Zhandles�handlerrr�_close_handlessrc@sJeZdZdZdZdd�Zdd�Zddd	�Zd
d�Zdd
�Z	e	Z
dd�ZdS)rz@
    Start a subprocess to run the code of a process object
    rcCsTt�|j�}t�dd�\}}t�|d�}tjt�	�|d�}d�
dd�|D��}t��}tr�t
|tj�r�tj}tj��}tj|d<nd}t|ddd	���}	z0t�||ddd
d|dd�	\}
}}}
t�|�Wnt�|��YnX||_d|_|
|_t|
�|_t�|t|jt|�f�|_t|�zt �!||	�t �!||	�W5td�XW5QRXdS)Nr)Z
parent_pidZpipe_handle� css|]}d|VqdS)z"%s"Nr)�.0�xrrr�	<genexpr>9sz!Popen.__init__.<locals>.<genexpr>�__PYVENV_LAUNCHER__�wbT)�closefdF)"rZget_preparation_data�_namerZ
CreatePipe�msvcrtZopen_osfhandleZget_command_liner�getpid�joinZget_executable�WINENVr�sys�
executable�_base_executable�environ�copy�openZ
CreateProcessr�pid�
returncode�_handle�int�sentinelrZFinalizer�	finalizerrr�dump)�selfZprocess_objZ	prep_dataZrhandleZwhandleZwfd�cmdZ
python_exe�envZto_childZhpZhtr'�tidrrr�__init__,sT
�
�

�zPopen.__init__cCs|t�kst�t�||j�Sr
)r�AssertionErrorrZ	duplicater+)r.rrrr�duplicate_for_childaszPopen.duplicate_for_childNcCst|jdkrn|dkrtj}ntdt|dd��}t�t|j�|�}|tjkrnt�|j�}|t	krht
j}||_|jS)Nri�g�?)r(rZINFINITE�maxr*ZWaitForSingleObjectr)Z
WAIT_OBJECT_0ZGetExitCodeProcess�	TERMINATE�signal�SIGTERM)r.�timeoutZmsecs�res�coderrr�waites

z
Popen.waitcCs|jdd�S)Nr�r9)r<�r.rrr�pollusz
Popen.pollcCsL|jdkrHzt�t|j�t�Wn&tk
rF|jdd�dkrB�YnXdS)Ng�?r=)r(rZTerminateProcessr*r)r6�OSErrorr<r>rrr�	terminatexs
zPopen.terminatecCs|��dSr
)r,r>rrr�close�szPopen.close)N)�__name__�
__module__�__qualname__�__doc__�methodr2r4r<r?rA�killrBrrrrr&s5
)rrr7r!r�contextrrr�rr�__all__r6�platform�getattrZWINEXEr"�lower�endswithZ
WINSERVICErr#r r�objectrrrrr�<module>s
PK{��\�;z ��)__pycache__/__init__.cpython-38.opt-1.pycnu�[���U

e5d��@sdddlZddlmZdd�eej�D�Ze��dd�eD��dZd	Z	d
ej
kr`ej
d
ej
d<dS)�N�)�contextcCsg|]}|�d�s|�qS)�_)�
startswith)�.0�x�r�0/usr/lib64/python3.8/multiprocessing/__init__.py�
<listcomp>s
r
ccs|]}|ttj|�fVqdS)N)�getattrr�_default_context)r�namerrr	�	<genexpr>sr���__main__Z__mp_main__)�sys�r�dirr�__all__�globals�updateZSUBDEBUGZ
SUBWARNING�modulesrrrr	�<module>s
PK{��\{�2
 
 $__pycache__/reduction.cpython-38.pycnu�[���U

e5d(%�@sddlmZddlZddlZddlZddlZddlZddlZddlZddl	m
Z
ddddd	gZejd
kp�e
ed�o�e
ed�o�e
ejd
�ZGdd�dej�ZejZd6dd	�Zejd
k�redddg7ZddlZd7dd�dd�Zdd�Zdd�Zdd�ZGdd�de�ZnHedddg7ZddlZejdkZdd�Zdd�Zd d�Zd!d�Zd"d�Zd#d$�ZGd%d&�d&�Z ee!e �j"�e�d'd(�Z#ee!e$j%�e#�ee!e&j'�e#�d)d*�Z(d+d,�Z)eej*e(�ejd
k�r�d-d.�Z+d/d0�Z,eeje+�nd1d.�Z+d2d0�Z,eeje+�Gd3d4�d4ed5�Z-dS)8�)�ABCMetaN�)�context�send_handle�recv_handle�ForkingPickler�register�dump�win32ZCMSG_LEN�
SCM_RIGHTS�sendmsgcsJeZdZdZiZejZ�fdd�Ze	dd��Z
e	d	dd��Zej
Z
�ZS)
rz)Pickler subclass used by multiprocessing.cs*t�j|�|j��|_|j�|j�dS�N)�super�__init__�_copyreg_dispatch_table�copy�dispatch_table�update�_extra_reducers��self�args��	__class__��1/usr/lib64/python3.8/multiprocessing/reduction.pyr&szForkingPickler.__init__cCs||j|<dS)z&Register a reduce function for a type.N)r)�cls�type�reducerrrr+szForkingPickler.registerNcCs t��}|||��|�|��Sr
)�io�BytesIOr	�	getbuffer)r�obj�protocolZbufrrr�dumps0szForkingPickler.dumps)N)�__name__�
__module__�__qualname__�__doc__r�copyregrrr�classmethodrr$�pickle�loads�
__classcell__rrrrr!s
cCst||��|�dS)z3Replacement for pickle.dump() using ForkingPickler.N)rr	)r"�filer#rrrr	:s�	DupHandle�	duplicate�steal_handleF)�source_processcCs6t��}|dkr|}|dkr |}t�|||d|tj�S)z<Duplicate a handle.  (target_process is a handle not a pid!)Nr)�_winapi�GetCurrentProcess�DuplicateHandle�DUPLICATE_SAME_ACCESS)�handleZtarget_processZinheritabler2Zcurrent_processrrrr0Gs�c	CsFt�tjd|�}z$t�||t��ddtjtjB�W�St�|�XdS)z5Steal a handle from process identified by source_pid.FrN)r3�OpenProcess�PROCESS_DUP_HANDLE�CloseHandler5r4r6�DUPLICATE_CLOSE_SOURCE)Z
source_pidr7Zsource_process_handlerrrr1Ss�
�cCst|tj|�}|�|�dS�z&Send a handle over a local connection.N)r/r3r6�send)�connr7�destination_pidZdhrrrr_scCs|����S)�)Receive a handle over a local connection.)�recv�detach)r>rrrrdsc@s"eZdZdZddd�Zdd�ZdS)r/zPicklable wrapper for a handle.Nc	Cs\|dkrt��}t�tjd|�}zt�t��|||dd�|_W5t�|�X||_	||_
dS)NFr)�os�getpidr3r8r9r:r5r4�_handle�_access�_pid)rr7�access�pid�procrrrrjs�
zDupHandle.__init__c	CsZ|jt��kr|jSt�tjd|j�}z"t�||jt�	�|j
dtj�W�St�|�XdS)z1Get the handle.  This should only be called once.FN)rGrCrDrEr3r8r9r:r5r4rFr;)rrJrrrrBys
��zDupHandle.detach)N)r%r&r'r(rrBrrrrr/hs
�DupFd�sendfds�recvfds�darwincCsVt�d|�}tt|�dg�}|�|gtjtj|fg�trR|�d�dkrRt	d��dS)z,Send an array of fds over an AF_UNIX socket.�i�r�Az%did not receive acknowledgement of fdN)
�array�bytes�lenr�socket�
SOL_SOCKETr�ACKNOWLEDGErA�RuntimeError)�sockZfds�msgrrrrL�s
c	Cst�d�}|j|}|�dt�|��\}}}}|s:|s:t�z�trJ|�d�t|�dkrft	dt|���|d\}}	}
|tj
kr�|	tjkr�t|
�|jdkr�t�|�
|
�t|�d|dkr�td�t|�|d���t|�WSWnttfk
r�YnXt	d��d	S)
z/Receive an array of fds over an AF_UNIX socket.rOrrQzreceived %d items of ancdatarrPz Len is {0:n} but msg[0] is {1!r}zInvalid data receivedN)rR�itemsizeZrecvmsgrUZ
CMSG_SPACE�EOFErrorrWr=rTrXrVr�
ValueErrorZ	frombytes�AssertionError�format�list�
IndexError)rY�size�aZ
bytes_sizerZZancdata�flagsZaddrZ
cmsg_levelZ	cmsg_typeZ	cmsg_datarrrrM�s<


�
�
��c	Cs2t�|��tjtj��}t||g�W5QRXdSr<)rU�fromfd�fileno�AF_UNIX�SOCK_STREAMrL)r>r7r?�srrrr�sc
Cs<t�|��tjtj��}t|d�dW5QR�SQRXdS)r@rrN)rUrerfrgrhrM)r>rirrrr�scCsFt��}|dk	r |�|�|��Str:ddlm}|�|�Std��dS)zReturn a wrapper for an fd.Nr)�resource_sharerz&SCM_RIGHTS appears not to be available)rZget_spawning_popenrKZduplicate_for_child�HAVE_SEND_HANDLE�rjr])�fdZ	popen_objrjrrrrK�s
cCs2|jdkrt|j|jjffSt|j|jjffSdSr
)�__self__�getattrr�__func__r%��mrrr�_reduce_method�s
rsc@seZdZdd�ZdS)�_CcCsdSr
r)rrrr�f�sz_C.fN)r%r&r'rurrrrrt�srtcCst|j|jffSr
)ro�__objclass__r%rqrrr�_reduce_method_descriptor�srwcCst|j|j|jpiffSr
)�_rebuild_partial�funcr�keywords)�prrr�_reduce_partial�sr|cCstj|f|�|�Sr
)�	functools�partial)ryrrzrrrrx�srxcCsddlm}t||�ffS)Nr)�	DupSocket)rjr�_rebuild_socket)rirrrr�_reduce_socket�sr�cCs|��Sr
)rB)Zdsrrrr��sr�cCs"t|���}t||j|j|jffSr
)rKrfr��familyr�proto)ri�dfrrrr��scCs|��}tj||||d�S)N)rf)rBrU)r�r�rr�rmrrrr��sc@sdeZdZdZeZeZeZeZeZe	j
dkr8eZeZe
Z
neZeZeZeZeZeZeZeZdd�ZdS)�AbstractReducerz�Abstract base class for use in implementing a Reduction class
    suitable for use in replacing the standard reduction mechanism
    used in multiprocessing.r
cGsNttt�j�t�tttj�t�tttj	�t�tt
jt�tt
j
t�dSr
)rrrtrursr`�appendrw�int�__add__r}r~r|rUr�rrrrrs
zAbstractReducer.__init__N)r%r&r'r(rrr	rr�sys�platformr1r0r/rLrMrKrsrwrxr�r�rrrrrr��s&
r�)�	metaclass)N)NF).�abcrr)r}rrCr+rUr�rlr�__all__r��hasattrrkZPicklerrrr	r3r0r1rr�objectr/rRrWrLrMrKrsrtrrurwr`r�r�r�r|rxr~r�r�r�rrrr�<module>
sj

�
�	
�#
PK{��\!o�BT7T7.__pycache__/shared_memory.cpython-38.opt-1.pycnu�[���U

e5dD�@s�dZddgZddlmZddlZddlZddlZddlZddlZej	dkrXddl
Z
dZnddlZdZej
ejBZd	Zer~d
ZndZdd
�ZGdd�d�ZdZGdd�d�ZdS)z�Provides shared memory for direct access across processes.

The API of this package is currently provisional. Refer to the
documentation for details.
�SharedMemory�
ShareableList�)�partialN�ntFT�z/psm_Zwnsm_cCs"ttt�d}tt�|�}|S)z6Create a random filename for the shared memory object.�)�_SHM_SAFE_NAME_LENGTH�len�_SHM_NAME_PREFIX�secretsZ	token_hex)�nbytes�name�r�5/usr/lib64/python3.8/multiprocessing/shared_memory.py�_make_filename&src@s�eZdZdZdZdZdZdZej	Z
dZer.dndZ
ddd	�Zd
d�Zdd
�Zdd�Zedd��Zedd��Zedd��Zdd�Zdd�ZdS)ra�Creates a new shared memory block or attaches to an existing
    shared memory block.

    Every shared memory block is assigned a unique name.  This enables
    one process to create a shared memory block with a particular name
    so that a different process can attach to that same shared memory
    block using that same name.

    As a resource for sharing data across processes, shared memory blocks
    may outlive the original process that created them.  When one process
    no longer needs access to a shared memory block that might still be
    needed by other processes, the close() method should be called.
    When a shared memory block is no longer needed by any process, the
    unlink() method should be called to ensure proper cleanup.N���i�TFrc
	Csl|dkstd��|r0ttjB|_|dkr0td��|dkrL|jtj@sLtd��t�rH|dkr�t�}ztj	||j|j
d�|_Wntk
r�YqZYnX||_
q�qZn.|jr�d|n|}tj	||j|j
d�|_||_
z<|r�|r�t�|j|�t�|j�}|j}t�|j|�|_Wn tk
�r*|���YnXddlm}||j
d	��n|�r�|dk�r^t�n|}t�tjtjtj|d
?d@|d@|�}zXt��}|tjk�r�|dk	�r�tt j!t�"t j!�|tj��nW��qNtjd||d
�|_W5t�|�X||_
�qV�qNnX||_
t�#tj$d|�}zt�%|tj$ddd�}	W5t�|�Xt�&|	�}tjd||d
�|_||_'t(|j�|_)dS)Nrz!'size' must be a positive integerz4'size' must be a positive number different from zeroz&'name' can only be None if create=True)�mode�/�)�register�
shared_memory� l��r)ZtagnameF)*�
ValueError�_O_CREX�os�O_RDWR�_flags�O_EXCL�
_USE_POSIXr�_posixshmemZshm_open�_mode�_fd�FileExistsError�_name�_prepend_leading_slash�	ftruncate�fstat�st_size�mmap�_mmap�OSError�unlink�resource_trackerr�_winapiZCreateFileMappingZINVALID_HANDLE_VALUEZNULLZPAGE_READWRITEZCloseHandleZGetLastErrorZERROR_ALREADY_EXISTS�errnoZEEXIST�strerrorZOpenFileMappingZ
FILE_MAP_READZ
MapViewOfFileZVirtualQuerySize�_size�
memoryview�_buf)
�selfr
�create�sizeZstatsrZ	temp_nameZh_mapZlast_error_codeZp_bufrrr�__init__Is��
�
�

�
��
zSharedMemory.__init__cCs&z|��Wntk
r YnXdS�N)�closer*�r3rrr�__del__�szSharedMemory.__del__cCs|j|jd|jffS)NF)�	__class__r
r5r9rrr�
__reduce__�s��zSharedMemory.__reduce__cCs|jj�d|j�d|j�d�S)N�(z, size=�))r;�__name__r
r5r9rrr�__repr__�szSharedMemory.__repr__cCs|jS)z4A memoryview of contents of the shared memory block.)r2r9rrr�buf�szSharedMemory.bufcCs.|j}tr*|jr*|j�d�r*|jdd�}|S)z4Unique name that identifies the shared memory block.rrN)r#rr$�
startswith)r3Z
reported_namerrrr
�s

zSharedMemory.namecCs|jS)zSize in bytes.)r0r9rrrr5�szSharedMemory.sizecCsX|jdk	r|j��d|_|jdk	r4|j��d|_trT|jdkrTt�|j�d|_dS)zkCloses access to the shared memory from this instance but does
        not destroy the shared memory block.Nrr)r2�releaser)r8rr!rr9rrrr8�s



zSharedMemory.closecCs2tr.|jr.ddlm}t�|j�||jd�dS)z�Requests that the underlying shared memory block be destroyed.

        In order to ensure proper cleanup of resources, unlink should be
        called once (and only once) across all processes which have access
        to the shared memory block.r)�
unregisterrN)rr#r,rDrZ
shm_unlink)r3rDrrrr+�s
zSharedMemory.unlink)NFr)r?�
__module__�__qualname__�__doc__r#r!r)r2rrrr rr$r6r:r<r@�propertyrAr
r5r8r+rrrrr0s(
l




�utf8c@seZdZdZedededededdj	diZ
dZd	d
�dd
�dd
�d
d
�d�Ze
dd��Zd6dd�dd�Zdd�Zdd�Zdd�Zdd�Zdd�Zdd�Zd d!�Zd"d#�Zed$d%��Zed&d'��Zed(d)��Zed*d+��Zed,d-��Zed.d/��Zed0d1��Zd2d3�Z d4d5�Z!dS)7ra�Pattern for a mutable list-like object shareable via a shared
    memory block.  It differs from the built-in list type in that these
    lists can not change their overall length (i.e. no append, insert,
    etc.)

    Because values are packed into a memoryview as bytes, the struct
    packing format for any storable value must require no more than 8
    characters to describe its format.�q�dzxxxxxxx?z%dsNzxxxxxx?x�cCs|Sr7r��valuerrr�<lambda>
�zShareableList.<lambda>cCs|�d��t�S�N�)�rstrip�decode�	_encodingrMrrrrOrPcCs
|�d�SrQ)rSrMrrrrOrPcCsdSr7r)Z_valuerrrrO
rP)rrr�cCs:t|ttdjf�sdSt|t�r$dSt|t�r2dSdSdS)z�Used in concert with _back_transforms_mapping to convert values
        into the appropriate Python objects when retrieving them from
        the list as well as when storing them.NrrrrV)�
isinstance�str�bytesr;rMrrr�_extract_recreation_codes

z&ShareableList._extract_recreation_code�r
csr|dk	rv�fdd�|D�}t|��_t�fdd�|D���_�fdd�|D�}t�d�jd�|��j�j	�}nd}|dk	r�|dkr�t
|��_nt
|d	|d
��_|dk	�rNt�tj
d�j�jjd�jf�j��tj
d�|��jj�jf�fdd�|D���tj
�j�jj�jf�fd
d�|D���tj
�j	�jj�jf|��n t���_t��j�jjd��_dS)NcsPg|]H}t|ttf�s$�jt|�n&�jt|��jt|��jdf�qS)r)rWrXrY�_types_mapping�type�
_alignmentr	��.0�itemr9rr�
<listcomp> s���z*ShareableList.__init__.<locals>.<listcomp>c3s0|](}|ddkr�jnt|dd��VqdS)r�sN)r^�int)r`�fmtr9rr�	<genexpr>*s�z)ShareableList.__init__.<locals>.<genexpr>csg|]}��|��qSr)rZr_r9rrrb.srJ�rLT)r4r5rc3s&|]}t|t�r|���n|VqdSr7)rWrX�encode�r`�v��_encrrrfMsc3s|]}|���VqdSr7)rhrirkrrrfSs)r	�	_list_len�tuple�_allocated_bytes�structZcalcsize�_format_size_metainfo�join�_format_packing_metainfo�_format_back_transform_codesr�shmrU�	pack_intorA�_offset_data_start�_offset_packing_formats�_offset_back_transform_codes�unpack_from)r3Zsequencer
Z_formatsZ_recreation_codesZrequested_sizer)rlr3rr6sz
�
�

�����
��������
�zShareableList.__init__cCsj|dkr|n||j}||jks*|jdkr2td��t�d|jj|j|d�d}|�d�}|�t	�}|S)z>Gets the packing format for a single value stored in the list.r� Requested position out of range.�8srLrR)
rm�
IndexErrorrprzrurArxrSrTrU)r3�positionrjre�
fmt_as_strrrr�_get_packing_formatds��

z!ShareableList._get_packing_formatcCs\|dkr|n||j}||jks*|jdkr2td��t�d|jj|j|�d}|j|}|S)z9Gets the back transformation function for a single value.rr{�b)rmr}rprzrurAry�_back_transforms_mapping)r3r~�transform_codeZtransform_functionrrr�_get_back_transformts��
z!ShareableList._get_back_transformcCs~|dkr|n||j}||jks*|jdkr2td��t�d|jj|j|d|�t��|�	|�}t�d|jj|j
||�dS)zvSets the packing format and back transformation code for a
        single value in the list at the specified position.rr{r|rLr�N)rmr}rprvrurArxrhrUrZry)r3r~rrNr�rrr�!_set_packing_format_and_transform�s �
�z/ShareableList._set_packing_format_and_transformcCsjz6|jt|jd|��}t�|�|�|jj|�\}Wntk
rRtd��YnX|�	|�}||�}|S)Nzindex out of range)
rw�sumrorprzr�rurAr}r�)r3r~�offsetrjZback_transformrrr�__getitem__�s��

zShareableList.__getitem__cCs�z&|jt|jd|��}|�|�}Wntk
rBtd��YnXt|ttf�sf|jt	|�}|}nZt|t�rz|�
t�n|}t|�|j|kr�t
d��|ddkr�|}n|jt|j|f}|�|||�t�||jj||�dS)Nzassignment index out of rangez(bytes/str item exceeds available storagerrc)rwr�ror�r}rWrXrYr\r]rhrUr	rr�rprvrurA)r3r~rNr�Zcurrent_formatZ
new_formatZ
encoded_valuerrr�__setitem__�s6�����zShareableList.__setitem__cCst|j|jjd�dfS)Nr[r)rr;rur
r9rrrr<�szShareableList.__reduce__cCst�d|jjd�dS)NrJr)rprzrurAr9rrr�__len__�szShareableList.__len__cCs"|jj�dt|��d|jj�d�S)Nr=z, name=r>)r;r?�listrur
r9rrrr@�szShareableList.__repr__csd��fdd�t�j�D��S)z>The struct packing format used by all currently stored values.rgc3s|]}��|�VqdSr7)r�)r`�ir9rrrf�sz'ShareableList.format.<locals>.<genexpr>)rr�rangermr9rr9r�format�s�zShareableList.formatcCs|j�d�S)z=The struct packing format used for metainfo on storage sizes.rJ�rmr9rrrrq�sz#ShareableList._format_size_metainfocCs
d|jS)z?The struct packing format used for the values' packing formats.r|r�r9rrrrs�sz&ShareableList._format_packing_metainfocCs
d|jS)z?The struct packing format used for the values' back transforms.r�r�r9rrrrt�sz*ShareableList._format_back_transform_codescCs|jddS)NrrLr�r9rrrrw�sz ShareableList._offset_data_startcCs|jt|j�Sr7)rwr�ror9rrrrx�sz%ShareableList._offset_packing_formatscCs|j|jdS)NrL)rxrmr9rrrry�sz*ShareableList._offset_back_transform_codescst�fdd�|D��S)zCL.count(value) -> integer -- return number of occurrences of value.c3s|]}�|kVqdSr7r)r`�entryrMrrrf�sz&ShareableList.count.<locals>.<genexpr>)r�)r3rNrrMr�count�szShareableList.countcCs4t|�D]\}}||kr|Sqt|�d���dS)zpL.index(value) -> integer -- return first index of value.
        Raises ValueError if the value is not present.z not in this containerN)�	enumerater)r3rNr~r�rrr�index�s
zShareableList.index)N)"r?rErFrGrd�float�boolrXrYr;r\r^r��staticmethodrZr6r�r�r�r�r�r<r�r@rHr�rqrsrtrwrxryr�r�rrrrr�s^
��

F






)rG�__all__�	functoolsrr(rr.rprr
r-rr�O_CREATrrrr
rrrUrrrrr�<module>s,

EPK|��\i6���,�,%__pycache__/util.cpython-38.opt-1.pycnu�[���U

e5d~6�@s�ddlZddlZddlZddlZddlZddlZddlmZddlm	Z	ddddd	d
ddd
ddddddgZ
dZdZdZ
dZdZdZdZdadadd�Zdd�Zdd�Zdd�Zdd	�Zd@d d
�Zd!d"�Zd#d$�Ze�Zd%d&�Zd'd�Ze��Z e�!�Z"d(d)�Z#d*d�Z$iZ%e�!�Z&Gd+d�de'�Z(dAd,d-�Z)d.d
�Z*da+eee)e	j,e	j-fd/d0�Z.e�/e.�Gd1d�de'�Z0Gd2d�dej1�Z2ze�3d3�Z4Wne5k
�r�d4Z4YnXd5d�Z6d6d7�Z7d8d9�Z8d:d;�Z9d<d=�Z:d>d?�Z;dS)B�N)�_args_from_interpreter_flags�)�process�	sub_debug�debug�info�sub_warning�
get_logger�
log_to_stderr�get_temp_dir�register_after_fork�
is_exiting�Finalize�ForkAwareThreadLock�ForkAwareLocal�close_all_fds_except�SUBDEBUG�
SUBWARNING��
���multiprocessingz+[%(levelname)s/%(processName)s] %(message)sFcGstrtjt|f|��dS�N)�_logger�logr��msg�args�r�,/usr/lib64/python3.8/multiprocessing/util.pyr,scGstrtjt|f|��dSr)rr�DEBUGrrrr r0scGstrtjt|f|��dSr)rr�INFOrrrr r4scGstrtjt|f|��dSr)rrrrrrr r8scCs|ddl}|��z\tsj|�t�adt_ttd�rFt�	t
�t�t
�n$tj�
t
dif�tj�t
dif�W5|��XtS)z0
    Returns logger used by multiprocessing
    rN�
unregisterr)�loggingZ_acquireLockZ_releaseLockrZ	getLogger�LOGGER_NAMEZ	propagate�hasattr�atexitr#�_exit_function�registerZ
_exithandlers�remove�append)r$rrr r	<s



cCsJddl}t�}|�t�}|��}|�|�|�|�|rB|�|�dat	S)zB
    Turn on logging and add a handler which prints to stderr
    rNT)
r$r	Z	Formatter�DEFAULT_LOGGING_FORMATZ
StreamHandlerZsetFormatterZ
addHandlerZsetLevel�_log_to_stderrr)�levelr$ZloggerZ	formatterZhandlerrrr r
Ws



cCs tjdkrdSttd�rdSdS)NZlinuxTZgetandroidapilevelF)�sys�platformr&rrrr �#_platform_supports_abstract_socketsls


r1cCs@|sdSt|t�r|ddkSt|t�r4|ddkStd��dS)NFr�z(address type of {address!r} unrecognized)�
isinstance�bytes�str�	TypeError)Zaddressrrr �is_abstract_socket_namespacets

r7cCs&||�t��}|dk	r"d|jd<dS)N�tempdir)r�current_process�_config)�rmtreer8r9rrr �_remove_temp_dir�sr<cCsft��j�d�}|dkrbddl}ddl}|jdd�}td|�tdt	|j
|fdd�|t��jd<|S)Nr8rzpymp-)�prefixzcreated temp directory %si����)r�exitpriority)rr9r:�get�shutil�tempfileZmkdtemprrr<r;)r8r@rArrr r�s
�cCsftt���}|��|D]H\\}}}}z||�Wqtk
r^}ztd|�W5d}~XYqXqdS)Nz after forker raised exception %s)�list�_afterfork_registry�items�sort�	Exceptionr)rD�indexZident�func�obj�errr �_run_after_forkers�srKcCs|ttt�t|�|f<dSr)rC�next�_afterfork_counter�id)rIrHrrr r�sc@sFeZdZdZddd�Zdeeejfdd�Z	dd	�Z
d
d�Zdd
�ZdS)rzA
    Class which supports object finalization using weakrefs
    rNcCs�|dk	r&t|t�s&td�|t|����|dk	r>t�||�|_n|dkrNtd��||_	||_
|p`i|_|tt
�f|_t��|_|t|j<dS)Nz3Exitpriority ({0!r}) must be None or int, not {1!s}z+Without object, exitpriority cannot be None)r3�intr6�format�type�weakref�ref�_weakref�
ValueError�	_callback�_args�_kwargsrL�_finalizer_counter�_key�os�getpid�_pid�_finalizer_registry)�selfrI�callbackr�kwargsr>rrr �__init__�s"��

zFinalize.__init__cCs�z||j=Wntk
r(|d�YnbX|j|�krD|d�d}n$|d|j|j|j�|j|j|j�}d|_|_|_|_|_|SdS)zQ
        Run the callback unless it has already been called or cancelled
        zfinalizer no longer registeredz+finalizer ignored because different processNz/finalizer calling %s with args %s and kwargs %s)rZ�KeyErrorr]rVrWrXrT)r_Zwrr^rr\�resrrr �__call__�s$��zFinalize.__call__cCsDzt|j=Wntk
r Yn Xd|_|_|_|_|_dS)z3
        Cancel finalization of the object
        N)r^rZrcrTrVrWrX�r_rrr �cancel�s��zFinalize.cancelcCs
|jtkS)zS
        Return whether this finalizer is still waiting to invoke callback
        )rZr^rfrrr �still_active�szFinalize.still_activec	Cs�z|��}Wnttfk
r(d}YnX|dkr>d|jjSd|jjt|jd|j�f}|jrr|dt|j�7}|j	r�|dt|j	�7}|j
ddk	r�|dt|j
d�7}|dS)	Nz<%s object, dead>z<%s object, callback=%s�__name__z, args=z	, kwargs=rz, exitpriority=�>)rT�AttributeErrorr6�	__class__ri�getattrrVrWr5rXrZ)r_rI�xrrr �__repr__�s"
�zFinalize.__repr__)rNN)
ri�
__module__�__qualname__�__doc__rbr^rr[r\rergrhrorrrr r�s
�
c	s�tdkrdS�dkrdd��n�fdd���fdd�tt�D�}|jdd�|D]P}t�|�}|dk	rPtd	|�z
|�WqPtk
r�d
dl}|��YqPXqP�dkr�t��dS)z�
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    NcSs|ddk	S�Nrr��prrr �<lambda>�z!_run_finalizers.<locals>.<lambda>cs|ddk	o|d�kSrsrrt)�minpriorityrr rvrwcsg|]}�|�r|�qSrr)�.0�key)�frr �
<listcomp>#sz#_run_finalizers.<locals>.<listcomp>T)�reversez
calling %sr)	r^rBrEr?rrF�	traceback�	print_exc�clear)rx�keysrz�	finalizerr~r)r{rxr �_run_finalizerss$



r�cCstp
tdkS)z6
    Returns true if the process is shutting down
    N)�_exitingrrrr r
8scCs�ts�da|d�|d�|d�|�dk	rr|�D] }|jr0|d|j�|j��q0|�D]}|d|j�|��qX|d�|�dS)NTzprocess shutting downz2running all "atexit" finalizers with priority >= 0rz!calling terminate() for daemon %szcalling join() for process %sz)running the remaining "atexit" finalizers)r�Zdaemon�nameZ_popenZ	terminate�join)rrr��active_childrenr9rurrr r(@s	



r(c@s,eZdZdd�Zdd�Zdd�Zdd�Zd	S)
rcCs|��t|tj�dSr)�_resetrrrfrrr rbqszForkAwareThreadLock.__init__cCs"t��|_|jj|_|jj|_dSr)�	threadingZLock�_lock�acquire�releaserfrrr r�us

zForkAwareThreadLock._resetcCs
|j��Sr)r��	__enter__rfrrr r�zszForkAwareThreadLock.__enter__cGs|jj|�Sr)r��__exit__)r_rrrr r�}szForkAwareThreadLock.__exit__N)rirprqrbr�r�r�rrrr rpsc@seZdZdd�Zdd�ZdS)rcCst|dd��dS)NcSs
|j��Sr)�__dict__r�)rIrrr rv�rwz)ForkAwareLocal.__init__.<locals>.<lambda>)rrfrrr rb�szForkAwareLocal.__init__cCst|�dfS)Nr)rQrfrrr �
__reduce__�szForkAwareLocal.__reduce__N)rirprqrbr�rrrr r�s�SC_OPEN_MAX�cCsNt|�dtg}|��tt|�d�D] }t�||d||d�q(dS)N���r)rB�MAXFDrE�range�lenr[�
closerange)�fds�irrr r�sc	Cs�tjdkrdSztj��Wnttfk
r4YnXz@t�tjtj�}zt|dd�t_Wnt�|��YnXWnttfk
r�YnXdS)NF)�closefd)	r/�stdin�close�OSErrorrUr[�open�devnull�O_RDONLY)�fdrrr �_close_stdin�s

r�c	CsTztj��Wnttfk
r&YnXztj��Wnttfk
rNYnXdSr)r/�stdout�flushrkrU�stderrrrrr �_flush_std_streams�sr�cCsxddl}tttt|���}t��\}}z6|�|t�	|�gd|dddddddd||ddd�W�St�|�t�|�XdS)NrTr�F)
�_posixsubprocess�tuple�sorted�maprOr[�piper�Z	fork_exec�fsencode)�pathrZpassfdsr�Zerrpipe_readZ
errpipe_writerrr �spawnv_passfds�s2
�
r�cGs|D]}t�|�qdS)z/Close each file descriptor given as an argumentN)r[r�)r�r�rrr �	close_fds�sr�cCsZddlm}t��ddlm}|j��ddlm}|j	��t
�|��|��dS)zKCleanup multiprocessing resources when multiprocessing tests
    completed.r)�support)�
forkserver)�resource_trackerN)
Ztestr�rZ_cleanuprr�Z_forkserverZ_stopr�Z_resource_trackerr�Z
gc_collectZ
reap_children)r�r�r�rrr �_cleanup_tests�s

r�)N)N)<r[�	itertoolsr/rRr'r��
subprocessr�r�__all__ZNOTSETrr!r"rr%r,rr-rrrrr	r
r1r7Zabstract_sockets_supportedr<rZWeakValueDictionaryrC�countrMrKrr^rY�objectrr�r
r�r�r9r(r)rZlocalr�sysconfr�rFrr�r�r�r�r�rrrr �<module>
s��

		V
,�
*



PK|��\!���o$o$'__pycache__/queues.cpython-38.opt-2.pycnu�[���U

e5d�-�@s�dddgZddlZddlZddlZddlZddlZddlZddlZddlm	Z	m
Z
ddlZddlm
Z
ddlmZejjZdd	lmZmZmZmZmZGd
d�de�Ze�ZGdd�de�ZGdd�de�ZdS)
�Queue�SimpleQueue�
JoinableQueue�N)�Empty�Full�)�
connection)�context)�debug�info�Finalize�register_after_fork�
is_exitingc@s�eZdZd*dd�Zdd�Zdd�Zdd	�Zd+dd
�Zd,dd�Zdd�Z	dd�Z
dd�Zdd�Zdd�Z
dd�Zdd�Zdd�Zd d!�Zed"d#��Zed$d%��Zed&d'��Zed(d)��ZdS)-rrcCs�|dkrddlm}||_tjdd�\|_|_|��|_t	�
�|_tj
dkrTd|_n
|��|_|�|�|_d|_|��tj
dkr�t|tj�dS)Nrr)�
SEM_VALUE_MAXF�Zduplex�win32)Zsynchronizer�_maxsizer�Pipe�_reader�_writer�Lock�_rlock�os�getpid�_opid�sys�platform�_wlockZBoundedSemaphore�_sem�
_ignore_epipe�_after_forkr
r��self�maxsize�ctx�r%�./usr/lib64/python3.8/multiprocessing/queues.py�__init__$s




zQueue.__init__cCs.t�|�|j|j|j|j|j|j|j|j	fS�N)
r	�assert_spawningrrrrrrrr�r"r%r%r&�__getstate__9s
�zQueue.__getstate__c	Cs0|\|_|_|_|_|_|_|_|_|��dSr()	rrrrrrrrr �r"�stater%r%r&�__setstate__>s�zQueue.__setstate__cCsbtd�t�t���|_t��|_d|_d|_	d|_
d|_d|_|j
j|_|jj|_|jj|_dS)NzQueue._after_fork()F)r
�	threading�	Conditionr�	_notempty�collections�deque�_buffer�_thread�_jointhread�_joincancelled�_closed�_closer�
send_bytes�_send_bytesr�
recv_bytes�_recv_bytes�poll�_pollr*r%r%r&r Cs


zQueue._after_forkTNc	Csf|jrtd|�d���|j�||�s(t�|j�.|jdkrB|��|j�	|�|j�
�W5QRXdS�NzQueue z
 is closed)r8�
ValueErrorr�acquirerr1r5�
_start_threadr4�append�notify�r"�obj�block�timeoutr%r%r&�putPs
z	Queue.putc	Cs�|jrtd|�d���|rH|dkrH|j�|��}W5QRX|j��nr|rXt��|}|j�||�sjt	�zB|r�|t��}|�
|�s�t	�n|�
�s�t	�|��}|j��W5|j��Xt�|�Sr@)
r8rArr=r�release�time�	monotonicrBrr?�_ForkingPickler�loads)r"rHrI�resZdeadliner%r%r&�get\s*
z	Queue.getcCs|j|jj��Sr()rr�_semlockZ
_get_valuer*r%r%r&�qsizevszQueue.qsizecCs
|��Sr(�r?r*r%r%r&�emptyzszQueue.emptycCs|jj��Sr()rrR�_is_zeror*r%r%r&�full}sz
Queue.fullcCs
|�d�S�NF)rQr*r%r%r&�
get_nowait�szQueue.get_nowaitcCs|�|d�SrX)rJ�r"rGr%r%r&�
put_nowait�szQueue.put_nowaitcCs2d|_z|j��W5|j}|r,d|_|�XdS)NT)r8r9r�close)r"r\r%r%r&r\�szQueue.closecCstd�|jr|��dS)NzQueue.join_thread())r
r6r*r%r%r&�join_thread�szQueue.join_threadcCs6td�d|_z|j��Wntk
r0YnXdS)NzQueue.cancel_join_thread()T)r
r7r6Zcancel�AttributeErrorr*r%r%r&�cancel_join_thread�szQueue.cancel_join_threadc
Cs�td�|j��tjtj|j|j|j|j	|j
j|j|j
|jfdd�|_d|j_td�|j��td�|js�t|jtjt�|j�gdd�|_t|tj|j|jgd	d�|_dS)
NzQueue._start_thread()ZQueueFeederThread)�target�args�nameTzdoing self._thread.start()z... done self._thread.start()���)Zexitpriority�
)r
r4�clearr/ZThreadr�_feedr1r;rrr\r�_on_queue_feeder_errorrr5Zdaemon�startr7r�_finalize_join�weakref�refr6�_finalize_closer9r*r%r%r&rC�s<
��
�
�zQueue._start_threadcCs4td�|�}|dk	r(|��td�ntd�dS)Nzjoining queue threadz... queue thread joinedz... queue thread already dead)r
�join)Ztwr�threadr%r%r&ri�s
zQueue._finalize_joinc	Cs.td�|�|�t�|��W5QRXdS)Nztelling queue thread to quit)r
rD�	_sentinelrE)�buffer�notemptyr%r%r&rl�s
zQueue._finalize_closec
CsXtd�|j}|j}	|j}
|j}t}tjdkr<|j}
|j}nd}
z�|�z|sT|
�W5|	�Xzb|�}||kr�td�|�WWdSt�	|�}|
dkr�||�qb|
�z||�W5|�XqbWnt
k
r�YnXWq@tk
�rP}zV|�rt|dd�t
jk�rWY�6dSt��r.td|�WY�dS|��|||�W5d}~XYq@Xq@dS)Nz$starting thread to feed data to piperz%feeder thread got sentinel -- exiting�errnorzerror in queue thread: %s)r
rBrK�wait�popleftrorrrN�dumps�
IndexError�	Exception�getattrrrZEPIPErr)rprqr:Z	writelockr\Zignore_epipe�onerrorZ	queue_semZnacquireZnreleaseZnwaitZbpopleft�sentinelZwacquireZwreleaserG�er%r%r&rf�sN







zQueue._feedcCsddl}|��dS)Nr)�	traceback�	print_exc)r{rGr|r%r%r&rg
szQueue._on_queue_feeder_error)r)TN)TN)�__name__�
__module__�__qualname__r'r+r.r rJrQrSrUrWrYr[r\r]r_rC�staticmethodrirlrfrgr%r%r%r&r"s.



 
	

=c@s@eZdZddd�Zdd�Zdd�Zdd
d�Zdd
�Zdd�Zd	S)rrcCs*tj|||d�|�d�|_|��|_dS)N)r$r)rr'Z	Semaphore�_unfinished_tasksr0�_condr!r%r%r&r'#szJoinableQueue.__init__cCst�|�|j|jfSr()rr+r�r�r*r%r%r&r+(szJoinableQueue.__getstate__cCs,t�||dd��|dd�\|_|_dS)N���)rr.r�r�r,r%r%r&r.+szJoinableQueue.__setstate__TNc
Cs�|jrtd|�d���|j�||�s(t�|j�J|j�8|jdkrJ|��|j	�
|�|j��|j�
�W5QRXW5QRXdSr@)r8rArrBrr1r�r5rCr4rDr�rKrErFr%r%r&rJ/s

zJoinableQueue.putc	Cs@|j�0|j�d�std��|jj��r2|j��W5QRXdS)NFz!task_done() called too many times)r�r�rBrArRrVZ
notify_allr*r%r%r&�	task_done<s
zJoinableQueue.task_donec	Cs,|j�|jj��s|j��W5QRXdSr()r�r�rRrVrsr*r%r%r&rmCszJoinableQueue.join)r)TN)	r~rr�r'r+r.rJr�rmr%r%r%r&r!s


c@s<eZdZdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
S)rcCsHtjdd�\|_|_|��|_|jj|_tj	dkr:d|_
n
|��|_
dS)NFrr)rrrrrrr>r?rrr)r"r$r%r%r&r'Ns


zSimpleQueue.__init__cCs
|��Sr(rTr*r%r%r&rUWszSimpleQueue.emptycCst�|�|j|j|j|jfSr()r	r)rrrrr*r%r%r&r+Zs
zSimpleQueue.__getstate__cCs"|\|_|_|_|_|jj|_dSr()rrrrr>r?r,r%r%r&r.^szSimpleQueue.__setstate__c	Cs&|j�|j��}W5QRXt�|�Sr()rrr<rNrO)r"rPr%r%r&rQbszSimpleQueue.getc	CsDt�|�}|jdkr"|j�|�n|j�|j�|�W5QRXdSr()rNrurrr:rZr%r%r&rJhs


zSimpleQueue.putN)	r~rr�r'rUr+r.rQrJr%r%r%r&rLs	)�__all__rrr/r2rLrjrrZqueuerrZ_multiprocessing�rr	Z	reductionZForkingPicklerrN�utilr
rrr
r�objectrrorrr%r%r%r&�<module>
s$
v
+PK|��\_��w��2__pycache__/popen_spawn_posix.cpython-38.opt-1.pycnu�[���U

e5d��@spddlZddlZddlmZmZddlmZddlmZddlmZdgZ	Gdd	�d	e
�ZGd
d�dej�ZdS)�N�)�	reduction�set_spawning_popen)�
popen_fork)�spawn)�util�Popenc@seZdZdd�Zdd�ZdS)�_DupFdcCs
||_dS�N��fd��selfr�r�9/usr/lib64/python3.8/multiprocessing/popen_spawn_posix.py�__init__sz_DupFd.__init__cCs|jSr
r)rrrr�detachsz
_DupFd.detachN)�__name__�
__module__�__qualname__rrrrrrr	sr	cs4eZdZdZeZ�fdd�Zdd�Zdd�Z�Z	S)rrcsg|_t��|�dSr
)�_fds�superr)r�process_obj��	__class__rrrszPopen.__init__cCs|j�|�|Sr
)r�appendr
rrr�duplicate_for_child"szPopen.duplicate_for_childcCsXddlm}|��}|j�|�t�|j�}t�	�}t
|�zt�||�t�||�W5t
d�Xd}}}}	z~t��\}}t��\}}	tj||d�}|j�||g�t
�t��||j�|_||_t|	ddd��}
|
�|���W5QRXW5g}
||	fD]}|dk	�r|
�|��qt
�|t
j|
�|_||fD]}|dk	�r6t�|��q6XdS)Nr)�resource_tracker)�
tracker_fdZpipe_handle�wbF)�closefd)�rZgetfdrrrZget_preparation_data�_name�io�BytesIOrr�dumprZFinalizeZ	close_fds�	finalizer�os�close�pipeZget_command_line�extendZspawnv_passfdsZget_executable�pid�sentinel�open�write�	getbuffer)rrrrZ	prep_data�fpZparent_rZchild_wZchild_rZparent_wZfds_to_closer�cmd�frrr�_launch&sB
�
�

z
Popen._launch)
rrr�methodr	ZDupFdrrr3�
__classcell__rrrrrs
)
r#r'�contextrrr!rrr�__all__�objectr	rrrrr�<module>s
PK|��\:F6V
V
%__pycache__/popen_fork.cpython-38.pycnu�[���U

e5d
�@s6ddlZddlZddlmZdgZGdd�de�ZdS)�N�)�util�Popenc@s`eZdZdZdd�Zdd�Zejfdd�Zdd	d
�Z	dd�Z
d
d�Zdd�Zdd�Z
dd�ZdS)r�forkcCs"t��d|_d|_|�|�dS�N)rZ_flush_std_streams�
returncode�	finalizer�_launch)�self�process_obj�r�2/usr/lib64/python3.8/multiprocessing/popen_fork.py�__init__szPopen.__init__cCs|Srr)r
�fdrrr
�duplicate_for_childszPopen.duplicate_for_childc
Cs�|jdkr�zt�|j|�\}}Wn(tk
rH}z
WY�dSd}~XYnX||jkr�t�|�rnt�|�|_n$t�|�s�td�	|���t�
|�|_|jS)NzStatus is {:n})r�os�waitpid�pid�OSError�WIFSIGNALED�WTERMSIG�	WIFEXITED�AssertionError�format�WEXITSTATUS)r
�flagr�sts�errr
�polls


z
Popen.pollNcCsN|jdkrH|dk	r0ddlm}||jg|�s0dS|�|dkrBtjnd�S|jS)Nr)�waitg)rZmultiprocessing.connectionr�sentinelrr�WNOHANG)r
�timeoutrrrr
r(s
z
Popen.waitcCsZ|jdkrVzt�|j|�Wn8tk
r0Yn&tk
rT|jdd�dkrP�YnXdS)Ng�������?)r")rr�killr�ProcessLookupErrorrr)r
Zsigrrr
�_send_signal2s
zPopen._send_signalcCs|�tj�dSr)r%�signal�SIGTERM�r
rrr
�	terminate<szPopen.terminatecCs|�tj�dSr)r%r&�SIGKILLr(rrr
r#?sz
Popen.killc	Cs�d}t��\}}t��\}}t��|_|jdkrdz$t�|�t�|�|j|d�}W5t�|�Xn0t�|�t�|�t�|tj	||f�|_
||_dS)Nrr)Zparent_sentinel)r�piperr�_exit�close�
_bootstraprZFinalizeZ	close_fdsrr )r
r�codeZparent_rZchild_wZchild_rZparent_wrrr
r	Bs 






�z
Popen._launchcCs|jdk	r|��dSr)rr(rrr
r-Us
zPopen.close)N)�__name__�
__module__�__qualname__�methodrrrr!rrr%r)r#r	r-rrrr
rs


)rr&�r�__all__�objectrrrrr
�<module>sPK|��\��0

+__pycache__/popen_fork.cpython-38.opt-2.pycnu�[���U

e5d
�@s6ddlZddlZddlmZdgZGdd�de�ZdS)�N�)�util�Popenc@s`eZdZdZdd�Zdd�Zejfdd�Zdd	d
�Z	dd�Z
d
d�Zdd�Zdd�Z
dd�ZdS)r�forkcCs"t��d|_d|_|�|�dS�N)rZ_flush_std_streams�
returncode�	finalizer�_launch)�self�process_obj�r�2/usr/lib64/python3.8/multiprocessing/popen_fork.py�__init__szPopen.__init__cCs|Srr)r
�fdrrr
�duplicate_for_childszPopen.duplicate_for_childc
Cs�|jdkrzzt�|j|�\}}Wn(tk
rH}z
WY�dSd}~XYnX||jkrzt�|�rnt�|�|_nt�|�|_|jSr)r�os�waitpid�pid�OSError�WIFSIGNALED�WTERMSIG�WEXITSTATUS)r
�flagr�sts�errr
�polls


z
Popen.pollNcCsN|jdkrH|dk	r0ddlm}||jg|�s0dS|�|dkrBtjnd�S|jS)Nr)�waitg)rZmultiprocessing.connectionr�sentinelrr�WNOHANG)r
�timeoutrrrr
r(s
z
Popen.waitcCsZ|jdkrVzt�|j|�Wn8tk
r0Yn&tk
rT|jdd�dkrP�YnXdS)Ng�������?)r)rr�killr�ProcessLookupErrorrr)r
Zsigrrr
�_send_signal2s
zPopen._send_signalcCs|�tj�dSr)r"�signal�SIGTERM�r
rrr
�	terminate<szPopen.terminatecCs|�tj�dSr)r"r#�SIGKILLr%rrr
r ?sz
Popen.killc	Cs�d}t��\}}t��\}}t��|_|jdkrdz$t�|�t�|�|j|d�}W5t�|�Xn0t�|�t�|�t�|tj	||f�|_
||_dS)Nrr)Zparent_sentinel)r�piperr�_exit�close�
_bootstraprZFinalizeZ	close_fdsrr)r
r�codeZparent_rZchild_wZchild_rZparent_wrrr
r	Bs 






�z
Popen._launchcCs|jdk	r|��dSr)rr%rrr
r*Us
zPopen.close)N)�__name__�
__module__�__qualname__�methodrrrrrrr"r&r r	r*rrrr
rs


)rr#�r�__all__�objectrrrrr
�<module>sPK|��\Q
x7��*__pycache__/resource_sharer.cpython-38.pycnu�[���U

e5d��@s�ddlZddlZddlZddlZddlZddlmZddlmZddlm	Z	dgZ
ejdkrxe
dg7Z
Gd	d�de�Z
ne
d
g7Z
Gdd
�d
e�ZGdd
�d
e�Ze�ZejZdS)�N�)�process)�	reduction)�util�stopZwin32�	DupSocketc@s eZdZdZdd�Zdd�ZdS)rzPicklable wrapper for a socket.cs(|����fdd�}t�|�j�|_dS)Ncs��|�}|�|�dS�N)�shareZ
send_bytes)�conn�pidr	�Znew_sock��7/usr/lib64/python3.8/multiprocessing/resource_sharer.py�sends
z DupSocket.__init__.<locals>.send)�dup�_resource_sharer�register�close�_id)�selfZsockrr
rr�__init__szDupSocket.__init__c
Cs6t�|j�� }|��}t�|�W5QR�SQRXdS)z1Get the socket.  This should only be called once.N)r�get_connectionrZ
recv_bytes�socketZ	fromshare)rr
r	r
r
r�detach$szDupSocket.detachN��__name__�
__module__�__qualname__�__doc__rrr
r
r
rrs�DupFdc@s eZdZdZdd�Zdd�ZdS)rz-Wrapper for fd which can be used at any time.cs4t�|���fdd�}�fdd�}t�||�|_dS)Ncst�|�|�dSr)rZsend_handle)r
r�Znew_fdr
rr1szDupFd.__init__.<locals>.sendcst���dSr)�osrr
r r
rr3szDupFd.__init__.<locals>.close)r!rrrr)r�fdrrr
r rr/s
zDupFd.__init__c
Cs.t�|j��}t�|�W5QR�SQRXdS)z-Get the fd.  This should only be called once.N)rrrrZrecv_handle)rr
r
r
rr7szDupFd.detachNrr
r
r
rr-sc@sNeZdZdZdd�Zdd�Zedd��Zdd	d
�Zdd�Z	d
d�Z
dd�ZdS)�_ResourceSharerz.Manager for resources using background thread.cCs@d|_i|_g|_t��|_d|_d|_d|_t	�
|tj�dS)Nr)
�_key�_cache�
_old_locks�	threading�Lock�_lock�	_listener�_address�_threadrZregister_after_forkr#�
_afterfork)rr
r
rr?s
z_ResourceSharer.__init__c
CsZ|j�J|jdkr|��|jd7_||f|j|j<|j|jfW5QR�SQRXdS)z+Register resource, returning an identifier.Nr)r)r+�_startr$r%)rrrr
r
rrIs
z_ResourceSharer.registercCs<ddlm}|\}}||t��jd�}|�|t��f�|S)z<Return connection from which to receive identified resource.r��Client��authkey)�
connectionr0r�current_processr2rr!�getpid)Zidentr0�address�key�cr
r
rrRs
z_ResourceSharer.get_connectionNc	Cs�ddlm}|j��|jdk	r�||jt��jd�}|�d�|��|j	�
|�|j	��rdt�
d�|j��d|_	d|_d|_|j��D]\}\}}|�q�|j��W5QRXdS)z:Stop the background thread and clear registered resources.rr/Nr1z._ResourceSharer thread did not stop when asked)r3r0r)r+rr4r2rrr,�joinZis_aliverZsub_warningr*r%�items�clear)rZtimeoutr0r8r7rrr
r
rr[s$
�



z_ResourceSharer.stopcCsj|j��D]\}\}}|�q
|j��|j�|j�t��|_|jdk	rT|j�	�d|_d|_
d|_dSr)r%r:r;r&�appendr)r'r(r*rr+r,)rr7rrr
r
rr-ps



z_ResourceSharer._afterforkcCsjddlm}|jdkstd��t�d�|t��jd�|_|jj	|_
tj|j
d�}d|_|��||_dS)Nr)�ListenerzAlready have Listenerz0starting listener and thread for sending handlesr1)�targetT)r3r=r*�AssertionErrorr�debugrr4r2r6r+r'ZThread�_serveZdaemon�startr,)rr=�tr
r
rr.~s

z_ResourceSharer._startc	Cs�ttd�rt�tjt���zh|j���T}|��}|dkrHW5QR�Wq�|\}}|j�	|�\}}z|||�W5|�XW5QRXWqt
��s�tj
t���YqXqdS)N�pthread_sigmask)�hasattr�signalrD�	SIG_BLOCK�
valid_signalsr*ZacceptZrecvr%�poprZ
is_exiting�sys�
excepthook�exc_info)rr
�msgr7Zdestination_pidrrr
r
rrA�s
z_ResourceSharer._serve)N)rrrrrr�staticmethodrrr-r.rAr
r
r
rr#=s
	

r#)r!rFrrJr'�r�contextrr�__all__�platform�objectrrr#rrr
r
r
r�<module>s 


`PK|��\�}�y�2�2"__pycache__/context.cpython-38.pycnu�[���U

e5d�+�@s�ddlZddlZddlZddlmZddlmZdZGdd�de�ZGdd	�d	e�Z	Gd
d�de�Z
Gdd
�d
e�ZGdd�de�Z
Gdd�dej�ZGdd�de
�Zejdk�rRGdd�dej�ZGdd�dej�ZGdd�dej�ZGdd�de
�ZGdd�de
�ZGdd �d e
�Ze�e�e�d!�Zejd"k�rDeed#�Zneed$�Zn8Gd%d�dej�ZGd&d�de
�Zd#e�iZeed#�Zd'd(�Ze��Zd)d*�Zd+d,�Zd-d.�ZdS)/�N�)�process)�	reduction�c@seZdZdS)�ProcessErrorN��__name__�
__module__�__qualname__rrr�//usr/lib64/python3.8/multiprocessing/context.pyrsrc@seZdZdS)�BufferTooShortNrrrrrrsrc@seZdZdS)�TimeoutErrorNrrrrrr
sr
c@seZdZdS)�AuthenticationErrorNrrrrrrsrc@sXeZdZeZeZeZeZeej	�Z	eej
�Z
eej�Zdd�Zdd�Z
dCdd�Zdd	�Zd
d�ZdDd
d�ZdEdd�ZdFdd�Zdd�ZdGdd�ZdHdd�ZdIdd�Zdd�ZdJd d!�Zd"d#�Zd$d%�Zdd&�d'd(�Zdd&�d)d*�Zd+d,�Zd-d.�ZdKd/d0�Z d1d2�Z!d3d4�Z"d5d6�Z#dLd7d8�Z$dMd:d;�Z%dNd<d=�Z&e'd>d?��Z(e(j)d@d?��Z(dAdB�Z*dS)O�BaseContextcCs"t��}|dkrtd��n|SdS)z(Returns the number of CPUs in the systemNzcannot determine number of cpus)�os�	cpu_count�NotImplementedError)�selfZnumrrrr)s
zBaseContext.cpu_countcCs&ddlm}||��d�}|��|S)z�Returns a manager associated with a running server process

        The managers methods such as `Lock()`, `Condition()` and `Queue()`
        can be used to create shared objects.
        r)�SyncManager��ctx)Zmanagersr�get_context�start)rr�mrrr�Manager1szBaseContext.ManagerTcCsddlm}||�S)z1Returns two connection object connected by a piper)�Pipe)�
connectionr)rZduplexrrrrr<szBaseContext.PipecCsddlm}||��d�S)z#Returns a non-recursive lock objectr)�Lockr)�synchronizerr)rrrrrrAszBaseContext.LockcCsddlm}||��d�S)zReturns a recursive lock objectr)�RLockr)rrr)rrrrrrFszBaseContext.RLockNcCsddlm}|||��d�S)zReturns a condition objectr)�	Conditionr)rr r)r�lockr rrrr KszBaseContext.ConditionrcCsddlm}|||��d�S)zReturns a semaphore objectr)�	Semaphorer)rr"r)r�valuer"rrrr"PszBaseContext.SemaphorecCsddlm}|||��d�S)z"Returns a bounded semaphore objectr)�BoundedSemaphorer)rr$r)rr#r$rrrr$UszBaseContext.BoundedSemaphorecCsddlm}||��d�S)zReturns an event objectr)�Eventr)rr%r)rr%rrrr%ZszBaseContext.EventcCs ddlm}|||||��d�S)zReturns a barrier objectr)�Barrierr)rr&r)rZparties�actionZtimeoutr&rrrr&_szBaseContext.BarrierrcCsddlm}|||��d�S)�Returns a queue objectr)�Queuer)�queuesr)r)r�maxsizer)rrrr)dszBaseContext.QueuecCsddlm}|||��d�S)r(r)�
JoinableQueuer)r*r,r)rr+r,rrrr,iszBaseContext.JoinableQueuecCsddlm}||��d�S)r(r)�SimpleQueuer)r*r-r)rr-rrrr-nszBaseContext.SimpleQueuercCs"ddlm}||||||��d�S)zReturns a process pool objectr)�Pool)�context)Zpoolr.r)rZ	processesZinitializerZinitargsZmaxtasksperchildr.rrrr.ss
�zBaseContext.PoolcGsddlm}||f|��S)zReturns a shared objectr)�RawValue)�sharedctypesr0)r�typecode_or_type�argsr0rrrr0zszBaseContext.RawValuecCsddlm}|||�S)zReturns a shared arrayr)�RawArray)r1r4)rr2�size_or_initializerr4rrrr4szBaseContext.RawArray)r!cGs&ddlm}||f|�||��d��S)z$Returns a synchronized shared objectr)�Value�r!r)r1r6r)rr2r!r3r6rrrr6�s�zBaseContext.ValuecCs ddlm}|||||��d�S)z#Returns a synchronized shared arrayr)�Arrayr7)r1r8r)rr2r5r!r8rrrr8�s�zBaseContext.ArraycCs,tjdkr(ttdd�r(ddlm}|�dS)z�Check whether this is a fake forked process in a frozen executable.
        If so then run code specified by commandline and exit.
        �win32�frozenFr)�freeze_supportN)�sys�platform�getattr�spawnr;)rr;rrrr;�szBaseContext.freeze_supportcCsddlm}|�S)zZReturn package logger -- if it does not already exist then
        it is created.
        r)�
get_logger)�utilr@)rr@rrrr@�szBaseContext.get_loggercCsddlm}||�S)z8Turn on logging and add a handler which prints to stderrr)�
log_to_stderr)rArB)r�levelrBrrrrB�szBaseContext.log_to_stderrcCsddlm}dS)zVInstall support for sending connections and sockets
        between processes
        r)rN)�r)rrrrr�allow_connection_pickling�sz%BaseContext.allow_connection_picklingcCsddlm}||�dS)z�Sets the path to a python.exe or pythonw.exe binary used to run
        child processes instead of sys.executable when using the 'spawn'
        start method.  Useful for people embedding Python.
        r)�set_executableN)r?rF)r�
executablerFrrrrF�szBaseContext.set_executablecCsddlm}||�dS)zkSet list of module names to try to load in forkserver process.
        This is really just a hint.
        r)�set_forkserver_preloadN)�
forkserverrH)rZmodule_namesrHrrrrH�sz"BaseContext.set_forkserver_preloadcCsH|dkr|Szt|}Wn"tk
r:td|�d�YnX|��|S)Nzcannot find context for %r)�_concrete_contexts�KeyError�
ValueError�_check_available)r�methodrrrrr�szBaseContext.get_contextFcCs|jS�N)�_name�rZ
allow_nonerrr�get_start_method�szBaseContext.get_start_methodcCstd��dS)Nz+cannot set start method of concrete context)rL�rrNZforcerrr�set_start_method�szBaseContext.set_start_methodcCst��d�S)z_Controls how objects will be reduced to a form that can be
        shared with other processes.r)�globals�get�rrrr�reducer�szBaseContext.reducercCs|t�d<dS)Nr)rU)rrrrrrX�scCsdSrOrrWrrrrM�szBaseContext._check_available)T)N)r)r)NN)r)r)NNrN)N)N)F)F)+rr	r
rrr
r�staticmethodrZcurrent_processZparent_processZactive_childrenrrrrrr r"r$r%r&r)r,r-r.r0r4r6r8r;r@rBrErFrHrrRrT�propertyrX�setterrMrrrrrsR









�







rc@seZdZdZedd��ZdS)�ProcessNcCst��j�|�SrO)�_default_contextrr\�_Popen)�process_objrrrr^�szProcess._Popen�rr	r
Z
_start_methodrYr^rrrrr\�sr\csFeZdZeZdd�Zd
�fdd�	Zddd�Zdd	d
�Zdd�Z�Z	S)�DefaultContextcCs||_d|_dSrO)r]�_actual_context)rr/rrr�__init__�szDefaultContext.__init__Ncs0|dkr |jdkr|j|_|jSt��|�SdSrO)rbr]�superr)rrN��	__class__rrr�s

zDefaultContext.get_contextFcCs<|jdk	r|std��|dkr,|r,d|_dS|�|�|_dS)Nzcontext has already been set)rb�RuntimeErrorrrSrrrrT�szDefaultContext.set_start_methodcCs"|jdkr|rdS|j|_|jjSrO)rbr]rPrQrrrrR�s

zDefaultContext.get_start_methodcCsBtjdkrdgStjdkr"ddgnddg}tjr:|�d�|SdS)Nr9r?�darwin�forkrI)r<r=r�HAVE_SEND_HANDLE�append)r�methodsrrr�get_all_start_methodss

z$DefaultContext.get_all_start_methods)N)F)F)
rr	r
r\rcrrTrRrm�
__classcell__rrrerra�s

rar9c@seZdZdZedd��ZdS)�ForkProcessricCsddlm}||�S�Nr)�Popen)Z
popen_forkrq�r_rqrrrr^szForkProcess._PopenNr`rrrrrosroc@seZdZdZedd��ZdS)�SpawnProcessr?cCsddlm}||�Srp)Zpopen_spawn_posixrqrrrrrr^s�SpawnProcess._PopenNr`rrrrrssrsc@seZdZdZedd��ZdS)�ForkServerProcessrIcCsddlm}||�Srp)Zpopen_forkserverrqrrrrrr^ szForkServerProcess._PopenNr`rrrrrusruc@seZdZdZeZdS)�ForkContextriN)rr	r
rPror\rrrrrv%srvc@seZdZdZeZdS��SpawnContextr?N�rr	r
rPrsr\rrrrrx)srxc@seZdZdZeZdd�ZdS)�ForkServerContextrIcCstjstd��dS)Nz%forkserver start method not available)rrjrLrWrrrrM0sz"ForkServerContext._check_availableN)rr	r
rPrur\rMrrrrrz-srz)rir?rIrhr?ric@seZdZdZedd��ZdS)rsr?cCsddlm}||�Srp)Zpopen_spawn_win32rqrrrrrr^DsrtNr`rrrrrsBsc@seZdZdZeZdSrwryrrrrrxIscCst|t_dSrO)rJr]rb)rNrrr�_force_start_methodVsr{cCsttdd�S)N�spawning_popen)r>�_tlsrrrr�get_spawning_popen_sr~cCs
|t_dSrO)r}r|)�popenrrr�set_spawning_popenbsr�cCs t�dkrtdt|�j��dS)NzF%s objects should only be shared between processes through inheritance)r~rg�typer)�objrrr�assert_spawninges
��r�) rr<Z	threadingrDrr�__all__�	Exceptionrrr
r�objectrZBaseProcessr\rar=rorsrurvrxrzrJr]r{Zlocalr}r~r�r�rrrr�<module>sL?,��PK|��\|�V���)__pycache__/managers.cpython-38.opt-2.pycnu�[���U

e5d��@sBdddddgZddlZddlZddlZddlZddlZddlZddlZddlmZddl	m
Z
d	d
lmZd	dl
mZmZmZd	dlmZd	d
lmZd	dlmZd	dlmZzd	dlmZdZWnek
r�dZYnXdd�Ze�eje�dd�dD�Zedek	�r.dd�ZeD]Ze�ee��qGdd�de�Zdifdd�Z dd�Z!Gd d!�d!e"�Z#d"d#�Z$d$d%�Z%Gd&d'�d'e�Z&Gd(d)�d)e�Z'ej(ej)fej*ej+fd*�Z,Gd+d�de�Z-Gd,d-�d-e.�Z/Gd.d�de�Z0d/d0�Z1ifd1d2�Z2dld3d4�Z3Gd5d6�d6e�Z4Gd7d8�d8e�Z5dmd9d:�Z6Gd;d<�d<e0�Z7Gd=d>�d>e0�Z8Gd?d@�d@e8�Z9GdAdB�dBe0�Z:GdCdD�dDe0�Z;GdEdF�dFe0�Z<GdGdH�dHe0�Z=e2dIdJ�Z>GdKdL�dLe>�Z?e2dMdN�Z@dOdPie@_Ae2dQdR�ZBe2dSdT�ZCdUdUdUdPdPdV�eC_AGdWdS�dSeC�ZDGdXd�de-�ZEeE�dYejF�eE�dZejF�eE�d[ejGe:�eE�d\ejHe8�eE�d]ejIe8�eE�d^ejJe8�eE�d_ejKe8�eE�d`ejLe9�eE�daejMe;�eE�dbejNeD�eE�dcee?�eE�ddeOe@�eE�d8e5e=�eE�d:e6eB�eE�d6e4e<�eEjdPe7dde�eEjdUddf�e�r>Gdgdh�dh�ZPGdidj�dje&�ZQGdkd�de-�ZRdS)n�BaseManager�SyncManager�	BaseProxy�Token�SharedMemoryManager�N)�getpid)�
format_exc�)�
connection)�	reduction�get_spawning_popen�ProcessError)�pool)�process)�util)�get_context)�
shared_memoryTFcCstj|j|��ffS�N)�array�typecode�tobytes)�a�r�0/usr/lib64/python3.8/multiprocessing/managers.py�reduce_array-srcCsg|]}tti|����qSr)�type�getattr��.0�namerrr�
<listcomp>1sr )�items�keys�valuescCstt|�ffSr)�list��objrrr�rebuild_as_list3sr'c@s0eZdZdZdd�Zdd�Zdd�Zdd	�Zd
S)r��typeid�address�idcCs||||_|_|_dSrr()�selfr)r*r+rrr�__init__BszToken.__init__cCs|j|j|jfSrr(�r,rrr�__getstate__EszToken.__getstate__cCs|\|_|_|_dSrr(�r,�staterrr�__setstate__HszToken.__setstate__cCsd|jj|j|j|jfS)Nz %s(typeid=%r, address=%r, id=%r))�	__class__�__name__r)r*r+r.rrr�__repr__Ks�zToken.__repr__N)r4�
__module__�__qualname__�	__slots__r-r/r2r5rrrrr<s
rcCs8|�||||f�|��\}}|dkr*|St||��dS)N�#RETURN)�send�recv�convert_to_error)�cr+�
methodname�args�kwds�kind�resultrrr�dispatchSs
rCcCsd|dkr|S|dkrRt|t�s4td�||t|����|dkrHtd|�St|�Sntd�|��SdS)N�#ERROR)�
#TRACEBACK�#UNSERIALIZABLEz.Result {0!r} (kind '{1}') type is {2}, not strrFzUnserializable message: %s
zUnrecognized message type {!r})�
isinstance�str�	TypeError�formatr�RemoteError�
ValueError)rArBrrrr<]s
��
r<c@seZdZdd�ZdS)rKcCsdt|jd�dS)NzM
---------------------------------------------------------------------------
rzK---------------------------------------------------------------------------)rHr?r.rrr�__str__mszRemoteError.__str__N)r4r6r7rMrrrrrKlsrKcCs2g}t|�D] }t||�}t|�r|�|�q|Sr)�dirr�callable�append)r&�tempr�funcrrr�all_methodsts
rScCsdd�t|�D�S)NcSsg|]}|ddkr|�qS)r�_rrrrrr �sz"public_methods.<locals>.<listcomp>)rSr%rrr�public_methodssrUc	@s�eZdZddddddddd	g	Zd
d�Zdd
�Zdd�Zdd�Zdd�Zdd�Z	dd�Z
dd�Ze
ee	d�Zdd�Z
dd�Zdd �Zd!d"�Zd#d$�Zd%e_d&d'�Zd(d)�Zd*d+�Zd,d-�Zd.S)/�Server�shutdown�create�accept_connection�get_methods�
debug_info�number_of_objects�dummy�incref�decrefcCsxt|t�std�|t|����||_t�|�|_t	|\}}||dd�|_
|j
j|_ddi|_i|_
i|_t��|_dS)Nz&Authkey {0!r} is type {1!s}, not bytes�)r*Zbacklog�0�Nr)rG�bytesrIrJr�registryr�AuthenticationString�authkey�listener_client�listenerr*�	id_to_obj�id_to_refcount�id_to_local_proxy_obj�	threading�Lock�mutex)r,rdr*rf�
serializer�Listener�Clientrrrr-�s 
��

zServer.__init__c	Cs�t��|_|t��_zVtj|jd�}d|_|��z|j��sL|j�d�q4Wnttfk
rfYnXW5tjtjkr�t	�
d�tjt_tjt_t�
d�XdS)Nzresetting stdout, stderrr)�targetTr	)rl�Event�
stop_eventr�current_process�_manager_server�sys�stdout�
__stdout__r�debug�
__stderr__�stderr�exit�Thread�accepter�daemon�start�is_set�wait�KeyboardInterrupt�
SystemExit)r,rrrr�
serve_forever�s 




zServer.serve_forevercCsNz|j��}Wntk
r&YqYnXtj|j|fd�}d|_|��qdS)N�rrr?T)rhZaccept�OSErrorrlr~�handle_requestr�r�)r,r=�trrrr�s
zServer.acceptercCs4d}}}z>t�||j�t�||j�|��}|\}}}}t||�}Wntk
rhdt�f}	Yn>Xz||f|�|�}Wntk
r�dt�f}	Yn
Xd|f}	z|�|	�Wnrtk
�r&}
zRz|�dt�f�Wntk
r�YnXt	�
d|	�t	�
d|�t	�
d|
�W5d}
~
XYnX|��dS)NrEr9zFailure to send message: %rz ... request was %r� ... exception was %r)r
Zdeliver_challengerfZanswer_challenger;r�	Exceptionrr:r�info�close)r,r=�funcnamerB�request�ignorer?r@rR�msg�errrr��s2zServer.handle_requestc
Cs�t�dt��j�|j}|j}|j}|j�	��s�zBd}}|�}|\}}}	}
z||\}}}Wn^t
k
r�}
z@z|j|\}}}Wn&t
k
r�}z|
�W5d}~XYnXW5d}
~
XYnX||kr�td|t
|�|f��t||�}z||	|
�}Wn,tk
�r"}zd|f}W5d}~XYnPX|�o4|�|d�}|�rj|�|||�\}}t||j|�}d||ff}nd|f}Wn�tk
�r�|dk�r�dt�f}nNz,|j|}|||||f|	�|
�}d|f}Wn tk
�r�dt�f}YnXYnPtk
�rt�dt��j�t�d�Yn tk
�r<dt�f}YnXzDz||�Wn2tk
�r~}z|d	t�f�W5d}~XYnXWq$tk
�r�}z@t�d
t��j�t�d|�t�d|�|��t�d
�W5d}~XYq$Xq$dS)Nz$starting server thread to service %rz+method %r of %r object is not in exposed=%rrD�#PROXYr9rEz$got EOF -- exiting thread serving %rrrFzexception in thread serving %rz ... message was %rr�r	)rrzrl�current_threadrr;r:rirtr��KeyErrorrk�AttributeErrorrrr��getrXrr*r�fallback_mapping�EOFErrorrwr}r�r�)r,�connr;r:rir>r&r��identr?r@�exposed�	gettypeid�keZ	second_keZfunction�resr�r�r)ZridentZrexposed�tokenZ
fallback_funcrBrrr�serve_client�s���(��


����$�zServer.serve_clientcCs|Srr�r,r�r�r&rrr�fallback_getvalue5szServer.fallback_getvaluecCst|�Sr�rHr�rrr�fallback_str8szServer.fallback_strcCst|�Sr)�reprr�rrr�
fallback_repr;szServer.fallback_repr)rMr5�	#GETVALUEcCsdSrr�r,r=rrrr]DszServer.dummyc
Cs�|j�tg}t|j���}|��|D]<}|dkr&|�d||j|t|j|d�dd�f�q&d�|�W5QR�SQRXdS)Nraz  %s:       refcount=%s
    %sr�K�
)	rnr$rjr"�sortrPrHri�join)r,r=rBr"r�rrrr[Gs
��zServer.debug_infocCs
t|j�Sr)�lenrjr�rrrr\WszServer.number_of_objectscCsLz:zt�d�|�d�Wnddl}|��YnXW5|j��XdS)Nz!manager received shutdown message�r9Nr)rt�setrrzr:�	traceback�	print_exc)r,r=r�rrrrW^s
zServer.shutdownc	Os�t|�dkr|^}}}}n�|s(td��n�d|krDtdt|�d��|�d�}t|�dkr~|^}}}ddl}|jdtdd	�nFd
|kr�tdt|�d��|�d
�}|^}}ddl}|jdtdd	�t|�}|j��|j|\}}}}	|dk�r|�st|�dk�rt	d��|d}
n
|||�}
|dk�r2t
|
�}|dk	�rlt|t��s\td
�
|t|����t|�t|�}dt|
�}t�d||�|
t|�|f|j|<||jk�r�d|j|<W5QRX|�||�|t|�fS)N�z8descriptor 'create' of 'Server' object needs an argumentr)�7create expected at least 2 positional arguments, got %dr	�rz2Passing 'typeid' as keyword argument is deprecated)�
stacklevelr=z-Passing 'c' as keyword argument is deprecatedz4Without callable, must have one non-keyword argumentz,Method_to_typeid {0!r}: type {1!s}, not dictz%xz&%r callable returned object with id %r)r�rI�pop�warnings�warn�DeprecationWarning�tuplernrdrLrUrG�dictrJrr$r+rrzr�rirjr^)r?r@r,r=r)r�rOr��method_to_typeid�	proxytyper&r�rrrrXksp

�

�
�
��

�



��z
Server.createz$($self, c, typeid, /, *args, **kwds)cCst|j|jd�S)Nr	)r�rir+)r,r=r�rrrrZ�szServer.get_methodscCs"|t��_|�d�|�|�dS)Nr�)rlr�rr:r�)r,r=rrrrrY�s

zServer.accept_connectioncCs�|j��z|j|d7<Wnhtk
r�}zJ||jkrrd|j|<|j||j|<|j|\}}}t�d|�n|�W5d}~XYnXW5QRXdS)Nr	z&Server re-enabled tracking & INCREF %r)rnrjr�rkrirrz)r,r=r�r�r&r�r�rrrr^�s

�z
Server.increfc	Cs�||jkr$||jkr$t�d|�dS|j�Z|j|dkrXtd�||j||j|���|j|d8<|j|dkr�|j|=W5QRX||jkr�d|j|<t�d|�|j�|j|=W5QRXdS)NzServer DECREF skipping %rrz+Id {0!s} ({1!r}) has refcount {2:n}, not 1+r	)NrNzdisposing of obj with id %r)rjrkrrzrn�AssertionErrorrJri)r,r=r�rrrr_�s,
���

z
Server.decrefN)r4r6r7�publicr-r�rr�r�r�r�r�r�r]r[r\rWrX�__text_signature__rZrYr^r_rrrrrV�s:�
"Q�
=rVc@seZdZdgZdZdZdZdS)�State�valuerr	r�N)r4r6r7r8�INITIAL�STARTED�SHUTDOWNrrrrr��sr�)�pickleZ	xmlrpclibc@s�eZdZiZeZd!dd�Zdd�Zdd�Zd"d
d�Z	e
d#dd
��Zdd�Zd$dd�Z
dd�Zdd�Zdd�Zdd�Zedd��Zedd��Ze
d%dd ��ZdS)&rNr�cCs\|dkrt��j}||_t�|�|_t�|_tj|j_	||_
t|\|_|_
|pTt�|_dSr)rrurf�_addressre�_authkeyr��_stater�r��_serializerrgZ	_Listener�_Clientr�_ctx)r,r*rfroZctxrrrr-s

zBaseManager.__init__cCsf|jjtjkrP|jjtjkr&td��n*|jjtjkr>td��ntd�|jj���t|j	|j
|j|j�S)N�Already started server�Manager has shut down�Unknown state {!r})
r�r�r�r�r�r
r�rJrV�	_registryr�r�r�r.rrr�
get_servers

�
�zBaseManager.get_servercCs8t|j\}}||j|jd�}t|dd�tj|j_dS)N�rfr])	rgr�r�r�rCr�r�r�r�)r,rprqr�rrr�connectszBaseManager.connectrc	Cs4|jjtjkrP|jjtjkr&td��n*|jjtjkr>td��ntd�|jj���|dk	rht|�sht	d��t
jdd�\}}|jj
t|�j|j|j|j|j|||fd�|_d�d	d
�|jjD��}t|�jd||j_|j��|��|��|_|��tj|j_tj|t|�j|j|j|j|j|jfdd
�|_ dS)Nr�r�r�zinitializer must be a callableF)Zduplexr��:css|]}t|�VqdSrr�)r�irrr�	<genexpr>Asz$BaseManager.start.<locals>.<genexpr>�-r�r?Zexitpriority)!r�r�r�r�r�r
r�rJrOrIr
ZPiper�ZProcessr�_run_serverr�r�r�r��_processr�Z	_identityr4rr�r�r;r�Finalize�_finalize_managerr�rW)r,�initializer�initargs�reader�writerr�rrrr�(sH

���


��zBaseManager.startc	Cs^t�tjtj�|dk	r ||�|�||||�}|�|j�|��t�d|j�|�	�dS)Nzmanager serving at %r)
�signal�SIGINT�SIG_IGN�_Serverr:r*r�rr�r�)	�clsrdr*rfror�r�r��serverrrrr�SszBaseManager._run_servercOsN|j|j|jd�}zt|dd|f||�\}}W5|��Xt||j|�|fS)Nr�rX)r�r�r�r�rCr)r,r)r?r@r�r+r�rrr�_createjs

zBaseManager._createcCs*|jdk	r&|j�|�|j��s&d|_dSr)r�r��is_alive�r,�timeoutrrrr�vs

zBaseManager.joincCs2|j|j|jd�}zt|dd�W�S|��XdS)Nr�r[�r�r�r�r�rC�r,r�rrr�_debug_infoszBaseManager._debug_infocCs2|j|j|jd�}zt|dd�W�S|��XdS)Nr�r\r�r�rrr�_number_of_objects�szBaseManager._number_of_objectscCsj|jjtjkr|��|jjtjkrf|jjtjkr<td��n*|jjtjkrTtd��ntd�|jj���|S)NzUnable to start serverr�r�)	r�r�r�r�r�r�r
r�rJr.rrr�	__enter__�s

�zBaseManager.__enter__cCs|��dSr)rW�r,�exc_typeZexc_valZexc_tbrrr�__exit__�szBaseManager.__exit__cCs�|��r�t�d�z,|||d�}zt|dd�W5|��XWntk
rRYnX|jdd�|��r�t�d�t|d�r�t�d�|��|jd	d�|��r�t�d
�t	j
|_ztj
|=Wntk
r�YnXdS)Nz#sending shutdown message to managerr�rWg�?)r�zmanager still alive�	terminatez'trying to `terminate()` manager processg�������?z#manager still alive after terminate)r�rr�r�rCr�r��hasattrr�r�r�r�r�_address_to_localr�)rr*rfr1r�r�rrrr��s.




zBaseManager._finalize_managercCs|jSr)r�r.rrrr*�szBaseManager.addressTc
s�d|jkr|j��|_�dkr"t�|p0t�dd�}|p@t�dd�}|r\t|���D]\}}qR|||�f|j�<|r���fdd�}	�|	_t|�|	�dS)Nr��	_exposed_�_method_to_typeid_cs`t�d��|j�f|�|�\}}�||j||j|d�}|j|j|jd�}t|dd|jf�|S)Nz)requesting creation of a shared %r object��managerrfr�r�r_)	rrzr�r�r�r�r*rCr+)r,r?r@r�Zexp�proxyr��r�r)rrrQ�s�z"BaseManager.register.<locals>.temp)	�__dict__r��copy�	AutoProxyrr$r!r4�setattr)
r�r)rOr�r�r��
create_method�keyr�rQrr�r�register�s(

��

zBaseManager.register)NNr�N)Nr)Nr)N)NNNNT)r4r6r7r�rVr�r-r�r�r��classmethodr�r�r�r�r�r�r��staticmethodr��propertyr*rrrrrr�s6�
	
+�
	




�c@seZdZdd�Zdd�ZdS)�ProcessLocalSetcCst�|dd��dS)NcSs|��Sr)�clearr%rrr�<lambda>��z*ProcessLocalSet.__init__.<locals>.<lambda>)r�register_after_forkr.rrrr-�szProcessLocalSet.__init__cCst|�dfSrb)rr.rrr�
__reduce__�szProcessLocalSet.__reduce__N)r4r6r7r-r
rrrrr�src@s|eZdZiZe��Zddd�Zdd�Zdifd	d
�Z	dd�Z
d
d�Zedd��Z
dd�Zdd�Zdd�Zdd�Zdd�ZdS)rNTFc		Cs�tj�8tj�|jd�}|dkr:t��t�f}|tj|j<W5QRX|d|_|d|_	||_
|j
j|_||_
||_t|d|_||_|dk	r�t�|�|_n"|j
dk	r�|j
j|_nt��j|_|r�|��t�|tj�dS)Nrr	)r�_mutexr�r�r*rZForkAwareLocalr�_tls�_idset�_tokenr+�_id�_managerr�rgr��_owned_by_managerrrer�rurf�_increfr�_after_fork)	r,r�ror�rfr�r^�
manager_ownedZ	tls_idsetrrrr-s*



zBaseProxy.__init__cCsdt�d�t��j}t��jdkr4|dt��j7}|j|jj	|j
d�}t|dd|f�||j_
dS)Nzmaking connection to managerZ
MainThread�|r�rY)rrzrrurrlr�r�rr*r�rCrr
)r,rr�rrr�_connect-s

zBaseProxy._connectrcCs�z|jj}Wn6tk
rBt�dt��j�|��|jj}YnX|�	|j
|||f�|��\}}|dkrp|S|dkr�|\}}|jj
|jd}	|jj|_|	||j|j|j|d�}
|j|j|jd�}t|dd|jf�|
St||��dS)Nz#thread %r does not own a connectionr9r����r�r�r_)rr
r�rrzrlr�rrr:rr;rr�r)rr*r�r�r�rCr+r<)r,r>r?r@r�rArBr�r�r�r�rrr�_callmethod6s6�
�zBaseProxy._callmethodcCs
|�d�S)Nr��rr.rrr�	_getvalueTszBaseProxy._getvaluec	Cs�|jrt�d|jj�dS|j|jj|jd�}t|dd|j	f�t�d|jj�|j
�|j	�|joj|jj
}tj|tj|j|j||j|j
|jfdd�|_dS)Nz%owned_by_manager skipped INCREF of %rr�r^z	INCREF %r�
r�)rrrzrr+r�r*r�rCrr�addrr�r�r�_decrefrZ_close)r,r�r1rrrrZs$
��zBaseProxy._increfc
Cs�|�|j�|dks |jtjkr�z2t�d|j�||j|d�}t|dd|jf�Wq�t	k
r�}zt�d|�W5d}~XYq�Xnt�d|j�|s�t
|d�r�t�dt��j
�|j��|`dS)Nz	DECREF %rr�r_z... decref failed %sz%DECREF %r -- manager already shutdownr
z-thread %r has no more proxies so closing conn)�discardr+r�r�r�rrzr*rCr�r�rlr�rr
r�)r�rfr1ZtlsZidsetr�r�r�rrrr ns �
zBaseProxy._decrefc
CsHd|_z|��Wn0tk
rB}zt�d|�W5d}~XYnXdS)Nzincref failed: %s)rrr�rr�)r,r�rrrr�s
zBaseProxy._after_forkcCs^i}t�dk	r|j|d<t|dd�rB|j|d<tt|j|j|ffStt|�|j|j|ffSdS)Nrf�_isautoFr�)	rr�rr��RebuildProxyrrr�r�r,r@rrrr
�s


��zBaseProxy.__reduce__cCs|��Sr)r)r,Zmemorrr�__deepcopy__�szBaseProxy.__deepcopy__cCsdt|�j|jjt|�fS)Nz<%s object, typeid %r at %#x>)rr4rr)r+r.rrrr5�s�zBaseProxy.__repr__cCs:z|�d�WStk
r4t|�dd�dYSXdS)Nr5rz; '__str__()' failed>)rr�r�r.rrrrM�szBaseProxy.__str__)NNNTF)r4r6r7r�rZForkAwareThreadLockrr-rrrrrr rr
r%r5rMrrrrr�s&�
)	

cCs�tt��dd�}|rT|j|jkrTt�d|�d|d<|j|jkrT|j|j|j|j<|�	dd�optt��dd�}|||fd|i|��S)Nrvz*Rebuild a proxy owned by manager, token=%rTrr^Z_inheritingF)
rrrur*rrzr+rkrir�)rRr�ror@r�r^rrrr#�s
�
�r#cCspt|�}z|||fWStk
r*YnXi}|D]}td||f|�q4t|tf|�}||_||||f<|S)NzOdef %s(self, /, *args, **kwds):
        return self._callmethod(%r, args, kwds))r�r��execrrr�)rr��_cacheZdicZmeth�	ProxyTyperrr�
MakeProxyType�s ��r)c
Cs�t|d}|dkrB||j|d�}zt|dd|f�}W5|��X|dkrX|dk	rX|j}|dkrjt��j}td|j	|�}||||||d�}	d|	_
|	S)Nr	r�rZz
AutoProxy[%s])r�rfr^T)rgr*r�rCr�rrurfr)r)r")
r�ror�rfr�r^r�r�r(r�rrrr�s 


�rc@seZdZdd�Zdd�ZdS)�	NamespacecKs|j�|�dSr)r��updater$rrrr-�szNamespace.__init__cCsZt|j���}g}|D]$\}}|�d�s|�d||f�q|��d|jjd�|�fS)NrTz%s=%rz%s(%s)z, )	r$r�r!�
startswithrPr�r3r4r�)r,r!rQrr�rrrr5�s
zNamespace.__repr__N)r4r6r7r-r5rrrrr*�sr*c@s8eZdZddd�Zdd�Zdd�Zdd	�Zeee�Zd
S)�ValueTcCs||_||_dSr)�	_typecode�_value)r,rr��lockrrrr-szValue.__init__cCs|jSr�r/r.rrrr�sz	Value.getcCs
||_dSrr1�r,r�rrrr�
sz	Value.setcCsdt|�j|j|jfS)Nz
%s(%r, %r))rr4r.r/r.rrrr5szValue.__repr__N)T)	r4r6r7r-r�r�r5rr�rrrrr-s

r-cCst�||�Sr)r)r�sequencer0rrr�Arraysr4c@s8eZdZdZdd�Zdd�Zdd�Zdd	�Zd
d�ZdS)
�
IteratorProxy)�__next__r:�throwr�cCs|Srrr.rrr�__iter__szIteratorProxy.__iter__cGs|�d|�S)Nr6r�r,r?rrrr6szIteratorProxy.__next__cGs|�d|�S)Nr:rr9rrrr:szIteratorProxy.sendcGs|�d|�S)Nr7rr9rrrr7szIteratorProxy.throwcGs|�d|�S)Nr�rr9rrrr�!szIteratorProxy.closeN)	r4r6r7r�r8r6r:r7r�rrrrr5sr5c@s2eZdZdZddd�Zdd�Zdd	�Zd
d�ZdS)
�
AcquirerProxy)�acquire�releaseTNcCs"|dkr|fn||f}|�d|�S�Nr;r)r,Zblockingr�r?rrrr;'szAcquirerProxy.acquirecCs
|�d�S�Nr<rr.rrrr<*szAcquirerProxy.releasecCs
|�d�Sr=rr.rrrr�,szAcquirerProxy.__enter__cCs
|�d�Sr>rr�rrrr�.szAcquirerProxy.__exit__)TN)r4r6r7r�r;r<r�r�rrrrr:%s

r:c@s6eZdZdZddd�Zd
dd�Zdd	�Zdd
d�ZdS)�ConditionProxy)r;r<r��notify�
notify_allNcCs|�d|f�S�Nr�rr�rrrr�4szConditionProxy.waitr	cCs|�d|f�S)Nr@r)r,�nrrrr@6szConditionProxy.notifycCs
|�d�S)NrArr.rrrrA8szConditionProxy.notify_allcCsd|�}|r|S|dk	r$t��|}nd}d}|s`|dk	rN|t��}|dkrNq`|�|�|�}q,|S)Nr)�time�	monotonicr�)r,Z	predicater�rBZendtimeZwaittimerrr�wait_for:s
zConditionProxy.wait_for)N)r	)N)r4r6r7r�r�r@rArFrrrrr?2s


r?c@s2eZdZdZdd�Zdd�Zdd�Zdd	d
�ZdS)�
EventProxy)r�r�r	r�cCs
|�d�S)Nr�rr.rrrr�OszEventProxy.is_setcCs
|�d�S�Nr�rr.rrrr�QszEventProxy.setcCs
|�d�S)Nr	rr.rrrr	SszEventProxy.clearNcCs|�d|f�SrBrr�rrrr�UszEventProxy.wait)N)r4r6r7r�r�r�r	r�rrrrrGMs
rGc@sNeZdZdZddd�Zdd�Zdd�Zed	d
��Zedd��Z	ed
d��Z
dS)�BarrierProxy)�__getattribute__r��abort�resetNcCs|�d|f�SrBrr�rrrr�[szBarrierProxy.waitcCs
|�d�S)NrKrr.rrrrK]szBarrierProxy.abortcCs
|�d�S)NrLrr.rrrrL_szBarrierProxy.resetcCs|�dd�S)NrJ)�partiesrr.rrrrMaszBarrierProxy.partiescCs|�dd�S)NrJ)�	n_waitingrr.rrrrNdszBarrierProxy.n_waitingcCs|�dd�S)NrJ)�brokenrr.rrrrOgszBarrierProxy.broken)N)r4r6r7r�r�rKrLrrMrNrOrrrrrIYs


rIc@s(eZdZdZdd�Zdd�Zdd�ZdS)	�NamespaceProxy)rJ�__setattr__�__delattr__cCs0|ddkrt�||�St�|d�}|d|f�S)NrrTrrJ)�objectrJ�r,r�
callmethodrrr�__getattr__nszNamespaceProxy.__getattr__cCs4|ddkrt�|||�St�|d�}|d||f�S)NrrTrrQ)rSrQrJ)r,rr�rUrrrrQsszNamespaceProxy.__setattr__cCs0|ddkrt�||�St�|d�}|d|f�S)NrrTrrR)rSrRrJrTrrrrRxszNamespaceProxy.__delattr__N)r4r6r7r�rVrQrRrrrrrPlsrPc@s*eZdZdZdd�Zdd�Zeee�ZdS)�
ValueProxy)r�r�cCs
|�d�S)Nr�rr.rrrr��szValueProxy.getcCs|�d|f�SrHrr2rrrr��szValueProxy.setN)r4r6r7r�r�r�rr�rrrrrWsrW�
BaseListProxy)�__add__�__contains__�__delitem__�__getitem__�__len__�__mul__�__reversed__�__rmul__�__setitem__rP�count�extend�index�insertr��remove�reverser��__imul__c@seZdZdd�Zdd�ZdS)�	ListProxycCs|�d|f�|S)Nrcrr2rrr�__iadd__�szListProxy.__iadd__cCs|�d|f�|S)Nrhrr2rrrrh�szListProxy.__imul__N)r4r6r7rjrhrrrrri�sri�	DictProxy)rZr[r\r8r]rar	r�r�r!r"r��popitem�
setdefaultr+r#r8�Iterator�
ArrayProxy)r]r\ra�	PoolProxy)Zapply�apply_asyncr��imap�imap_unorderedr��map�	map_async�starmap�
starmap_asyncr�ZAsyncResult)rqrurwrrrsc@seZdZdd�Zdd�ZdS)rpcCs|Srrr.rrrr��szPoolProxy.__enter__cCs|��dSr)r�r�rrrr��szPoolProxy.__exit__N)r4r6r7r�r�rrrrrp�sc@seZdZdS)rN)r4r6r7rrrrr�s�QueueZ
JoinableQueuersrm�RLock�	Semaphore�BoundedSemaphore�	Condition�Barrier�Poolr$r�)r�r)rc@sHeZdZgfdd�Zdd�Zdd�Zdd�Zd	d
�Zdd�Zd
d�Z	dS)�_SharedMemoryTrackercCs||_||_dSr�Zshared_memory_context_name�
segment_names)r,rr�rrrr-�sz_SharedMemoryTracker.__init__cCs(t�d|�dt����|j�|�dS)NzRegister segment � in pid )rrzrr�rP�r,�segment_namerrr�register_segment�sz%_SharedMemoryTracker.register_segmentcCsBt�d|�dt����|j�|�t�|�}|��|��dS)NzDestroy segment r�)	rrzrr�rfr�SharedMemoryr��unlink)r,r�Zsegmentrrr�destroy_segment�s

z$_SharedMemoryTracker.destroy_segmentcCs"|jdd�D]}|�|�qdSr)r�r�r�rrrr��sz_SharedMemoryTracker.unlinkcCs(t�d|jj�dt����|��dS)NzCall z.__del__ in )rrzr3r4rr�r.rrr�__del__�sz_SharedMemoryTracker.__del__cCs|j|jfSrr�r.rrrr/�sz!_SharedMemoryTracker.__getstate__cCs|j|�dSr)r-r0rrrr2sz!_SharedMemoryTracker.__setstate__N)
r4r6r7r-r�r�r�r�r/r2rrrrr�s	rc@sReZdZejdddgZdd�Zdd�Zde_d	d
�Zdd�Z	d
d�Z
dd�ZdS)�SharedMemoryServer�
track_segment�release_segment�
list_segmentscOsZtj|f|�|�|j}t|t�r,t�|�}td|�dt����|_	t
�dt����dS)NZshm_rTz"SharedMemoryServer started by pid )rVr-r*rGrc�os�fsdecoderr�shared_memory_contextrrz)r,r?�kwargsr*rrrr-
s

�zSharedMemoryServer.__init__cOstt|�dkr|d}n4d|kr(|d}n"|s6td��ntdt|�d��ttj|dd�rhtj|d	<tj||�S)
Nr�r�r)zDdescriptor 'create' of 'SharedMemoryServer' object needs an argumentr�r	rZ_shared_memory_proxyr�)r�rIr�r,rdr�rVrX)r?r�Ztypeodr)rrrrXs



�
zSharedMemoryServer.createz&($self, c, typeid, /, *args, **kwargs)cCs|j��t�||�Sr)r�r�rVrWr�rrrrW)s
zSharedMemoryServer.shutdowncCs|j�|�dSr)r�r��r,r=r�rrrr�.sz SharedMemoryServer.track_segmentcCs|j�|�dSr)r�r�r�rrrr�2sz"SharedMemoryServer.release_segmentcCs|jjSr)r�r�r�rrrr�7sz SharedMemoryServer.list_segmentsN)r4r6r7rVr�r-rXr�rWr�r�r�rrrrr�s�
r�c@s8eZdZeZdd�Zdd�Zdd�Zdd�Zd	d
�Z	dS)rcOsNtjdkrddlm}|��tj|f|�|�t�|j	j
�dt����dS)N�posixr	)�resource_trackerz created by pid )r�r�r�Zensure_runningrr-rrzr3r4r)r,r?r�r�rrrr-Is

zSharedMemoryManager.__init__cCst�|jj�dt����dS)Nz.__del__ by pid )rrzr3r4rr.rrrr�UszSharedMemoryManager.__del__cCsh|jjtjkrP|jjtjkr&td��n*|jjtjkr>td��ntd�|jj���|�|j	|j
|j|j�S)Nz"Already started SharedMemoryServerz!SharedMemoryManager has shut downr�)
r�r�r�r�r�r
r�rJr�r�r�r�r�r.rrrr�Ys

��zSharedMemoryManager.get_servercCsx|j|j|jd��\}tjdd|d�}zt|dd|jf�Wn.tk
rh}z|��|�W5d}~XYnXW5QRX|S)Nr�T)rX�sizer�)	r�r�r�rr�rCr�
BaseExceptionr�)r,r�r�Zsmsr�rrrr�fs z SharedMemoryManager.SharedMemorycCsv|j|j|jd��Z}t�|�}zt|dd|jjf�Wn0tk
rf}z|j�	�|�W5d}~XYnXW5QRX|S)Nr�r�)
r�r�r�r�
ShareableListrCZshmrr�r�)r,r3r�Zslr�rrrr�rs

 z!SharedMemoryManager.ShareableListN)
r4r6r7r�r�r-r�r�r�r�rrrrr=s

)NNNT)T)S�__all__rwrlr�rZqueuerDr�rr�rr�r
�contextrrr
rrrrrZ	HAS_SHMEM�ImportErrorrrZ
view_typesr$r'Z	view_typerSrrCr<r�rKrSrUrVr�rprqZXmlListenerZ	XmlClientrgrr�rrr#r)rr*r-r4r5r:r?rGrIrPrWrXrirkr�roZ
BasePoolProxyrprrxrsrmryrzr{r|r}r~r�rr�rrrrr�<module>s��


c

�	w
4�


	
	
�

�

�%8PK|��\5��}�a�a%__pycache__/pool.cpython-38.opt-1.pycnu�[���U

e5d�~�@sdddgZddlZddlZddlZddlZddlZddlZddlZddlZddlm	Z	ddl
mZddl
mZm
Z
ddlmZd	Zd
ZdZdZe��Zd
d�Zdd�ZGdd�de�ZGdd�d�Zdd�ZGdd�de�Zd+dd�Zdd�ZGdd �d e�Z Gd!d�de!�Z"Gd"d#�d#e!�Z#e#Z$Gd$d%�d%e#�Z%Gd&d'�d'e!�Z&Gd(d)�d)e&�Z'Gd*d�de"�Z(dS),�Pool�
ThreadPool�N)�Empty�)�util)�get_context�TimeoutError)�wait�INIT�RUN�CLOSE�	TERMINATEcCstt|��S�N)�list�map��args�r�,/usr/lib64/python3.8/multiprocessing/pool.py�mapstar/srcCstt�|d|d��S)Nrr)r�	itertools�starmaprrrr�starmapstar2src@seZdZdd�Zdd�ZdS)�RemoteTracebackcCs
||_dSr��tb)�selfrrrr�__init__:szRemoteTraceback.__init__cCs|jSrr�rrrr�__str__<szRemoteTraceback.__str__N)�__name__�
__module__�__qualname__rrrrrrr9src@seZdZdd�Zdd�ZdS)�ExceptionWithTracebackcCs0t�t|�||�}d�|�}||_d||_dS)N�z

"""
%s""")�	traceback�format_exception�type�join�excr)rr)rrrrr@s
zExceptionWithTraceback.__init__cCst|j|jffSr)�rebuild_excr)rrrrr�
__reduce__Esz!ExceptionWithTraceback.__reduce__N)r r!r"rr+rrrrr#?sr#cCst|�|_|Sr)r�	__cause__)r)rrrrr*Hs
r*cs0eZdZdZ�fdd�Zdd�Zdd�Z�ZS)�MaybeEncodingErrorzVWraps possible unpickleable errors, so they can be
    safely sent through the socket.cs.t|�|_t|�|_tt|��|j|j�dSr)�reprr)�value�superr-r)rr)r/��	__class__rrrTs

zMaybeEncodingError.__init__cCsd|j|jfS)Nz(Error sending result: '%s'. Reason: '%s')r/r)rrrrrYs�zMaybeEncodingError.__str__cCsd|jj|fS)Nz<%s: %s>)r2r rrrr�__repr__]szMaybeEncodingError.__repr__)r r!r"�__doc__rrr3�
__classcell__rrr1rr-Psr-rFc
Cs�|dk	r(t|t�r|dks(td�|���|j}|j}t|d�rR|j��|j	��|dk	rb||�d}|dks~|�r�||k�r�z
|�}	Wn(t
tfk
r�t�
d�Y�q�YnX|	dkr�t�
d��q�|	\}
}}}
}zd||
|�f}WnHtk
�r0}z(|�r|tk	�rt||j�}d|f}W5d}~XYnXz||
||f�WnRtk
�r�}z2t||d�}t�
d	|�||
|d|ff�W5d}~XYnXd}	}
}}}
}|d7}qft�
d
|�dS)NrzMaxtasks {!r} is not valid�_writerrz)worker got EOFError or OSError -- exitingzworker got sentinel -- exitingTFz0Possible encoding error while sending result: %szworker exiting after %d tasks)�
isinstance�int�AssertionError�format�put�get�hasattrr6�close�_reader�EOFError�OSErrorr�debug�	Exception�_helper_reraises_exceptionr#�
__traceback__r-)�inqueue�outqueue�initializer�initargsZmaxtasks�wrap_exceptionr;r<Z	completed�task�job�i�funcr�kwds�result�e�wrappedrrr�workerasN�





�$
rScCs|�dS)z@Pickle-able helper function for use by _guarded_task_generation.Nr)ZexrrrrD�srDcs2eZdZdZdd��fdd�
Z�fdd�Z�ZS)�
_PoolCachez�
    Class that implements a cache for the Pool class that will notify
    the pool management threads every time the cache is emptied. The
    notification is done by the use of a queue that is provided when
    instantiating the cache.
    N��notifiercs||_t�j||�dSr)rVr0r)rrVrrOr1rrr�sz_PoolCache.__init__cs t��|�|s|j�d�dSr)r0�__delitem__rVr;)r�itemr1rrrW�sz_PoolCache.__delitem__)r r!r"r4rrWr5rrr1rrT�srTc@s�eZdZdZdZedd��ZdLdd�Zej	e
fd	d
�Zdd�Zd
d�Z
edd��Zedd��Zdd�Zedd��Zedd��Zdd�Zdd�Zdifdd�ZdMdd �ZdNd!d"�ZdOd#d$�Zd%d&�ZdPd(d)�ZdQd*d+�Zdiddfd,d-�ZdRd.d/�ZdSd0d1�ZedTd2d3��Ze d4d5��Z!ed6d7��Z"ed8d9��Z#ed:d;��Z$d<d=�Z%d>d?�Z&d@dA�Z'dBdC�Z(edDdE��Z)e dFdG��Z*dHdI�Z+dJdK�Z,dS)UrzS
    Class which supports an async version of applying functions to arguments.
    TcOs|j||�Sr��Process)�ctxrrOrrrrZ�szPool.ProcessNrcCsg|_t|_|pt�|_|��t��|_|j��|_	t
|j	d�|_||_||_
||_|dkrjt��phd}|dkrztd��|dk	r�t|�s�td��||_z|��WnHtk
r�|jD]}|jdkr�|��q�|jD]}|��q؂YnX|��}tjtj|j|j|j|j|j|j|j |j!|j
|j|j|j"||j	fd�|_#d|j#_$t%|j#_|j#�&�tjtj'|j|j(|j!|j|jfd�|_)d|j)_$t%|j)_|j)�&�tjtj*|j!|j+|jfd�|_,d|j,_$t%|j,_|j,�&�t-j.||j/|j|j |j!|j|j	|j#|j)|j,|jf	dd�|_0t%|_dS)	NrUrz&Number of processes must be at least 1zinitializer must be a callable��targetrT�)rZexitpriority)1�_poolr
�_stater�_ctx�
_setup_queues�queue�SimpleQueue�
_taskqueue�_change_notifierrT�_cache�_maxtasksperchild�_initializer�	_initargs�os�	cpu_count�
ValueError�callable�	TypeError�
_processes�_repopulate_poolrC�exitcode�	terminater(�_get_sentinels�	threadingZThreadr�_handle_workersrZ�_inqueue�	_outqueue�_wrap_exception�_worker_handler�daemonr�start�
_handle_tasks�
_quick_put�
_task_handler�_handle_results�
_quick_get�_result_handlerrZFinalize�_terminate_pool�
_terminate)r�	processesrHrI�maxtasksperchild�context�p�	sentinelsrrrr�s�





��
��
�
��z
Pool.__init__cCs>|j|kr:|d|��t|d�t|dd�dk	r:|j�d�dS)Nz&unclosed running multiprocessing pool )�sourcerf)r`�ResourceWarning�getattrrfr;)rZ_warnrrrr�__del__s

�zPool.__del__c	Cs0|j}d|j�d|j�d|j�dt|j��d�	S)N�<�.z state=z pool_size=�>)r2r!r"r`�lenr_)r�clsrrrr3sz
Pool.__repr__cCs|jjg}|jjg}||�Sr)rxr?rf)rZtask_queue_sentinelsZself_notifier_sentinelsrrrrts

zPool._get_sentinelscCsdd�|D�S)NcSsg|]}t|d�r|j�qS)�sentinel)r=r�)�.0rSrrr�
<listcomp>s
�z.Pool._get_worker_sentinels.<locals>.<listcomp>r�Zworkersrrr�_get_worker_sentinelss�zPool._get_worker_sentinelscCsPd}ttt|���D]6}||}|jdk	rt�d|�|��d}||=q|S)z�Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        FN�cleaning up worker %dT)�reversed�ranger�rrrrBr()�poolZcleanedrMrSrrr�_join_exited_workerss
zPool._join_exited_workerscCs0|�|j|j|j|j|j|j|j|j|j	|j
�
Sr)�_repopulate_pool_staticrarZrpr_rwrxrirjrhryrrrrrq.s�zPool._repopulate_poolc

Csft|t|��D]P}
||t||||||	fd�}|j�dd�|_d|_|��|�|�t�	d�qdS)z�Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        r\rZZ
PoolWorkerTzadded workerN)
r�r�rS�name�replacer{r|�appendrrB)r[rZr�r�rFrGrHrIr�rJrM�wrrrr�7s��
zPool._repopulate_pool_staticc

Cs*t�|�r&t�||||||||||	�
dS)zEClean up any exited workers and start replacements for them.
        N)rr�r�)
r[rZr�r�rFrGrHrIr�rJrrr�_maintain_poolJs
�zPool._maintain_poolcCs4|j��|_|j��|_|jjj|_|jjj|_	dSr)
rardrwrxr6�sendr~r?�recvr�rrrrrbVszPool._setup_queuescCs|jtkrtd��dS)NzPool not running)r`rrmrrrr�_check_running\s
zPool._check_runningcCs|�|||���S)zT
        Equivalent of `func(*args, **kwds)`.
        Pool must be running.
        )�apply_asyncr<)rrNrrOrrr�apply`sz
Pool.applycCs|�||t|���S)zx
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        )�
_map_asyncrr<�rrN�iterable�	chunksizerrrrgszPool.mapcCs|�||t|���S)z�
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        )r�rr<r�rrrrnszPool.starmapcCs|�||t|||�S)z=
        Asynchronous version of `starmap()` method.
        )r�r�rrNr�r��callback�error_callbackrrr�
starmap_asyncvs�zPool.starmap_asyncc
csjz,d}t|�D]\}}||||fifVqWn8tk
rd}z||dt|fifVW5d}~XYnXdS)z�Provides a generator of tasks for imap and imap_unordered with
        appropriate handling for iterables which throw exceptions during
        iteration.���rN)�	enumeraterCrD)rZ
result_jobrNr�rM�xrQrrr�_guarded_task_generation~szPool._guarded_task_generationrcCs�|��|dkr:t|�}|j�|�|j||�|jf�|S|dkrPtd�|���t	�
|||�}t|�}|j�|�|jt|�|jf�dd�|D�SdS)zP
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        rzChunksize must be 1+, not {0:n}css|]}|D]
}|Vq
qdSrr�r��chunkrXrrr�	<genexpr>�szPool.imap.<locals>.<genexpr>N)r��IMapIteratorrer;r��_job�_set_lengthrmr:r�
_get_tasksr�rrNr�r�rP�task_batchesrrr�imap�s4�������z	Pool.imapcCs�|��|dkr:t|�}|j�|�|j||�|jf�|S|dkrPtd�|���t	�
|||�}t|�}|j�|�|jt|�|jf�dd�|D�SdS)zL
        Like `imap()` method but ordering of results is arbitrary.
        rzChunksize must be 1+, not {0!r}css|]}|D]
}|Vq
qdSrrr�rrrr��sz&Pool.imap_unordered.<locals>.<genexpr>N)r��IMapUnorderedIteratorrer;r�r�r�rmr:rr�rr�rrr�imap_unordered�s0������zPool.imap_unorderedcCs6|��t|||�}|j�|jd|||fgdf�|S)z;
        Asynchronous version of `apply()` method.
        rN)r��ApplyResultrer;r�)rrNrrOr�r�rPrrrr��szPool.apply_asynccCs|�||t|||�S)z9
        Asynchronous version of `map()` method.
        )r�rr�rrr�	map_async�s�zPool.map_asyncc
Cs�|��t|d�st|�}|dkrJtt|�t|j�d�\}}|rJ|d7}t|�dkrZd}t�|||�}t||t|�||d�}	|j	�
|�|	j||�df�|	S)zY
        Helper function to implement map, starmap and their async counterparts.
        �__len__N�rr�r�)
r�r=r�divmodr�r_rr��	MapResultrer;r�r�)
rrNr�Zmapperr�r�r�Zextrar�rPrrrr��s,
����zPool._map_asynccCs"t||d�|��s|��qdS)N)�timeout)r	�emptyr<)r��change_notifierr�rrr�_wait_for_updates�szPool._wait_for_updatescCspt��}|jtks |rX|jtkrX|�|||||||	|
||�
|�|�|
�}|�||�q|�d�t	�
d�dS)Nzworker handler exiting)ru�current_threadr`rr
r�r�r�r;rrB)r��cache�	taskqueuer[rZr�r�rFrGrHrIr�rJr�r��threadZcurrent_sentinelsrrrrv�s�
zPool._handle_workersc

Cspt��}t|jd�D]�\}}d}z�|D]�}|jtkrBt�d�q�z||�Wq&tk
r�}
zB|dd�\}	}z||	�	|d|
f�Wnt
k
r�YnXW5d}
~
XYq&Xq&|r�t�d�|r�|dnd}||d�W�qW�
�q
W5d}}}	Xqt�d�z6t�d�|�d�t�d	�|D]}|d��q.Wn tk
�r`t�d
�YnXt�d�dS)Nz'task handler found thread._state != RUN�Fzdoing set_length()rr�ztask handler got sentinelz/task handler sending sentinel to result handlerz(task handler sending sentinel to workersz/task handler got OSError when sending sentinelsztask handler exiting)
rur��iterr<r`rrrBrC�_set�KeyErrorr;rA)
r�r;rGr�r�r�ZtaskseqZ
set_lengthrKrLrQ�idxr�rrrr}sB






zPool._handle_tasksc	Cs�t��}z
|�}Wn$ttfk
r6t�d�YdSX|jtkrNt�d�q�|dkrbt�d�q�|\}}}z||�||�Wnt	k
r�YnXd}}}q|�r@|jt
k�r@z
|�}Wn$ttfk
r�t�d�YdSX|dk�r�t�d�q�|\}}}z||�||�Wnt	k
�r0YnXd}}}q�t|d��r�t�d�z,td�D]}|j
���sv�q�|��q`Wnttfk
�r�YnXt�dt|�|j�dS)	Nz.result handler got EOFError/OSError -- exitingz,result handler found thread._state=TERMINATEzresult handler got sentinelz&result handler ignoring extra sentinelr?z"ensuring that outqueue is not full�
z7result handler exiting: len(cache)=%s, thread._state=%s)rur�rAr@rrBr`rr�r�r
r=r�r?�pollr�)rGr<r�r�rKrLrM�objrrrr�:s\











�zPool._handle_resultsccs0t|�}tt�||��}|s dS||fVqdSr)r��tupler�islice)rN�it�sizer�rrrr�vs
zPool._get_taskscCstd��dS)Nz:pool objects cannot be passed between processes or pickled)�NotImplementedErrorrrrrr+s�zPool.__reduce__cCs2t�d�|jtkr.t|_t|j_|j�d�dS)Nzclosing pool)rrBr`rrrzrfr;rrrrr>�s


z
Pool.closecCst�d�t|_|��dS)Nzterminating pool)rrBr
r`r�rrrrrs�s
zPool.terminatecCsjt�d�|jtkrtd��n|jttfkr4td��|j��|j	��|j
��|jD]}|��qXdS)Nzjoining poolzPool is still runningzIn unknown state)rrBr`rrmrr
rzr(rr�r_)rr�rrrr(�s






z	Pool.joincCs@t�d�|j��|��r<|j��r<|j��t�	d�qdS)Nz7removing tasks from inqueue until task handler finishedr)
rrBZ_rlock�acquire�is_aliver?r�r��time�sleep)rF�task_handlerr�rrr�_help_stuff_finish�s



zPool._help_stuff_finishc
CsXt�d�t|_|�d�t|_t�d�|�||t|��|��sXt|	�dkrXtd��t|_|�d�|�d�t�d�t	�
�|k	r�|��|r�t|dd�r�t�d�|D]}
|
j
dkr�|
��q�t�d�t	�
�|k	r�|��t�d	�t	�
�|k	�r|��|�rTt|dd��rTt�d
�|D](}
|
���r*t�d|
j�|
���q*dS)Nzfinalizing poolz&helping task handler/workers to finishrz.Cannot have cache with result_hander not alivezjoining worker handlerrszterminating workerszjoining task handlerzjoining result handlerzjoining pool workersr�)rrBr
r`r;r�r�r�r9rur�r(r=rrrs�pid)r�r�rFrGr�r�Zworker_handlerr�Zresult_handlerr�r�rrrr��sB


�









zPool._terminate_poolcCs|��|Sr)r�rrrr�	__enter__�szPool.__enter__cCs|��dSr)rs)r�exc_typeZexc_valZexc_tbrrr�__exit__�sz
Pool.__exit__)NNrNN)N)N)NNN)r)r)NNN)NNN)N)-r r!r"r4ry�staticmethodrZr�warnings�warnrr�r3rtr�r�rqr�r�rbr�r�rrr�r�r�r�r�r�r�r��classmethodrvr}r�r�r+r>rsr(r�r�r�r�rrrrr�sx
�
P

	



�


�

�
�


-
;


5c@s@eZdZdd�Zdd�Zdd�Zddd	�Zdd
d�Zdd
�ZdS)r�cCs>||_t��|_tt�|_|j|_||_||_	||j|j<dSr)
r_ruZEvent�_event�next�job_counterr�rg�	_callback�_error_callback)rr�r�r�rrrr�s

zApplyResult.__init__cCs
|j��Sr)r�Zis_setrrrr�ready�szApplyResult.readycCs|��std�|���|jS)Nz{0!r} not ready)r�rmr:�_successrrrr�
successful�szApplyResult.successfulNcCs|j�|�dSr)r�r	�rr�rrrr	�szApplyResult.waitcCs,|�|�|��st�|jr"|jS|j�dSr)r	r�rr��_valuer�rrrr<�s
zApplyResult.getcCsZ|\|_|_|jr$|jr$|�|j�|jr<|js<|�|j�|j��|j|j=d|_dSr)	r�r�r�r�r��setrgr�r_�rrMr�rrrr�s

zApplyResult._set)N)N)	r r!r"rr�r�r	r<r�rrrrr��s	

	r�c@seZdZdd�Zdd�ZdS)r�cCshtj||||d�d|_dg||_||_|dkrNd|_|j��|j|j	=n||t
||�|_dS)Nr�Tr)r�rr�r��
_chunksize�_number_leftr�r�rgr��bool)rr�r��lengthr�r�rrrrs
�
zMapResult.__init__cCs�|jd8_|\}}|rv|jrv||j||j|d|j�<|jdkr�|jrZ|�|j�|j|j=|j��d|_	nL|s�|jr�d|_||_|jdkr�|j
r�|�
|j�|j|j=|j��d|_	dS)NrrF)r�r�r�r�r�rgr�r�r�r_r�)rrMZsuccess_result�successrPrrrr�$s&







zMapResult._setN)r r!r"rr�rrrrr�s
r�c@s:eZdZdd�Zdd�Zddd�ZeZdd	�Zd
d�ZdS)
r�cCsT||_t�t���|_tt�|_|j|_t	�
�|_d|_d|_
i|_||j|j<dS)Nr)r_ruZ	ConditionZLock�_condr�r�r�rg�collections�deque�_items�_index�_length�	_unsorted)rr�rrrrBs

zIMapIterator.__init__cCs|Srrrrrr�__iter__MszIMapIterator.__iter__NcCs�|j��z|j��}Wnztk
r�|j|jkr>d|_td�|j�|�z|j��}Wn2tk
r�|j|jkr�d|_td�t	d�YnXYnXW5QRX|\}}|r�|S|�dSr)
r�r��popleft�
IndexErrorr�rr_�
StopIterationr	r)rr�rXr�r/rrrr�Ps&zIMapIterator.nextc	Cs�|j��|j|krn|j�|�|jd7_|j|jkrb|j�|j�}|j�|�|jd7_q,|j��n
||j|<|j|jkr�|j|j	=d|_
W5QRXdS�Nr)r�r�r�r�r�pop�notifyrrgr�r_r�rrrr�hs


zIMapIterator._setc	CsB|j�2||_|j|jkr4|j��|j|j=d|_W5QRXdSr)r�rr�rrgr�r_)rr�rrrr�ys

zIMapIterator._set_length)N)	r r!r"rrr��__next__r�r�rrrrr�@s
r�c@seZdZdd�ZdS)r�c	CsV|j�F|j�|�|jd7_|j��|j|jkrH|j|j=d|_W5QRXdSr)	r�r�r�r�rrrgr�r_r�rrrr��s

zIMapUnorderedIterator._setN)r r!r"r�rrrrr��sr�c@sVeZdZdZedd��Zddd�Zdd	�Zd
d�Zedd
��Z	edd��Z
dd�ZdS)rFcOsddlm}|||�S)NrrY)ZdummyrZ)r[rrOrZrrrrZ�szThreadPool.ProcessNrcCst�||||�dSr)rr)rr�rHrIrrrr�szThreadPool.__init__cCs,t��|_t��|_|jj|_|jj|_dSr)rcrdrwrxr;r~r<r�rrrrrb�s


zThreadPool._setup_queuescCs
|jjgSr)rfr?rrrrrt�szThreadPool._get_sentinelscCsgSrrr�rrrr��sz ThreadPool._get_worker_sentinelscCsFz|jdd�qWntjk
r(YnXt|�D]}|�d�q2dS)NF)�block)r<rcrr�r;)rFr�r�rMrrrr��szThreadPool._help_stuff_finishcCst�|�dSr)r�r�)rr�r�r�rrrr��szThreadPool._wait_for_updates)NNr)r r!r"ryr�rZrrbrtr�r�r�rrrrr�s




)NrNF))�__all__r�rrkrcrur�r%r�rr$rrrZ
connectionr	r
rrr
�countr�rrrCrr#r*r-rSrD�dictrT�objectrr�ZAsyncResultr�r�r�rrrrr�<module>
sN	�
-=)+EPK|��\�9�s	s	1__pycache__/popen_forkserver.cpython-38.opt-1.pycnu�[���U

e5d��@s�ddlZddlZddlmZmZejs.ed��ddlmZddlm	Z	ddlm
Z
ddlmZd	gZGd
d�de
�ZGdd	�d	e	j�ZdS)
�N�)�	reduction�set_spawning_popenz,No support for sending fds between processes)�
forkserver)�
popen_fork)�spawn)�util�Popenc@seZdZdd�Zdd�ZdS)�_DupFdcCs
||_dS�N)�ind)�selfr�r�8/usr/lib64/python3.8/multiprocessing/popen_forkserver.py�__init__sz_DupFd.__init__cCst��|jSr)rZget_inherited_fdsr)r
rrr�detachsz
_DupFd.detachN)�__name__�
__module__�__qualname__rrrrrrr
sr
csBeZdZdZeZ�fdd�Zdd�Zdd�Ze	j
fdd	�Z�ZS)
r	rcsg|_t��|�dSr)�_fds�superr)r
�process_obj��	__class__rrr!szPopen.__init__cCs|j�|�t|j�dS)Nr)r�append�len)r
�fdrrr�duplicate_for_child%szPopen.duplicate_for_childc	Cs�t�|j�}t��}t|�zt�||�t�||�W5td�Xt�	|j
�\|_}t�
|�}t�|tj||jf�|_t|ddd��}|�|���W5QRXt�|j�|_dS)N�wbT)�closefd)rZget_preparation_data�_name�io�BytesIOrr�dumprZconnect_to_new_processr�sentinel�os�duprZFinalizeZ	close_fds�	finalizer�open�write�	getbuffer�read_signed�pid)r
rZ	prep_dataZbuf�wZ	_parent_w�frrr�_launch)s


�z
Popen._launchc	Csr|jdkrlddlm}|tjkr$dnd}||jg|�s:dSzt�|j�|_Wntt	fk
rjd|_YnX|jS)Nr)�wait�)
�
returncodeZmultiprocessing.connectionr0r%�WNOHANGr$rr+�OSError�EOFError)r
�flagr0Ztimeoutrrr�poll=s
z
Popen.poll)
rrr�methodr
ZDupFdrrr/r%r3r7�
__classcell__rrrrr	s)r!r%�contextrrZHAVE_SEND_HANDLE�ImportError�rrrr�__all__�objectr
r	rrrr�<module>s
PK|��\/TJ��popen_forkserver.pynu�[���import io
import os

from .context import reduction, set_spawning_popen
if not reduction.HAVE_SEND_HANDLE:
    raise ImportError('No support for sending fds between processes')
from . import forkserver
from . import popen_fork
from . import spawn
from . import util


__all__ = ['Popen']

#
# Wrapper for an fd used while launching a process
#

class _DupFd(object):
    """Placeholder for a file descriptor while pickling a process object.

    Only the *index* of the fd (its position in the list handed to the
    forkserver) is stored; in the child, detach() resolves that index back
    into the real inherited fd.
    """
    def __init__(self, ind):
        # ind: position of the fd in the list sent to the forkserver
        self.ind = ind
    def detach(self):
        # Look the fd up among those the forkserver passed to this child.
        return forkserver.get_inherited_fds()[self.ind]

#
# Start child process using a server process
#

class Popen(popen_fork.Popen):
    """Start a child process via the forkserver process.

    The pickled process object is written to a pipe connected to the
    forkserver, which forks the actual worker on our behalf.
    """
    method = 'forkserver'
    DupFd = _DupFd

    def __init__(self, process_obj):
        # fds to be inherited by the child; filled by duplicate_for_child()
        # while process_obj is being pickled in _launch().
        self._fds = []
        super().__init__(process_obj)

    def duplicate_for_child(self, fd):
        # Record the fd for transfer and return its index, which the child
        # uses (via _DupFd.detach) to recover the inherited fd.
        self._fds.append(fd)
        return len(self._fds) - 1

    def _launch(self, process_obj):
        # Pickle preparation data and the process object first; pickling
        # may register fds in self._fds, which must happen before
        # connect_to_new_process() sends them.
        prep_data = spawn.get_preparation_data(process_obj._name)
        buf = io.BytesIO()
        set_spawning_popen(self)
        try:
            reduction.dump(prep_data, buf)
            reduction.dump(process_obj, buf)
        finally:
            set_spawning_popen(None)

        self.sentinel, w = forkserver.connect_to_new_process(self._fds)
        # Keep a duplicate of the data pipe's write end as a sentinel of the
        # parent process used by the child process.
        _parent_w = os.dup(w)
        self.finalizer = util.Finalize(self, util.close_fds,
                                       (_parent_w, self.sentinel))
        with open(w, 'wb', closefd=True) as f:
            f.write(buf.getbuffer())
        # The forkserver reports the worker's pid back over the pipe.
        self.pid = forkserver.read_signed(self.sentinel)

    def poll(self, flag=os.WNOHANG):
        """Return the child's exit code, or None if still running.

        With the default os.WNOHANG flag the check is non-blocking;
        otherwise it waits until the child exits.
        """
        if self.returncode is None:
            from multiprocessing.connection import wait
            timeout = 0 if flag == os.WNOHANG else None
            if not wait([self.sentinel], timeout):
                return None
            try:
                self.returncode = forkserver.read_signed(self.sentinel)
            except (OSError, EOFError):
                # This should not happen usually, but perhaps the forkserver
                # process itself got killed
                self.returncode = 255

        return self.returncode
PK|��\}����sharedctypes.pynu�[���#
# Module which supports allocation of ctypes objects from shared memory
#
# multiprocessing/sharedctypes.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import ctypes
import weakref

from . import heap
from . import get_context

from .context import reduction, assert_spawning
_ForkingPickler = reduction.ForkingPickler

__all__ = ['RawValue', 'RawArray', 'Value', 'Array', 'copy', 'synchronized']

#
#
#

# Mapping from array-module style single-character type codes to the
# corresponding ctypes type.  Values not found in this table are assumed
# to already be ctypes types (see RawValue/RawArray).
typecode_to_type = {
    'c': ctypes.c_char,     'u': ctypes.c_wchar,
    'b': ctypes.c_byte,     'B': ctypes.c_ubyte,
    'h': ctypes.c_short,    'H': ctypes.c_ushort,
    'i': ctypes.c_int,      'I': ctypes.c_uint,
    'l': ctypes.c_long,     'L': ctypes.c_ulong,
    'q': ctypes.c_longlong, 'Q': ctypes.c_ulonglong,
    'f': ctypes.c_float,    'd': ctypes.c_double
    }

#
#
#

def _new_value(type_):
    """Allocate an uninitialised ctypes object of *type_* whose storage
    lives in a shared-memory heap block."""
    backing = heap.BufferWrapper(ctypes.sizeof(type_))
    return rebuild_ctype(type_, backing, None)

def RawValue(typecode_or_type, *args):
    """Return a ctypes object allocated from shared memory.

    *typecode_or_type* is either an array-module type code or a ctypes
    type; *args* are forwarded to the type's constructor.
    """
    ctype = typecode_to_type.get(typecode_or_type, typecode_or_type)
    value = _new_value(ctype)
    # Zero the freshly mapped storage before running the constructor.
    ctypes.memset(ctypes.addressof(value), 0, ctypes.sizeof(value))
    value.__init__(*args)
    return value

def RawArray(typecode_or_type, size_or_initializer):
    """Return a ctypes array allocated from shared memory.

    *size_or_initializer* is either an int giving the array length
    (elements are zero-filled) or a sequence used to initialise the
    elements.
    """
    element = typecode_to_type.get(typecode_or_type, typecode_or_type)
    if isinstance(size_or_initializer, int):
        arr = _new_value(element * size_or_initializer)
        ctypes.memset(ctypes.addressof(arr), 0, ctypes.sizeof(arr))
        return arr
    arr = _new_value(element * len(size_or_initializer))
    arr.__init__(*size_or_initializer)
    return arr

def Value(typecode_or_type, *args, lock=True, ctx=None):
    """Return a synchronization wrapper for a RawValue.

    lock=False returns the raw object unprotected; lock=True or None
    creates a fresh recursive lock; any other value must itself supply
    an acquire() method.
    """
    raw = RawValue(typecode_or_type, *args)
    if lock is False:
        return raw
    if lock in (True, None):
        ctx = ctx or get_context()
        lock = ctx.RLock()
    if not hasattr(lock, 'acquire'):
        raise AttributeError("%r has no method 'acquire'" % lock)
    return synchronized(raw, lock, ctx=ctx)

def Array(typecode_or_type, size_or_initializer, *, lock=True, ctx=None):
    """Return a synchronization wrapper for a RawArray.

    lock=False returns the raw array unprotected; lock=True or None
    creates a fresh recursive lock; any other value must itself supply
    an acquire() method.
    """
    raw = RawArray(typecode_or_type, size_or_initializer)
    if lock is False:
        return raw
    if lock in (True, None):
        ctx = ctx or get_context()
        lock = ctx.RLock()
    if not hasattr(lock, 'acquire'):
        raise AttributeError("%r has no method 'acquire'" % lock)
    return synchronized(raw, lock, ctx=ctx)

def copy(obj):
    """Return a shared-memory duplicate of the ctypes object *obj*."""
    duplicate = _new_value(type(obj))
    # Copy the whole value through a pointer assignment.
    ctypes.pointer(duplicate)[0] = obj
    return duplicate

def synchronized(obj, lock=None, ctx=None):
    """Wrap the shared ctypes object *obj* in a lock-protected proxy."""
    assert not isinstance(obj, SynchronizedBase), 'object already synchronized'
    ctx = ctx or get_context()

    if isinstance(obj, ctypes._SimpleCData):
        return Synchronized(obj, lock, ctx)
    if isinstance(obj, ctypes.Array):
        wrapper = (SynchronizedString if obj._type_ is ctypes.c_char
                   else SynchronizedArray)
        return wrapper(obj, lock, ctx)
    # A ctypes structure: build (and cache) a Synchronized subclass that
    # exposes every struct field through a lock-guarded property.
    cls = type(obj)
    scls = class_cache.get(cls)
    if scls is None:
        props = {field[0]: make_property(field[0]) for field in cls._fields_}
        scls = type('Synchronized' + cls.__name__, (SynchronizedBase,), props)
        class_cache[cls] = scls
    return scls(obj, lock, ctx)

#
# Functions for pickling/unpickling
#

def reduce_ctype(obj):
    """Pickle hook: reduce a shared ctypes object for a spawning child.

    Only legal while a child process is being spawned (assert_spawning).
    """
    assert_spawning(obj)
    if isinstance(obj, ctypes.Array):
        return rebuild_ctype, (obj._type_, obj._wrapper, obj._length_)
    return rebuild_ctype, (type(obj), obj._wrapper, None)

def rebuild_ctype(type_, wrapper, length):
    # Recreate a ctypes object on top of *wrapper*'s shared-memory buffer.
    # *length* is non-None when rebuilding an array type.
    if length is not None:
        type_ = type_ * length
    # (Re-)register the (possibly just-created array) type so that its
    # instances pickle through reduce_ctype as well.
    _ForkingPickler.register(type_, reduce_ctype)
    buf = wrapper.create_memoryview()
    obj = type_.from_buffer(buf)
    # Keep the wrapper alive as long as the ctypes object: it owns the
    # underlying shared-memory block.
    obj._wrapper = wrapper
    return obj

#
# Function to create properties
#

def make_property(name):
    """Return (and cache) a lock-guarded property accessor for *name*.

    The property is generated by exec'ing the module-level *template*
    with every %s slot filled by *name*.
    """
    if name in prop_cache:
        return prop_cache[name]
    namespace = {}
    exec(template % ((name,) * 7), namespace)
    prop_cache[name] = namespace[name]
    return namespace[name]

# Source template exec'd by make_property(): defines a getter and a setter
# that take the wrapper's lock around attribute access, then binds them as
# a property.  All seven %s slots are filled with the same attribute name.
template = '''
def get%s(self):
    self.acquire()
    try:
        return self._obj.%s
    finally:
        self.release()
def set%s(self, value):
    self.acquire()
    try:
        self._obj.%s = value
    finally:
        self.release()
%s = property(get%s, set%s)
'''

# Caches: generated property objects keyed by attribute name, and generated
# Synchronized* classes keyed weakly by the original ctypes class (so the
# cache does not keep user-defined structure classes alive).
prop_cache = {}
class_cache = weakref.WeakKeyDictionary()

#
# Synchronized wrappers
#

class SynchronizedBase(object):
    """Base class wrapping a shared ctypes object together with a lock.

    acquire/release are bound straight from the lock, and the wrapper is
    itself usable as a context manager delegating to the lock.
    """

    def __init__(self, obj, lock=None, ctx=None):
        self._obj = obj
        if lock:
            self._lock = lock
        else:
            # NOTE(review): get_context() presumably takes no 'force'
            # argument (see BaseContext.get_context(method=None)), so this
            # fallback would raise TypeError if ever reached; in practice
            # synchronized() always passes a ctx -- confirm before relying
            # on this branch.
            ctx = ctx or get_context(force=True)
            self._lock = ctx.RLock()
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def __reduce__(self):
        # Only picklable while spawning a child process (by inheritance).
        assert_spawning(self)
        return synchronized, (self._obj, self._lock)

    def get_obj(self):
        return self._obj

    def get_lock(self):
        return self._lock

    def __repr__(self):
        return '<%s wrapper for %s>' % (type(self).__name__, self._obj)


class Synchronized(SynchronizedBase):
    """Lock-protected wrapper for a single shared ctypes value."""
    value = make_property('value')


class SynchronizedArray(SynchronizedBase):
    """Lock-protected wrapper for a shared ctypes array.

    Item access takes the wrapper's lock; note that iteration and slicing
    of the underlying object are NOT protected as a whole.
    """

    def __len__(self):
        return len(self._obj)

    def __getitem__(self, i):
        with self:
            return self._obj[i]

    def __setitem__(self, i, value):
        with self:
            self._obj[i] = value

    # __getslice__/__setslice__ are Python-2 era hooks; Python 3 never
    # calls them (slices go through __getitem__/__setitem__), so they are
    # effectively dead code kept for historical reasons.
    def __getslice__(self, start, stop):
        with self:
            return self._obj[start:stop]

    def __setslice__(self, start, stop, values):
        with self:
            self._obj[start:stop] = values


class SynchronizedString(SynchronizedArray):
    """Lock-protected wrapper for a shared c_char array (byte string)."""
    value = make_property('value')
    raw = make_property('raw')
PK|��\��p�+�+
context.pynu�[���import os
import sys
import threading

from . import process
from . import reduction

__all__ = ()

#
# Exceptions
#

class ProcessError(Exception):
    """Base class for exceptions raised by this package."""
    pass

class BufferTooShort(ProcessError):
    """Raised when a supplied buffer is too small for a received message."""
    pass

class TimeoutError(ProcessError):
    """Raised when a blocking operation times out."""
    pass

class AuthenticationError(ProcessError):
    """Raised when connection authentication fails."""
    pass

#
# Base type for contexts. Bound methods of an instance of this type are included in __all__ of __init__.py
#

class BaseContext(object):
    """Base type for multiprocessing contexts.

    A context bundles the factory methods of the package (Process, Queue,
    Lock, Pool, ...) with a particular start method.  Submodules are
    imported lazily inside each factory to keep package import cheap and
    to avoid import cycles.
    """

    ProcessError = ProcessError
    BufferTooShort = BufferTooShort
    TimeoutError = TimeoutError
    AuthenticationError = AuthenticationError

    current_process = staticmethod(process.current_process)
    parent_process = staticmethod(process.parent_process)
    active_children = staticmethod(process.active_children)

    def cpu_count(self):
        '''Returns the number of CPUs in the system'''
        num = os.cpu_count()
        if num is None:
            raise NotImplementedError('cannot determine number of cpus')
        else:
            return num

    def Manager(self):
        '''Returns a manager associated with a running server process

        The managers methods such as `Lock()`, `Condition()` and `Queue()`
        can be used to create shared objects.
        '''
        from .managers import SyncManager
        m = SyncManager(ctx=self.get_context())
        m.start()
        return m

    def Pipe(self, duplex=True):
        '''Returns two connection object connected by a pipe'''
        from .connection import Pipe
        return Pipe(duplex)

    def Lock(self):
        '''Returns a non-recursive lock object'''
        from .synchronize import Lock
        return Lock(ctx=self.get_context())

    def RLock(self):
        '''Returns a recursive lock object'''
        from .synchronize import RLock
        return RLock(ctx=self.get_context())

    def Condition(self, lock=None):
        '''Returns a condition object'''
        from .synchronize import Condition
        return Condition(lock, ctx=self.get_context())

    def Semaphore(self, value=1):
        '''Returns a semaphore object'''
        from .synchronize import Semaphore
        return Semaphore(value, ctx=self.get_context())

    def BoundedSemaphore(self, value=1):
        '''Returns a bounded semaphore object'''
        from .synchronize import BoundedSemaphore
        return BoundedSemaphore(value, ctx=self.get_context())

    def Event(self):
        '''Returns an event object'''
        from .synchronize import Event
        return Event(ctx=self.get_context())

    def Barrier(self, parties, action=None, timeout=None):
        '''Returns a barrier object'''
        from .synchronize import Barrier
        return Barrier(parties, action, timeout, ctx=self.get_context())

    def Queue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import Queue
        return Queue(maxsize, ctx=self.get_context())

    def JoinableQueue(self, maxsize=0):
        '''Returns a queue object'''
        from .queues import JoinableQueue
        return JoinableQueue(maxsize, ctx=self.get_context())

    def SimpleQueue(self):
        '''Returns a queue object'''
        from .queues import SimpleQueue
        return SimpleQueue(ctx=self.get_context())

    def Pool(self, processes=None, initializer=None, initargs=(),
             maxtasksperchild=None):
        '''Returns a process pool object'''
        from .pool import Pool
        return Pool(processes, initializer, initargs, maxtasksperchild,
                    context=self.get_context())

    def RawValue(self, typecode_or_type, *args):
        '''Returns a shared object'''
        from .sharedctypes import RawValue
        return RawValue(typecode_or_type, *args)

    def RawArray(self, typecode_or_type, size_or_initializer):
        '''Returns a shared array'''
        from .sharedctypes import RawArray
        return RawArray(typecode_or_type, size_or_initializer)

    def Value(self, typecode_or_type, *args, lock=True):
        '''Returns a synchronized shared object'''
        from .sharedctypes import Value
        return Value(typecode_or_type, *args, lock=lock,
                     ctx=self.get_context())

    def Array(self, typecode_or_type, size_or_initializer, *, lock=True):
        '''Returns a synchronized shared array'''
        from .sharedctypes import Array
        return Array(typecode_or_type, size_or_initializer, lock=lock,
                     ctx=self.get_context())

    def freeze_support(self):
        '''Check whether this is a fake forked process in a frozen executable.
        If so then run code specified by commandline and exit.
        '''
        if sys.platform == 'win32' and getattr(sys, 'frozen', False):
            from .spawn import freeze_support
            freeze_support()

    def get_logger(self):
        '''Return package logger -- if it does not already exist then
        it is created.
        '''
        from .util import get_logger
        return get_logger()

    def log_to_stderr(self, level=None):
        '''Turn on logging and add a handler which prints to stderr'''
        from .util import log_to_stderr
        return log_to_stderr(level)

    def allow_connection_pickling(self):
        '''Install support for sending connections and sockets
        between processes
        '''
        # This is undocumented.  In previous versions of multiprocessing
        # its only effect was to make socket objects inheritable on Windows.
        from . import connection

    def set_executable(self, executable):
        '''Sets the path to a python.exe or pythonw.exe binary used to run
        child processes instead of sys.executable when using the 'spawn'
        start method.  Useful for people embedding Python.
        '''
        from .spawn import set_executable
        set_executable(executable)

    def set_forkserver_preload(self, module_names):
        '''Set list of module names to try to load in forkserver process.
        This is really just a hint.
        '''
        from .forkserver import set_forkserver_preload
        set_forkserver_preload(module_names)

    def get_context(self, method=None):
        # With no method, a concrete context returns itself; otherwise look
        # up the named concrete context and check it is usable here.
        if method is None:
            return self
        try:
            ctx = _concrete_contexts[method]
        except KeyError:
            raise ValueError('cannot find context for %r' % method) from None
        ctx._check_available()
        return ctx

    def get_start_method(self, allow_none=False):
        # Concrete contexts have a fixed start method (their _name).
        return self._name

    def set_start_method(self, method, force=False):
        raise ValueError('cannot set start method of concrete context')

    @property
    def reducer(self):
        '''Controls how objects will be reduced to a form that can be
        shared with other processes.'''
        return globals().get('reduction')

    @reducer.setter
    def reducer(self, reduction):
        globals()['reduction'] = reduction

    def _check_available(self):
        # Overridden by contexts whose start method may be unavailable
        # (e.g. forkserver without fd passing support).
        pass

#
# Type of default context -- underlying context can be set at most once
#

class Process(process.BaseProcess):
    """Process class used by the default context.

    The actual Popen implementation is chosen lazily, based on whatever
    start method the default context resolves to at launch time.
    """
    _start_method = None
    @staticmethod
    def _Popen(process_obj):
        # Delegate to the Process class of the currently selected context.
        return _default_context.get_context().Process._Popen(process_obj)

class DefaultContext(BaseContext):
    """Context whose start method can be set once via set_start_method().

    Until a method is chosen (or first used), _actual_context stays None;
    the first get_context()/get_start_method() call locks in the
    platform default.
    """
    Process = Process

    def __init__(self, context):
        self._default_context = context
        self._actual_context = None

    def get_context(self, method=None):
        if method is None:
            # Lazily commit to the platform default on first use.
            if self._actual_context is None:
                self._actual_context = self._default_context
            return self._actual_context
        else:
            return super().get_context(method)

    def set_start_method(self, method, force=False):
        if self._actual_context is not None and not force:
            raise RuntimeError('context has already been set')
        if method is None and force:
            # Reset to "not yet chosen".
            self._actual_context = None
            return
        self._actual_context = self.get_context(method)

    def get_start_method(self, allow_none=False):
        if self._actual_context is None:
            if allow_none:
                return None
            self._actual_context = self._default_context
        return self._actual_context._name

    def get_all_start_methods(self):
        # Windows only supports spawn; macOS prefers spawn (see bpo-33725),
        # other POSIX platforms list fork first.
        if sys.platform == 'win32':
            return ['spawn']
        else:
            methods = ['spawn', 'fork'] if sys.platform == 'darwin' else ['fork', 'spawn']
            if reduction.HAVE_SEND_HANDLE:
                methods.append('forkserver')
            return methods


#
# Context types for fixed start method
#

if sys.platform != 'win32':

    # POSIX: one Process subclass and one concrete context per start
    # method; each Process binds the matching Popen implementation.
    class ForkProcess(process.BaseProcess):
        _start_method = 'fork'
        @staticmethod
        def _Popen(process_obj):
            from .popen_fork import Popen
            return Popen(process_obj)

    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'
        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_posix import Popen
            return Popen(process_obj)

    class ForkServerProcess(process.BaseProcess):
        _start_method = 'forkserver'
        @staticmethod
        def _Popen(process_obj):
            from .popen_forkserver import Popen
            return Popen(process_obj)

    class ForkContext(BaseContext):
        _name = 'fork'
        Process = ForkProcess

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    class ForkServerContext(BaseContext):
        _name = 'forkserver'
        Process = ForkServerProcess
        def _check_available(self):
            # forkserver needs fd passing to hand pipes to the server.
            if not reduction.HAVE_SEND_HANDLE:
                raise ValueError('forkserver start method not available')

    _concrete_contexts = {
        'fork': ForkContext(),
        'spawn': SpawnContext(),
        'forkserver': ForkServerContext(),
    }
    if sys.platform == 'darwin':
        # bpo-33725: running arbitrary code after fork() is no longer reliable
        # on macOS since macOS 10.14 (Mojave). Use spawn by default instead.
        _default_context = DefaultContext(_concrete_contexts['spawn'])
    else:
        _default_context = DefaultContext(_concrete_contexts['fork'])

else:

    # Windows: spawn is the only supported start method.
    class SpawnProcess(process.BaseProcess):
        _start_method = 'spawn'
        @staticmethod
        def _Popen(process_obj):
            from .popen_spawn_win32 import Popen
            return Popen(process_obj)

    class SpawnContext(BaseContext):
        _name = 'spawn'
        Process = SpawnProcess

    _concrete_contexts = {
        'spawn': SpawnContext(),
    }
    _default_context = DefaultContext(_concrete_contexts['spawn'])

#
# Force the start method
#

def _force_start_method(method):
    # Unconditionally switch the default context's start method, bypassing
    # the "already set" check of set_start_method().  Internal use only.
    _default_context._actual_context = _concrete_contexts[method]

#
# Check that the current thread is spawning a child process
#

_tls = threading.local()

def get_spawning_popen():
    return getattr(_tls, 'spawning_popen', None)

def set_spawning_popen(popen):
    # Mark the current thread as spawning `popen`; pass None to clear.
    _tls.spawning_popen = popen

def assert_spawning(obj):
    """Raise RuntimeError unless the current thread is spawning a child.

    Called by __getstate__/__reduce__ of objects that must only ever be
    pickled as part of handing them to a child process.
    """
    if get_spawning_popen() is not None:
        return
    raise RuntimeError(
        '%s objects should only be shared between processes'
        ' through inheritance' % type(obj).__name__
        )
PK|��\
���!�!resource_tracker.pynu�[���###############################################################################
# Server process to keep track of unlinked resources (like shared memory
# segments, semaphores etc.) and clean them.
#
# On Unix we run a server process which keeps track of unlinked
# resources. The server ignores SIGINT and SIGTERM and reads from a
# pipe.  Every other process of the program has a copy of the writable
# end of the pipe, so we get EOF when all other processes have exited.
# Then the server process unlinks any remaining resource names.
#
# This is important because there may be system limits for such resources: for
# instance, the system only supports a limited number of named semaphores, and
# shared-memory segments live in RAM. If a Python process leaks such a
# resource, it will not be removed until the next reboot.  Without
# this resource tracker process, "killall python" would probably leave unlinked
# resources.

import os
import signal
import sys
import threading
import warnings

from . import spawn
from . import util

__all__ = ['ensure_running', 'register', 'unregister']

# Whether signals can be blocked around spawning the tracker (POSIX only).
_HAVE_SIGMASK = hasattr(signal, 'pthread_sigmask')
_IGNORED_SIGNALS = (signal.SIGINT, signal.SIGTERM)

# Maps resource type name -> callable that unlinks one such resource.
# 'noop' exists so liveness probes (PROBE commands) have a valid rtype.
_CLEANUP_FUNCS = {
    'noop': lambda: None,
}

if os.name == 'posix':
    import _multiprocessing
    import _posixshmem

    _CLEANUP_FUNCS.update({
        'semaphore': _multiprocessing.sem_unlink,
        'shared_memory': _posixshmem.shm_unlink,
    })


class ResourceTracker(object):
    """Client-side handle on the resource tracker process.

    The tracker is a child process that records named resources
    (semaphores, shared memory segments) via REGISTER/UNREGISTER lines
    written down a pipe, and unlinks anything still registered once
    every writer has exited (i.e. when the pipe reaches EOF).
    """

    def __init__(self):
        self._lock = threading.Lock()
        self._fd = None    # writable end of the pipe to the tracker
        self._pid = None   # tracker pid, if this process spawned it

    def _stop(self):
        """Stop the tracker process and reap it (used by tests)."""
        with self._lock:
            if self._fd is None:
                # not running
                return

            # closing the "alive" file descriptor stops main()
            os.close(self._fd)
            self._fd = None

            os.waitpid(self._pid, 0)
            self._pid = None

    def getfd(self):
        """Return the fd of the pipe to the tracker, launching it if needed."""
        self.ensure_running()
        return self._fd

    def ensure_running(self):
        '''Make sure that resource tracker process is running.

        This can be run from any process.  Usually a child process will use
        the resource created by its parent.'''
        with self._lock:
            if self._fd is not None:
                # resource tracker was launched before, is it still running?
                if self._check_alive():
                    # => still alive
                    return
                # => dead, launch it again
                os.close(self._fd)

                # Clean-up to avoid dangling processes.
                try:
                    # _pid can be None if this process is a child from another
                    # python process, which has started the resource_tracker.
                    if self._pid is not None:
                        os.waitpid(self._pid, 0)
                except ChildProcessError:
                    # The resource_tracker has already been terminated.
                    pass
                self._fd = None
                self._pid = None

                warnings.warn('resource_tracker: process died unexpectedly, '
                              'relaunching.  Some resources might leak.')

            fds_to_pass = []
            try:
                fds_to_pass.append(sys.stderr.fileno())
            except Exception:
                pass
            cmd = 'from multiprocessing.resource_tracker import main;main(%d)'
            r, w = os.pipe()
            try:
                fds_to_pass.append(r)
                # process will out live us, so no need to wait on pid
                exe = spawn.get_executable()
                args = [exe] + util._args_from_interpreter_flags()
                args += ['-c', cmd % r]
                # bpo-33613: Register a signal mask that will block the signals.
                # This signal mask will be inherited by the child that is going
                # to be spawned and will protect the child from a race condition
                # that can make the child die before it registers signal handlers
                # for SIGINT and SIGTERM. The mask is unregistered after spawning
                # the child.
                try:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_BLOCK, _IGNORED_SIGNALS)
                    pid = util.spawnv_passfds(exe, args, fds_to_pass)
                finally:
                    if _HAVE_SIGMASK:
                        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)
            except:
                os.close(w)
                raise
            else:
                self._fd = w
                self._pid = pid
            finally:
                os.close(r)

    def _check_alive(self):
        '''Check that the pipe has not been closed by sending a probe.'''
        try:
            # We cannot use send here as it calls ensure_running, creating
            # a cycle.
            os.write(self._fd, b'PROBE:0:noop\n')
        except OSError:
            return False
        else:
            return True

    def register(self, name, rtype):
        '''Register name of resource with resource tracker.'''
        self._send('REGISTER', name, rtype)

    def unregister(self, name, rtype):
        '''Unregister name of resource with resource tracker.'''
        self._send('UNREGISTER', name, rtype)

    def _send(self, cmd, name, rtype):
        """Write one '<cmd>:<name>:<rtype>' line down the tracker pipe.

        Raises ValueError if the encoded message would exceed PIPE_BUF,
        because only writes of at most PIPE_BUF bytes are guaranteed to
        be atomic.
        """
        self.ensure_running()
        msg = '{0}:{1}:{2}\n'.format(cmd, name, rtype).encode('ascii')
        # Check the length of the whole encoded message, not just `name`:
        # the command, rtype and separators also occupy part of the write,
        # so a 512-char name alone could already break atomicity.
        if len(msg) > 512:
            # posix guarantees that writes to a pipe of less than PIPE_BUF
            # bytes are atomic, and that PIPE_BUF >= 512
            raise ValueError('msg too long')
        nbytes = os.write(self._fd, msg)
        assert nbytes == len(msg), "nbytes {0:n} but len(msg) {1:n}".format(
            nbytes, len(msg))


# Singleton tracker shared by the whole process; the module-level helpers
# below are simply bound methods of this instance.
_resource_tracker = ResourceTracker()
ensure_running = _resource_tracker.ensure_running
register = _resource_tracker.register
unregister = _resource_tracker.unregister
getfd = _resource_tracker.getfd

def main(fd):
    '''Run resource tracker.

    Reads REGISTER/UNREGISTER/PROBE lines from the pipe whose read end is
    `fd` until EOF (i.e. until every writer process has exited), then
    unlinks any resources that are still registered.
    '''
    # protect the process from ^C and "killall python" etc
    signal.signal(signal.SIGINT, signal.SIG_IGN)
    signal.signal(signal.SIGTERM, signal.SIG_IGN)
    if _HAVE_SIGMASK:
        signal.pthread_sigmask(signal.SIG_UNBLOCK, _IGNORED_SIGNALS)

    for f in (sys.stdin, sys.stdout):
        try:
            f.close()
        except Exception:
            pass

    # One set of still-registered names per known resource type.
    cache = {rtype: set() for rtype in _CLEANUP_FUNCS.keys()}
    try:
        # keep track of registered/unregistered resources
        with open(fd, 'rb') as f:
            for line in f:
                try:
                    cmd, name, rtype = line.strip().decode('ascii').split(':')
                    cleanup_func = _CLEANUP_FUNCS.get(rtype, None)
                    if cleanup_func is None:
                        raise ValueError(
                            f'Cannot register {name} for automatic cleanup: '
                            f'unknown resource type {rtype}')

                    if cmd == 'REGISTER':
                        cache[rtype].add(name)
                    elif cmd == 'UNREGISTER':
                        cache[rtype].remove(name)
                    elif cmd == 'PROBE':
                        pass
                    else:
                        raise RuntimeError('unrecognized command %r' % cmd)
                except Exception:
                    # Report but keep serving: one malformed line must not
                    # stop tracking of the remaining resources.
                    try:
                        sys.excepthook(*sys.exc_info())
                    except:
                        pass
    finally:
        # all processes have terminated; cleanup any remaining resources
        for rtype, rtype_cache in cache.items():
            if rtype_cache:
                try:
                    warnings.warn('resource_tracker: There appear to be %d '
                                  'leaked %s objects to clean up at shutdown' %
                                  (len(rtype_cache), rtype))
                except Exception:
                    pass
            for name in rtype_cache:
                # For some reason the process which created and registered this
                # resource has failed to unregister it. Presumably it has
                # died.  We therefore unlink it.
                try:
                    _CLEANUP_FUNCS[rtype](name)
                except Exception as e:
                    warnings.warn('resource_tracker: %r: %s' % (name, e))
PK|��\�!ĭ(%(%reduction.pynu�[���#
# Module which deals with pickling of objects.
#
# multiprocessing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

from abc import ABCMeta
import copyreg
import functools
import io
import os
import pickle
import socket
import sys

from . import context

__all__ = ['send_handle', 'recv_handle', 'ForkingPickler', 'register', 'dump']


# True when fds/handles can be transferred between processes: always on
# Windows, and on Unix only when SCM_RIGHTS ancillary data is supported.
HAVE_SEND_HANDLE = (sys.platform == 'win32' or
                    (hasattr(socket, 'CMSG_LEN') and
                     hasattr(socket, 'SCM_RIGHTS') and
                     hasattr(socket.socket, 'sendmsg')))

#
# Pickler subclass
#

class ForkingPickler(pickle.Pickler):
    '''Pickler subclass used by multiprocessing.

    Extends the standard pickler with a per-class registry of extra
    reduce functions (installed via register()).
    '''
    _extra_reducers = {}
    _copyreg_dispatch_table = copyreg.dispatch_table

    def __init__(self, *args):
        super().__init__(*args)
        # Start from a snapshot of copyreg's table, then layer the
        # multiprocessing-specific reducers on top of it.
        table = self._copyreg_dispatch_table.copy()
        table.update(self._extra_reducers)
        self.dispatch_table = table

    @classmethod
    def register(cls, type, reduce):
        '''Register a reduce function for a type.'''
        cls._extra_reducers[type] = reduce

    @classmethod
    def dumps(cls, obj, protocol=None):
        '''Pickle obj, returning the result as a memoryview.'''
        stream = io.BytesIO()
        cls(stream, protocol).dump(obj)
        return stream.getbuffer()

    loads = pickle.loads

# Module-level alias so callers can write reduction.register(type, reduce).
register = ForkingPickler.register

def dump(obj, file, protocol=None):
    '''Replacement for pickle.dump() using ForkingPickler.'''
    ForkingPickler(file, protocol).dump(obj)

#
# Platform specific definitions
#

if sys.platform == 'win32':
    # Windows
    __all__ += ['DupHandle', 'duplicate', 'steal_handle']
    import _winapi

    def duplicate(handle, target_process=None, inheritable=False,
                  *, source_process=None):
        '''Duplicate a handle.  (target_process is a handle not a pid!)'''
        # Defaults mean "duplicate within the current process".
        current_process = _winapi.GetCurrentProcess()
        if source_process is None:
            source_process = current_process
        if target_process is None:
            target_process = current_process
        return _winapi.DuplicateHandle(
            source_process, handle, target_process,
            0, inheritable, _winapi.DUPLICATE_SAME_ACCESS)

    def steal_handle(source_pid, handle):
        '''Steal a handle from process identified by source_pid.'''
        # DUPLICATE_CLOSE_SOURCE also closes the handle in the source
        # process, so ownership moves to us.
        source_process_handle = _winapi.OpenProcess(
            _winapi.PROCESS_DUP_HANDLE, False, source_pid)
        try:
            return _winapi.DuplicateHandle(
                source_process_handle, handle,
                _winapi.GetCurrentProcess(), 0, False,
                _winapi.DUPLICATE_SAME_ACCESS | _winapi.DUPLICATE_CLOSE_SOURCE)
        finally:
            _winapi.CloseHandle(source_process_handle)

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        # A picklable DupHandle travels over the connection; the receiver
        # calls detach() to claim the duplicated handle.
        dh = DupHandle(handle, _winapi.DUPLICATE_SAME_ACCESS, destination_pid)
        conn.send(dh)

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        return conn.recv().detach()

    class DupHandle(object):
        '''Picklable wrapper for a handle.'''
        def __init__(self, handle, access, pid=None):
            if pid is None:
                # We just duplicate the handle in the current process and
                # let the receiving process steal the handle.
                pid = os.getpid()
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False, pid)
            try:
                self._handle = _winapi.DuplicateHandle(
                    _winapi.GetCurrentProcess(),
                    handle, proc, access, False, 0)
            finally:
                _winapi.CloseHandle(proc)
            self._access = access
            self._pid = pid

        def detach(self):
            '''Get the handle.  This should only be called once.'''
            # retrieve handle from process which currently owns it
            if self._pid == os.getpid():
                # The handle has already been duplicated for this process.
                return self._handle
            # We must steal the handle from the process whose pid is self._pid.
            proc = _winapi.OpenProcess(_winapi.PROCESS_DUP_HANDLE, False,
                                       self._pid)
            try:
                return _winapi.DuplicateHandle(
                    proc, self._handle, _winapi.GetCurrentProcess(),
                    self._access, False, _winapi.DUPLICATE_CLOSE_SOURCE)
            finally:
                _winapi.CloseHandle(proc)

else:
    # Unix
    __all__ += ['DupFd', 'sendfds', 'recvfds']
    import array

    # On MacOSX we should acknowledge receipt of fds -- see Issue14669
    # (the sender then waits for a b'A' byte back from the receiver).
    ACKNOWLEDGE = sys.platform == 'darwin'

    def sendfds(sock, fds):
        '''Send an array of fds over an AF_UNIX socket.'''
        fd_array = array.array('i', fds)
        # A single payload byte carries the fd count (mod 256) so the
        # receiver can sanity-check what arrived as ancillary data.
        count_byte = bytes([len(fd_array) % 256])
        ancillary = [(socket.SOL_SOCKET, socket.SCM_RIGHTS, fd_array)]
        sock.sendmsg([count_byte], ancillary)
        if ACKNOWLEDGE and sock.recv(1) != b'A':
            raise RuntimeError('did not receive acknowledgement of fd')

    def recvfds(sock, size):
        '''Receive an array of fds over an AF_UNIX socket.'''
        a = array.array('i')
        bytes_size = a.itemsize * size
        # Expect one payload byte plus ancillary room for `size` fds.
        msg, ancdata, flags, addr = sock.recvmsg(1, socket.CMSG_SPACE(bytes_size))
        if not msg and not ancdata:
            raise EOFError
        try:
            if ACKNOWLEDGE:
                sock.send(b'A')
            if len(ancdata) != 1:
                raise RuntimeError('received %d items of ancdata' %
                                   len(ancdata))
            cmsg_level, cmsg_type, cmsg_data = ancdata[0]
            if (cmsg_level == socket.SOL_SOCKET and
                cmsg_type == socket.SCM_RIGHTS):
                if len(cmsg_data) % a.itemsize != 0:
                    raise ValueError
                a.frombytes(cmsg_data)
                # The sender encoded len(fds) % 256 in the payload byte;
                # verify that what arrived matches.
                if len(a) % 256 != msg[0]:
                    raise AssertionError(
                        "Len is {0:n} but msg[0] is {1!r}".format(
                            len(a), msg[0]))
                return list(a)
        except (ValueError, IndexError):
            pass
        # Any mismatch above (or a non-SCM_RIGHTS message) lands here.
        raise RuntimeError('Invalid data received')

    def send_handle(conn, handle, destination_pid):
        '''Send a handle over a local connection.'''
        # destination_pid is unused on Unix: the fd travels as SCM_RIGHTS
        # ancillary data over the connection's own socket.
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            sendfds(s, [handle])

    def recv_handle(conn):
        '''Receive a handle over a local connection.'''
        with socket.fromfd(conn.fileno(), socket.AF_UNIX, socket.SOCK_STREAM) as s:
            return recvfds(s, 1)[0]

    def DupFd(fd):
        '''Return a wrapper for an fd.'''
        # While spawning, let the Popen object arrange fd inheritance;
        # otherwise fall back to the resource sharer, which needs
        # SCM_RIGHTS support to pass the fd later.
        popen_obj = context.get_spawning_popen()
        if popen_obj is None:
            if not HAVE_SEND_HANDLE:
                raise ValueError('SCM_RIGHTS appears not to be available')
            from . import resource_sharer
            return resource_sharer.DupFd(fd)
        return popen_obj.DupFd(popen_obj.duplicate_for_child(fd))

#
# Try making some callable types picklable
#

def _reduce_method(m):
    if m.__self__ is None:
        return getattr, (m.__class__, m.__func__.__name__)
    else:
        return getattr, (m.__self__, m.__func__.__name__)
class _C:
    # Throwaway class used only to obtain the bound-method type below.
    def f(self):
        pass
register(type(_C().f), _reduce_method)


def _reduce_method_descriptor(m):
    return getattr, (m.__objclass__, m.__name__)
# Method descriptors of built-in types pickle as attribute lookups on
# their defining class.
register(type(list.append), _reduce_method_descriptor)
register(type(int.__add__), _reduce_method_descriptor)


def _reduce_partial(p):
    return _rebuild_partial, (p.func, p.args, p.keywords or {})
def _rebuild_partial(func, args, keywords):
    return functools.partial(func, *args, **keywords)
register(functools.partial, _reduce_partial)

#
# Make sockets picklable
#

if sys.platform == 'win32':
    def _reduce_socket(s):
        # Windows: hand the socket to the resource sharer, which
        # duplicates it for the receiving process.
        from .resource_sharer import DupSocket
        return _rebuild_socket, (DupSocket(s),)
    def _rebuild_socket(ds):
        return ds.detach()
    register(socket.socket, _reduce_socket)

else:
    def _reduce_socket(s):
        # Unix: transfer the underlying fd and rebuild a socket around it.
        df = DupFd(s.fileno())
        return _rebuild_socket, (df, s.family, s.type, s.proto)
    def _rebuild_socket(df, family, type, proto):
        fd = df.detach()
        return socket.socket(family, type, proto, fileno=fd)
    register(socket.socket, _reduce_socket)


class AbstractReducer(metaclass=ABCMeta):
    '''Abstract base class for use in implementing a Reduction class
    suitable for use in replacing the standard reduction mechanism
    used in multiprocessing.'''
    # The module's public API is re-exported as class attributes so a
    # complete replacement reduction can be swapped in as one object.
    ForkingPickler = ForkingPickler
    register = register
    dump = dump
    send_handle = send_handle
    recv_handle = recv_handle

    if sys.platform == 'win32':
        steal_handle = steal_handle
        duplicate = duplicate
        DupHandle = DupHandle
    else:
        sendfds = sendfds
        recvfds = recvfds
        DupFd = DupFd

    _reduce_method = _reduce_method
    _reduce_method_descriptor = _reduce_method_descriptor
    _rebuild_partial = _rebuild_partial
    _reduce_socket = _reduce_socket
    _rebuild_socket = _rebuild_socket

    def __init__(self, *args):
        # Re-register the standard reducers; a subclass may have replaced
        # the pickler whose registry these calls populate.
        register(type(_C().f), _reduce_method)
        register(type(list.append), _reduce_method_descriptor)
        register(type(int.__add__), _reduce_method_descriptor)
        register(functools.partial, _reduce_partial)
        register(socket.socket, _reduce_socket)
PK|��\���.�.
process.pynu�[���#
# Module providing the `Process` class which emulates `threading.Thread`
#
# multiprocessing/process.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['BaseProcess', 'current_process', 'active_children',
           'parent_process']

#
# Imports
#

import os
import sys
import signal
import itertools
import threading
from _weakrefset import WeakSet

#
#
#

try:
    # Remember the start-up cwd; spawned children may need it later.
    ORIGINAL_DIR = os.path.abspath(os.getcwd())
except OSError:
    # The cwd may already have been deleted out from under us.
    ORIGINAL_DIR = None

#
# Public functions
#

def current_process():
    '''
    Return process object representing the current process
    '''
    # _current_process is rebound during child bootstrap (_bootstrap).
    return _current_process

def active_children():
    '''
    Return list of process objects corresponding to live child processes
    '''
    # Reap finished children first so the returned list is up to date.
    _cleanup()
    return list(_children)


def parent_process():
    '''
    Return process object representing the parent process
    '''
    # None in the main process; set during child bootstrap.
    return _parent_process

#
#
#

def _cleanup():
    '''Discard children whose process has already finished.'''
    # check for processes which have finished
    for p in list(_children):
        # Read _popen once into a local: another thread may call p.close()
        # (which resets _popen to None) between the membership test and
        # the poll() call, which would otherwise raise AttributeError.
        popen = p._popen
        if popen is not None and popen.poll() is not None:
            _children.discard(p)

#
# The `Process` class
#

class BaseProcess(object):
    '''
    Process objects represent activity that is run in a separate process

    The class is analogous to `threading.Thread`
    '''
    def _Popen(self):
        # Overridden by concrete Process classes in context.py.
        raise NotImplementedError

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={},
                 *, daemon=None):
        assert group is None, 'group argument must be None for now'
        count = next(_process_counter)
        # Identity is a tuple of per-generation counters, e.g. (2, 1) is
        # the first child of this process's second child.
        self._identity = _current_process._identity + (count,)
        self._config = _current_process._config.copy()
        self._parent_pid = os.getpid()
        self._parent_name = _current_process.name
        self._popen = None
        self._closed = False
        self._target = target
        self._args = tuple(args)
        self._kwargs = dict(kwargs)
        self._name = name or type(self).__name__ + '-' + \
                     ':'.join(str(i) for i in self._identity)
        if daemon is not None:
            self.daemon = daemon
        _dangling.add(self)

    def _check_closed(self):
        # Guard used by public methods after close() has been called.
        if self._closed:
            raise ValueError("process object is closed")

    def run(self):
        '''
        Method to be run in sub-process; can be overridden in sub-class
        '''
        if self._target:
            self._target(*self._args, **self._kwargs)

    def start(self):
        '''
        Start child process
        '''
        self._check_closed()
        assert self._popen is None, 'cannot start a process twice'
        assert self._parent_pid == os.getpid(), \
               'can only start a process object created by current process'
        assert not _current_process._config.get('daemon'), \
               'daemonic processes are not allowed to have children'
        _cleanup()
        self._popen = self._Popen(self)
        self._sentinel = self._popen.sentinel
        # Avoid a refcycle if the target function holds an indirect
        # reference to the process object (see bpo-30775)
        del self._target, self._args, self._kwargs
        _children.add(self)

    def terminate(self):
        '''
        Terminate process; sends SIGTERM signal or uses TerminateProcess()
        '''
        self._check_closed()
        self._popen.terminate()

    def kill(self):
        '''
        Terminate process; sends SIGKILL signal or uses TerminateProcess()
        '''
        self._check_closed()
        self._popen.kill()

    def join(self, timeout=None):
        '''
        Wait until child process terminates
        '''
        self._check_closed()
        assert self._parent_pid == os.getpid(), 'can only join a child process'
        assert self._popen is not None, 'can only join a started process'
        res = self._popen.wait(timeout)
        if res is not None:
            # Process exited within the timeout: stop tracking it.
            _children.discard(self)

    def is_alive(self):
        '''
        Return whether process is alive
        '''
        self._check_closed()
        if self is _current_process:
            return True
        assert self._parent_pid == os.getpid(), 'can only test a child process'

        if self._popen is None:
            return False

        returncode = self._popen.poll()
        if returncode is None:
            return True
        else:
            _children.discard(self)
            return False

    def close(self):
        '''
        Close the Process object.

        This method releases resources held by the Process object.  It is
        an error to call this method if the child process is still running.
        '''
        if self._popen is not None:
            if self._popen.poll() is None:
                raise ValueError("Cannot close a process while it is still running. "
                                 "You should first call join() or terminate().")
            self._popen.close()
            self._popen = None
            del self._sentinel
            _children.discard(self)
        self._closed = True

    @property
    def name(self):
        return self._name

    @name.setter
    def name(self, name):
        assert isinstance(name, str), 'name must be a string'
        self._name = name

    @property
    def daemon(self):
        '''
        Return whether process is a daemon
        '''
        return self._config.get('daemon', False)

    @daemon.setter
    def daemon(self, daemonic):
        '''
        Set whether process is a daemon
        '''
        assert self._popen is None, 'process has already started'
        self._config['daemon'] = daemonic

    @property
    def authkey(self):
        return self._config['authkey']

    @authkey.setter
    def authkey(self, authkey):
        '''
        Set authorization key of process
        '''
        self._config['authkey'] = AuthenticationString(authkey)

    @property
    def exitcode(self):
        '''
        Return exit code of process or `None` if it has yet to stop
        '''
        self._check_closed()
        if self._popen is None:
            # Not started yet: _popen is None, which is the documented result.
            return self._popen
        return self._popen.poll()

    @property
    def ident(self):
        '''
        Return identifier (PID) of process or `None` if it has yet to start
        '''
        self._check_closed()
        if self is _current_process:
            return os.getpid()
        else:
            # None before start(); the child's pid afterwards.
            return self._popen and self._popen.pid

    pid = ident

    @property
    def sentinel(self):
        '''
        Return a file descriptor (Unix) or handle (Windows) suitable for
        waiting for process termination.
        '''
        self._check_closed()
        try:
            return self._sentinel
        except AttributeError:
            raise ValueError("process not started") from None

    def __repr__(self):
        exitcode = None
        if self is _current_process:
            status = 'started'
        elif self._closed:
            status = 'closed'
        elif self._parent_pid != os.getpid():
            status = 'unknown'
        elif self._popen is None:
            status = 'initial'
        else:
            exitcode = self._popen.poll()
            if exitcode is not None:
                status = 'stopped'
            else:
                status = 'started'

        info = [type(self).__name__, 'name=%r' % self._name]
        if self._popen is not None:
            info.append('pid=%s' % self._popen.pid)
        info.append('parent=%s' % self._parent_pid)
        info.append(status)
        if exitcode is not None:
            exitcode = _exitcode_to_name.get(exitcode, exitcode)
            info.append('exitcode=%s' % exitcode)
        if self.daemon:
            info.append('daemon')
        return '<%s>' % ' '.join(info)

    ##

    def _bootstrap(self, parent_sentinel=None):
        # Entry point executed in the child process: set up the module's
        # per-process state, run self.run(), and translate the outcome
        # into the integer exit code returned to the OS.
        from . import util, context
        global _current_process, _parent_process, _process_counter, _children

        try:
            if self._start_method is not None:
                context._force_start_method(self._start_method)
            _process_counter = itertools.count(1)
            _children = set()
            util._close_stdin()
            old_process = _current_process
            _current_process = self
            _parent_process = _ParentProcess(
                self._parent_name, self._parent_pid, parent_sentinel)
            if threading._HAVE_THREAD_NATIVE_ID:
                threading.main_thread()._set_native_id()
            try:
                util._finalizer_registry.clear()
                util._run_after_forkers()
            finally:
                # delay finalization of the old process object until after
                # _run_after_forkers() is executed
                del old_process
            util.info('child process calling self.run()')
            try:
                self.run()
                exitcode = 0
            finally:
                util._exit_function()
        except SystemExit as e:
            if not e.args:
                exitcode = 1
            elif isinstance(e.args[0], int):
                exitcode = e.args[0]
            else:
                sys.stderr.write(str(e.args[0]) + '\n')
                exitcode = 1
        except:
            exitcode = 1
            import traceback
            sys.stderr.write('Process %s:\n' % self.name)
            traceback.print_exc()
        finally:
            threading._shutdown()
            util.info('process exiting with exitcode %d' % exitcode)
            util._flush_std_streams()

        return exitcode

#
# We subclass bytes to avoid accidental transmission of auth keys over network
#

class AuthenticationString(bytes):
    """Bytes subclass for auth keys that refuses ordinary pickling.

    Pickling is only permitted while a child process is being spawned,
    so keys cannot be serialized accidentally (e.g. over a network).
    """
    def __reduce__(self):
        from .context import get_spawning_popen
        if get_spawning_popen() is not None:
            return AuthenticationString, (bytes(self),)
        raise TypeError(
            'Pickling an AuthenticationString object is '
            'disallowed for security reasons'
            )


#
# Create object representing the parent process
#

class _ParentProcess(BaseProcess):
    # Read-only stand-in for the parent, returned by parent_process() in
    # children.  It only carries identity plus a sentinel to wait on.

    def __init__(self, name, pid, sentinel):
        self._identity = ()
        self._name = name
        self._pid = pid
        self._parent_pid = None
        self._popen = None
        self._closed = False
        self._sentinel = sentinel
        self._config = {}

    def is_alive(self):
        # Alive as long as the parent's sentinel is not yet signalled.
        from multiprocessing.connection import wait
        return not wait([self._sentinel], timeout=0)

    @property
    def ident(self):
        return self._pid

    def join(self, timeout=None):
        '''
        Wait until parent process terminates
        '''
        from multiprocessing.connection import wait
        wait([self._sentinel], timeout=timeout)

    pid = ident

#
# Create object representing the main process
#

class _MainProcess(BaseProcess):
    # Process object for the interpreter's original process; it is never
    # started or joined, it only carries identity and configuration.

    def __init__(self):
        self._identity = ()
        self._name = 'MainProcess'
        self._parent_pid = None
        self._popen = None
        self._closed = False
        self._config = {'authkey': AuthenticationString(os.urandom(32)),
                        'semprefix': '/mp'}
        # Note that some versions of FreeBSD only allow named
        # semaphores to have names of up to 14 characters.  Therefore
        # we choose a short prefix.
        #
        # On MacOSX in a sandbox it may be necessary to use a
        # different prefix -- see #19478.
        #
        # Everything in self._config will be inherited by descendant
        # processes.

    def close(self):
        # The main process object owns no OS resources to release.
        pass


# Module-level process state; rebound inside children during _bootstrap.
_parent_process = None
_current_process = _MainProcess()
_process_counter = itertools.count(1)
_children = set()
del _MainProcess

#
# Give names to some return codes
#

# Maps negative exit codes (killed by signal N) to '-SIGNAME' for repr().
_exitcode_to_name = {}

for name, signum in list(signal.__dict__.items()):
    if name[:3]=='SIG' and '_' not in name:
        _exitcode_to_name[-signum] = f'-{name}'

# For debug and leak testing
_dangling = WeakSet()
PK|��\'�u���__init__.pynu�[���#
# Package analogous to 'threading.py' but using processes
#
# multiprocessing/__init__.py
#
# This package is intended to duplicate the functionality (and much of
# the API) of threading.py but uses processes instead of threads.  A
# subpackage 'multiprocessing.dummy' has the same API but is a simple
# wrapper for 'threading'.
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import sys
from . import context

#
# Copy stuff from default context
#

# Re-export every public attribute of the default context, so that e.g.
# multiprocessing.Process is context._default_context.Process.
__all__ = [x for x in dir(context._default_context) if not x.startswith('_')]
globals().update((name, getattr(context._default_context, name)) for name in __all__)

#
# XXX These should not really be documented or public.
#

SUBDEBUG = 5
SUBWARNING = 25

#
# Alias for main module -- will be reset by bootstrapping child processes
#

if '__main__' in sys.modules:
    sys.modules['__mp_main__'] = sys.modules['__main__']
PK|��\av�-�-	queues.pynu�[���#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']

import sys
import os
import threading
import collections
import time
import weakref
import errno

from queue import Empty, Full

import _multiprocessing

from . import connection
from . import context
_ForkingPickler = context.reduction.ForkingPickler

from .util import debug, info, Finalize, register_after_fork, is_exiting

#
# Queue type using a pipe, buffer and thread
#

class Queue(object):
    """Multi-producer, multi-consumer FIFO queue usable across processes.

    Items put() into the queue are appended to an in-process buffer; a
    background daemon feeder thread serializes them and writes them to a
    pipe.  get() reads them back from the pipe under a shared reader lock.
    """

    def __init__(self, maxsize=0, *, ctx):
        if maxsize <= 0:
            # Can raise ImportError (see issues #3770 and #23400)
            from .synchronize import SEM_VALUE_MAX as maxsize
        self._maxsize = maxsize
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            # writes to a message oriented win32 pipe are atomic, so no
            # write lock is needed on Windows
            self._wlock = None
        else:
            self._wlock = ctx.Lock()
        # counts free slots: acquired by put(), released by get()
        self._sem = ctx.BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False

        self._after_fork()

        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)

    def __getstate__(self):
        # Only picklable while a child process is being spawned.
        context.assert_spawning(self)
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)

    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()

    def _after_fork(self):
        # Reset all per-process state; the feeder thread and its buffer
        # are never shared across processes.
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send_bytes = self._writer.send_bytes
        self._recv_bytes = self._reader.recv_bytes
        self._poll = self._reader.poll

    def put(self, obj, block=True, timeout=None):
        """Append obj to the buffer for the feeder thread; raises Full
        if no free slot becomes available within the timeout."""
        if self._closed:
            raise ValueError(f"Queue {self!r} is closed")
        if not self._sem.acquire(block, timeout):
            raise Full

        with self._notempty:
            if self._thread is None:
                # lazily start the feeder thread on first put()
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()

    def get(self, block=True, timeout=None):
        """Remove and return an item; raises Empty on timeout or when
        non-blocking and nothing is available."""
        if self._closed:
            raise ValueError(f"Queue {self!r} is closed")
        if block and timeout is None:
            with self._rlock:
                res = self._recv_bytes()
            self._sem.release()
        else:
            if block:
                deadline = time.monotonic() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    # account for the time already spent waiting on the lock
                    timeout = deadline - time.monotonic()
                    if not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv_bytes()
                self._sem.release()
            finally:
                self._rlock.release()
        # unserialize the data after having released the lock
        return _ForkingPickler.loads(res)

    def qsize(self):
        # Raises NotImplementedError on Mac OSX because of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()

    def empty(self):
        # True when nothing is waiting in the pipe (buffered items in the
        # feeder thread are not counted).
        return not self._poll()

    def full(self):
        return self._sem._semlock._is_zero()

    def get_nowait(self):
        return self.get(False)

    def put_nowait(self, obj):
        return self.put(obj, False)

    def close(self):
        """Close the reader end and schedule feeder-thread shutdown."""
        self._closed = True
        try:
            self._reader.close()
        finally:
            close = self._close
            if close:
                self._close = None
                close()

    def join_thread(self):
        """Join the feeder thread; only valid after close()."""
        debug('Queue.join_thread()')
        assert self._closed, "Queue {0!r} not closed".format(self)
        if self._jointhread:
            self._jointhread()

    def cancel_join_thread(self):
        """Prevent process exit from blocking on the feeder thread."""
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            # thread not started yet -- nothing to cancel
            pass

    def _start_thread(self):
        """Start the daemon thread that moves data from buffer to pipe."""
        debug('Queue._start_thread()')

        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send_bytes,
                  self._wlock, self._writer.close, self._ignore_epipe,
                  self._on_queue_feeder_error, self._sem),
            name='QueueFeederThread'
        )
        self._thread.daemon = True

        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')

        if not self._joincancelled:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
                )

        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
            )

    @staticmethod
    def _finalize_join(twr):
        """Finalizer: join the feeder thread if it is still alive."""
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')

    @staticmethod
    def _finalize_close(buffer, notempty):
        """Finalizer: wake the feeder thread and tell it to exit."""
        debug('telling queue thread to quit')
        with notempty:
            buffer.append(_sentinel)
            notempty.notify()

    @staticmethod
    def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe,
              onerror, queue_sem):
        """Feeder-thread main loop: drain the buffer into the pipe."""
        debug('starting thread to feed data to pipe')
        # bind hot attribute lookups to locals for speed in the loop below
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            wacquire = None

        while 1:
            try:
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return

                        # serialize the data before acquiring the lock
                        obj = _ForkingPickler.dumps(obj)
                        if wacquire is None:
                            send_bytes(obj)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj)
                            finally:
                                wrelease()
                except IndexError:
                    # buffer drained -- go back to waiting on the condition
                    pass
            except Exception as e:
                if ignore_epipe and getattr(e, 'errno', 0) == errno.EPIPE:
                    return
                # Since this runs in a daemon thread the resources it uses
                # may become unusable while the process is cleaning up.
                # We ignore errors which happen after the process has
                # started to cleanup.
                if is_exiting():
                    info('error in queue thread: %s', e)
                    return
                else:
                    # Since the object has not been sent in the queue, we need
                    # to decrease the size of the queue. The error acts as
                    # if the object had been silently removed from the queue
                    # and this step is necessary to have a properly working
                    # queue.
                    queue_sem.release()
                    onerror(e, obj)

    @staticmethod
    def _on_queue_feeder_error(e, obj):
        """
        Private API hook called when feeding data in the background thread
        raises an exception.  For overriding by concurrent.futures.
        """
        import traceback
        traceback.print_exc()


# Unique marker appended to the buffer to tell the feeder thread to exit.
_sentinel = object()

#
# A queue type which also supports join() and task_done() methods
#
# Note that if you do not call task_done() for each finished task then
# eventually the counter's semaphore may overflow causing Bad Things
# to happen.
#

class JoinableQueue(Queue):
    """Queue subclass that additionally supports join() and task_done()."""

    def __init__(self, maxsize=0, *, ctx):
        Queue.__init__(self, maxsize, ctx=ctx)
        # released once per put(); acquired once per task_done()
        self._unfinished_tasks = ctx.Semaphore(0)
        self._cond = ctx.Condition()

    def __getstate__(self):
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)

    def __setstate__(self, state):
        Queue.__setstate__(self, state[:-2])
        self._cond, self._unfinished_tasks = state[-2:]

    def put(self, obj, block=True, timeout=None):
        """Same as Queue.put() but also counts the item as unfinished."""
        if self._closed:
            raise ValueError(f"Queue {self!r} is closed")
        if not self._sem.acquire(block, timeout):
            raise Full

        # hold both locks so the unfinished count and buffer stay in step
        with self._notempty, self._cond:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._unfinished_tasks.release()
            self._notempty.notify()

    def task_done(self):
        """Mark one previously put item as processed."""
        with self._cond:
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            if self._unfinished_tasks._semlock._is_zero():
                self._cond.notify_all()

    def join(self):
        """Block until every item put on the queue has been task_done()'d."""
        with self._cond:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()

#
# Simplified Queue type -- really just a locked pipe
#

class SimpleQueue(object):
    """A minimal queue: a pipe protected by locks, with no feeder thread."""

    def __init__(self, *, ctx):
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._poll = self._reader.poll
        # writes to a message oriented win32 pipe are atomic, so a write
        # lock is only needed on non-Windows platforms
        self._wlock = None if sys.platform == 'win32' else ctx.Lock()

    def empty(self):
        """Return True when no data is waiting in the pipe."""
        return not self._poll()

    def __getstate__(self):
        # Only picklable while a child process is being spawned.
        context.assert_spawning(self)
        return (self._reader, self._writer, self._rlock, self._wlock)

    def __setstate__(self, state):
        (self._reader, self._writer, self._rlock, self._wlock) = state
        self._poll = self._reader.poll

    def get(self):
        """Remove and return one item, blocking until one is available."""
        with self._rlock:
            payload = self._reader.recv_bytes()
        # unserialize the data after having released the lock
        return _ForkingPickler.loads(payload)

    def put(self, obj):
        """Serialize obj and write it to the pipe."""
        # serialize the data before acquiring the lock
        payload = _ForkingPickler.dumps(obj)
        if self._wlock is None:
            self._writer.send_bytes(payload)
        else:
            with self._wlock:
                self._writer.send_bytes(payload)
PK|��\���Z-Z-synchronize.pynu�[���#
# Module implementing synchronization primitives
#
# multiprocessing/synchronize.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = [
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition', 'Event'
    ]

import threading
import sys
import tempfile
import _multiprocessing
import time

from . import context
from . import process
from . import util

# Try to import the mp.synchronize module cleanly, if it fails
# raise ImportError for platforms lacking a working sem_open implementation.
# See issue 3770
try:
    from _multiprocessing import SemLock, sem_unlink
except ImportError:
    # Implicit string concatenation keeps the message identical while
    # avoiding runtime '+' on constants.
    raise ImportError("This platform lacks a functioning sem_open"
                      " implementation, therefore, the required"
                      " synchronization primitives needed will not"
                      " function, see issue 3770.")

#
# Constants
#

# Kind codes understood by _multiprocessing.SemLock.
RECURSIVE_MUTEX, SEMAPHORE = list(range(2))
# Largest value a semaphore may hold on this platform.
SEM_VALUE_MAX = _multiprocessing.SemLock.SEM_VALUE_MAX

#
# Base class for semaphores and mutexes; wraps `_multiprocessing.SemLock`
#

class SemLock(object):
    """Base wrapper around `_multiprocessing.SemLock` handling naming,
    pickling for spawn, after-fork fixup and resource tracking."""

    _rand = tempfile._RandomNameSequence()

    def __init__(self, kind, value, maxvalue, *, ctx):
        if ctx is None:
            ctx = context._default_context.get_context()
        name = ctx.get_start_method()
        # With fork (or on Windows) the handle is inherited directly, so
        # the name can be unlinked right away.
        unlink_now = sys.platform == 'win32' or name == 'fork'
        for i in range(100):
            try:
                sl = self._semlock = _multiprocessing.SemLock(
                    kind, value, maxvalue, self._make_name(),
                    unlink_now)
            except FileExistsError:
                # name collision -- retry with a fresh random name
                pass
            else:
                break
        else:
            raise FileExistsError('cannot find name for semaphore')

        util.debug('created semlock with handle %s' % sl.handle)
        self._make_methods()

        if sys.platform != 'win32':
            def _after_fork(obj):
                obj._semlock._after_fork()
            util.register_after_fork(self, _after_fork)

        if self._semlock.name is not None:
            # We only get here if we are on Unix with forking
            # disabled.  When the object is garbage collected or the
            # process shuts down we unlink the semaphore name
            from .resource_tracker import register
            register(self._semlock.name, "semaphore")
            util.Finalize(self, SemLock._cleanup, (self._semlock.name,),
                          exitpriority=0)

    @staticmethod
    def _cleanup(name):
        """Unlink the named semaphore and drop it from the tracker."""
        from .resource_tracker import unregister
        sem_unlink(name)
        unregister(name, "semaphore")

    def _make_methods(self):
        # expose the C object's methods directly for speed
        self.acquire = self._semlock.acquire
        self.release = self._semlock.release

    def __enter__(self):
        return self._semlock.__enter__()

    def __exit__(self, *args):
        return self._semlock.__exit__(*args)

    def __getstate__(self):
        # Only picklable while a child process is being spawned.
        context.assert_spawning(self)
        sl = self._semlock
        if sys.platform == 'win32':
            h = context.get_spawning_popen().duplicate_for_child(sl.handle)
        else:
            h = sl.handle
        return (h, sl.kind, sl.maxvalue, sl.name)

    def __setstate__(self, state):
        self._semlock = _multiprocessing.SemLock._rebuild(*state)
        util.debug('recreated blocker with handle %r' % state[0])
        self._make_methods()

    @staticmethod
    def _make_name():
        return '%s-%s' % (process.current_process()._config['semprefix'],
                          next(SemLock._rand))

#
# Semaphore
#

class Semaphore(SemLock):
    """Counting semaphore built on SemLock with the platform maximum."""

    def __init__(self, value=1, *, ctx):
        SemLock.__init__(self, SEMAPHORE, value, SEM_VALUE_MAX, ctx=ctx)

    def get_value(self):
        """Return the semaphore's current value."""
        return self._semlock._get_value()

    def __repr__(self):
        try:
            current = self._semlock._get_value()
        except Exception:
            current = 'unknown'
        return '<%s(value=%s)>' % (self.__class__.__name__, current)

#
# Bounded semaphore
#

class BoundedSemaphore(Semaphore):
    """Semaphore whose value can never exceed its initial value."""

    def __init__(self, value=1, *, ctx):
        # using maxvalue == value enforces the bound
        SemLock.__init__(self, SEMAPHORE, value, value, ctx=ctx)

    def __repr__(self):
        try:
            current = self._semlock._get_value()
        except Exception:
            current = 'unknown'
        return '<%s(value=%s, maxvalue=%s)>' % (
            self.__class__.__name__, current, self._semlock.maxvalue)

#
# Non-recursive lock
#

class Lock(SemLock):
    """Non-recursive lock: a semaphore of maximum value 1."""

    def __init__(self, *, ctx):
        SemLock.__init__(self, SEMAPHORE, 1, 1, ctx=ctx)

    def __repr__(self):
        try:
            sl = self._semlock
            if sl._is_mine():
                owner = process.current_process().name
                tname = threading.current_thread().name
                if tname != 'MainThread':
                    owner = owner + '|' + tname
            elif sl._get_value() == 1:
                owner = 'None'
            elif sl._count() > 0:
                owner = 'SomeOtherThread'
            else:
                owner = 'SomeOtherProcess'
        except Exception:
            owner = 'unknown'
        return '<%s(owner=%s)>' % (self.__class__.__name__, owner)

#
# Recursive lock
#

class RLock(SemLock):
    """Recursive lock: may be acquired repeatedly by the owning thread."""

    def __init__(self, *, ctx):
        SemLock.__init__(self, RECURSIVE_MUTEX, 1, 1, ctx=ctx)

    def __repr__(self):
        try:
            sl = self._semlock
            if sl._is_mine():
                owner = process.current_process().name
                tname = threading.current_thread().name
                if tname != 'MainThread':
                    owner = owner + '|' + tname
                count = sl._count()
            elif sl._get_value() == 1:
                owner, count = 'None', 0
            elif sl._count() > 0:
                owner, count = 'SomeOtherThread', 'nonzero'
            else:
                owner, count = 'SomeOtherProcess', 'nonzero'
        except Exception:
            owner, count = 'unknown', 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__, owner, count)

#
# Condition variable
#

class Condition(object):
    """Condition variable built from a lock and three counting semaphores."""

    def __init__(self, lock=None, *, ctx):
        self._lock = lock or ctx.RLock()
        # released on entry to wait(): number of threads going to sleep
        self._sleeping_count = ctx.Semaphore(0)
        # released on exit from wait(): number of threads woken again
        self._woken_count = ctx.Semaphore(0)
        # the semaphore sleepers actually block on
        self._wait_semaphore = ctx.Semaphore(0)
        self._make_methods()

    def __getstate__(self):
        # Only picklable while a child process is being spawned.
        context.assert_spawning(self)
        return (self._lock, self._sleeping_count,
                self._woken_count, self._wait_semaphore)

    def __setstate__(self, state):
        (self._lock, self._sleeping_count,
         self._woken_count, self._wait_semaphore) = state
        self._make_methods()

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)

    def _make_methods(self):
        # delegate acquire/release straight to the underlying lock
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __repr__(self):
        try:
            num_waiters = (self._sleeping_count._semlock._get_value() -
                           self._woken_count._semlock._get_value())
        except Exception:
            num_waiters = 'unknown'
        return '<%s(%s, %s)>' % (self.__class__.__name__, self._lock, num_waiters)

    def wait(self, timeout=None):
        """Release the lock, wait for notify() or timeout, then reacquire.
        Returns True if woken by a notify, False on timeout."""
        assert self._lock._semlock._is_mine(), \
               'must acquire() condition before using wait()'

        # indicate that this thread is going to sleep
        self._sleeping_count.release()

        # release lock
        count = self._lock._semlock._count()
        for i in range(count):
            self._lock.release()

        try:
            # wait for notification or timeout
            return self._wait_semaphore.acquire(True, timeout)
        finally:
            # indicate that this thread has woken
            self._woken_count.release()

            # reacquire lock
            for i in range(count):
                self._lock.acquire()

    def notify(self, n=1):
        """Wake up to n sleeping waiters.  Caller must hold the lock."""
        assert self._lock._semlock._is_mine(), 'lock is not owned'
        assert not self._wait_semaphore.acquire(
            False), ('notify: Should not have been able to acquire '
                     + '_wait_semaphore')

        # to take account of timeouts since last notify*() we subtract
        # woken_count from sleeping_count and rezero woken_count
        while self._woken_count.acquire(False):
            res = self._sleeping_count.acquire(False)
            assert res, ('notify: Bug in sleeping_count.acquire'
                         + '- res should not be False')

        sleepers = 0
        while sleepers < n and self._sleeping_count.acquire(False):
            self._wait_semaphore.release()        # wake up one sleeper
            sleepers += 1

        if sleepers:
            for i in range(sleepers):
                self._woken_count.acquire()       # wait for a sleeper to wake

            # rezero wait_semaphore in case some timeouts just happened
            while self._wait_semaphore.acquire(False):
                pass

    def notify_all(self):
        self.notify(n=sys.maxsize)

    def wait_for(self, predicate, timeout=None):
        """Wait until predicate() is true, re-testing after each wake;
        returns the final value of the predicate."""
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = time.monotonic() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                waittime = endtime - time.monotonic()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        return result

#
# Event
#

class Event(object):
    """Process-shared flag: a Condition plus a semaphore whose value is
    1 while set and 0 while clear."""

    def __init__(self, *, ctx):
        self._cond = ctx.Condition(ctx.Lock())
        self._flag = ctx.Semaphore(0)

    def is_set(self):
        """Return True if the flag is currently set."""
        with self._cond:
            if self._flag.acquire(False):
                # value was 1: restore it and report set
                self._flag.release()
                return True
            return False

    def set(self):
        """Set the flag and wake every waiter."""
        with self._cond:
            # acquire first so the semaphore value never exceeds 1
            self._flag.acquire(False)
            self._flag.release()
            self._cond.notify_all()

    def clear(self):
        """Reset the flag to the unset state."""
        with self._cond:
            self._flag.acquire(False)

    def wait(self, timeout=None):
        """Block until the flag is set or timeout elapses; return the
        flag's state on exit."""
        with self._cond:
            if self._flag.acquire(False):
                self._flag.release()
            else:
                self._cond.wait(timeout)

            # re-test: the flag may have been cleared again meanwhile
            if self._flag.acquire(False):
                self._flag.release()
                return True
            return False

#
# Barrier
#

class Barrier(threading.Barrier):
    """Process-shared barrier: reuses threading.Barrier's algorithm but
    keeps the (state, count) pair in shared memory so every process
    sees the same values."""

    def __init__(self, parties, action=None, timeout=None, *, ctx):
        import struct
        from .heap import BufferWrapper
        # room for two C ints: _state and _count
        wrapper = BufferWrapper(struct.calcsize('i') * 2)
        cond = ctx.Condition()
        self.__setstate__((parties, action, timeout, cond, wrapper))
        self._state = 0
        self._count = 0

    def __setstate__(self, state):
        (self._parties, self._action, self._timeout,
         self._cond, self._wrapper) = state
        # view the shared buffer as an array of ints
        self._array = self._wrapper.create_memoryview().cast('i')

    def __getstate__(self):
        return (self._parties, self._action, self._timeout,
                self._cond, self._wrapper)

    @property
    def _state(self):
        # barrier state lives in shared-memory slot 0
        return self._array[0]

    @_state.setter
    def _state(self, value):
        self._array[0] = value

    @property
    def _count(self):
        # number of waiting parties lives in shared-memory slot 1
        return self._array[1]

    @_count.setter
    def _count(self, value):
        self._array[1] = value
PK|��\d�ej��popen_spawn_win32.pynu�[���import os
import msvcrt
import signal
import sys
import _winapi

from .context import reduction, get_spawning_popen, set_spawning_popen
from . import spawn
from . import util

__all__ = ['Popen']

#
#
#

# Exit code used when a process is forcibly terminated (see terminate()).
TERMINATE = 0x10000
# True when running from a frozen Windows executable.
WINEXE = (sys.platform == 'win32' and getattr(sys, 'frozen', False))
WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")


def _path_eq(p1, p2):
    return p1 == p2 or os.path.normcase(p1) == os.path.normcase(p2)

# True when sys.executable is a venv redirector rather than the base Python.
WINENV = not _path_eq(sys.executable, sys._base_executable)


def _close_handles(*handles):
    """Finalizer helper: close every given Win32 handle."""
    for handle in handles:
        _winapi.CloseHandle(handle)


#
# We define a Popen class similar to the one from subprocess, but
# whose constructor takes a process object as its argument.
#

class Popen(object):
    '''
    Start a subprocess to run the code of a process object
    '''
    method = 'spawn'

    def __init__(self, process_obj):
        prep_data = spawn.get_preparation_data(process_obj._name)

        # read end of pipe will be duplicated by the child process
        # -- see spawn_main() in spawn.py.
        #
        # bpo-33929: Previously, the read end of pipe was "stolen" by the child
        # process, but it leaked a handle if the child process had been
        # terminated before it could steal the handle from the parent process.
        rhandle, whandle = _winapi.CreatePipe(None, 0)
        wfd = msvcrt.open_osfhandle(whandle, 0)
        cmd = spawn.get_command_line(parent_pid=os.getpid(),
                                     pipe_handle=rhandle)
        cmd = ' '.join('"%s"' % x for x in cmd)

        python_exe = spawn.get_executable()

        # bpo-35797: When running in a venv, we bypass the redirect
        # executor and launch our base Python.
        if WINENV and _path_eq(python_exe, sys.executable):
            python_exe = sys._base_executable
            env = os.environ.copy()
            env["__PYVENV_LAUNCHER__"] = sys.executable
        else:
            env = None

        with open(wfd, 'wb', closefd=True) as to_child:
            # start process
            try:
                hp, ht, pid, tid = _winapi.CreateProcess(
                    python_exe, cmd,
                    None, None, False, 0, env, None, None)
                # the thread handle is never needed -- close it immediately
                _winapi.CloseHandle(ht)
            except:
                _winapi.CloseHandle(rhandle)
                raise

            # set attributes of self
            self.pid = pid
            self.returncode = None
            self._handle = hp
            self.sentinel = int(hp)
            # ensure both retained handles are closed when self is finalized
            self.finalizer = util.Finalize(self, _close_handles,
                                           (self.sentinel, int(rhandle)))

            # send information to child
            set_spawning_popen(self)
            try:
                reduction.dump(prep_data, to_child)
                reduction.dump(process_obj, to_child)
            finally:
                set_spawning_popen(None)

    def duplicate_for_child(self, handle):
        """Duplicate a handle into the child; only valid while spawning."""
        assert self is get_spawning_popen()
        return reduction.duplicate(handle, self.sentinel)

    def wait(self, timeout=None):
        """Wait up to timeout seconds for the child; return its exit code
        or None if still running."""
        if self.returncode is None:
            if timeout is None:
                msecs = _winapi.INFINITE
            else:
                msecs = max(0, int(timeout * 1000 + 0.5))

            res = _winapi.WaitForSingleObject(int(self._handle), msecs)
            if res == _winapi.WAIT_OBJECT_0:
                code = _winapi.GetExitCodeProcess(self._handle)
                if code == TERMINATE:
                    # report forced termination like a Unix SIGTERM death
                    code = -signal.SIGTERM
                self.returncode = code

        return self.returncode

    def poll(self):
        """Non-blocking check; returns exit code or None."""
        return self.wait(timeout=0)

    def terminate(self):
        """Forcibly terminate the child process."""
        if self.returncode is None:
            try:
                _winapi.TerminateProcess(int(self._handle), TERMINATE)
            except OSError:
                # the process may have just exited -- only re-raise if it
                # is still apparently running
                if self.wait(timeout=1.0) is None:
                    raise

    kill = terminate

    def close(self):
        """Release the process/pipe handles held by this object."""
        self.finalizer()
PK|��\�>.�


popen_fork.pynu�[���import os
import signal

from . import util

__all__ = ['Popen']

#
# Start child process using fork
#

class Popen(object):
    """Start and manage a child process created with os.fork()."""

    method = 'fork'

    def __init__(self, process_obj):
        util._flush_std_streams()
        self.returncode = None
        self.finalizer = None
        self._launch(process_obj)

    def duplicate_for_child(self, fd):
        # With fork the child inherits fds directly; nothing to duplicate.
        return fd

    def poll(self, flag=os.WNOHANG):
        """Return the child's exit code if it has exited, else None."""
        if self.returncode is None:
            try:
                pid, sts = os.waitpid(self.pid, flag)
            except OSError:
                # Child process not yet created (errno.ECHILD). See #1731717.
                # The exception object was previously bound but unused.
                return None
            if pid == self.pid:
                if os.WIFSIGNALED(sts):
                    self.returncode = -os.WTERMSIG(sts)
                else:
                    assert os.WIFEXITED(sts), "Status is {:n}".format(sts)
                    self.returncode = os.WEXITSTATUS(sts)
        return self.returncode

    def wait(self, timeout=None):
        """Wait up to timeout seconds for the child to exit; return its
        exit code, or None if it is still running after the timeout."""
        if self.returncode is None:
            if timeout is not None:
                from multiprocessing.connection import wait
                if not wait([self.sentinel], timeout):
                    return None
            # This shouldn't block if wait() returned successfully.
            return self.poll(os.WNOHANG if timeout == 0.0 else 0)
        return self.returncode

    def _send_signal(self, sig):
        # Deliver sig to the child, tolerating an already-reaped process.
        if self.returncode is None:
            try:
                os.kill(self.pid, sig)
            except ProcessLookupError:
                pass
            except OSError:
                if self.wait(timeout=0.1) is None:
                    raise

    def terminate(self):
        self._send_signal(signal.SIGTERM)

    def kill(self):
        self._send_signal(signal.SIGKILL)

    def _launch(self, process_obj):
        """Fork and run process_obj._bootstrap() in the child."""
        code = 1
        parent_r, child_w = os.pipe()
        child_r, parent_w = os.pipe()
        self.pid = os.fork()
        if self.pid == 0:
            # child: close the parent's pipe ends, run, then _exit so no
            # parent cleanup handlers execute in the child
            try:
                os.close(parent_r)
                os.close(parent_w)
                code = process_obj._bootstrap(parent_sentinel=child_r)
            finally:
                os._exit(code)
        else:
            # parent: close the child's ends; parent_r doubles as sentinel
            os.close(child_w)
            os.close(child_r)
            self.finalizer = util.Finalize(self, util.close_fds,
                                           (parent_r, parent_w,))
            self.sentinel = parent_r

    def close(self):
        if self.finalizer is not None:
            self.finalizer()
PK|��\
����popen_spawn_posix.pynu�[���import io
import os

from .context import reduction, set_spawning_popen
from . import popen_fork
from . import spawn
from . import util

__all__ = ['Popen']


#
# Wrapper for an fd used while launching a process
#

class _DupFd(object):
    def __init__(self, fd):
        self.fd = fd
    def detach(self):
        return self.fd

#
# Start child process using a fresh interpreter
#

class Popen(popen_fork.Popen):
    """Start a child using a fresh interpreter (the 'spawn' start method)."""
    method = 'spawn'
    DupFd = _DupFd

    def __init__(self, process_obj):
        # fds that must be kept open for, and passed to, the child.
        self._fds = []
        super().__init__(process_obj)

    def duplicate_for_child(self, fd):
        """Mark *fd* as inherited by the child; return it unchanged."""
        self._fds.append(fd)
        return fd

    def _launch(self, process_obj):
        """Spawn a new interpreter and feed it the pickled process state.

        Pickles the preparation data and *process_obj* first, then spawns
        the child with the pipe read end and resource-tracker fd passed
        through, and finally writes the pickled bytes into the pipe.
        """
        from . import resource_tracker
        tracker_fd = resource_tracker.getfd()
        self._fds.append(tracker_fd)
        prep_data = spawn.get_preparation_data(process_obj._name)
        fp = io.BytesIO()
        set_spawning_popen(self)
        try:
            reduction.dump(prep_data, fp)
            reduction.dump(process_obj, fp)
        finally:
            set_spawning_popen(None)

        parent_r = child_w = child_r = parent_w = None
        try:
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            cmd = spawn.get_command_line(tracker_fd=tracker_fd,
                                         pipe_handle=child_r)
            self._fds.extend([child_r, child_w])
            self.pid = util.spawnv_passfds(spawn.get_executable(),
                                           cmd, self._fds)
            self.sentinel = parent_r
            # closefd=False: parent_w is closed by the finalizer below,
            # not by the file object.
            with open(parent_w, 'wb', closefd=False) as f:
                f.write(fp.getbuffer())
        finally:
            # Parent-side fds are closed lazily via the finalizer; the
            # child-side fds are no longer needed in this process.
            fds_to_close = []
            for fd in (parent_r, parent_w):
                if fd is not None:
                    fds_to_close.append(fd)
            self.finalizer = util.Finalize(self, util.close_fds, fds_to_close)

            for fd in (child_r, child_w):
                if fd is not None:
                    os.close(fd)
PK|��\D~�jAA/dummy/__pycache__/__init__.cpython-38.opt-2.pycnu�[���U

e5d��@sdddddddddd	d
ddd
ddgZddlZddlZddlZddlZddlmZddlmZmZm	Z	m
Z
ddlmZmZm
Z
ddlmZGdd�dej�ZeZejZe��e�_dd�Zdd�ZGdd�de�ZeZeZd'dd�ZGd d!�d!e�Zd"d�Zd#d$�Z d(d&d�Z!eZ"dS))�Process�current_process�active_children�freeze_support�Lock�RLock�	Semaphore�BoundedSemaphore�	Condition�Event�Barrier�Queue�Manager�Pipe�Pool�
JoinableQueue�N�)r)rrrr)r
r	r)rc@s4eZdZddddifdd�Zdd�Zedd��ZdS)	�DummyProcessN�cCs8tj�||||||�d|_t��|_d|_t�|_	dS)NF)
�	threading�Thread�__init__Z_pid�weakref�WeakKeyDictionary�	_children�
_start_calledr�_parent)�self�group�target�name�args�kwargsrr�6/usr/lib64/python3.8/multiprocessing/dummy/__init__.pyr$s

zDummyProcess.__init__cCsN|jt�k	r td�|jt����d|_t|jd�r>d|jj|<tj�	|�dS)Nz,Parent is {0!r} but current_process is {1!r}Tr)
rr�RuntimeError�formatr�hasattrrrr�start�rrrr#r'+s��zDummyProcess.startcCs|jr|��sdSdSdS)Nr)r�is_aliver(rrr#�exitcode5szDummyProcess.exitcode)�__name__�
__module__�__qualname__rr'�propertyr*rrrr#r"s
rcCs2t�j}t|�D]}|��s|�|d�qt|�S�N)rr�listr)�pop)Zchildren�prrr#rDs
cCsdSr/rrrrr#rKsc@seZdZdd�Zdd�ZdS)�	NamespacecKs|j�|�dSr/)�__dict__�update)r�kwdsrrr#rSszNamespace.__init__cCsZt|j���}g}|D]$\}}|�d�s|�d||f�q|��d|jjd�|�fS)N�_z%s=%rz%s(%s)z, )	r0r4�items�
startswith�append�sort�	__class__r+�join)rr8Ztempr �valuerrr#�__repr__Us
zNamespace.__repr__N)r+r,r-rr?rrrr#r3Rsr3TcCst�||�Sr/)�array)�typecodeZsequence�lockrrr#�ArrayasrCc@s8eZdZd
dd�Zedd��Zejdd��Zdd�Zd	S)�ValueTcCs||_||_dSr/)�	_typecode�_value)rrAr>rBrrr#reszValue.__init__cCs|jSr/�rFr(rrr#r>iszValue.valuecCs
||_dSr/rG)rr>rrr#r>mscCsdt|�j|j|jfS)Nz<%s(%r, %r)>)�typer+rErFr(rrr#r?qszValue.__repr__N)T)r+r,r-rr.r>�setterr?rrrr#rDds


rDcCs
tjtSr/)�sys�modulesr+rrrr#r
tscCsdSr/rrrrr#�shutdownwsrLrcCsddlm}||||�S)N�)�
ThreadPool)ZpoolrN)Z	processesZinitializerZinitargsrNrrr#rzs)T)NNr)#�__all__rrJrr@Z
connectionrrrrrr
r	rZqueuerrrrZcurrent_threadrrrrr�objectr3�dictr0rCrDr
rLrrrrrr#�<module>sN�


PK|��\�/;��	�	+dummy/__pycache__/connection.cpython-38.pycnu�[���U

e5d>�@sRdddgZddlmZdgZGdd�de�Zdd�Zdd	d�ZGd
d�de�ZdS)
�Client�Listener�Pipe�)�QueueNc@sBeZdZddd�Zdd�Zdd�Zed	d
��Zdd�Zd
d�Z	dS)rN�cCst|�|_dS�N)r�_backlog_queue)�self�addressZfamilyZbacklog�r�8/usr/lib64/python3.8/multiprocessing/dummy/connection.py�__init__szListener.__init__cCst|j���Sr)�
Connectionr�get�r	rrr�acceptszListener.acceptcCs
d|_dSr�rrrrr�closeszListener.closecCs|jSrrrrrrr
szListener.addresscCs|Srrrrrr�	__enter__!szListener.__enter__cCs|��dSr�r�r	�exc_type�	exc_valueZexc_tbrrr�__exit__$szListener.__exit__)NNr)
�__name__�
__module__�__qualname__r
rr�propertyr
rrrrrrrs

cCs&t�t�}}|�||f�t||�Sr)r�putr)r
�_in�_outrrrr(sTcCs"t�t�}}t||�t||�fSr)rr)Zduplex�a�brrrr.sc@s6eZdZdd�Zd
dd�Zdd�Zdd	�Zd
d�ZdS)rcCs,||_||_|j|_|_|j|_|_dSr)r rr�sendZ
send_bytesrZrecvZ
recv_bytes)r	rr rrrr
5szConnection.__init__�c	CsN|j��dkrdS|dkrdS|jj�|jj�|�W5QRX|j��dkS)NrTr$F)rZqsizeZ	not_empty�wait)r	Ztimeoutrrr�poll;s
zConnection.pollcCsdSrrrrrrrDszConnection.closecCs|SrrrrrrrGszConnection.__enter__cCs|��dSrrrrrrrJszConnection.__exit__N)r$)rrrr
r&rrrrrrrr3s

	r)T)	�__all__ZqueuerZfamilies�objectrrrrrrrr�<module>
s

PK|��\�/;��	�	1dummy/__pycache__/connection.cpython-38.opt-2.pycnu�[���U

e5d>�@sRdddgZddlmZdgZGdd�de�Zdd�Zdd	d�ZGd
d�de�ZdS)
�Client�Listener�Pipe�)�QueueNc@sBeZdZddd�Zdd�Zdd�Zed	d
��Zdd�Zd
d�Z	dS)rN�cCst|�|_dS�N)r�_backlog_queue)�self�addressZfamilyZbacklog�r�8/usr/lib64/python3.8/multiprocessing/dummy/connection.py�__init__szListener.__init__cCst|j���Sr)�
Connectionr�get�r	rrr�acceptszListener.acceptcCs
d|_dSr�rrrrr�closeszListener.closecCs|jSrrrrrrr
szListener.addresscCs|Srrrrrr�	__enter__!szListener.__enter__cCs|��dSr�r�r	�exc_type�	exc_valueZexc_tbrrr�__exit__$szListener.__exit__)NNr)
�__name__�
__module__�__qualname__r
rr�propertyr
rrrrrrrs

cCs&t�t�}}|�||f�t||�Sr)r�putr)r
�_in�_outrrrr(sTcCs"t�t�}}t||�t||�fSr)rr)Zduplex�a�brrrr.sc@s6eZdZdd�Zd
dd�Zdd�Zdd	�Zd
d�ZdS)rcCs,||_||_|j|_|_|j|_|_dSr)r rr�sendZ
send_bytesrZrecvZ
recv_bytes)r	rr rrrr
5szConnection.__init__�c	CsN|j��dkrdS|dkrdS|jj�|jj�|�W5QRX|j��dkS)NrTr$F)rZqsizeZ	not_empty�wait)r	Ztimeoutrrr�poll;s
zConnection.pollcCsdSrrrrrrrDszConnection.closecCs|SrrrrrrrGszConnection.__enter__cCs|��dSrrrrrrrJszConnection.__exit__N)r$)rrrr
r&rrrrrrrr3s

	r)T)	�__all__ZqueuerZfamilies�objectrrrrrrrr�<module>
s

PK|��\�/;��	�	1dummy/__pycache__/connection.cpython-38.opt-1.pycnu�[���U

e5d>�@sRdddgZddlmZdgZGdd�de�Zdd�Zdd	d�ZGd
d�de�ZdS)
�Client�Listener�Pipe�)�QueueNc@sBeZdZddd�Zdd�Zdd�Zed	d
��Zdd�Zd
d�Z	dS)rN�cCst|�|_dS�N)r�_backlog_queue)�self�addressZfamilyZbacklog�r�8/usr/lib64/python3.8/multiprocessing/dummy/connection.py�__init__szListener.__init__cCst|j���Sr)�
Connectionr�get�r	rrr�acceptszListener.acceptcCs
d|_dSr�rrrrr�closeszListener.closecCs|jSrrrrrrr
szListener.addresscCs|Srrrrrr�	__enter__!szListener.__enter__cCs|��dSr�r�r	�exc_type�	exc_valueZexc_tbrrr�__exit__$szListener.__exit__)NNr)
�__name__�
__module__�__qualname__r
rr�propertyr
rrrrrrrs

cCs&t�t�}}|�||f�t||�Sr)r�putr)r
�_in�_outrrrr(sTcCs"t�t�}}t||�t||�fSr)rr)Zduplex�a�brrrr.sc@s6eZdZdd�Zd
dd�Zdd�Zdd	�Zd
d�ZdS)rcCs,||_||_|j|_|_|j|_|_dSr)r rr�sendZ
send_bytesrZrecvZ
recv_bytes)r	rr rrrr
5szConnection.__init__�c	CsN|j��dkrdS|dkrdS|jj�|jj�|�W5QRX|j��dkS)NrTr$F)rZqsizeZ	not_empty�wait)r	Ztimeoutrrr�poll;s
zConnection.pollcCsdSrrrrrrrDszConnection.closecCs|SrrrrrrrGszConnection.__enter__cCs|��dSrrrrrrrJszConnection.__exit__N)r$)rrrr
r&rrrrrrrr3s

	r)T)	�__all__ZqueuerZfamilies�objectrrrrrrrr�<module>
s

PK|��\D~�jAA)dummy/__pycache__/__init__.cpython-38.pycnu�[���U

e5d��@sdddddddddd	d
ddd
ddgZddlZddlZddlZddlZddlmZddlmZmZm	Z	m
Z
ddlmZmZm
Z
ddlmZGdd�dej�ZeZejZe��e�_dd�Zdd�ZGdd�de�ZeZeZd'dd�ZGd d!�d!e�Zd"d�Zd#d$�Z d(d&d�Z!eZ"dS))�Process�current_process�active_children�freeze_support�Lock�RLock�	Semaphore�BoundedSemaphore�	Condition�Event�Barrier�Queue�Manager�Pipe�Pool�
JoinableQueue�N�)r)rrrr)r
r	r)rc@s4eZdZddddifdd�Zdd�Zedd��ZdS)	�DummyProcessN�cCs8tj�||||||�d|_t��|_d|_t�|_	dS)NF)
�	threading�Thread�__init__Z_pid�weakref�WeakKeyDictionary�	_children�
_start_calledr�_parent)�self�group�target�name�args�kwargsrr�6/usr/lib64/python3.8/multiprocessing/dummy/__init__.pyr$s

zDummyProcess.__init__cCsN|jt�k	r td�|jt����d|_t|jd�r>d|jj|<tj�	|�dS)Nz,Parent is {0!r} but current_process is {1!r}Tr)
rr�RuntimeError�formatr�hasattrrrr�start�rrrr#r'+s��zDummyProcess.startcCs|jr|��sdSdSdS)Nr)r�is_aliver(rrr#�exitcode5szDummyProcess.exitcode)�__name__�
__module__�__qualname__rr'�propertyr*rrrr#r"s
rcCs2t�j}t|�D]}|��s|�|d�qt|�S�N)rr�listr)�pop)Zchildren�prrr#rDs
cCsdSr/rrrrr#rKsc@seZdZdd�Zdd�ZdS)�	NamespacecKs|j�|�dSr/)�__dict__�update)r�kwdsrrr#rSszNamespace.__init__cCsZt|j���}g}|D]$\}}|�d�s|�d||f�q|��d|jjd�|�fS)N�_z%s=%rz%s(%s)z, )	r0r4�items�
startswith�append�sort�	__class__r+�join)rr8Ztempr �valuerrr#�__repr__Us
zNamespace.__repr__N)r+r,r-rr?rrrr#r3Rsr3TcCst�||�Sr/)�array)�typecodeZsequence�lockrrr#�ArrayasrCc@s8eZdZd
dd�Zedd��Zejdd��Zdd�Zd	S)�ValueTcCs||_||_dSr/)�	_typecode�_value)rrAr>rBrrr#reszValue.__init__cCs|jSr/�rFr(rrr#r>iszValue.valuecCs
||_dSr/rG)rr>rrr#r>mscCsdt|�j|j|jfS)Nz<%s(%r, %r)>)�typer+rErFr(rrr#r?qszValue.__repr__N)T)r+r,r-rr.r>�setterr?rrrr#rDds


rDcCs
tjtSr/)�sys�modulesr+rrrr#r
tscCsdSr/rrrrr#�shutdownwsrLrcCsddlm}||||�S)N�)�
ThreadPool)ZpoolrN)Z	processesZinitializerZinitargsrNrrr#rzs)T)NNr)#�__all__rrJrr@Z
connectionrrrrrr
r	rZqueuerrrrZcurrent_threadrrrrr�objectr3�dictr0rCrDr
rLrrrrrr#�<module>sN�


PK|��\D~�jAA/dummy/__pycache__/__init__.cpython-38.opt-1.pycnu�[���U

e5d��@sdddddddddd	d
ddd
ddgZddlZddlZddlZddlZddlmZddlmZmZm	Z	m
Z
ddlmZmZm
Z
ddlmZGdd�dej�ZeZejZe��e�_dd�Zdd�ZGdd�de�ZeZeZd'dd�ZGd d!�d!e�Zd"d�Zd#d$�Z d(d&d�Z!eZ"dS))�Process�current_process�active_children�freeze_support�Lock�RLock�	Semaphore�BoundedSemaphore�	Condition�Event�Barrier�Queue�Manager�Pipe�Pool�
JoinableQueue�N�)r)rrrr)r
r	r)rc@s4eZdZddddifdd�Zdd�Zedd��ZdS)	�DummyProcessN�cCs8tj�||||||�d|_t��|_d|_t�|_	dS)NF)
�	threading�Thread�__init__Z_pid�weakref�WeakKeyDictionary�	_children�
_start_calledr�_parent)�self�group�target�name�args�kwargsrr�6/usr/lib64/python3.8/multiprocessing/dummy/__init__.pyr$s

zDummyProcess.__init__cCsN|jt�k	r td�|jt����d|_t|jd�r>d|jj|<tj�	|�dS)Nz,Parent is {0!r} but current_process is {1!r}Tr)
rr�RuntimeError�formatr�hasattrrrr�start�rrrr#r'+s��zDummyProcess.startcCs|jr|��sdSdSdS)Nr)r�is_aliver(rrr#�exitcode5szDummyProcess.exitcode)�__name__�
__module__�__qualname__rr'�propertyr*rrrr#r"s
rcCs2t�j}t|�D]}|��s|�|d�qt|�S�N)rr�listr)�pop)Zchildren�prrr#rDs
cCsdSr/rrrrr#rKsc@seZdZdd�Zdd�ZdS)�	NamespacecKs|j�|�dSr/)�__dict__�update)r�kwdsrrr#rSszNamespace.__init__cCsZt|j���}g}|D]$\}}|�d�s|�d||f�q|��d|jjd�|�fS)N�_z%s=%rz%s(%s)z, )	r0r4�items�
startswith�append�sort�	__class__r+�join)rr8Ztempr �valuerrr#�__repr__Us
zNamespace.__repr__N)r+r,r-rr?rrrr#r3Rsr3TcCst�||�Sr/)�array)�typecodeZsequence�lockrrr#�ArrayasrCc@s8eZdZd
dd�Zedd��Zejdd��Zdd�Zd	S)�ValueTcCs||_||_dSr/)�	_typecode�_value)rrAr>rBrrr#reszValue.__init__cCs|jSr/�rFr(rrr#r>iszValue.valuecCs
||_dSr/rG)rr>rrr#r>mscCsdt|�j|j|jfS)Nz<%s(%r, %r)>)�typer+rErFr(rrr#r?qszValue.__repr__N)T)r+r,r-rr.r>�setterr?rrrr#rDds


rDcCs
tjtSr/)�sys�modulesr+rrrr#r
tscCsdSr/rrrrr#�shutdownwsrLrcCsddlm}||||�S)N�)�
ThreadPool)ZpoolrN)Z	processesZinitializerZinitargsrNrrr#rzs)T)NNr)#�__all__rrJrr@Z
connectionrrrrrr
r	rZqueuerrrrZcurrent_threadrrrrr�objectr3�dictr0rCrDr
rLrrrrrr#�<module>sN�


PK|��\��se��dummy/__init__.pynu�[���#
# Support for the API of the multiprocessing package using threads
#
# multiprocessing/dummy/__init__.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = [
    'Process', 'current_process', 'active_children', 'freeze_support',
    'Lock', 'RLock', 'Semaphore', 'BoundedSemaphore', 'Condition',
    'Event', 'Barrier', 'Queue', 'Manager', 'Pipe', 'Pool', 'JoinableQueue'
    ]

#
# Imports
#

import threading
import sys
import weakref
import array

from .connection import Pipe
from threading import Lock, RLock, Semaphore, BoundedSemaphore
from threading import Event, Condition, Barrier
from queue import Queue

#
#
#

class DummyProcess(threading.Thread):
    """Thread subclass that mimics the multiprocessing.Process API."""

    def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
        threading.Thread.__init__(self, group, target, name, args, kwargs)
        self._pid = None
        self._children = weakref.WeakKeyDictionary()
        self._start_called = False
        self._parent = current_process()

    def start(self):
        """Start the thread, registering it with its parent's children."""
        parent = self._parent
        if parent is not current_process():
            raise RuntimeError(
                "Parent is {0!r} but current_process is {1!r}".format(
                    parent, current_process()))
        self._start_called = True
        if hasattr(parent, '_children'):
            parent._children[self] = None
        threading.Thread.start(self)

    @property
    def exitcode(self):
        """0 once a started thread has finished, otherwise None."""
        return 0 if self._start_called and not self.is_alive() else None

#
#
#

# In this thread-backed package a "process" is just a thread.
Process = DummyProcess
current_process = threading.current_thread
# Give the main thread the same children mapping DummyProcess instances get.
current_process()._children = weakref.WeakKeyDictionary()

def active_children():
    """Return the live children of the current thread, pruning dead ones."""
    registry = current_process()._children
    finished = [child for child in list(registry) if not child.is_alive()]
    for child in finished:
        registry.pop(child, None)
    return list(registry)

def freeze_support():
    """No-op: freezing is meaningless for the thread-based backend."""
    return None

#
#
#

class Namespace(object):
    """Simple attribute container mirroring multiprocessing's Namespace."""

    def __init__(self, /, **kwds):
        self.__dict__.update(kwds)

    def __repr__(self):
        # Show only public attributes, sorted for a stable repr.
        public = sorted('%s=%r' % (key, val)
                        for key, val in list(self.__dict__.items())
                        if not key.startswith('_'))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(public))

# Re-export the builtin types so this module matches the
# multiprocessing.Manager API's dict()/list() factories.
dict = dict
list = list

def Array(typecode, sequence, lock=True):
    """Return a plain array.array; *lock* is accepted but ignored here."""
    arr = array.array(typecode, sequence)
    return arr

class Value(object):
    """Unsynchronized stand-in for multiprocessing.Value.

    *lock* is accepted for API compatibility but ignored, since plain
    attribute access is already atomic enough for threads here.
    """

    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value

    @property
    def value(self):
        """The stored value."""
        return self._value

    @value.setter
    def value(self, value):
        self._value = value

    def __repr__(self):
        return '<%s(%r, %r)>' % (
            type(self).__name__, self._typecode, self._value)

def Manager():
    """Return this module itself, which provides the manager-style API."""
    module = sys.modules[__name__]
    return module

def shutdown():
    """No-op: there is no manager process to shut down."""
    return None

def Pool(processes=None, initializer=None, initargs=()):
    """Return a ThreadPool, the thread-based analogue of a process Pool."""
    # Imported lazily to avoid a circular import at module load time.
    from ..pool import ThreadPool
    return ThreadPool(processes, initializer, initargs)

JoinableQueue = Queue
PK|��\��_T>>dummy/connection.pynu�[���#
# Analogue of `multiprocessing.connection` which uses queues instead of sockets
#
# multiprocessing/dummy/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = [ 'Client', 'Listener', 'Pipe' ]

from queue import Queue


families = [None]


class Listener(object):
    """Listener whose 'address' is a queue of (out, in) queue pairs."""

    def __init__(self, address=None, family=None, backlog=1):
        # address and family are accepted for API compatibility but unused.
        self._backlog_queue = Queue(backlog)

    def accept(self):
        """Block until a client connects, then return its Connection."""
        pair = self._backlog_queue.get()
        return Connection(*pair)

    def close(self):
        """Drop the backlog queue; further connections are impossible."""
        self._backlog_queue = None

    @property
    def address(self):
        """The backlog queue, used by Client() as the 'address'."""
        return self._backlog_queue

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()


def Client(address):
    """Connect to a Listener by enqueueing our queue pair on *address*."""
    inbound, outbound = Queue(), Queue()
    # The listener side receives (its-out, its-in), i.e. mirrored ends.
    address.put((outbound, inbound))
    return Connection(inbound, outbound)


def Pipe(duplex=True):
    """Return two Connections wired to each other through two queues."""
    first, second = Queue(), Queue()
    return Connection(first, second), Connection(second, first)


class Connection(object):
    """One end of a dummy pipe; send/recv are just queue put/get."""

    def __init__(self, _in, _out):
        self._out = _out
        self._in = _in
        # Byte and object transport are identical for queue transport.
        self.send = self.send_bytes = _out.put
        self.recv = self.recv_bytes = _in.get

    def poll(self, timeout=0.0):
        """Return True if a message is available within *timeout* seconds."""
        if self._in.qsize() > 0:
            return True
        if timeout <= 0.0:
            return False
        cond = self._in.not_empty
        with cond:
            cond.wait(timeout)
        return self._in.qsize() > 0

    def close(self):
        """No-op: queues need no cleanup."""
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()
PK|��\Ҫ`��resource_sharer.pynu�[���#
# We use a background thread for sharing fds on Unix, and for sharing sockets on
# Windows.
#
# A client which wants to pickle a resource registers it with the resource
# sharer and gets an identifier in return.  The unpickling process will connect
# to the resource sharer, sends the identifier and its pid, and then receives
# the resource.
#

import os
import signal
import socket
import sys
import threading

from . import process
from .context import reduction
from . import util

__all__ = ['stop']


if sys.platform == 'win32':
    __all__ += ['DupSocket']

    class DupSocket(object):
        '''Picklable wrapper for a socket.'''
        def __init__(self, sock):
            # Duplicate so the wrapper stays valid even if the caller
            # closes the original socket.
            new_sock = sock.dup()
            def send(conn, pid):
                # share() produces pid-specific bytes which the target
                # process turns back into a socket via fromshare().
                share = new_sock.share(pid)
                conn.send_bytes(share)
            self._id = _resource_sharer.register(send, new_sock.close)

        def detach(self):
            '''Get the socket.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                share = conn.recv_bytes()
                return socket.fromshare(share)

else:
    __all__ += ['DupFd']

    class DupFd(object):
        '''Wrapper for fd which can be used at any time.'''
        def __init__(self, fd):
            # Duplicate so the wrapper outlives the caller's fd.
            new_fd = os.dup(fd)
            def send(conn, pid):
                reduction.send_handle(conn, new_fd, pid)
            def close():
                os.close(new_fd)
            self._id = _resource_sharer.register(send, close)

        def detach(self):
            '''Get the fd.  This should only be called once.'''
            with _resource_sharer.get_connection(self._id) as conn:
                return reduction.recv_handle(conn)


class _ResourceSharer(object):
    '''Manager for resources using background thread.'''
    def __init__(self):
        self._key = 0
        self._cache = {}
        self._old_locks = []
        self._lock = threading.Lock()
        self._listener = None
        self._address = None
        self._thread = None
        # Children created by fork must not reuse the parent's listener.
        util.register_after_fork(self, _ResourceSharer._afterfork)

    def register(self, send, close):
        '''Register resource, returning an identifier.'''
        with self._lock:
            if self._address is None:
                # Lazily start the listener thread on first use.
                self._start()
            self._key += 1
            self._cache[self._key] = (send, close)
            return (self._address, self._key)

    @staticmethod
    def get_connection(ident):
        '''Return connection from which to receive identified resource.'''
        from .connection import Client
        address, key = ident
        c = Client(address, authkey=process.current_process().authkey)
        c.send((key, os.getpid()))
        return c

    def stop(self, timeout=None):
        '''Stop the background thread and clear registered resources.'''
        from .connection import Client
        with self._lock:
            if self._address is not None:
                # Sending None tells _serve() to leave its accept loop.
                c = Client(self._address,
                           authkey=process.current_process().authkey)
                c.send(None)
                c.close()
                self._thread.join(timeout)
                if self._thread.is_alive():
                    util.sub_warning('_ResourceSharer thread did '
                                     'not stop when asked')
                self._listener.close()
                self._thread = None
                self._address = None
                self._listener = None
                # Release the close callbacks of anything never detached.
                for key, (send, close) in self._cache.items():
                    close()
                self._cache.clear()

    def _afterfork(self):
        # The registered resources belong to the parent; drop our copies.
        for key, (send, close) in self._cache.items():
            close()
        self._cache.clear()
        # If self._lock was locked at the time of the fork, it may be broken
        # -- see issue 6721.  Replace it without letting it be gc'ed.
        self._old_locks.append(self._lock)
        self._lock = threading.Lock()
        if self._listener is not None:
            self._listener.close()
        self._listener = None
        self._address = None
        self._thread = None

    def _start(self):
        # Called with self._lock held (from register()).
        from .connection import Listener
        assert self._listener is None, "Already have Listener"
        util.debug('starting listener and thread for sending handles')
        self._listener = Listener(authkey=process.current_process().authkey)
        self._address = self._listener.address
        t = threading.Thread(target=self._serve)
        t.daemon = True
        t.start()
        self._thread = t

    def _serve(self):
        # Block all signals in this thread so they reach the main thread.
        if hasattr(signal, 'pthread_sigmask'):
            signal.pthread_sigmask(signal.SIG_BLOCK, signal.valid_signals())
        while 1:
            try:
                with self._listener.accept() as conn:
                    msg = conn.recv()
                    if msg is None:
                        # Sentinel from stop(): shut down the loop.
                        break
                    key, destination_pid = msg
                    send, close = self._cache.pop(key)
                    try:
                        send(conn, destination_pid)
                    finally:
                        close()
            except:
                if not util.is_exiting():
                    sys.excepthook(*sys.exc_info())


# Module-level singleton; only its bound stop() is exported (see __all__).
_resource_sharer = _ResourceSharer()
stop = _resource_sharer.stop
PK|��\�c�p|p|
connection.pynu�[���#
# A higher level module for using sockets (or Windows named pipes)
#
# multiprocessing/connection.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = [ 'Client', 'Listener', 'Pipe', 'wait' ]

import io
import os
import sys
import socket
import struct
import time
import tempfile
import itertools

import _multiprocessing

from . import util

from . import AuthenticationError, BufferTooShort
from .context import reduction
_ForkingPickler = reduction.ForkingPickler

try:
    import _winapi
    from _winapi import WAIT_OBJECT_0, WAIT_ABANDONED_0, WAIT_TIMEOUT, INFINITE
except ImportError:
    if sys.platform == 'win32':
        raise
    _winapi = None

#
#
#

# Chunk size used for reads.
BUFSIZE = 8192
# A very generous timeout when it comes to local connections...
CONNECTION_TIMEOUT = 20.

# The hmac module implicitly defaults to using MD5.
# Support using a stronger algorithm for the challenge/response code:
HMAC_DIGEST_NAME='sha256'

# Monotonic counter used to generate unique AF_PIPE names.
_mmap_counter = itertools.count()

default_family = 'AF_INET'
families = ['AF_INET']

# Prefer Unix domain sockets where available; named pipes on Windows.
if hasattr(socket, 'AF_UNIX'):
    default_family = 'AF_UNIX'
    families += ['AF_UNIX']

if sys.platform == 'win32':
    default_family = 'AF_PIPE'
    families += ['AF_PIPE']


def _init_timeout(timeout=CONNECTION_TIMEOUT):
    """Return the monotonic-clock deadline *timeout* seconds from now."""
    deadline = time.monotonic() + timeout
    return deadline

def _check_timeout(t):
    return time.monotonic() > t

#
#
#

def arbitrary_address(family):
    '''
    Return an arbitrary free address for the given family
    '''
    if family == 'AF_INET':
        # Port 0 asks the OS to pick a free port at bind() time.
        return ('localhost', 0)
    if family == 'AF_UNIX':
        return tempfile.mktemp(prefix='listener-', dir=util.get_temp_dir())
    if family == 'AF_PIPE':
        # Pipe names include pid + counter to stay unique per process.
        return tempfile.mktemp(prefix=r'\\.\pipe\pyc-%d-%d-' %
                               (os.getpid(), next(_mmap_counter)), dir="")
    raise ValueError('unrecognized family')

def _validate_family(family):
    '''
    Checks if the family is valid for the current environment.
    '''
    if sys.platform != 'win32' and family == 'AF_PIPE':
        raise ValueError('Family %s is not recognized.' % family)

    if sys.platform == 'win32' and family == 'AF_UNIX':
        # double check
        if not hasattr(socket, family):
            raise ValueError('Family %s is not recognized.' % family)

def address_type(address):
    '''
    Return the types of the address

    This can be 'AF_INET', 'AF_UNIX', or 'AF_PIPE'
    '''
    if type(address) == tuple:
        return 'AF_INET'
    is_str = type(address) is str
    if is_str and address.startswith('\\\\'):
        return 'AF_PIPE'
    if is_str or util.is_abstract_socket_namespace(address):
        return 'AF_UNIX'
    raise ValueError('address type of %r unrecognized' % address)

#
# Connection classes
#

class _ConnectionBase:
    """Shared base for Connection and PipeConnection: framing, state
    checks, and the public send/recv API.  Subclasses provide _close,
    _send_bytes, _recv_bytes and _poll."""
    _handle = None

    def __init__(self, handle, readable=True, writable=True):
        handle = handle.__index__()
        if handle < 0:
            raise ValueError("invalid handle")
        if not readable and not writable:
            raise ValueError(
                "at least one of `readable` and `writable` must be True")
        self._handle = handle
        self._readable = readable
        self._writable = writable

    # XXX should we use util.Finalize instead of a __del__?

    def __del__(self):
        if self._handle is not None:
            self._close()

    def _check_closed(self):
        if self._handle is None:
            raise OSError("handle is closed")

    def _check_readable(self):
        if not self._readable:
            raise OSError("connection is write-only")

    def _check_writable(self):
        if not self._writable:
            raise OSError("connection is read-only")

    def _bad_message_length(self):
        # After an over-long message the stream is unusable for reading;
        # keep the write side alive only if there is one.
        if self._writable:
            self._readable = False
        else:
            self.close()
        raise OSError("bad message length")

    @property
    def closed(self):
        """True if the connection is closed"""
        return self._handle is None

    @property
    def readable(self):
        """True if the connection is readable"""
        return self._readable

    @property
    def writable(self):
        """True if the connection is writable"""
        return self._writable

    def fileno(self):
        """File descriptor or handle of the connection"""
        self._check_closed()
        return self._handle

    def close(self):
        """Close the connection"""
        if self._handle is not None:
            try:
                self._close()
            finally:
                # Mark closed even if _close() raised.
                self._handle = None

    def send_bytes(self, buf, offset=0, size=None):
        """Send the bytes data from a bytes-like object"""
        self._check_closed()
        self._check_writable()
        m = memoryview(buf)
        # HACK for byte-indexing of non-bytewise buffers (e.g. array.array)
        if m.itemsize > 1:
            m = memoryview(bytes(m))
        n = len(m)
        if offset < 0:
            raise ValueError("offset is negative")
        if n < offset:
            raise ValueError("buffer length < offset")
        if size is None:
            size = n - offset
        elif size < 0:
            raise ValueError("size is negative")
        elif offset + size > n:
            raise ValueError("buffer length < offset + size")
        self._send_bytes(m[offset:offset + size])

    def send(self, obj):
        """Send a (picklable) object"""
        self._check_closed()
        self._check_writable()
        self._send_bytes(_ForkingPickler.dumps(obj))

    def recv_bytes(self, maxlength=None):
        """
        Receive bytes data as a bytes object.
        """
        self._check_closed()
        self._check_readable()
        if maxlength is not None and maxlength < 0:
            raise ValueError("negative maxlength")
        buf = self._recv_bytes(maxlength)
        if buf is None:
            # Subclass signalled a message longer than maxlength.
            self._bad_message_length()
        return buf.getvalue()

    def recv_bytes_into(self, buf, offset=0):
        """
        Receive bytes data into a writeable bytes-like object.
        Return the number of bytes read.
        """
        self._check_closed()
        self._check_readable()
        with memoryview(buf) as m:
            # Get bytesize of arbitrary buffer
            itemsize = m.itemsize
            bytesize = itemsize * len(m)
            if offset < 0:
                raise ValueError("negative offset")
            elif offset > bytesize:
                raise ValueError("offset too large")
            result = self._recv_bytes()
            size = result.tell()
            if bytesize < offset + size:
                raise BufferTooShort(result.getvalue())
            # Message can fit in dest
            result.seek(0)
            result.readinto(m[offset // itemsize :
                              (offset + size) // itemsize])
            return size

    def recv(self):
        """Receive a (picklable) object"""
        self._check_closed()
        self._check_readable()
        buf = self._recv_bytes()
        return _ForkingPickler.loads(buf.getbuffer())

    def poll(self, timeout=0.0):
        """Whether there is any input available to be read"""
        self._check_closed()
        self._check_readable()
        return self._poll(timeout)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()


if _winapi:

    class PipeConnection(_ConnectionBase):
        """
        Connection class based on a Windows named pipe.
        Overlapped I/O is used, so the handles must have been created
        with FILE_FLAG_OVERLAPPED.
        """
        # Set when a zero-length read completed; the next _recv_bytes
        # returns an empty buffer instead of issuing a new read.
        _got_empty_message = False

        def _close(self, _CloseHandle=_winapi.CloseHandle):
            _CloseHandle(self._handle)

        def _send_bytes(self, buf):
            ov, err = _winapi.WriteFile(self._handle, buf, overlapped=True)
            try:
                if err == _winapi.ERROR_IO_PENDING:
                    # Wait for the overlapped write to complete.
                    waitres = _winapi.WaitForMultipleObjects(
                        [ov.event], False, INFINITE)
                    assert waitres == WAIT_OBJECT_0
            except:
                ov.cancel()
                raise
            finally:
                nwritten, err = ov.GetOverlappedResult(True)
            assert err == 0
            assert nwritten == len(buf)

        def _recv_bytes(self, maxsize=None):
            if self._got_empty_message:
                self._got_empty_message = False
                return io.BytesIO()
            else:
                bsize = 128 if maxsize is None else min(maxsize, 128)
                try:
                    ov, err = _winapi.ReadFile(self._handle, bsize,
                                                overlapped=True)
                    try:
                        if err == _winapi.ERROR_IO_PENDING:
                            waitres = _winapi.WaitForMultipleObjects(
                                [ov.event], False, INFINITE)
                            assert waitres == WAIT_OBJECT_0
                    except:
                        ov.cancel()
                        raise
                    finally:
                        nread, err = ov.GetOverlappedResult(True)
                        if err == 0:
                            # Whole message fit in the initial buffer.
                            f = io.BytesIO()
                            f.write(ov.getbuffer())
                            return f
                        elif err == _winapi.ERROR_MORE_DATA:
                            # Message is larger than bsize; fetch the rest.
                            return self._get_more_data(ov, maxsize)
                except OSError as e:
                    if e.winerror == _winapi.ERROR_BROKEN_PIPE:
                        raise EOFError
                    else:
                        raise
            raise RuntimeError("shouldn't get here; expected KeyboardInterrupt")

        def _poll(self, timeout):
            if (self._got_empty_message or
                        _winapi.PeekNamedPipe(self._handle)[0] != 0):
                return True
            return bool(wait([self], timeout))

        def _get_more_data(self, ov, maxsize):
            buf = ov.getbuffer()
            f = io.BytesIO()
            f.write(buf)
            left = _winapi.PeekNamedPipe(self._handle)[1]
            assert left > 0
            if maxsize is not None and len(buf) + left > maxsize:
                self._bad_message_length()
            ov, err = _winapi.ReadFile(self._handle, left, overlapped=True)
            rbytes, err = ov.GetOverlappedResult(True)
            assert err == 0
            assert rbytes == left
            f.write(ov.getbuffer())
            return f


class Connection(_ConnectionBase):
    """
    Connection class based on an arbitrary file descriptor (Unix only), or
    a socket handle (Windows).
    """

    # Select the raw close/read/write primitives at class-creation time:
    # on Windows `self._handle` is a socket handle, elsewhere a plain fd.
    # NOTE(review): the `_close=...` default argument appears intended to
    # bind the closer at definition time (presumably so it remains usable
    # during interpreter shutdown) — confirm before changing.
    if _winapi:
        def _close(self, _close=_multiprocessing.closesocket):
            _close(self._handle)
        _write = _multiprocessing.send
        _read = _multiprocessing.recv
    else:
        def _close(self, _close=os.close):
            _close(self._handle)
        _write = os.write
        _read = os.read

    def _send(self, buf, write=_write):
        # Write the whole buffer; a single write()/send() may be partial,
        # so loop until everything has been flushed.
        remaining = len(buf)
        while True:
            n = write(self._handle, buf)
            remaining -= n
            if remaining == 0:
                break
            buf = buf[n:]

    def _recv(self, size, read=_read):
        # Read exactly `size` bytes into a BytesIO, looping over short reads.
        # Raises EOFError if the peer closed before any byte arrived, or
        # OSError if it closed mid-message.
        buf = io.BytesIO()
        handle = self._handle
        remaining = size
        while remaining > 0:
            chunk = read(handle, remaining)
            n = len(chunk)
            if n == 0:
                if remaining == size:
                    raise EOFError
                else:
                    raise OSError("got end of file during message")
            buf.write(chunk)
            remaining -= n
        return buf

    def _send_bytes(self, buf):
        # Wire format: a 4-byte big-endian signed length header, followed by
        # the payload.  Messages over 2**31-1 bytes use a -1 header followed
        # by an 8-byte unsigned length.
        n = len(buf)
        if n > 0x7fffffff:
            pre_header = struct.pack("!i", -1)
            header = struct.pack("!Q", n)
            self._send(pre_header)
            self._send(header)
            self._send(buf)
        else:
            # For wire compatibility with 3.7 and lower
            header = struct.pack("!i", n)
            if n > 16384:
                # The payload is large so Nagle's algorithm won't be triggered
                # and we'd better avoid the cost of concatenation.
                self._send(header)
                self._send(buf)
            else:
                # Issue #20540: concatenate before sending, to avoid delays due
                # to Nagle's algorithm on a TCP socket.
                # Also note we want to avoid sending a 0-length buffer separately,
                # to avoid "broken pipe" errors if the other end closed the pipe.
                self._send(header + buf)

    def _recv_bytes(self, maxsize=None):
        # Counterpart of _send_bytes: read the 4-byte header (and the 8-byte
        # extension when it is -1), then the payload.  Returns None if the
        # announced size exceeds maxsize; NOTE(review): in that case the
        # payload is left unread on the connection.
        buf = self._recv(4)
        size, = struct.unpack("!i", buf.getvalue())
        if size == -1:
            buf = self._recv(8)
            size, = struct.unpack("!Q", buf.getvalue())
        if maxsize is not None and size > maxsize:
            return None
        return self._recv(size)

    def _poll(self, timeout):
        # Delegate readability polling to the module-level wait() helper.
        r = wait([self], timeout)
        return bool(r)


#
# Public functions
#

class Listener(object):
    '''
    Wrapper around a bound socket or Windows named pipe which is
    'listening' for connections.

    accept() yields `Connection` objects, optionally authenticated with
    `authkey` via the module's challenge/response handshake.
    '''
    def __init__(self, address=None, family=None, backlog=1, authkey=None):
        # Infer the family from the address when not given, and invent an
        # address when none was supplied.
        if not family:
            family = (address and address_type(address)) or default_family
        if not address:
            address = arbitrary_address(family)

        _validate_family(family)
        if family == 'AF_PIPE':
            self._listener = PipeListener(address, backlog)
        else:
            self._listener = SocketListener(address, family, backlog)

        if authkey is not None and not isinstance(authkey, bytes):
            raise TypeError('authkey should be a byte string')

        self._authkey = authkey

    def accept(self):
        '''
        Accept a connection on the bound socket or named pipe of `self`.

        Returns a `Connection` object.
        '''
        if self._listener is None:
            raise OSError('listener is closed')
        conn = self._listener.accept()
        if self._authkey:
            deliver_challenge(conn, self._authkey)
            answer_challenge(conn, self._authkey)
        return conn

    def close(self):
        '''
        Close the bound socket or named pipe of `self`.  Idempotent.
        '''
        # Swap out the listener first so a second close() is a no-op.
        listener, self._listener = self._listener, None
        if listener is not None:
            listener.close()

    @property
    def address(self):
        return self._listener._address

    @property
    def last_accepted(self):
        return self._listener._last_accepted

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        self.close()


def Client(address, family=None, authkey=None):
    '''
    Returns a connection to the address of a `Listener`
    '''
    family = family or address_type(address)
    _validate_family(family)
    # Pick the transport-specific connector for the family.
    connect = PipeClient if family == 'AF_PIPE' else SocketClient
    c = connect(address)

    if authkey is not None and not isinstance(authkey, bytes):
        raise TypeError('authkey should be a byte string')

    if authkey is not None:
        # Client side answers the server's challenge first, then issues
        # its own so authentication is mutual.
        answer_challenge(c, authkey)
        deliver_challenge(c, authkey)

    return c


if sys.platform != 'win32':

    def Pipe(duplex=True):
        '''
        Returns pair of connection objects at either end of a pipe

        duplex=True uses a Unix socketpair (both ends read/write);
        otherwise a plain os.pipe() gives a read end and a write end.
        '''
        if duplex:
            left, right = socket.socketpair()
            left.setblocking(True)
            right.setblocking(True)
            conn1 = Connection(left.detach())
            conn2 = Connection(right.detach())
        else:
            read_fd, write_fd = os.pipe()
            conn1 = Connection(read_fd, writable=False)
            conn2 = Connection(write_fd, readable=False)

        return conn1, conn2

else:

    def Pipe(duplex=True):
        '''
        Returns pair of connection objects at either end of a pipe

        On Windows this is built from a named pipe: h1 is the server end
        created with CreateNamedPipe, h2 the client end from CreateFile.
        '''
        address = arbitrary_address('AF_PIPE')
        if duplex:
            openmode = _winapi.PIPE_ACCESS_DUPLEX
            access = _winapi.GENERIC_READ | _winapi.GENERIC_WRITE
            obsize, ibsize = BUFSIZE, BUFSIZE
        else:
            # Simplex: server end is read-only, client end write-only.
            openmode = _winapi.PIPE_ACCESS_INBOUND
            access = _winapi.GENERIC_WRITE
            obsize, ibsize = 0, BUFSIZE

        h1 = _winapi.CreateNamedPipe(
            address, openmode | _winapi.FILE_FLAG_OVERLAPPED |
            _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE,
            _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
            _winapi.PIPE_WAIT,
            1, obsize, ibsize, _winapi.NMPWAIT_WAIT_FOREVER,
            # default security descriptor: the handle cannot be inherited
            _winapi.NULL
            )
        h2 = _winapi.CreateFile(
            address, access, 0, _winapi.NULL, _winapi.OPEN_EXISTING,
            _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
            )
        # Put the client end into message-read mode to match the server end.
        _winapi.SetNamedPipeHandleState(
            h2, _winapi.PIPE_READMODE_MESSAGE, None, None
            )

        # Complete the server side of the connection handshake.
        overlapped = _winapi.ConnectNamedPipe(h1, overlapped=True)
        _, err = overlapped.GetOverlappedResult(True)
        assert err == 0

        c1 = PipeConnection(h1, writable=duplex)
        c2 = PipeConnection(h2, readable=duplex)

        return c1, c2

#
# Definitions for connections based on sockets
#

class SocketListener(object):
    '''
    Representation of a socket which is bound to an address and listening
    '''
    def __init__(self, address, family, backlog=1):
        sock = socket.socket(getattr(socket, family))
        self._socket = sock
        try:
            # SO_REUSEADDR has different semantics on Windows (issue #2550).
            if os.name == 'posix':
                sock.setsockopt(socket.SOL_SOCKET,
                                socket.SO_REUSEADDR, 1)
            sock.setblocking(True)
            sock.bind(address)
            sock.listen(backlog)
            self._address = sock.getsockname()
        except OSError:
            sock.close()
            raise
        self._family = family
        self._last_accepted = None

        # Linux abstract socket namespaces do not need to be explicitly
        # unlinked, so only register cleanup for filesystem AF_UNIX paths.
        if family == 'AF_UNIX' and not util.is_abstract_socket_namespace(address):
            self._unlink = util.Finalize(
                self, os.unlink, args=(address,), exitpriority=0
                )
        else:
            self._unlink = None

    def accept(self):
        conn, self._last_accepted = self._socket.accept()
        conn.setblocking(True)
        return Connection(conn.detach())

    def close(self):
        try:
            self._socket.close()
        finally:
            # Run (and disarm) the unlink finalizer exactly once.
            finalizer, self._unlink = self._unlink, None
            if finalizer is not None:
                finalizer()


def SocketClient(address):
    '''
    Return a connection object connected to the socket given by `address`
    '''
    family = address_type(address)
    sock = socket.socket(getattr(socket, family))
    with sock:
        sock.setblocking(True)
        sock.connect(address)
        # detach() hands the fd to the Connection, so closing `sock`
        # on exit of the with-block does not close the connection.
        return Connection(sock.detach())

#
# Definitions for connections based on named pipes
#

if sys.platform == 'win32':

    class PipeListener(object):
        '''
        Representation of a named pipe
        '''
        def __init__(self, address, backlog=None):
            # `backlog` is accepted for interface parity with SocketListener
            # but is unused: named pipes have no listen backlog.
            self._address = address
            # Keep one pre-created pipe instance queued so a client can
            # connect before accept() is first called.
            self._handle_queue = [self._new_handle(first=True)]

            self._last_accepted = None
            util.sub_debug('listener created with address=%r', self._address)
            # `close` is a Finalize object: callable like a method, and also
            # run automatically at interpreter exit.
            self.close = util.Finalize(
                self, PipeListener._finalize_pipe_listener,
                args=(self._handle_queue, self._address), exitpriority=0
                )

        def _new_handle(self, first=False):
            # Create one server-side pipe instance.  Only the first instance
            # may claim FILE_FLAG_FIRST_PIPE_INSTANCE (guards against a rogue
            # process squatting on the address).
            flags = _winapi.PIPE_ACCESS_DUPLEX | _winapi.FILE_FLAG_OVERLAPPED
            if first:
                flags |= _winapi.FILE_FLAG_FIRST_PIPE_INSTANCE
            return _winapi.CreateNamedPipe(
                self._address, flags,
                _winapi.PIPE_TYPE_MESSAGE | _winapi.PIPE_READMODE_MESSAGE |
                _winapi.PIPE_WAIT,
                _winapi.PIPE_UNLIMITED_INSTANCES, BUFSIZE, BUFSIZE,
                _winapi.NMPWAIT_WAIT_FOREVER, _winapi.NULL
                )

        def accept(self):
            # Queue a fresh instance for the *next* client before serving the
            # oldest one, so there is always an instance ready to connect to.
            self._handle_queue.append(self._new_handle())
            handle = self._handle_queue.pop(0)
            try:
                ov = _winapi.ConnectNamedPipe(handle, overlapped=True)
            except OSError as e:
                if e.winerror != _winapi.ERROR_NO_DATA:
                    raise
                # ERROR_NO_DATA can occur if a client has already connected,
                # written data and then disconnected -- see Issue 14725.
            else:
                try:
                    res = _winapi.WaitForMultipleObjects(
                        [ov.event], False, INFINITE)
                except:
                    ov.cancel()
                    _winapi.CloseHandle(handle)
                    raise
                finally:
                    _, err = ov.GetOverlappedResult(True)
                    assert err == 0
            return PipeConnection(handle)

        @staticmethod
        def _finalize_pipe_listener(queue, address):
            # Close every pipe instance still queued (runs via Finalize).
            util.sub_debug('closing listener with address=%r', address)
            for handle in queue:
                _winapi.CloseHandle(handle)

    def PipeClient(address):
        '''
        Return a connection object connected to the pipe given by `address`

        Retries while the pipe is busy or waiting times out, until the
        overall deadline from _init_timeout()/_check_timeout() expires.
        '''
        t = _init_timeout()
        while 1:
            try:
                _winapi.WaitNamedPipe(address, 1000)
                h = _winapi.CreateFile(
                    address, _winapi.GENERIC_READ | _winapi.GENERIC_WRITE,
                    0, _winapi.NULL, _winapi.OPEN_EXISTING,
                    _winapi.FILE_FLAG_OVERLAPPED, _winapi.NULL
                    )
            except OSError as e:
                # Retry only on busy/timeout errors, and only while the
                # overall deadline has not yet expired.
                if e.winerror not in (_winapi.ERROR_SEM_TIMEOUT,
                                      _winapi.ERROR_PIPE_BUSY) or _check_timeout(t):
                    raise
            else:
                break
        # (The previous version had an unreachable `while ... else: raise`
        # clause; `while 1` can never terminate normally, so it was removed.)

        # Match the server's message-read mode.
        _winapi.SetNamedPipeHandleState(
            h, _winapi.PIPE_READMODE_MESSAGE, None, None
            )
        return PipeConnection(h)

#
# Authentication stuff
#

# Number of random bytes in the challenge message.
MESSAGE_LENGTH = 20

# Fixed byte markers framing the challenge/response handshake below.
CHALLENGE = b'#CHALLENGE#'
WELCOME = b'#WELCOME#'
FAILURE = b'#FAILURE#'

def deliver_challenge(connection, authkey):
    """Server side of the authentication handshake.

    Sends a random challenge, then verifies that the peer's HMAC response
    (computed with the shared `authkey` and the module's HMAC_DIGEST_NAME
    algorithm, defined elsewhere in this file) matches.  Sends WELCOME on
    success; sends FAILURE and raises AuthenticationError otherwise.
    """
    import hmac
    if not isinstance(authkey, bytes):
        raise ValueError(
            "Authkey must be bytes, not {0!s}".format(type(authkey)))
    message = os.urandom(MESSAGE_LENGTH)
    connection.send_bytes(CHALLENGE + message)
    digest = hmac.new(authkey, message, HMAC_DIGEST_NAME).digest()
    response = connection.recv_bytes(256)        # reject large message
    # Use a constant-time comparison so the check does not leak timing
    # information about the expected digest.
    if hmac.compare_digest(digest, response):
        connection.send_bytes(WELCOME)
    else:
        connection.send_bytes(FAILURE)
        raise AuthenticationError('digest received was wrong')

def answer_challenge(connection, authkey):
    """Client side of the authentication handshake.

    Receives the server's challenge, replies with the HMAC of its payload
    (keyed by the shared `authkey`, using the module's HMAC_DIGEST_NAME
    algorithm defined elsewhere in this file), and raises
    AuthenticationError unless the server answers WELCOME.
    """
    import hmac
    if not isinstance(authkey, bytes):
        raise ValueError(
            "Authkey must be bytes, not {0!s}".format(type(authkey)))
    message = connection.recv_bytes(256)         # reject large message
    # Validate the protocol marker with an explicit raise: an `assert` here
    # would be silently stripped when Python runs with -O.
    if message[:len(CHALLENGE)] != CHALLENGE:
        raise AuthenticationError('message = %r' % message)
    message = message[len(CHALLENGE):]
    digest = hmac.new(authkey, message, HMAC_DIGEST_NAME).digest()
    connection.send_bytes(digest)
    response = connection.recv_bytes(256)        # reject large message
    if response != WELCOME:
        raise AuthenticationError('digest sent was rejected')

#
# Support for using xmlrpclib for serialization
#

class ConnectionWrapper(object):
    """Wrap a connection so send()/recv() go through a pluggable serializer.

    `dumps`/`loads` convert objects to/from the byte strings that are
    actually transmitted; all other connection methods are re-exported
    unchanged.
    """
    def __init__(self, conn, dumps, loads):
        self._conn = conn
        self._dumps = dumps
        self._loads = loads
        # Expose the underlying connection's methods directly on the wrapper.
        for attr in ('fileno', 'close', 'poll', 'recv_bytes', 'send_bytes'):
            setattr(self, attr, getattr(conn, attr))

    def send(self, obj):
        self._conn.send_bytes(self._dumps(obj))

    def recv(self):
        return self._loads(self._conn.recv_bytes())

def _xml_dumps(obj):
    # Serialize a single object as an XML-RPC params payload (UTF-8 bytes).
    # Relies on the global `xmlrpclib`, imported lazily by XmlListener/XmlClient.
    return xmlrpclib.dumps((obj,), None, None, None, 1).encode('utf-8')

def _xml_loads(s):
    # Inverse of _xml_dumps: decode UTF-8 bytes and unwrap the single object.
    (obj,), method = xmlrpclib.loads(s.decode('utf-8'))
    return obj

class XmlListener(Listener):
    """Listener whose accepted connections serialize objects via XML-RPC."""
    def accept(self):
        # Import xmlrpc lazily so it is only paid for when XML serialization
        # is actually used; _xml_dumps/_xml_loads rely on this global.
        global xmlrpclib
        import xmlrpc.client as xmlrpclib
        conn = Listener.accept(self)
        return ConnectionWrapper(conn, _xml_dumps, _xml_loads)

def XmlClient(*args, **kwds):
    """Return a Client connection that serializes objects via XML-RPC."""
    # Lazy import, mirroring XmlListener.accept().
    global xmlrpclib
    import xmlrpc.client as xmlrpclib
    conn = Client(*args, **kwds)
    return ConnectionWrapper(conn, _xml_dumps, _xml_loads)

#
# Wait
#

if sys.platform == 'win32':

    def _exhaustive_wait(handles, timeout):
        # Return ALL handles which are currently signalled.  (Only
        # returning the first signalled might create starvation issues.)
        pending = list(handles)
        signalled = []
        while pending:
            res = _winapi.WaitForMultipleObjects(pending, False, timeout)
            if res == WAIT_TIMEOUT:
                break
            if WAIT_OBJECT_0 <= res < WAIT_OBJECT_0 + len(pending):
                index = res - WAIT_OBJECT_0
            elif WAIT_ABANDONED_0 <= res < WAIT_ABANDONED_0 + len(pending):
                index = res - WAIT_ABANDONED_0
            else:
                raise RuntimeError('Should not get here')
            signalled.append(pending[index])
            # Later rounds poll (timeout=0) only the handles after the hit.
            pending = pending[index+1:]
            timeout = 0
        return signalled

    # Errors from overlapped reads that mean the peer is gone; the handle is
    # treated as "ready" rather than propagating the error.
    _ready_errors = {_winapi.ERROR_BROKEN_PIPE, _winapi.ERROR_NETNAME_DELETED}

    def wait(object_list, timeout=None):
        '''
        Wait till an object in object_list is ready/readable.

        Returns list of those objects in object_list which are ready/readable.
        '''
        # Convert the timeout to the integer milliseconds WaitForMultipleObjects
        # expects; None means wait forever, negative means poll.
        if timeout is None:
            timeout = INFINITE
        elif timeout < 0:
            timeout = 0
        else:
            timeout = int(timeout * 1000 + 0.5)

        object_list = list(object_list)
        waithandle_to_obj = {}
        ov_list = []
        ready_objects = set()
        ready_handles = set()

        try:
            for o in object_list:
                try:
                    fileno = getattr(o, 'fileno')
                except AttributeError:
                    # No fileno(): treat the object itself as a waitable handle.
                    waithandle_to_obj[o.__index__()] = o
                else:
                    # start an overlapped read of length zero
                    try:
                        ov, err = _winapi.ReadFile(fileno(), 0, True)
                    except OSError as e:
                        # Peer-gone errors mean "ready"; anything else is real.
                        ov, err = None, e.winerror
                        if err not in _ready_errors:
                            raise
                    if err == _winapi.ERROR_IO_PENDING:
                        ov_list.append(ov)
                        waithandle_to_obj[ov.event] = o
                    else:
                        # If o.fileno() is an overlapped pipe handle and
                        # err == 0 then there is a zero length message
                        # in the pipe, but it HAS NOT been consumed...
                        if ov and sys.getwindowsversion()[:2] >= (6, 2):
                            # ... except on Windows 8 and later, where
                            # the message HAS been consumed.
                            try:
                                _, err = ov.GetOverlappedResult(False)
                            except OSError as e:
                                err = e.winerror
                            if not err and hasattr(o, '_got_empty_message'):
                                o._got_empty_message = True
                        ready_objects.add(o)
                        # Something is already ready, so only poll the rest.
                        timeout = 0

            ready_handles = _exhaustive_wait(waithandle_to_obj.keys(), timeout)
        finally:
            # request that overlapped reads stop
            for ov in ov_list:
                ov.cancel()

            # wait for all overlapped reads to stop
            for ov in ov_list:
                try:
                    _, err = ov.GetOverlappedResult(True)
                except OSError as e:
                    err = e.winerror
                    if err not in _ready_errors:
                        raise
                if err != _winapi.ERROR_OPERATION_ABORTED:
                    # The read completed (was not cancelled): the object is ready.
                    o = waithandle_to_obj[ov.event]
                    ready_objects.add(o)
                    if err == 0:
                        # If o.fileno() is an overlapped pipe handle then
                        # a zero length message HAS been consumed.
                        if hasattr(o, '_got_empty_message'):
                            o._got_empty_message = True

        ready_objects.update(waithandle_to_obj[h] for h in ready_handles)
        # Preserve the caller's ordering in the result.
        return [o for o in object_list if o in ready_objects]

else:

    import selectors

    # poll/select have the advantage of not requiring any extra file
    # descriptor, contrarily to epoll/kqueue (also, they require a single
    # syscall).
    if hasattr(selectors, 'PollSelector'):
        _WaitSelector = selectors.PollSelector
    else:
        _WaitSelector = selectors.SelectSelector

    def wait(object_list, timeout=None):
        '''
        Wait till an object in object_list is ready/readable.

        Returns list of those objects in object_list which are ready/readable.
        '''
        with _WaitSelector() as selector:
            for obj in object_list:
                selector.register(obj, selectors.EVENT_READ)

            deadline = None if timeout is None else time.monotonic() + timeout

            while True:
                ready = selector.select(timeout)
                if ready:
                    return [key.fileobj for (key, _events) in ready]
                # select() may wake spuriously; retry with the time left,
                # returning the empty list once the deadline has passed.
                if deadline is not None:
                    timeout = deadline - time.monotonic()
                    if timeout < 0:
                        return ready

#
# Make connection and socket objects sharable if possible
#

if sys.platform == 'win32':
    # Pickling support for socket-based Connection objects (Windows):
    # the socket is duplicated via resource_sharer so another process can
    # rebuild an equivalent Connection.
    def reduce_connection(conn):
        handle = conn.fileno()
        with socket.fromfd(handle, socket.AF_INET, socket.SOCK_STREAM) as s:
            from . import resource_sharer
            ds = resource_sharer.DupSocket(s)
            return rebuild_connection, (ds, conn.readable, conn.writable)
    def rebuild_connection(ds, readable, writable):
        # Counterpart of reduce_connection, run in the receiving process.
        sock = ds.detach()
        return Connection(sock.detach(), readable, writable)
    reduction.register(Connection, reduce_connection)

    # Pickling support for named-pipe connections: the handle is duplicated
    # with only the access rights the connection actually uses.
    def reduce_pipe_connection(conn):
        access = ((_winapi.FILE_GENERIC_READ if conn.readable else 0) |
                  (_winapi.FILE_GENERIC_WRITE if conn.writable else 0))
        dh = reduction.DupHandle(conn.fileno(), access)
        return rebuild_pipe_connection, (dh, conn.readable, conn.writable)
    def rebuild_pipe_connection(dh, readable, writable):
        # Counterpart of reduce_pipe_connection, run in the receiving process.
        handle = dh.detach()
        return PipeConnection(handle, readable, writable)
    reduction.register(PipeConnection, reduce_pipe_connection)

else:
    # Pickling support for fd-based Connection objects (POSIX): the fd is
    # duplicated via reduction.DupFd for transfer to another process.
    def reduce_connection(conn):
        df = reduction.DupFd(conn.fileno())
        return rebuild_connection, (df, conn.readable, conn.writable)
    def rebuild_connection(df, readable, writable):
        # Counterpart of reduce_connection, run in the receiving process.
        fd = df.detach()
        return Connection(fd, readable, writable)
    reduction.register(Connection, reduce_connection)
PK|��\��F��0�0
forkserver.pynu�[���import errno
import os
import selectors
import signal
import socket
import struct
import sys
import threading
import warnings

from . import connection
from . import process
from .context import reduction
from . import resource_tracker
from . import spawn
from . import util

__all__ = ['ensure_running', 'get_inherited_fds', 'connect_to_new_process',
           'set_forkserver_preload']

#
#
#

# Upper bound on fds passed in one fork request (enforced on both sides).
MAXFDS_TO_SEND = 256
SIGNED_STRUCT = struct.Struct('q')     # large enough for pid_t

#
# Forkserver class
#

class ForkServer(object):
    """Client-side manager for a single fork-server process.

    Tracks the server's address, pid and "alive" pipe, lazily (re)starts
    the server, and asks it to fork new worker processes.
    """

    def __init__(self):
        self._forkserver_address = None
        self._forkserver_alive_fd = None
        self._forkserver_pid = None
        self._inherited_fds = None
        self._lock = threading.Lock()
        self._preload_modules = ['__main__']

    def _stop(self):
        # Method used by unit tests to stop the server
        with self._lock:
            self._stop_unlocked()

    def _stop_unlocked(self):
        """Shut down the fork server and clean up (caller holds the lock)."""
        if self._forkserver_pid is None:
            return

        # Closing the "alive" file descriptor asks the server to stop.
        os.close(self._forkserver_alive_fd)
        self._forkserver_alive_fd = None

        os.waitpid(self._forkserver_pid, 0)
        self._forkserver_pid = None

        # Abstract-namespace sockets need no filesystem cleanup.
        if not util.is_abstract_socket_namespace(self._forkserver_address):
            os.unlink(self._forkserver_address)
        self._forkserver_address = None

    def set_forkserver_preload(self, modules_names):
        '''Set list of module names to try to load in forkserver process.'''
        # Validate the *new* list (the previous code mistakenly checked
        # self._preload_modules, silently accepting invalid input).
        if not all(type(mod) is str for mod in modules_names):
            raise TypeError('module_names must be a list of strings')
        self._preload_modules = modules_names

    def get_inherited_fds(self):
        '''Return list of fds inherited from parent process.

        This returns None if the current process was not started by fork
        server.
        '''
        return self._inherited_fds

    def connect_to_new_process(self, fds):
        '''Request forkserver to create a child process.

        Returns a pair of fds (status_r, data_w).  The calling process can read
        the child process's pid and (eventually) its returncode from status_r.
        The calling process should write to data_w the pickled preparation and
        process data.
        '''
        self.ensure_running()
        # 4 = child_r, child_w, alive fd, resource tracker fd (sent below).
        if len(fds) + 4 >= MAXFDS_TO_SEND:
            raise ValueError('too many fds')
        with socket.socket(socket.AF_UNIX) as client:
            client.connect(self._forkserver_address)
            parent_r, child_w = os.pipe()
            child_r, parent_w = os.pipe()
            allfds = [child_r, child_w, self._forkserver_alive_fd,
                      resource_tracker.getfd()]
            allfds += fds
            try:
                reduction.sendfds(client, allfds)
                return parent_r, parent_w
            except:
                os.close(parent_r)
                os.close(parent_w)
                raise
            finally:
                # The server now owns duplicates of the child-end fds.
                os.close(child_r)
                os.close(child_w)

    def ensure_running(self):
        '''Make sure that a fork server is running.

        This can be called from any process.  Note that usually a child
        process will just reuse the forkserver started by its parent, so
        ensure_running() will do nothing.
        '''
        with self._lock:
            resource_tracker.ensure_running()
            if self._forkserver_pid is not None:
                # forkserver was launched before, is it still running?
                pid, status = os.waitpid(self._forkserver_pid, os.WNOHANG)
                if not pid:
                    # still alive
                    return
                # dead, launch it again
                os.close(self._forkserver_alive_fd)
                self._forkserver_address = None
                self._forkserver_alive_fd = None
                self._forkserver_pid = None

            cmd = ('from multiprocessing.forkserver import main; ' +
                   'main(%d, %d, %r, **%r)')

            if self._preload_modules:
                desired_keys = {'main_path', 'sys_path'}
                data = spawn.get_preparation_data('ignore')
                data = {x: y for x, y in data.items() if x in desired_keys}
            else:
                data = {}

            with socket.socket(socket.AF_UNIX) as listener:
                address = connection.arbitrary_address('AF_UNIX')
                listener.bind(address)
                if not util.is_abstract_socket_namespace(address):
                    # Restrict access: anyone who can connect can spawn code.
                    os.chmod(address, 0o600)
                listener.listen()

                # all client processes own the write end of the "alive" pipe;
                # when they all terminate the read end becomes ready.
                alive_r, alive_w = os.pipe()
                try:
                    fds_to_pass = [listener.fileno(), alive_r]
                    cmd %= (listener.fileno(), alive_r, self._preload_modules,
                            data)
                    exe = spawn.get_executable()
                    args = [exe] + util._args_from_interpreter_flags()
                    args += ['-c', cmd]
                    pid = util.spawnv_passfds(exe, args, fds_to_pass)
                except:
                    os.close(alive_w)
                    raise
                finally:
                    # Parent keeps only the write end of the "alive" pipe.
                    os.close(alive_r)
                self._forkserver_address = address
                self._forkserver_alive_fd = alive_w
                self._forkserver_pid = pid

#
#
#

def main(listener_fd, alive_r, preload, main_path=None, sys_path=None):
    '''Run forkserver.

    Event loop of the fork-server process: preload modules, then serve
    fork requests from `listener_fd`, reaping children via SIGCHLD and
    exiting when `alive_r` reaches EOF (all clients gone).
    '''
    if preload:
        if '__main__' in preload and main_path is not None:
            process.current_process()._inheriting = True
            try:
                spawn.import_main_path(main_path)
            finally:
                del process.current_process()._inheriting
        # Preloading is best-effort; failures are ignored.
        for modname in preload:
            try:
                __import__(modname)
            except ImportError:
                pass

    util._close_stdin()

    # Self-pipe used as a signal wakeup fd; must be non-blocking.
    sig_r, sig_w = os.pipe()
    os.set_blocking(sig_r, False)
    os.set_blocking(sig_w, False)

    def sigchld_handler(*_unused):
        # Dummy signal handler, doesn't do anything
        pass

    handlers = {
        # unblocking SIGCHLD allows the wakeup fd to notify our event loop
        signal.SIGCHLD: sigchld_handler,
        # protect the process from ^C
        signal.SIGINT: signal.SIG_IGN,
        }
    old_handlers = {sig: signal.signal(sig, val)
                    for (sig, val) in handlers.items()}

    # calling os.write() in the Python signal handler is racy
    signal.set_wakeup_fd(sig_w)

    # map child pids to client fds
    pid_to_fd = {}

    with socket.socket(socket.AF_UNIX, fileno=listener_fd) as listener, \
         selectors.DefaultSelector() as selector:
        _forkserver._forkserver_address = listener.getsockname()

        selector.register(listener, selectors.EVENT_READ)
        selector.register(alive_r, selectors.EVENT_READ)
        selector.register(sig_r, selectors.EVENT_READ)

        while True:
            try:
                # Block until at least one event source is readable.
                while True:
                    rfds = [key.fileobj for (key, events) in selector.select()]
                    if rfds:
                        break

                if alive_r in rfds:
                    # EOF because no more client processes left
                    assert os.read(alive_r, 1) == b'', "Not at EOF?"
                    raise SystemExit

                if sig_r in rfds:
                    # Got SIGCHLD
                    os.read(sig_r, 65536)  # exhaust
                    while True:
                        # Scan for child processes
                        try:
                            pid, sts = os.waitpid(-1, os.WNOHANG)
                        except ChildProcessError:
                            break
                        if pid == 0:
                            break
                        child_w = pid_to_fd.pop(pid, None)
                        if child_w is not None:
                            # Translate the wait status into a returncode.
                            if os.WIFSIGNALED(sts):
                                returncode = -os.WTERMSIG(sts)
                            else:
                                if not os.WIFEXITED(sts):
                                    raise AssertionError(
                                        "Child {0:n} status is {1:n}".format(
                                            pid,sts))
                                returncode = os.WEXITSTATUS(sts)
                            # Send exit code to client process
                            try:
                                write_signed(child_w, returncode)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            os.close(child_w)
                        else:
                            # This shouldn't happen really
                            warnings.warn('forkserver: waitpid returned '
                                          'unexpected pid %d' % pid)

                if listener in rfds:
                    # Incoming fork request
                    with listener.accept()[0] as s:
                        # Receive fds from client
                        fds = reduction.recvfds(s, MAXFDS_TO_SEND + 1)
                        if len(fds) > MAXFDS_TO_SEND:
                            raise RuntimeError(
                                "Too many ({0:n}) fds to send".format(
                                    len(fds)))
                        child_r, child_w, *fds = fds
                        s.close()
                        pid = os.fork()
                        if pid == 0:
                            # Child
                            code = 1
                            try:
                                listener.close()
                                selector.close()
                                # Close everything the worker must not inherit.
                                unused_fds = [alive_r, child_w, sig_r, sig_w]
                                unused_fds.extend(pid_to_fd.values())
                                code = _serve_one(child_r, fds,
                                                  unused_fds,
                                                  old_handlers)
                            except Exception:
                                sys.excepthook(*sys.exc_info())
                                sys.stderr.flush()
                            finally:
                                # Never return into the server loop from a child.
                                os._exit(code)
                        else:
                            # Send pid to client process
                            try:
                                write_signed(child_w, pid)
                            except BrokenPipeError:
                                # client vanished
                                pass
                            pid_to_fd[pid] = child_w
                            os.close(child_r)
                            for fd in fds:
                                os.close(fd)

            except OSError as e:
                # A client aborting its connection is not fatal to the server.
                if e.errno != errno.ECONNABORTED:
                    raise

def _serve_one(child_r, fds, unused_fds, handlers):
    """Run a single forked child for the forkserver.

    Restores the signal handlers saved in *handlers*, closes the file
    descriptors in *unused_fds* that only the parent needs, wires up the
    fds received from the client, then executes the process object read
    from ``child_r``.  Returns the exit code produced by spawn._main().
    """
    # close unnecessary stuff and reset signal handlers
    signal.set_wakeup_fd(-1)
    for sig, val in handlers.items():
        signal.signal(sig, val)
    for fd in unused_fds:
        os.close(fd)

    # First two received fds are the forkserver keep-alive fd and the
    # resource tracker's fd; any remaining fds belong to the process
    # object being run.
    (_forkserver._forkserver_alive_fd,
     resource_tracker._resource_tracker._fd,
     *_forkserver._inherited_fds) = fds

    # Run process object received over pipe
    # child_r is duplicated so one copy can serve as the parent-alive
    # sentinel while the other is consumed by spawn._main().
    parent_sentinel = os.dup(child_r)
    code = spawn._main(child_r, parent_sentinel)

    return code


#
# Read and write signed numbers
#

def read_signed(fd):
    '''Read one native signed integer from file descriptor `fd`.

    Loops until SIGNED_STRUCT.size bytes have been read; raises EOFError
    if the other end closes before a full value arrives.
    '''
    remaining = SIGNED_STRUCT.size
    chunks = []
    while remaining > 0:
        chunk = os.read(fd, remaining)
        if not chunk:
            raise EOFError('unexpected EOF')
        chunks.append(chunk)
        remaining -= len(chunk)
    return SIGNED_STRUCT.unpack(b''.join(chunks))[0]

def write_signed(fd, n):
    '''Write the integer `n` to file descriptor `fd` as a native signed value.

    Retries partial writes until the whole packed value has been written.
    '''
    buf = memoryview(SIGNED_STRUCT.pack(n))
    while buf:
        written = os.write(fd, buf)
        if written == 0:
            raise RuntimeError('should not get here')
        buf = buf[written:]

#
#
#

# Process-wide ForkServer singleton; the module-level API below simply
# re-exports its bound methods.
_forkserver = ForkServer()
ensure_running = _forkserver.ensure_running
get_inherited_fds = _forkserver.get_inherited_fds
connect_to_new_process = _forkserver.connect_to_new_process
set_forkserver_preload = _forkserver.set_forkserver_preload
PK|��\,ӓm��managers.pynu�[���#
# Module providing manager classes for dealing
# with shared objects
#
# multiprocessing/managers.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = [ 'BaseManager', 'SyncManager', 'BaseProxy', 'Token',
            'SharedMemoryManager' ]

#
# Imports
#

import sys
import threading
import signal
import array
import queue
import time
import os
from os import getpid

from traceback import format_exc

from . import connection
from .context import reduction, get_spawning_popen, ProcessError
from . import pool
from . import process
from . import util
from . import get_context
try:
    from . import shared_memory
    HAS_SHMEM = True
except ImportError:
    HAS_SHMEM = False

#
# Register some things for pickling
#

def reduce_array(a):
    '''Pickle helper: rebuild an array.array from its typecode and raw bytes.'''
    reconstructor = array.array
    state = (a.typecode, a.tobytes())
    return reconstructor, state
reduction.register(array.array, reduce_array)

# Concrete types of dict views (items/keys/values) so that they can be
# registered for pickling as plain lists.
view_types = [type(getattr({}, name)()) for name in ('items','keys','values')]
if view_types[0] is not list:       # only needed in Py3.0
    def rebuild_as_list(obj):
        # Pickle a dict view by materializing its contents as a list.
        return list, (list(obj),)
    for view_type in view_types:
        reduction.register(view_type, rebuild_as_list)

#
# Type for identifying shared objects
#

class Token(object):
    '''
    Type to uniquely identify a shared object
    '''
    # A token is the (typeid, server address, object ident) triple that a
    # proxy uses to locate its referent on a manager server.
    __slots__ = ('typeid', 'address', 'id')

    def __init__(self, typeid, address, id):
        self.typeid = typeid
        self.address = address
        self.id = id

    def __getstate__(self):
        # __slots__ classes need explicit pickle support.
        return (self.typeid, self.address, self.id)

    def __setstate__(self, state):
        self.typeid, self.address, self.id = state

    def __repr__(self):
        return '%s(typeid=%r, address=%r, id=%r)' % \
               (self.__class__.__name__, self.typeid, self.address, self.id)

#
# Function for communication with a manager's server process
#

def dispatch(c, id, methodname, args=(), kwds={}):
    '''
    Send a message to manager using connection `c` and return response
    '''
    c.send((id, methodname, args, kwds))
    kind, result = c.recv()
    # Anything other than a plain return value is converted into an
    # exception and raised here in the caller.
    if kind != '#RETURN':
        raise convert_to_error(kind, result)
    return result

def convert_to_error(kind, result):
    '''Turn a (kind, result) reply from the server into an exception object.

    '#ERROR' replies carry a real exception instance; traceback-style
    replies carry a string which is wrapped in RemoteError.
    '''
    if kind == '#ERROR':
        return result
    if kind not in ('#TRACEBACK', '#UNSERIALIZABLE'):
        return ValueError('Unrecognized message type {!r}'.format(kind))
    if not isinstance(result, str):
        raise TypeError(
            "Result {0!r} (kind '{1}') type is {2}, not str".format(
                result, kind, type(result)))
    if kind == '#UNSERIALIZABLE':
        return RemoteError('Unserializable message: %s\n' % result)
    return RemoteError(result)

class RemoteError(Exception):
    '''Exception wrapping a traceback string received from a manager server.'''
    def __str__(self):
        banner = '-' * 75
        # Frame the remote traceback between two rules for readability.
        return '\n{0}\n{1}{0}'.format(banner, str(self.args[0]))

#
# Functions for finding the method names of an object
#

def all_methods(obj):
    '''
    Return a list of names of methods of `obj`
    '''
    return [name for name in dir(obj) if callable(getattr(obj, name))]

def public_methods(obj):
    '''
    Return a list of names of methods of `obj` which do not start with '_'
    '''
    return [name for name in all_methods(obj) if not name.startswith('_')]

#
# Server which is run in a process controlled by a manager
#

class Server(object):
    '''
    Server class which runs in a process controlled by a manager object
    '''
    # Names of the methods remote clients are allowed to invoke through
    # handle_request().
    public = ['shutdown', 'create', 'accept_connection', 'get_methods',
              'debug_info', 'number_of_objects', 'dummy', 'incref', 'decref']

    def __init__(self, registry, address, authkey, serializer):
        # registry maps typeid -> (callable, exposed, method_to_typeid,
        # proxytype); see create() where the entries are unpacked.
        if not isinstance(authkey, bytes):
            raise TypeError(
                "Authkey {0!r} is type {1!s}, not bytes".format(
                    authkey, type(authkey)))
        self.registry = registry
        self.authkey = process.AuthenticationString(authkey)
        Listener, Client = listener_client[serializer]

        # do authentication later
        self.listener = Listener(address=address, backlog=16)
        self.address = self.listener.address

        # ident '0' is a reserved dummy entry that maps to no real object.
        self.id_to_obj = {'0': (None, ())}
        self.id_to_refcount = {}
        self.id_to_local_proxy_obj = {}
        self.mutex = threading.Lock()

    def serve_forever(self):
        '''
        Run the server forever
        '''
        self.stop_event = threading.Event()
        process.current_process()._manager_server = self
        try:
            # Accept connections on a daemon thread; the main thread just
            # waits for the stop event (set by shutdown()).
            accepter = threading.Thread(target=self.accepter)
            accepter.daemon = True
            accepter.start()
            try:
                while not self.stop_event.is_set():
                    self.stop_event.wait(1)
            except (KeyboardInterrupt, SystemExit):
                pass
        finally:
            if sys.stdout != sys.__stdout__: # what about stderr?
                util.debug('resetting stdout, stderr')
                sys.stdout = sys.__stdout__
                sys.stderr = sys.__stderr__
            sys.exit(0)

    def accepter(self):
        # Accept connections forever, serving each one on its own daemon
        # thread.  Transient accept failures are simply retried.
        while True:
            try:
                c = self.listener.accept()
            except OSError:
                continue
            t = threading.Thread(target=self.handle_request, args=(c,))
            t.daemon = True
            t.start()

    def handle_request(self, c):
        '''
        Handle a new connection
        '''
        funcname = result = request = None
        try:
            # Mutual challenge/response authentication before reading any
            # request data.
            connection.deliver_challenge(c, self.authkey)
            connection.answer_challenge(c, self.authkey)
            request = c.recv()
            ignore, funcname, args, kwds = request
            assert funcname in self.public, '%r unrecognized' % funcname
            func = getattr(self, funcname)
        except Exception:
            msg = ('#TRACEBACK', format_exc())
        else:
            try:
                result = func(c, *args, **kwds)
            except Exception:
                msg = ('#TRACEBACK', format_exc())
            else:
                msg = ('#RETURN', result)
        try:
            c.send(msg)
        except Exception as e:
            # Best effort: try to report the send failure to the client,
            # then log it locally.
            try:
                c.send(('#TRACEBACK', format_exc()))
            except Exception:
                pass
            util.info('Failure to send message: %r', msg)
            util.info(' ... request was %r', request)
            util.info(' ... exception was %r', e)

        c.close()

    def serve_client(self, conn):
        '''
        Handle requests from the proxies in a particular process/thread
        '''
        util.debug('starting server thread to service %r',
                   threading.current_thread().name)

        recv = conn.recv
        send = conn.send
        id_to_obj = self.id_to_obj

        while not self.stop_event.is_set():

            try:
                methodname = obj = None
                request = recv()
                ident, methodname, args, kwds = request
                try:
                    obj, exposed, gettypeid = id_to_obj[ident]
                except KeyError as ke:
                    # Fall back to objects kept alive only by proxies that
                    # live inside the manager process itself.
                    try:
                        obj, exposed, gettypeid = \
                            self.id_to_local_proxy_obj[ident]
                    except KeyError as second_ke:
                        raise ke

                if methodname not in exposed:
                    raise AttributeError(
                        'method %r of %r object is not in exposed=%r' %
                        (methodname, type(obj), exposed)
                        )

                function = getattr(obj, methodname)

                try:
                    res = function(*args, **kwds)
                except Exception as e:
                    msg = ('#ERROR', e)
                else:
                    # If the method is registered to return a proxy, wrap
                    # the result in a new shared object and send a token.
                    typeid = gettypeid and gettypeid.get(methodname, None)
                    if typeid:
                        rident, rexposed = self.create(conn, typeid, res)
                        token = Token(typeid, self.address, rident)
                        msg = ('#PROXY', (rexposed, token))
                    else:
                        msg = ('#RETURN', res)

            except AttributeError:
                if methodname is None:
                    msg = ('#TRACEBACK', format_exc())
                else:
                    # Unknown/unexposed method: try the fallback handlers
                    # ('__str__', '__repr__', '#GETVALUE') before giving up.
                    try:
                        fallback_func = self.fallback_mapping[methodname]
                        result = fallback_func(
                            self, conn, ident, obj, *args, **kwds
                            )
                        msg = ('#RETURN', result)
                    except Exception:
                        msg = ('#TRACEBACK', format_exc())

            except EOFError:
                util.debug('got EOF -- exiting thread serving %r',
                           threading.current_thread().name)
                sys.exit(0)

            except Exception:
                msg = ('#TRACEBACK', format_exc())

            try:
                try:
                    send(msg)
                except Exception as e:
                    # The result could not be pickled; tell the client why.
                    send(('#UNSERIALIZABLE', format_exc()))
            except Exception as e:
                util.info('exception in thread serving %r',
                        threading.current_thread().name)
                util.info(' ... message was %r', msg)
                util.info(' ... exception was %r', e)
                conn.close()
                sys.exit(1)

    def fallback_getvalue(self, conn, ident, obj):
        # Fallback for '#GETVALUE': return the referent itself (the value
        # is copied when sent back over the connection).
        return obj

    def fallback_str(self, conn, ident, obj):
        # Fallback for '__str__' when the referent does not expose it.
        return str(obj)

    def fallback_repr(self, conn, ident, obj):
        # Fallback for '__repr__' when the referent does not expose it.
        return repr(obj)

    fallback_mapping = {
        '__str__':fallback_str,
        '__repr__':fallback_repr,
        '#GETVALUE':fallback_getvalue
        }

    def dummy(self, c):
        # No-op; clients call it to verify the connection/authentication.
        pass

    def debug_info(self, c):
        '''
        Return some info --- useful to spot problems with refcounting
        '''
        # Perhaps include debug info about 'c'?
        with self.mutex:
            result = []
            keys = list(self.id_to_refcount.keys())
            keys.sort()
            for ident in keys:
                if ident != '0':
                    result.append('  %s:       refcount=%s\n    %s' %
                                  (ident, self.id_to_refcount[ident],
                                   str(self.id_to_obj[ident][0])[:75]))
            return '\n'.join(result)

    def number_of_objects(self, c):
        '''
        Number of shared objects
        '''
        # Doesn't use (len(self.id_to_obj) - 1) as we shouldn't count ident='0'
        return len(self.id_to_refcount)

    def shutdown(self, c):
        '''
        Shutdown this process
        '''
        try:
            util.debug('manager received shutdown message')
            c.send(('#RETURN', None))
        except:
            import traceback
            traceback.print_exc()
        finally:
            # Wake up serve_forever(), which then exits the process.
            self.stop_event.set()

    def create(*args, **kwds):
        '''
        Create a new shared object and return its id
        '''
        # The normal call is create(self, c, typeid, *args); the branches
        # below keep backward compatibility with callers that passed
        # 'typeid' or 'c' as keyword arguments (now deprecated).
        if len(args) >= 3:
            self, c, typeid, *args = args
        elif not args:
            raise TypeError("descriptor 'create' of 'Server' object "
                            "needs an argument")
        else:
            if 'typeid' not in kwds:
                raise TypeError('create expected at least 2 positional '
                                'arguments, got %d' % (len(args)-1))
            typeid = kwds.pop('typeid')
            if len(args) >= 2:
                self, c, *args = args
                import warnings
                warnings.warn("Passing 'typeid' as keyword argument is deprecated",
                              DeprecationWarning, stacklevel=2)
            else:
                if 'c' not in kwds:
                    raise TypeError('create expected at least 2 positional '
                                    'arguments, got %d' % (len(args)-1))
                c = kwds.pop('c')
                self, *args = args
                import warnings
                warnings.warn("Passing 'c' as keyword argument is deprecated",
                              DeprecationWarning, stacklevel=2)
        args = tuple(args)

        with self.mutex:
            callable, exposed, method_to_typeid, proxytype = \
                      self.registry[typeid]

            if callable is None:
                # Registered without a factory: the single positional
                # argument is the already-created object itself.
                if kwds or (len(args) != 1):
                    raise ValueError(
                        "Without callable, must have one non-keyword argument")
                obj = args[0]
            else:
                obj = callable(*args, **kwds)

            if exposed is None:
                exposed = public_methods(obj)
            if method_to_typeid is not None:
                if not isinstance(method_to_typeid, dict):
                    raise TypeError(
                        "Method_to_typeid {0!r}: type {1!s}, not dict".format(
                            method_to_typeid, type(method_to_typeid)))
                exposed = list(exposed) + list(method_to_typeid)

            ident = '%x' % id(obj)  # convert to string because xmlrpclib
                                    # only has 32 bit signed integers
            util.debug('%r callable returned object with id %r', typeid, ident)

            self.id_to_obj[ident] = (obj, set(exposed), method_to_typeid)
            if ident not in self.id_to_refcount:
                self.id_to_refcount[ident] = 0

        self.incref(c, ident)
        return ident, tuple(exposed)
    create.__text_signature__ = '($self, c, typeid, /, *args, **kwds)'

    def get_methods(self, c, token):
        '''
        Return the methods of the shared object indicated by token
        '''
        return tuple(self.id_to_obj[token.id][1])

    def accept_connection(self, c, name):
        '''
        Spawn a new thread to serve this connection
        '''
        threading.current_thread().name = name
        c.send(('#RETURN', None))
        self.serve_client(c)

    def incref(self, c, ident):
        # Increment the external reference count for `ident`.
        with self.mutex:
            try:
                self.id_to_refcount[ident] += 1
            except KeyError as ke:
                # If no external references exist but an internal (to the
                # manager) still does and a new external reference is created
                # from it, restore the manager's tracking of it from the
                # previously stashed internal ref.
                if ident in self.id_to_local_proxy_obj:
                    self.id_to_refcount[ident] = 1
                    self.id_to_obj[ident] = \
                        self.id_to_local_proxy_obj[ident]
                    obj, exposed, gettypeid = self.id_to_obj[ident]
                    util.debug('Server re-enabled tracking & INCREF %r', ident)
                else:
                    raise ke

    def decref(self, c, ident):
        # Objects referenced only from inside the manager are not
        # refcounted here; skip them.
        if ident not in self.id_to_refcount and \
            ident in self.id_to_local_proxy_obj:
            util.debug('Server DECREF skipping %r', ident)
            return

        with self.mutex:
            if self.id_to_refcount[ident] <= 0:
                raise AssertionError(
                    "Id {0!s} ({1!r}) has refcount {2:n}, not 1+".format(
                        ident, self.id_to_obj[ident],
                        self.id_to_refcount[ident]))
            self.id_to_refcount[ident] -= 1
            if self.id_to_refcount[ident] == 0:
                del self.id_to_refcount[ident]

        if ident not in self.id_to_refcount:
            # Two-step process in case the object turns out to contain other
            # proxy objects (e.g. a managed list of managed lists).
            # Otherwise, deleting self.id_to_obj[ident] would trigger the
            # deleting of the stored value (another managed object) which would
            # in turn attempt to acquire the mutex that is already held here.
            self.id_to_obj[ident] = (None, (), None)  # thread-safe
            util.debug('disposing of obj with id %r', ident)
            with self.mutex:
                del self.id_to_obj[ident]


#
# Class to represent state of a manager
#

class State(object):
    '''Mutable holder for a manager's lifecycle stage.

    `value` holds one of the constants below.
    '''
    __slots__ = ['value']
    INITIAL = 0
    STARTED = 1
    SHUTDOWN = 2

#
# Mapping from serializer name to Listener and Client types
#

listener_client = {
    'pickle' : (connection.Listener, connection.Client),        # default
    'xmlrpclib' : (connection.XmlListener, connection.XmlClient)
    }

#
# Definition of BaseManager
#

class BaseManager(object):
    '''
    Base class for managers
    '''
    _registry = {}
    _Server = Server

    def __init__(self, address=None, authkey=None, serializer='pickle',
                 ctx=None):
        if authkey is None:
            authkey = process.current_process().authkey
        self._address = address     # XXX not final address if eg ('', 0)
        self._authkey = process.AuthenticationString(authkey)
        self._state = State()
        self._state.value = State.INITIAL
        self._serializer = serializer
        self._Listener, self._Client = listener_client[serializer]
        self._ctx = ctx or get_context()

    def get_server(self):
        '''
        Return server object with serve_forever() method and address attribute
        '''
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return Server(self._registry, self._address,
                      self._authkey, self._serializer)

    def connect(self):
        '''
        Connect manager object to the server process
        '''
        Listener, Client = listener_client[self._serializer]
        conn = Client(self._address, authkey=self._authkey)
        # 'dummy' both authenticates and verifies the server is reachable.
        dispatch(conn, None, 'dummy')
        self._state.value = State.STARTED

    def start(self, initializer=None, initargs=()):
        '''
        Spawn a server process for this manager object
        '''
        if self._state.value != State.INITIAL:
            if self._state.value == State.STARTED:
                raise ProcessError("Already started server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        # pipe over which we will retrieve address of server
        reader, writer = connection.Pipe(duplex=False)

        # spawn process which runs a server
        self._process = self._ctx.Process(
            target=type(self)._run_server,
            args=(self._registry, self._address, self._authkey,
                  self._serializer, writer, initializer, initargs),
            )
        ident = ':'.join(str(i) for i in self._process._identity)
        self._process.name = type(self).__name__  + '-' + ident
        self._process.start()

        # get address of server
        # (the child reports its actual listening address, which may differ
        # from self._address if e.g. ('', 0) was given)
        writer.close()
        self._address = reader.recv()
        reader.close()

        # register a finalizer
        self._state.value = State.STARTED
        self.shutdown = util.Finalize(
            self, type(self)._finalize_manager,
            args=(self._process, self._address, self._authkey,
                  self._state, self._Client),
            exitpriority=0
            )

    @classmethod
    def _run_server(cls, registry, address, authkey, serializer, writer,
                    initializer=None, initargs=()):
        '''
        Create a server, report its address and run it
        '''
        # bpo-36368: protect server process from KeyboardInterrupt signals
        signal.signal(signal.SIGINT, signal.SIG_IGN)

        if initializer is not None:
            initializer(*initargs)

        # create server
        server = cls._Server(registry, address, authkey, serializer)

        # inform parent process of the server's address
        writer.send(server.address)
        writer.close()

        # run the manager
        util.info('manager serving at %r', server.address)
        server.serve_forever()

    def _create(self, typeid, /, *args, **kwds):
        '''
        Create a new shared object; return the token and exposed tuple
        '''
        assert self._state.value == State.STARTED, 'server not yet started'
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            id, exposed = dispatch(conn, None, 'create', (typeid,)+args, kwds)
        finally:
            conn.close()
        return Token(typeid, self._address, id), exposed

    def join(self, timeout=None):
        '''
        Join the manager process (if it has been spawned)
        '''
        if self._process is not None:
            self._process.join(timeout)
            if not self._process.is_alive():
                self._process = None

    def _debug_info(self):
        '''
        Return some info about the servers shared objects and connections
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'debug_info')
        finally:
            conn.close()

    def _number_of_objects(self):
        '''
        Return the number of shared objects
        '''
        conn = self._Client(self._address, authkey=self._authkey)
        try:
            return dispatch(conn, None, 'number_of_objects')
        finally:
            conn.close()

    def __enter__(self):
        # Start the server lazily so "with Manager():" works without an
        # explicit start() call.
        if self._state.value == State.INITIAL:
            self.start()
        if self._state.value != State.STARTED:
            if self._state.value == State.INITIAL:
                raise ProcessError("Unable to start server")
            elif self._state.value == State.SHUTDOWN:
                raise ProcessError("Manager has shut down")
            else:
                raise ProcessError(
                    "Unknown state {!r}".format(self._state.value))
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown()

    @staticmethod
    def _finalize_manager(process, address, authkey, state, _Client):
        '''
        Shutdown the manager process; will be registered as a finalizer
        '''
        if process.is_alive():
            util.info('sending shutdown message to manager')
            try:
                conn = _Client(address, authkey=authkey)
                try:
                    dispatch(conn, None, 'shutdown')
                finally:
                    conn.close()
            except Exception:
                pass

            # Give the server a short grace period, then terminate it.
            process.join(timeout=1.0)
            if process.is_alive():
                util.info('manager still alive')
                if hasattr(process, 'terminate'):
                    util.info('trying to `terminate()` manager process')
                    process.terminate()
                    process.join(timeout=0.1)
                    if process.is_alive():
                        util.info('manager still alive after terminate')

        state.value = State.SHUTDOWN
        try:
            del BaseProxy._address_to_local[address]
        except KeyError:
            pass

    @property
    def address(self):
        # Address the server process is listening on (final only after
        # start() or connect()).
        return self._address

    @classmethod
    def register(cls, typeid, callable=None, proxytype=None, exposed=None,
                 method_to_typeid=None, create_method=True):
        '''
        Register a typeid with the manager type
        '''
        # Give each subclass its own registry so registrations don't leak
        # into the base class.
        if '_registry' not in cls.__dict__:
            cls._registry = cls._registry.copy()

        if proxytype is None:
            proxytype = AutoProxy

        exposed = exposed or getattr(proxytype, '_exposed_', None)

        method_to_typeid = method_to_typeid or \
                           getattr(proxytype, '_method_to_typeid_', None)

        if method_to_typeid:
            for key, value in list(method_to_typeid.items()): # isinstance?
                assert type(key) is str, '%r is not a string' % key
                assert type(value) is str, '%r is not a string' % value

        cls._registry[typeid] = (
            callable, exposed, method_to_typeid, proxytype
            )

        if create_method:
            # Add a convenience method (named after typeid) that creates
            # the shared object on the server and returns a proxy for it.
            def temp(self, /, *args, **kwds):
                util.debug('requesting creation of a shared %r object', typeid)
                token, exp = self._create(typeid, *args, **kwds)
                proxy = proxytype(
                    token, self._serializer, manager=self,
                    authkey=self._authkey, exposed=exp
                    )
                # Drop the extra reference created by _create(); the proxy
                # itself holds one via its own incref.
                conn = self._Client(token.address, authkey=self._authkey)
                dispatch(conn, None, 'decref', (token.id,))
                return proxy
            temp.__name__ = typeid
            setattr(cls, typeid, temp)

#
# Subclass of set which get cleared after a fork
#

class ProcessLocalSet(set):
    # A set that is emptied in the child after a fork and pickles to a new
    # empty set, so its contents never cross process boundaries.
    def __init__(self):
        util.register_after_fork(self, lambda obj: obj.clear())
    def __reduce__(self):
        return type(self), ()

#
# Definition of BaseProxy
#

class BaseProxy(object):
    '''
    A base for proxies of shared objects
    '''
    # Maps a manager address to (thread-local storage, set of owned ids);
    # shared by all proxies in this process that talk to that manager.
    _address_to_local = {}
    _mutex = util.ForkAwareThreadLock()

    def __init__(self, token, serializer, manager=None,
                 authkey=None, exposed=None, incref=True, manager_owned=False):
        with BaseProxy._mutex:
            tls_idset = BaseProxy._address_to_local.get(token.address, None)
            if tls_idset is None:
                tls_idset = util.ForkAwareLocal(), ProcessLocalSet()
                BaseProxy._address_to_local[token.address] = tls_idset

        # self._tls is used to record the connection used by this
        # thread to communicate with the manager at token.address
        self._tls = tls_idset[0]

        # self._idset is used to record the identities of all shared
        # objects for which the current process owns references and
        # which are in the manager at token.address
        self._idset = tls_idset[1]

        self._token = token
        self._id = self._token.id
        self._manager = manager
        self._serializer = serializer
        self._Client = listener_client[serializer][1]

        # Should be set to True only when a proxy object is being created
        # on the manager server; primary use case: nested proxy objects.
        # RebuildProxy detects when a proxy is being created on the manager
        # and sets this value appropriately.
        self._owned_by_manager = manager_owned

        if authkey is not None:
            self._authkey = process.AuthenticationString(authkey)
        elif self._manager is not None:
            self._authkey = self._manager._authkey
        else:
            self._authkey = process.current_process().authkey

        if incref:
            self._incref()

        util.register_after_fork(self, BaseProxy._after_fork)

    def _connect(self):
        # Open this thread's connection to the manager and register it with
        # the server under a name identifying this process/thread.
        util.debug('making connection to manager')
        name = process.current_process().name
        if threading.current_thread().name != 'MainThread':
            name += '|' + threading.current_thread().name
        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'accept_connection', (name,))
        self._tls.connection = conn

    def _callmethod(self, methodname, args=(), kwds={}):
        '''
        Try to call a method of the referent and return a copy of the result
        '''
        # Lazily open one connection per thread (EAFP on the tls attribute).
        try:
            conn = self._tls.connection
        except AttributeError:
            util.debug('thread %r does not own a connection',
                       threading.current_thread().name)
            self._connect()
            conn = self._tls.connection

        conn.send((self._id, methodname, args, kwds))
        kind, result = conn.recv()

        if kind == '#RETURN':
            return result
        elif kind == '#PROXY':
            # The server created a new shared object for the result; build
            # a proxy of the registered type for it.
            exposed, token = result
            proxytype = self._manager._registry[token.typeid][-1]
            token.address = self._token.address
            proxy = proxytype(
                token, self._serializer, manager=self._manager,
                authkey=self._authkey, exposed=exposed
                )
            # Drop the extra server-side reference created along with the
            # new shared object; the proxy holds its own.
            conn = self._Client(token.address, authkey=self._authkey)
            dispatch(conn, None, 'decref', (token.id,))
            return proxy
        raise convert_to_error(kind, result)

    def _getvalue(self):
        '''
        Get a copy of the value of the referent
        '''
        return self._callmethod('#GETVALUE')

    def _incref(self):
        # Tell the server we hold a reference, and arrange for a matching
        # decref when this proxy is finalized.
        if self._owned_by_manager:
            util.debug('owned_by_manager skipped INCREF of %r', self._token.id)
            return

        conn = self._Client(self._token.address, authkey=self._authkey)
        dispatch(conn, None, 'incref', (self._id,))
        util.debug('INCREF %r', self._token.id)

        self._idset.add(self._id)

        state = self._manager and self._manager._state

        self._close = util.Finalize(
            self, BaseProxy._decref,
            args=(self._token, self._authkey, state,
                  self._tls, self._idset, self._Client),
            exitpriority=10
            )

    @staticmethod
    def _decref(token, authkey, state, tls, idset, _Client):
        # Finalizer: release this proxy's reference on the server (if the
        # manager is still running) and close the thread's connection when
        # no proxies for that manager remain.
        idset.discard(token.id)

        # check whether manager is still alive
        if state is None or state.value == State.STARTED:
            # tell manager this process no longer cares about referent
            try:
                util.debug('DECREF %r', token.id)
                conn = _Client(token.address, authkey=authkey)
                dispatch(conn, None, 'decref', (token.id,))
            except Exception as e:
                util.debug('... decref failed %s', e)

        else:
            util.debug('DECREF %r -- manager already shutdown', token.id)

        # check whether we can close this thread's connection because
        # the process owns no more references to objects for this manager
        if not idset and hasattr(tls, 'connection'):
            util.debug('thread %r has no more proxies so closing conn',
                       threading.current_thread().name)
            tls.connection.close()
            del tls.connection

    def _after_fork(self):
        # In the child the manager reference is stale; re-register our
        # interest in the referent with a fresh incref.
        self._manager = None
        try:
            self._incref()
        except Exception as e:
            # the proxy may just be for a manager which has shutdown
            util.info('incref failed: %s' % e)

    def __reduce__(self):
        # Pickle as (rebuild function, args); the authkey only travels
        # when pickling for process spawning.
        kwds = {}
        if get_spawning_popen() is not None:
            kwds['authkey'] = self._authkey

        if getattr(self, '_isauto', False):
            kwds['exposed'] = self._exposed_
            return (RebuildProxy,
                    (AutoProxy, self._token, self._serializer, kwds))
        else:
            return (RebuildProxy,
                    (type(self), self._token, self._serializer, kwds))

    def __deepcopy__(self, memo):
        # Deep-copying a proxy yields a copy of the referent's value.
        return self._getvalue()

    def __repr__(self):
        return '<%s object, typeid %r at %#x>' % \
               (type(self).__name__, self._token.typeid, id(self))

    def __str__(self):
        '''
        Return representation of the referent (or a fall-back if that fails)
        '''
        try:
            return self._callmethod('__repr__')
        except Exception:
            return repr(self)[:-1] + "; '__str__()' failed>"

#
# Function used for unpickling
#

def RebuildProxy(func, token, serializer, kwds):
    '''
    Function used for unpickling proxy objects.
    '''
    current = process.current_process()
    server = getattr(current, '_manager_server', None)
    if server and server.address == token.address:
        # Unpickling inside the process that owns the manager server:
        # mark the proxy as manager-owned and make the referent reachable
        # through the server's local-proxy table.
        util.debug('Rebuild a proxy owned by manager, token=%r', token)
        kwds['manager_owned'] = True
        local_objs = server.id_to_local_proxy_obj
        if token.id not in local_objs:
            local_objs[token.id] = server.id_to_obj[token.id]
    wants_incref = kwds.pop('incref', True)
    inheriting = getattr(current, '_inheriting', False)
    return func(token, serializer,
                incref=wants_incref and not inheriting, **kwds)

#
# Functions to create proxies and proxy types
#

def MakeProxyType(name, exposed, _cache={}):
    '''
    Return a proxy type whose methods are given by `exposed`
    '''
    # NOTE: the mutable default `_cache` is intentional -- it memoizes the
    # generated proxy classes across calls, keyed by (name, exposed).
    exposed = tuple(exposed)
    try:
        return _cache[(name, exposed)]
    except KeyError:
        pass

    dic = {}

    # Generate one forwarding method per exposed name; each delegates to
    # BaseProxy._callmethod with its own method name.
    for meth in exposed:
        exec('''def %s(self, /, *args, **kwds):
        return self._callmethod(%r, args, kwds)''' % (meth, meth), dic)

    ProxyType = type(name, (BaseProxy,), dic)
    ProxyType._exposed_ = exposed
    _cache[(name, exposed)] = ProxyType
    return ProxyType


def AutoProxy(token, serializer, manager=None, authkey=None,
              exposed=None, incref=True, manager_owned=False):
    '''
    Return an auto-proxy for `token`.

    Bug fix: accept (and forward) `manager_owned`.  RebuildProxy() sets
    kwds['manager_owned'] = True when unpickling inside the process that
    owns the manager server; without this parameter that call raised
    "AutoProxy() got an unexpected keyword argument 'manager_owned'"
    (CPython bpo-39959).  The default False keeps the old behaviour for
    all existing callers.
    '''
    _Client = listener_client[serializer][1]

    if exposed is None:
        # Ask the server which methods the referent exposes.
        conn = _Client(token.address, authkey=authkey)
        try:
            exposed = dispatch(conn, None, 'get_methods', (token,))
        finally:
            conn.close()

    # Authkey resolution order: explicit argument, then the owning
    # manager's key, then the current process's key.
    if authkey is None and manager is not None:
        authkey = manager._authkey
    if authkey is None:
        authkey = process.current_process().authkey

    ProxyType = MakeProxyType('AutoProxy[%s]' % token.typeid, exposed)
    proxy = ProxyType(token, serializer, manager=manager, authkey=authkey,
                      incref=incref, manager_owned=manager_owned)
    proxy._isauto = True
    return proxy

#
# Types/callables which we will register with SyncManager
#

class Namespace(object):
    """Simple attribute container; attributes whose names start with an
    underscore are hidden from repr()."""
    def __init__(self, /, **kwds):
        self.__dict__.update(kwds)
    def __repr__(self):
        shown = sorted('%s=%r' % (attr, val)
                       for attr, val in self.__dict__.items()
                       if not attr.startswith('_'))
        return '%s(%s)' % (self.__class__.__name__, ', '.join(shown))

class Value(object):
    # Plain value holder used as the referent behind ValueProxy.  The
    # `lock` argument is accepted but unused here; synchronization is
    # handled by the manager/proxy layer.
    def __init__(self, typecode, value, lock=True):
        self._typecode = typecode
        self._value = value
    def get(self):
        # Return the stored value.
        return self._value
    def set(self, value):
        # Replace the stored value.
        self._value = value
    def __repr__(self):
        return '%s(%r, %r)'%(type(self).__name__, self._typecode, self._value)
    value = property(get, set)

def Array(typecode, sequence, lock=True):
    """Return a plain ``array.array`` built from *sequence*.

    The ``lock`` argument is accepted for signature compatibility and is
    ignored; synchronization is provided by the proxy layer.
    """
    return array.array(typecode, sequence)

#
# Proxy types used by SyncManager
#

class IteratorProxy(BaseProxy):
    # Proxy for iterator/generator referents living in the manager process.
    # __iter__ returns self because the iteration state lives server-side.
    _exposed_ = ('__next__', 'send', 'throw', 'close')
    def __iter__(self):
        return self
    def __next__(self, *args):
        return self._callmethod('__next__', args)
    def send(self, *args):
        return self._callmethod('send', args)
    def throw(self, *args):
        return self._callmethod('throw', args)
    def close(self, *args):
        return self._callmethod('close', args)


class AcquirerProxy(BaseProxy):
    # Proxy base for lock-like referents (Lock, RLock, Semaphore, ...).
    _exposed_ = ('acquire', 'release')
    def acquire(self, blocking=True, timeout=None):
        # Only forward `timeout` when the caller supplied one -- presumably
        # because some referents reject timeout=None; verify against the
        # registered referent types.
        args = (blocking,) if timeout is None else (blocking, timeout)
        return self._callmethod('acquire', args)
    def release(self):
        return self._callmethod('release')
    def __enter__(self):
        # Context-manager protocol forwards to acquire/release server-side.
        return self._callmethod('acquire')
    def __exit__(self, exc_type, exc_val, exc_tb):
        return self._callmethod('release')


class ConditionProxy(AcquirerProxy):
    # Proxy for a threading.Condition held in the manager process.
    _exposed_ = ('acquire', 'release', 'wait', 'notify', 'notify_all')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def notify(self, n=1):
        return self._callmethod('notify', (n,))
    def notify_all(self):
        return self._callmethod('notify_all')
    def wait_for(self, predicate, timeout=None):
        # Mirrors threading.Condition.wait_for, but evaluates `predicate`
        # locally (only wait() is forwarded to the server).
        result = predicate()
        if result:
            return result
        if timeout is not None:
            endtime = time.monotonic() + timeout
        else:
            endtime = None
            waittime = None
        while not result:
            if endtime is not None:
                # Shrink the remaining wait each iteration; give up once
                # the deadline has passed.
                waittime = endtime - time.monotonic()
                if waittime <= 0:
                    break
            self.wait(waittime)
            result = predicate()
        # On timeout this is the last (falsy) predicate result.
        return result


class EventProxy(BaseProxy):
    # Proxy for a threading.Event held in the manager process.
    _exposed_ = ('is_set', 'set', 'clear', 'wait')
    def is_set(self):
        return self._callmethod('is_set')
    def set(self):
        return self._callmethod('set')
    def clear(self):
        return self._callmethod('clear')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))


class BarrierProxy(BaseProxy):
    # Proxy for a threading.Barrier.  Read-only attributes are fetched by
    # invoking __getattribute__ on the referent, hence its presence in
    # _exposed_.
    _exposed_ = ('__getattribute__', 'wait', 'abort', 'reset')
    def wait(self, timeout=None):
        return self._callmethod('wait', (timeout,))
    def abort(self):
        return self._callmethod('abort')
    def reset(self):
        return self._callmethod('reset')
    @property
    def parties(self):
        return self._callmethod('__getattribute__', ('parties',))
    @property
    def n_waiting(self):
        return self._callmethod('__getattribute__', ('n_waiting',))
    @property
    def broken(self):
        return self._callmethod('__getattribute__', ('broken',))


class NamespaceProxy(BaseProxy):
    # Attribute access is forwarded to the referent Namespace, except for
    # names starting with '_', which are proxy-internal state and must be
    # resolved locally via object.__getattribute__/__setattr__/__delattr__.
    _exposed_ = ('__getattribute__', '__setattr__', '__delattr__')
    def __getattr__(self, key):
        if key[0] == '_':
            return object.__getattribute__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__getattribute__', (key,))
    def __setattr__(self, key, value):
        if key[0] == '_':
            return object.__setattr__(self, key, value)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__setattr__', (key, value))
    def __delattr__(self, key):
        if key[0] == '_':
            return object.__delattr__(self, key)
        callmethod = object.__getattribute__(self, '_callmethod')
        return callmethod('__delattr__', (key,))


class ValueProxy(BaseProxy):
    # Proxy for the Value holder; exposes get/set plus a read-write
    # `value` property built on top of them.
    _exposed_ = ('get', 'set')
    def get(self):
        return self._callmethod('get')
    def set(self, value):
        return self._callmethod('set', (value,))
    value = property(get, set)


# Generated proxy type forwarding the usual list protocol to the referent.
BaseListProxy = MakeProxyType('BaseListProxy', (
    '__add__', '__contains__', '__delitem__', '__getitem__', '__len__',
    '__mul__', '__reversed__', '__rmul__', '__setitem__',
    'append', 'count', 'extend', 'index', 'insert', 'pop', 'remove',
    'reverse', 'sort', '__imul__'
    ))
class ListProxy(BaseListProxy):
    # In-place operators must return the proxy itself (not the referent's
    # return value) so statements like `lst += [x]` rebind to the proxy.
    def __iadd__(self, value):
        self._callmethod('extend', (value,))
        return self
    def __imul__(self, value):
        self._callmethod('__imul__', (value,))
        return self


# Generated proxy type forwarding the mapping protocol to the referent.
DictProxy = MakeProxyType('DictProxy', (
    '__contains__', '__delitem__', '__getitem__', '__iter__', '__len__',
    '__setitem__', 'clear', 'copy', 'get', 'items',
    'keys', 'pop', 'popitem', 'setdefault', 'update', 'values'
    ))
# __iter__ returns an iterator living in the server, so its result must
# itself be wrapped in an Iterator proxy.
DictProxy._method_to_typeid_ = {
    '__iter__': 'Iterator',
    }


# Minimal sequence proxy used for the plain Array (array.array) referent.
ArrayProxy = MakeProxyType('ArrayProxy', (
    '__len__', '__getitem__', '__setitem__'
    ))


BasePoolProxy = MakeProxyType('PoolProxy', (
    'apply', 'apply_async', 'close', 'imap', 'imap_unordered', 'join',
    'map', 'map_async', 'starmap', 'starmap_async', 'terminate',
    ))
# Async and imap calls return objects that live in the server process,
# so their results are wrapped in the corresponding proxy types.
BasePoolProxy._method_to_typeid_ = {
    'apply_async': 'AsyncResult',
    'map_async': 'AsyncResult',
    'starmap_async': 'AsyncResult',
    'imap': 'Iterator',
    'imap_unordered': 'Iterator'
    }
class PoolProxy(BasePoolProxy):
    # Context-manager support matching multiprocessing.Pool: leaving the
    # block terminates the pool in the manager process.
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()

#
# Definition of SyncManager
#

class SyncManager(BaseManager):
    '''
    Subclass of `BaseManager` which supports a number of shared object types.

    The types registered are those intended for the synchronization
    of threads, plus `dict`, `list` and `Namespace`.

    The `multiprocessing.Manager()` function creates started instances of
    this class.
    '''
    # All shared types are attached via the register() calls that follow
    # this class definition.

# Referent types managed server-side, paired with their client proxies.
SyncManager.register('Queue', queue.Queue)
# JoinableQueue deliberately maps to queue.Queue as well: the threading
# Queue already provides task_done()/join().
SyncManager.register('JoinableQueue', queue.Queue)
SyncManager.register('Event', threading.Event, EventProxy)
SyncManager.register('Lock', threading.Lock, AcquirerProxy)
SyncManager.register('RLock', threading.RLock, AcquirerProxy)
SyncManager.register('Semaphore', threading.Semaphore, AcquirerProxy)
SyncManager.register('BoundedSemaphore', threading.BoundedSemaphore,
                     AcquirerProxy)
SyncManager.register('Condition', threading.Condition, ConditionProxy)
SyncManager.register('Barrier', threading.Barrier, BarrierProxy)
SyncManager.register('Pool', pool.Pool, PoolProxy)
SyncManager.register('list', list, ListProxy)
SyncManager.register('dict', dict, DictProxy)
SyncManager.register('Value', Value, ValueProxy)
SyncManager.register('Array', Array, ArrayProxy)
SyncManager.register('Namespace', Namespace, NamespaceProxy)

# types returned by methods of PoolProxy
# (create_method=False: not directly constructible by users)
SyncManager.register('Iterator', proxytype=IteratorProxy, create_method=False)
SyncManager.register('AsyncResult', create_method=False)

#
# Definition of SharedMemoryManager and SharedMemoryServer
#

if HAS_SHMEM:
    class _SharedMemoryTracker:
        """Manages one or more shared memory segments.

        Keeps the list of segment names registered by a server so that every
        block can be unlinked when unlink() is called or the tracker dies.
        """

        def __init__(self, name, segment_names=None):
            self.shared_memory_context_name = name
            # Bug fix: the original signature used a mutable default
            # argument (segment_names=[]), so every tracker constructed
            # without the argument shared -- and mutated -- the same list.
            # Build a fresh list per instance instead.
            self.segment_names = list(segment_names) if segment_names else []

        def register_segment(self, segment_name):
            "Adds the supplied shared memory block name to tracker."
            util.debug(f"Register segment {segment_name!r} in pid {getpid()}")
            self.segment_names.append(segment_name)

        def destroy_segment(self, segment_name):
            """Calls unlink() on the shared memory block with the supplied name
            and removes it from the list of blocks being tracked."""
            util.debug(f"Destroy segment {segment_name!r} in pid {getpid()}")
            self.segment_names.remove(segment_name)
            segment = shared_memory.SharedMemory(segment_name)
            segment.close()
            segment.unlink()

        def unlink(self):
            "Calls destroy_segment() on all tracked shared memory blocks."
            # Iterate over a copy: destroy_segment() mutates segment_names.
            for segment_name in self.segment_names[:]:
                self.destroy_segment(segment_name)

        def __del__(self):
            util.debug(f"Call {self.__class__.__name__}.__del__ in {getpid()}")
            self.unlink()

        def __getstate__(self):
            # Picklable state: (context name, tracked segment names).
            return (self.shared_memory_context_name, self.segment_names)

        def __setstate__(self, state):
            self.__init__(*state)


    class SharedMemoryServer(Server):
        """Server subclass that tracks shared memory segments so they can
        all be released when the server shuts down."""

        public = Server.public + \
                 ['track_segment', 'release_segment', 'list_segments']

        def __init__(self, *args, **kwargs):
            Server.__init__(self, *args, **kwargs)
            address = self.address
            # The address of Linux abstract namespaces can be bytes
            if isinstance(address, bytes):
                address = os.fsdecode(address)
            self.shared_memory_context = \
                _SharedMemoryTracker(f"shm_{address}_{getpid()}")
            util.debug(f"SharedMemoryServer started by pid {getpid()}")

        def create(*args, **kwargs):
            """Create a new distributed-shared object (not backed by a shared
            memory block) and return its id to be used in a Proxy Object."""
            # Unless set up as a shared proxy, don't make shared_memory_context
            # a standard part of kwargs.  This makes things easier for supplying
            # simple functions.
            #
            # The *args form emulates a positional-only signature (see the
            # __text_signature__ assignment below).
            if len(args) >= 3:
                # Bug fix: this assigned `typeod` (typo), leaving `typeid`
                # unbound and raising NameError on the registry lookup.
                typeid = args[2]
            elif 'typeid' in kwargs:
                typeid = kwargs['typeid']
            elif not args:
                raise TypeError("descriptor 'create' of 'SharedMemoryServer' "
                                "object needs an argument")
            else:
                raise TypeError('create expected at least 2 positional '
                                'arguments, got %d' % (len(args)-1))
            # Bug fix: `self` was referenced below without ever being bound.
            # args is guaranteed non-empty here, and args[0] is the instance.
            self = args[0]
            if hasattr(self.registry[typeid][-1], "_shared_memory_proxy"):
                kwargs['shared_memory_context'] = self.shared_memory_context
            return Server.create(*args, **kwargs)
        create.__text_signature__ = '($self, c, typeid, /, *args, **kwargs)'

        def shutdown(self, c):
            "Call unlink() on all tracked shared memory, terminate the Server."
            self.shared_memory_context.unlink()
            return Server.shutdown(self, c)

        def track_segment(self, c, segment_name):
            "Adds the supplied shared memory block name to Server's tracker."
            self.shared_memory_context.register_segment(segment_name)

        def release_segment(self, c, segment_name):
            """Calls unlink() on the shared memory block with the supplied name
            and removes it from the tracker instance inside the Server."""
            self.shared_memory_context.destroy_segment(segment_name)

        def list_segments(self, c):
            """Returns a list of names of shared memory blocks that the Server
            is currently tracking."""
            return self.shared_memory_context.segment_names


    class SharedMemoryManager(BaseManager):
        """Like SyncManager but uses SharedMemoryServer instead of Server.

        It provides methods for creating and returning SharedMemory instances
        and for creating a list-like object (ShareableList) backed by shared
        memory.  It also provides methods that create and return Proxy Objects
        that support synchronization across processes (i.e. multi-process-safe
        locks and semaphores).
        """

        _Server = SharedMemoryServer

        def __init__(self, *args, **kwargs):
            if os.name == "posix":
                # bpo-36867: Ensure the resource_tracker is running before
                # launching the manager process, so that concurrent
                # shared_memory manipulation both in the manager and in the
                # current process does not create two resource_tracker
                # processes.
                from . import resource_tracker
                resource_tracker.ensure_running()
            BaseManager.__init__(self, *args, **kwargs)
            util.debug(f"{self.__class__.__name__} created by pid {getpid()}")

        def __del__(self):
            util.debug(f"{self.__class__.__name__}.__del__ by pid {getpid()}")
            pass

        def get_server(self):
            'Better than monkeypatching for now; merge into Server ultimately'
            # Only hand out a server while still in the INITIAL state, i.e.
            # before start()/shutdown() have been called.
            if self._state.value != State.INITIAL:
                if self._state.value == State.STARTED:
                    raise ProcessError("Already started SharedMemoryServer")
                elif self._state.value == State.SHUTDOWN:
                    raise ProcessError("SharedMemoryManager has shut down")
                else:
                    raise ProcessError(
                        "Unknown state {!r}".format(self._state.value))
            return self._Server(self._registry, self._address,
                                self._authkey, self._serializer)

        def SharedMemory(self, size):
            """Returns a new SharedMemory instance with the specified size in
            bytes, to be tracked by the manager."""
            with self._Client(self._address, authkey=self._authkey) as conn:
                sms = shared_memory.SharedMemory(None, create=True, size=size)
                try:
                    # Register with the server; on failure release the
                    # just-created block so it does not leak.
                    dispatch(conn, None, 'track_segment', (sms.name,))
                except BaseException as e:
                    sms.unlink()
                    raise e
            return sms

        def ShareableList(self, sequence):
            """Returns a new ShareableList instance populated with the values
            from the input sequence, to be tracked by the manager."""
            with self._Client(self._address, authkey=self._authkey) as conn:
                sl = shared_memory.ShareableList(sequence)
                try:
                    dispatch(conn, None, 'track_segment', (sl.shm.name,))
                except BaseException as e:
                    # Untracked block: unlink immediately to avoid a leak.
                    sl.shm.unlink()
                    raise e
            return sl
PK|��\�HhP$P$spawn.pynu�[���#
# Code used to start processes when using the spawn or forkserver
# start methods.
#
# multiprocessing/spawn.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import os
import sys
import runpy
import types

from . import get_start_method, set_start_method
from . import process
from .context import reduction
from . import util

__all__ = ['_main', 'freeze_support', 'set_executable', 'get_executable',
           'get_preparation_data', 'get_command_line', 'import_main_path']

#
# _python_exe is the assumed path to the python executable.
# People embedding Python want to modify it.
#

if sys.platform != 'win32':
    WINEXE = False
    WINSERVICE = False
else:
    # True when running from a frozen (e.g. py2exe/PyInstaller-style) exe.
    WINEXE = getattr(sys, 'frozen', False)
    # True when running inside a Windows service wrapper executable.
    WINSERVICE = sys.executable.lower().endswith("pythonservice.exe")

if WINSERVICE:
    # sys.executable is the service binary; spawn with the real interpreter.
    _python_exe = os.path.join(sys.exec_prefix, 'python.exe')
else:
    _python_exe = sys.executable

def set_executable(exe):
    """Override the interpreter path used to spawn child processes
    (intended for programs embedding Python)."""
    global _python_exe
    _python_exe = exe

def get_executable():
    """Return the interpreter path that will be used to spawn children."""
    return _python_exe

#
#
#

def is_forking(argv):
    '''
    Return whether commandline indicates we are forking
    '''
    # The spawn/forkserver command line always carries this marker flag
    # as its first argument.
    return len(argv) >= 2 and argv[1] == '--multiprocessing-fork'


def freeze_support():
    '''
    Run code for process object if this is not the main process
    '''
    if is_forking(sys.argv):
        kwds = {}
        # Arguments after '--multiprocessing-fork' are 'name=value' pairs
        # where value is either the string 'None' or an integer
        # (handles / file descriptors / pids for spawn_main).
        for arg in sys.argv[2:]:
            name, value = arg.split('=')
            if value == 'None':
                kwds[name] = None
            else:
                kwds[name] = int(value)
        spawn_main(**kwds)
        sys.exit()


def get_command_line(**kwds):
    '''
    Returns prefix of command line used for spawning a child process
    '''
    if getattr(sys, 'frozen', False):
        # Frozen executables re-run themselves; freeze_support() in the
        # child parses the 'name=value' arguments back into spawn_main kwds.
        return ([sys.executable, '--multiprocessing-fork'] +
                ['%s=%r' % item for item in kwds.items()])
    else:
        # Normal interpreter: run spawn_main via -c, preserving relevant
        # interpreter flags (e.g. -O, -W) in the child.
        prog = 'from multiprocessing.spawn import spawn_main; spawn_main(%s)'
        prog %= ', '.join('%s=%r' % item for item in kwds.items())
        opts = util._args_from_interpreter_flags()
        return [_python_exe] + opts + ['-c', prog, '--multiprocessing-fork']


def spawn_main(pipe_handle, parent_pid=None, tracker_fd=None):
    '''
    Run code specified by data received over pipe
    '''
    assert is_forking(sys.argv), "Not forking"
    if sys.platform == 'win32':
        import msvcrt
        import _winapi

        if parent_pid is not None:
            # Open the parent so its pipe handle can be duplicated into
            # this process; the process handle doubles as the sentinel
            # passed to _bootstrap below.
            source_process = _winapi.OpenProcess(
                _winapi.SYNCHRONIZE | _winapi.PROCESS_DUP_HANDLE,
                False, parent_pid)
        else:
            source_process = None
        new_handle = reduction.duplicate(pipe_handle,
                                         source_process=source_process)
        fd = msvcrt.open_osfhandle(new_handle, os.O_RDONLY)
        parent_sentinel = source_process
    else:
        from . import resource_tracker
        # Reuse the tracker fd inherited from the parent instead of
        # starting a second resource_tracker process.
        resource_tracker._resource_tracker._fd = tracker_fd
        fd = pipe_handle
        # dup() the pipe fd: _main() closes `fd`, while the duplicate
        # serves as the parent sentinel.
        parent_sentinel = os.dup(pipe_handle)
    exitcode = _main(fd, parent_sentinel)
    sys.exit(exitcode)


def _main(fd, parent_sentinel):
    # Read two pickles from the parent: first the preparation data
    # (configures this process), then the Process object itself.
    with os.fdopen(fd, 'rb', closefd=True) as from_parent:
        # _inheriting suppresses incref on proxies unpickled during
        # bootstrap (see RebuildProxy in managers.py).
        process.current_process()._inheriting = True
        try:
            preparation_data = reduction.pickle.load(from_parent)
            prepare(preparation_data)
            self = reduction.pickle.load(from_parent)
        finally:
            del process.current_process()._inheriting
    # Run the Process object's main loop; returns the exit code.
    return self._bootstrap(parent_sentinel)


def _check_not_importing_main():
    # Guard against spawning a child while the child itself is still
    # importing __main__ (which would recurse forever on spawn platforms).
    if getattr(process.current_process(), '_inheriting', False):
        raise RuntimeError('''
        An attempt has been made to start a new process before the
        current process has finished its bootstrapping phase.

        This probably means that you are not using fork to start your
        child processes and you have forgotten to use the proper idiom
        in the main module:

            if __name__ == '__main__':
                freeze_support()
                ...

        The "freeze_support()" line can be omitted if the program
        is not going to be frozen to produce an executable.''')


def get_preparation_data(name):
    '''
    Return info about parent needed by child to unpickle process object
    '''
    _check_not_importing_main()
    d = dict(
        log_to_stderr=util._log_to_stderr,
        authkey=process.current_process().authkey,
        )

    if util._logger is not None:
        d['log_level'] = util._logger.getEffectiveLevel()

    # Replace '' (the current directory) in sys.path with the parent's
    # original directory so the child resolves imports the same way even
    # after its working directory is changed.
    sys_path=sys.path.copy()
    try:
        i = sys_path.index('')
    except ValueError:
        pass
    else:
        sys_path[i] = process.ORIGINAL_DIR

    d.update(
        name=name,
        sys_path=sys_path,
        sys_argv=sys.argv,
        orig_dir=process.ORIGINAL_DIR,
        dir=os.getcwd(),
        start_method=get_start_method(),
        )

    # Figure out whether to initialise main in the subprocess as a module
    # or through direct execution (or to leave it alone entirely)
    main_module = sys.modules['__main__']
    main_mod_name = getattr(main_module.__spec__, "name", None)
    if main_mod_name is not None:
        d['init_main_from_name'] = main_mod_name
    elif sys.platform != 'win32' or (not WINEXE and not WINSERVICE):
        main_path = getattr(main_module, '__file__', None)
        if main_path is not None:
            # Absolutize relative __main__ paths against the parent's
            # original directory before sending them to the child.
            if (not os.path.isabs(main_path) and
                        process.ORIGINAL_DIR is not None):
                main_path = os.path.join(process.ORIGINAL_DIR, main_path)
            d['init_main_from_path'] = os.path.normpath(main_path)

    return d

#
# Prepare current process
#

# Keeps references to every __main__ module replaced by the _fixup_main_*
# helpers so previously-created objects referring to them stay valid.
old_main_modules = []

def prepare(data):
    '''
    Try to get current process ready to unpickle process object

    Applies the parent's settings in order; the __main__ fixup runs last,
    after sys.path and the working directory have been restored.
    '''
    if 'name' in data:
        process.current_process().name = data['name']

    if 'authkey' in data:
        process.current_process().authkey = data['authkey']

    if 'log_to_stderr' in data and data['log_to_stderr']:
        util.log_to_stderr()

    if 'log_level' in data:
        util.get_logger().setLevel(data['log_level'])

    if 'sys_path' in data:
        sys.path = data['sys_path']

    if 'sys_argv' in data:
        sys.argv = data['sys_argv']

    if 'dir' in data:
        os.chdir(data['dir'])

    if 'orig_dir' in data:
        process.ORIGINAL_DIR = data['orig_dir']

    if 'start_method' in data:
        set_start_method(data['start_method'], force=True)

    # Re-create the parent's __main__ module, preferring import-by-name
    # over execution-by-path when both are available.
    if 'init_main_from_name' in data:
        _fixup_main_from_name(data['init_main_from_name'])
    elif 'init_main_from_path' in data:
        _fixup_main_from_path(data['init_main_from_path'])

# Multiprocessing module helpers to fix up the main module in
# spawned subprocesses
def _fixup_main_from_name(mod_name):
    """Re-import the parent's __main__ (given by module name) into this
    process as __mp_main__ and alias __main__ to it."""
    # __main__.py files for packages, directories, zip archives, etc, run
    # their "main only" code unconditionally, so we don't even try to
    # populate anything in __main__, nor do we make any changes to
    # __main__ attributes
    current_main = sys.modules['__main__']
    if mod_name == "__main__" or mod_name.endswith(".__main__"):
        return

    # If this process was forked, __main__ may already be populated
    if getattr(current_main.__spec__, "name", None) == mod_name:
        return

    # Otherwise, __main__ may contain some non-main code where we need to
    # support unpickling it properly. We rerun it as __mp_main__ and make
    # the normal __main__ an alias to that
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_module(mod_name,
                                    run_name="__mp_main__",
                                    alter_sys=True)
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module


def _fixup_main_from_path(main_path):
    """Re-execute the parent's __main__ script (given by file path) in this
    process as __mp_main__ and alias __main__ to it."""
    # If this process was forked, __main__ may already be populated
    current_main = sys.modules['__main__']

    # Unfortunately, the main ipython launch script historically had no
    # "if __name__ == '__main__'" guard, so we work around that
    # by treating it like a __main__.py file
    # See https://github.com/ipython/ipython/issues/4698
    main_name = os.path.splitext(os.path.basename(main_path))[0]
    if main_name == 'ipython':
        return

    # Otherwise, if __file__ already has the setting we expect,
    # there's nothing more to do
    if getattr(current_main, '__file__', None) == main_path:
        return

    # If the parent process has sent a path through rather than a module
    # name we assume it is an executable script that may contain
    # non-main code that needs to be executed
    old_main_modules.append(current_main)
    main_module = types.ModuleType("__mp_main__")
    main_content = runpy.run_path(main_path,
                                  run_name="__mp_main__")
    main_module.__dict__.update(main_content)
    sys.modules['__main__'] = sys.modules['__mp_main__'] = main_module


def import_main_path(main_path):
    '''
    Set sys.modules['__main__'] to module at main_path
    '''
    # Thin public wrapper around the path-based __main__ fixup.
    _fixup_main_from_path(main_path)
PK|��\IL��~6~6util.pynu�[���#
# Module providing various facilities to other parts of the package
#
# multiprocessing/util.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

import os
import itertools
import sys
import weakref
import atexit
import threading        # we want threading to install it's
                        # cleanup function before multiprocessing does
from subprocess import _args_from_interpreter_flags

from . import process

__all__ = [
    'sub_debug', 'debug', 'info', 'sub_warning', 'get_logger',
    'log_to_stderr', 'get_temp_dir', 'register_after_fork',
    'is_exiting', 'Finalize', 'ForkAwareThreadLock', 'ForkAwareLocal',
    'close_all_fds_except', 'SUBDEBUG', 'SUBWARNING',
    ]

#
# Logging
#

# Custom levels interleaved with the stdlib ones: SUBDEBUG (5) is finer
# than DEBUG (10); SUBWARNING (25) sits between INFO (20) and WARNING.
NOTSET = 0
SUBDEBUG = 5
DEBUG = 10
INFO = 20
SUBWARNING = 25

LOGGER_NAME = 'multiprocessing'
DEFAULT_LOGGING_FORMAT = '[%(levelname)s/%(processName)s] %(message)s'

# Lazily initialised by get_logger(); the helpers below are no-ops until
# a logger exists.
_logger = None
_log_to_stderr = False

def sub_debug(msg, *args):
    """Log *msg* at SUBDEBUG level; no-op until get_logger() has run."""
    if _logger is not None:
        _logger.log(SUBDEBUG, msg, *args)

def debug(msg, *args):
    """Log *msg* at DEBUG level; no-op until get_logger() has run."""
    if _logger is not None:
        _logger.log(DEBUG, msg, *args)

def info(msg, *args):
    """Log *msg* at INFO level; no-op until get_logger() has run."""
    if _logger is not None:
        _logger.log(INFO, msg, *args)

def sub_warning(msg, *args):
    """Log *msg* at SUBWARNING level; no-op until get_logger() has run."""
    if _logger is not None:
        _logger.log(SUBWARNING, msg, *args)

def get_logger():
    '''
    Returns logger used by multiprocessing
    '''
    global _logger
    import logging

    # Serialise creation against other threads via logging's module lock.
    logging._acquireLock()
    try:
        if not _logger:

            _logger = logging.getLogger(LOGGER_NAME)
            _logger.propagate = 0

            # XXX multiprocessing should cleanup before logging
            # Re-register _exit_function so it is the most recently added
            # atexit handler; atexit runs handlers LIFO, so it then runs
            # before logging's own shutdown handling.
            if hasattr(atexit, 'unregister'):
                atexit.unregister(_exit_function)
                atexit.register(_exit_function)
            else:
                atexit._exithandlers.remove((_exit_function, (), {}))
                atexit._exithandlers.append((_exit_function, (), {}))

    finally:
        logging._releaseLock()

    return _logger

def log_to_stderr(level=None):
    '''
    Turn on logging and add a handler which prints to stderr
    '''
    global _log_to_stderr
    import logging

    logger = get_logger()
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(logging.Formatter(DEFAULT_LOGGING_FORMAT))
    logger.addHandler(stream_handler)

    if level:
        logger.setLevel(level)
    # Remember the setting so child processes can replicate it (see
    # get_preparation_data in spawn.py).
    _log_to_stderr = True
    return _logger


# Abstract socket support

def _platform_supports_abstract_sockets():
    """Return True when the abstract AF_UNIX namespace is available
    (Linux proper, or Android as detected via getandroidapilevel)."""
    return sys.platform == "linux" or hasattr(sys, 'getandroidapilevel')


def is_abstract_socket_namespace(address):
    """Return True if *address* lives in the Linux abstract socket
    namespace, i.e. starts with a NUL byte/character.

    Accepts ``bytes`` or ``str``; falsy addresses (None, '', b'') are not
    abstract.  Raises TypeError for any other type.
    """
    if not address:
        return False
    if isinstance(address, bytes):
        return address[0] == 0
    elif isinstance(address, str):
        return address[0] == "\0"
    # Bug fix: the message was missing its f-prefix, so it printed the
    # literal text '{address!r}' instead of the offending value.
    raise TypeError(f'address type of {address!r} unrecognized')


# Computed once at import time; platform support cannot change at runtime.
abstract_sockets_supported = _platform_supports_abstract_sockets()

#
# Function returning a temp directory which will be removed on exit
#

def _remove_temp_dir(rmtree, tempdir):
    # `rmtree` is passed in (rather than imported here) so the finalizer
    # holds a strong reference to it even during interpreter shutdown.
    rmtree(tempdir)

    current_process = process.current_process()
    # current_process() can be None if the finalizer is called
    # late during Python finalization
    if current_process is not None:
        current_process._config['tempdir'] = None

def get_temp_dir():
    # get name of a temp directory which will be automatically cleaned up
    # The directory is created lazily, cached in the process config, and
    # removed at exit by a Finalize with very low priority (-100) so it
    # outlives almost all other finalizers.
    tempdir = process.current_process()._config.get('tempdir')
    if tempdir is None:
        import shutil, tempfile
        tempdir = tempfile.mkdtemp(prefix='pymp-')
        info('created temp directory %s', tempdir)
        # keep a strong reference to shutil.rmtree(), since the finalizer
        # can be called late during Python shutdown
        Finalize(None, _remove_temp_dir, args=(shutil.rmtree, tempdir),
                 exitpriority=-100)
        process.current_process()._config['tempdir'] = tempdir
    return tempdir

#
# Support for reinitialization of objects when bootstrapping a child process
#

# Maps (counter, id(obj), func) -> obj; values are weak so registration
# does not keep objects alive.  The counter preserves registration order.
_afterfork_registry = weakref.WeakValueDictionary()
_afterfork_counter = itertools.count()

def _run_after_forkers():
    # Run every registered after-fork hook in registration order (the
    # leading counter in each key provides that order when sorted).
    items = list(_afterfork_registry.items())
    items.sort()
    for (index, ident, func), obj in items:
        try:
            func(obj)
        except Exception as e:
            # A failing hook must not break the other hooks or the fork.
            info('after forker raised exception %s', e)

def register_after_fork(obj, func):
    """Arrange for func(obj) to be called in the child after a fork;
    the registration vanishes automatically when obj is collected."""
    _afterfork_registry[(next(_afterfork_counter), id(obj), func)] = obj

#
# Finalization using weakrefs
#

# Maps (exitpriority, counter) -> Finalize instance; the counter makes
# every key unique.
_finalizer_registry = {}
_finalizer_counter = itertools.count()


class Finalize(object):
    '''
    Class which supports object finalization using weakrefs
    '''
    def __init__(self, obj, callback, args=(), kwargs=None, exitpriority=None):
        if (exitpriority is not None) and not isinstance(exitpriority,int):
            raise TypeError(
                "Exitpriority ({0!r}) must be None or int, not {1!s}".format(
                    exitpriority, type(exitpriority)))

        if obj is not None:
            # self is the weakref callback, so the finalizer fires
            # automatically when obj is garbage collected.
            self._weakref = weakref.ref(obj, self)
        elif exitpriority is None:
            raise ValueError("Without object, exitpriority cannot be None")

        self._callback = callback
        self._args = args
        self._kwargs = kwargs or {}
        # (exitpriority, creation counter) -- _run_finalizers() sorts on this
        # key to decide the call order.
        self._key = (exitpriority, next(_finalizer_counter))
        # Remember the creating process so forked children skip the callback.
        self._pid = os.getpid()

        _finalizer_registry[self._key] = self

    def __call__(self, wr=None,
                 # Need to bind these locally because the globals can have
                 # been cleared at shutdown
                 _finalizer_registry=_finalizer_registry,
                 sub_debug=sub_debug, getpid=os.getpid):
        '''
        Run the callback unless it has already been called or cancelled
        '''
        # Deleting the key first makes the call at-most-once even if the
        # weakref callback and an explicit call race.
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            sub_debug('finalizer no longer registered')
        else:
            if self._pid != getpid():
                sub_debug('finalizer ignored because different process')
                res = None
            else:
                sub_debug('finalizer calling %s with args %s and kwargs %s',
                          self._callback, self._args, self._kwargs)
                res = self._callback(*self._args, **self._kwargs)
            # Drop references so the callback and its arguments can be
            # collected promptly.
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None
            return res

    def cancel(self):
        '''
        Cancel finalization of the object
        '''
        try:
            del _finalizer_registry[self._key]
        except KeyError:
            pass
        else:
            self._weakref = self._callback = self._args = \
                            self._kwargs = self._key = None

    def still_active(self):
        '''
        Return whether this finalizer is still waiting to invoke callback
        '''
        return self._key in _finalizer_registry

    def __repr__(self):
        try:
            obj = self._weakref()
        except (AttributeError, TypeError):
            # _weakref may be unset (obj was None) or already cleared.
            obj = None

        if obj is None:
            return '<%s object, dead>' % self.__class__.__name__

        x = '<%s object, callback=%s' % (
                self.__class__.__name__,
                getattr(self._callback, '__name__', self._callback))
        if self._args:
            x += ', args=' + str(self._args)
        if self._kwargs:
            x += ', kwargs=' + str(self._kwargs)
        if self._key[0] is not None:
            x += ', exitpriority=' + str(self._key[0])
        return x + '>'


def _run_finalizers(minpriority=None):
    '''
    Run all finalizers whose exit priority is not None and at least minpriority

    Finalizers with highest priority are called first; finalizers with
    the same priority will be called in reverse order of creation.
    '''
    if _finalizer_registry is None:
        # This function may be called after this module's globals are
        # destroyed.  See the _exit_function function in this module for more
        # notes.
        return

    if minpriority is None:
        f = lambda p : p[0] is not None
    else:
        f = lambda p : p[0] is not None and p[0] >= minpriority

    # Careful: _finalizer_registry may be mutated while this function
    # is running (either by a GC run or by another thread).

    # list(_finalizer_registry) should be atomic, while
    # list(_finalizer_registry.items()) is not.
    keys = [key for key in list(_finalizer_registry) if f(key)]
    keys.sort(reverse=True)

    for key in keys:
        finalizer = _finalizer_registry.get(key)
        # key may have been removed from the registry
        if finalizer is not None:
            sub_debug('calling %s', finalizer)
            try:
                # Finalize.__call__ removes its own key from the registry.
                finalizer()
            except Exception:
                import traceback
                traceback.print_exc()

    if minpriority is None:
        _finalizer_registry.clear()

#
# Clean up on exit
#

def is_exiting():
    '''
    Returns true if the process is shutting down
    '''
    # _exiting is set to True by _exit_function(); it reads as None once
    # module globals have been destroyed during interpreter teardown.
    return _exiting is None or bool(_exiting)

_exiting = False

def _exit_function(info=info, debug=debug, _run_finalizers=_run_finalizers,
                   active_children=process.active_children,
                   current_process=process.current_process):
    # We hold on to references to functions in the arglist due to the
    # situation described below, where this function is called after this
    # module's globals are destroyed.

    global _exiting

    if not _exiting:
        _exiting = True

        info('process shutting down')
        debug('running all "atexit" finalizers with priority >= 0')
        # High-priority finalizers run before child processes are reaped.
        _run_finalizers(0)

        if current_process() is not None:
            # We check if the current process is None here because if
            # it's None, any call to ``active_children()`` will raise
            # an AttributeError (active_children winds up trying to
            # get attributes from util._current_process).  One
            # situation where this can happen is if someone has
            # manipulated sys.modules, causing this module to be
            # garbage collected.  The destructor for the module type
            # then replaces all values in the module dict with None.
            # For instance, after setuptools runs a test it replaces
            # sys.modules with a copy created earlier.  See issues
            # #9775 and #15881.  Also related: #4106, #9205, and
            # #9207.

            # Daemon children are terminated outright; non-daemons are
            # only joined.
            for p in active_children():
                if p.daemon:
                    info('calling terminate() for daemon %s', p.name)
                    p._popen.terminate()

            for p in active_children():
                info('calling join() for process %s', p.name)
                p.join()

        debug('running the remaining "atexit" finalizers')
        _run_finalizers()

atexit.register(_exit_function)

#
# Some fork aware types
#

class ForkAwareThreadLock(object):
    """A threading.Lock wrapper that is replaced by a fresh (unlocked) lock
    in the child process after a fork, via register_after_fork()."""
    def __init__(self):
        self._reset()
        register_after_fork(self, ForkAwareThreadLock._reset)

    def _reset(self):
        # Rebind acquire/release so they always target the current lock.
        self._lock = threading.Lock()
        self.acquire = self._lock.acquire
        self.release = self._lock.release

    def __enter__(self):
        return self._lock.__enter__()

    def __exit__(self, *args):
        return self._lock.__exit__(*args)


class ForkAwareLocal(threading.local):
    """threading.local whose per-thread contents are cleared in the child
    after a fork."""
    def __init__(self):
        register_after_fork(self, lambda obj : obj.__dict__.clear())
    def __reduce__(self):
        # Pickles as a fresh, empty instance.
        return type(self), ()

#
# Close fds except those specified
#

# Upper bound on fd numbers used by close_all_fds_except(); fall back to
# a conservative 256 when sysconf is unavailable.
try:
    MAXFD = os.sysconf("SC_OPEN_MAX")
except Exception:
    MAXFD = 256

def close_all_fds_except(fds):
    """Close every open fd below MAXFD except those listed in *fds*."""
    # Sentinels -1 and MAXFD bracket the list so the gaps cover [0, MAXFD).
    keep = sorted(list(fds) + [-1, MAXFD])
    assert keep[-1] == MAXFD, 'fd too large'
    for low, high in zip(keep, keep[1:]):
        os.closerange(low + 1, high)
#
# Close sys.stdin and replace stdin with os.devnull
#

def _close_stdin():
    """Close sys.stdin and rebind it to a stream reading from os.devnull.

    Best effort: OS-level failures are ignored and leave stdin as-is.
    """
    if sys.stdin is None:
        return

    try:
        sys.stdin.close()
    except (OSError, ValueError):
        pass

    try:
        fd = os.open(os.devnull, os.O_RDONLY)
        try:
            # closefd=False: the file object does not own fd; if open()
            # itself fails we close fd ourselves and re-raise.
            sys.stdin = open(fd, closefd=False)
        except:
            os.close(fd)
            raise
    except (OSError, ValueError):
        pass

#
# Flush standard streams, if any
#

def _flush_std_streams():
    try:
        sys.stdout.flush()
    except (AttributeError, ValueError):
        pass
    try:
        sys.stderr.flush()
    except (AttributeError, ValueError):
        pass

#
# Start a program with only specified fds kept open
#

def spawnv_passfds(path, args, passfds):
    """Fork and exec *path* with argv *args*, keeping only *passfds* open
    in the child.  Returns the value of _posixsubprocess.fork_exec()
    (the child pid).
    """
    import _posixsubprocess
    passfds = tuple(sorted(map(int, passfds)))
    # fork_exec uses this pipe to report exec failures back to the parent;
    # both ends are closed here regardless of outcome.
    errpipe_read, errpipe_write = os.pipe()
    try:
        # NOTE(review): this positional signature matches the private
        # _posixsubprocess.fork_exec of this CPython version only.
        return _posixsubprocess.fork_exec(
            args, [os.fsencode(path)], True, passfds, None, None,
            -1, -1, -1, -1, -1, -1, errpipe_read, errpipe_write,
            False, False, None)
    finally:
        os.close(errpipe_read)
        os.close(errpipe_write)


def close_fds(*fds):
    """Close each file descriptor given as an argument"""
    for descriptor in fds:
        os.close(descriptor)


def _cleanup_tests():
    """Cleanup multiprocessing resources when multiprocessing tests
    completed."""

    # Local import: test.support is only present in test environments.
    from test import support

    # cleanup multiprocessing
    process._cleanup()

    # Stop the ForkServer process if it's running
    from multiprocessing import forkserver
    forkserver._forkserver._stop()

    # Stop the ResourceTracker process if it's running
    from multiprocessing import resource_tracker
    resource_tracker._resource_tracker._stop()

    # bpo-37421: Explicitly call _run_finalizers() to remove immediately
    # temporary directories created by multiprocessing.util.get_temp_dir().
    _run_finalizers()
    support.gc_collect()

    support.reap_children()
PK|��\˨��~�~pool.pynu�[���#
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#

__all__ = ['Pool', 'ThreadPool']

#
# Imports
#

import collections
import itertools
import os
import queue
import threading
import time
import traceback
import warnings
from queue import Empty

# If threading is available then ThreadPool should be provided.  Therefore
# we avoid top-level imports which are liable to fail on some systems.
from . import util
from . import get_context, TimeoutError
from .connection import wait

#
# Constants representing the state of a pool
#

# Lifecycle states stored in Pool._state and in the handler threads'
# _state attributes.
INIT = "INIT"
RUN = "RUN"
CLOSE = "CLOSE"
TERMINATE = "TERMINATE"

#
# Miscellaneous
#

job_counter = itertools.count()

def mapstar(args):
    """Unpack (func, iterable, ...) and return list(map(func, ...))."""
    func, *iterables = args
    return list(map(func, *iterables))

def starmapstar(args):
    """Apply args[0] to each argument tuple in args[1], returning a list."""
    func, argument_tuples = args[0], args[1]
    return [func(*argument_tuple) for argument_tuple in argument_tuples]

#
# Hack to embed stringification of remote traceback in local traceback
#

class RemoteTraceback(Exception):
    """Exception whose str() is pre-formatted traceback text from another
    process; rebuild_exc() attaches it as the __cause__ of the original."""
    def __init__(self, tb):
        self.tb = tb
    def __str__(self):
        return self.tb

class ExceptionWithTraceback:
    def __init__(self, exc, tb):
        tb = traceback.format_exception(type(exc), exc, tb)
        tb = ''.join(tb)
        self.exc = exc
        self.tb = '\n"""\n%s"""' % tb
    def __reduce__(self):
        return rebuild_exc, (self.exc, self.tb)

def rebuild_exc(exc, tb):
    """Unpickle helper: reattach the remote traceback text as __cause__."""
    exc.__cause__ = RemoteTraceback(tb)
    return exc

#
# Code run by worker processes
#

class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        # Store only the reprs: both the error and the offending value may
        # themselves be unpicklable.
        exc_repr, value_repr = repr(exc), repr(value)
        self.exc = exc_repr
        self.value = value_repr
        super().__init__(exc_repr, value_repr)

    def __str__(self):
        return "Error sending result: '%s'. Reason: '%s'" % (self.value,
                                                             self.exc)

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self)


def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None,
           wrap_exception=False):
    """Worker main loop: pull (job, i, func, args, kwds) tasks from inqueue
    and put (job, i, (success, value)) results on outqueue, until a None
    sentinel arrives or *maxtasks* tasks have been completed."""
    if (maxtasks is not None) and not (isinstance(maxtasks, int)
                                       and maxtasks >= 1):
        raise AssertionError("Maxtasks {!r} is not valid".format(maxtasks))
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        # Close the pipe ends this worker does not use.
        inqueue._writer.close()
        outqueue._reader.close()

    if initializer is not None:
        initializer(*initargs)

    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, OSError):
            util.debug('worker got EOFError or OSError -- exiting')
            break

        if task is None:
            util.debug('worker got sentinel -- exiting')
            break

        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            if wrap_exception and func is not _helper_reraises_exception:
                # Capture the traceback text so it survives pickling.
                e = ExceptionWithTraceback(e, e.__traceback__)
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            # Result could not be sent (e.g. unpicklable) -- report a
            # picklable wrapper instead.
            wrapped = MaybeEncodingError(e, result[1])
            util.debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))

        # Drop references so large values can be collected before blocking
        # on the next get().
        task = job = result = func = args = kwds = None
        completed += 1
    util.debug('worker exiting after %d tasks' % completed)

def _helper_reraises_exception(ex):
    'Pickle-able helper function for use by _guarded_task_generation.'
    # Re-raised in the worker so the original iteration error reaches the
    # result machinery like any other task failure.
    raise ex

#
# Class representing a process pool
#

class _PoolCache(dict):
    """
    Class that implements a cache for the Pool class that will notify
    the pool management threads every time the cache is emptied. The
    notification is done by the use of a queue that is provided when
    instantiating the cache.
    """
    def __init__(self, /, *args, notifier=None, **kwds):
        self.notifier = notifier
        super().__init__(*args, **kwds)

    def __delitem__(self, item):
        super().__delitem__(item)

        # Notify that the cache is empty. This is important because the
        # pool keeps maintaining workers until the cache gets drained. This
        # eliminates a race condition in which a task is finished after
        # the pool's _handle_workers method has entered another iteration of
        # the loop. In this situation, the only event that can wake up the
        # pool is the cache being emptied (no more tasks available).
        if not self:
            self.notifier.put(None)

class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    _wrap_exception = True

    @staticmethod
    def Process(ctx, *args, **kwds):
        return ctx.Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None, context=None):
        # Attributes initialized early to make sure that they exist in
        # __del__() if __init__() raises an exception
        self._pool = []
        self._state = INIT

        self._ctx = context or get_context()
        self._setup_queues()
        self._taskqueue = queue.SimpleQueue()
        # The _change_notifier queue exist to wake up self._handle_workers()
        # when the cache (self._cache) is empty or when there is a change in
        # the _state variable of the thread that runs _handle_workers.
        self._change_notifier = self._ctx.SimpleQueue()
        self._cache = _PoolCache(notifier=self._change_notifier)
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs

        if processes is None:
            processes = os.cpu_count() or 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")

        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')

        self._processes = processes
        try:
            self._repopulate_pool()
        except Exception:
            for p in self._pool:
                if p.exitcode is None:
                    p.terminate()
            for p in self._pool:
                p.join()
            raise

        sentinels = self._get_sentinels()

        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self._cache, self._taskqueue, self._ctx, self.Process,
                  self._processes, self._pool, self._inqueue, self._outqueue,
                  self._initializer, self._initargs, self._maxtasksperchild,
                  self._wrap_exception, sentinels, self._change_notifier)
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()


        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue,
                  self._pool, self._cache)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()

        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()

        self._terminate = util.Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._change_notifier, self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )
        self._state = RUN

    # Copy globals as function locals to make sure that they are available
    # during Python shutdown when the Pool is destroyed.
    def __del__(self, _warn=warnings.warn, RUN=RUN):
        if self._state == RUN:
            _warn(f"unclosed running multiprocessing pool {self!r}",
                  ResourceWarning, source=self)
            if getattr(self, '_change_notifier', None) is not None:
                self._change_notifier.put(None)

    def __repr__(self):
        cls = self.__class__
        return (f'<{cls.__module__}.{cls.__qualname__} '
                f'state={self._state} '
                f'pool_size={len(self._pool)}>')

    def _get_sentinels(self):
        task_queue_sentinels = [self._outqueue._reader]
        self_notifier_sentinels = [self._change_notifier._reader]
        return [*task_queue_sentinels, *self_notifier_sentinels]

    @staticmethod
    def _get_worker_sentinels(workers):
        return [worker.sentinel for worker in
                workers if hasattr(worker, "sentinel")]

    @staticmethod
    def _join_exited_workers(pool):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime.  Returns True if any workers were cleaned up.
        """
        cleaned = False
        for i in reversed(range(len(pool))):
            worker = pool[i]
            if worker.exitcode is not None:
                # worker exited
                util.debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del pool[i]
        return cleaned

    def _repopulate_pool(self):
        return self._repopulate_pool_static(self._ctx, self.Process,
                                            self._processes,
                                            self._pool, self._inqueue,
                                            self._outqueue, self._initializer,
                                            self._initargs,
                                            self._maxtasksperchild,
                                            self._wrap_exception)

    @staticmethod
    def _repopulate_pool_static(ctx, Process, processes, pool, inqueue,
                                outqueue, initializer, initargs,
                                maxtasksperchild, wrap_exception):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(processes - len(pool)):
            w = Process(ctx, target=worker,
                        args=(inqueue, outqueue,
                              initializer,
                              initargs, maxtasksperchild,
                              wrap_exception))
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            pool.append(w)
            util.debug('added worker')

    @staticmethod
    def _maintain_pool(ctx, Process, processes, pool, inqueue, outqueue,
                       initializer, initargs, maxtasksperchild,
                       wrap_exception):
        """Clean up any exited workers and start replacements for them.
        """
        if Pool._join_exited_workers(pool):
            Pool._repopulate_pool_static(ctx, Process, processes, pool,
                                         inqueue, outqueue, initializer,
                                         initargs, maxtasksperchild,
                                         wrap_exception)

    def _setup_queues(self):
        self._inqueue = self._ctx.SimpleQueue()
        self._outqueue = self._ctx.SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def _check_running(self):
        if self._state != RUN:
            raise ValueError("Pool not running")

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        Pool must be running.
        '''
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)

    def _guarded_task_generation(self, result_job, func, iterable):
        '''Provides a generator of tasks for imap and imap_unordered with
        appropriate handling for iterables which throw exceptions during
        iteration.'''
        try:
            i = -1
            for i, x in enumerate(iterable):
                yield (result_job, i, func, (x,), {})
        except Exception as e:
            yield (result_job, i+1, _helper_reraises_exception, (e,), {})

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        self._check_running()
        if chunksize == 1:
            result = IMapIterator(self)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job, func, iterable),
                    result._set_length
                ))
            return result
        else:
            if chunksize < 1:
                raise ValueError(
                    "Chunksize must be 1+, not {0:n}".format(
                        chunksize))
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job,
                                                  mapstar,
                                                  task_batches),
                    result._set_length
                ))
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        self._check_running()
        if chunksize == 1:
            result = IMapUnorderedIterator(self)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job, func, iterable),
                    result._set_length
                ))
            return result
        else:
            if chunksize < 1:
                raise ValueError(
                    "Chunksize must be 1+, not {0!r}".format(chunksize))
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self)
            self._taskqueue.put(
                (
                    self._guarded_task_generation(result._job,
                                                  mapstar,
                                                  task_batches),
                    result._set_length
                ))
            return (item for chunk in result for item in chunk)

    def apply_async(self, func, args=(), kwds={}, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        self._check_running()
        result = ApplyResult(self, callback, error_callback)
        self._taskqueue.put(([(result._job, 0, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
            error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
            error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
            error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        self._check_running()
        if not hasattr(iterable, '__len__'):
            iterable = list(iterable)

        if chunksize is None:
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0

        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put(
            (
                self._guarded_task_generation(result._job,
                                              mapper,
                                              task_batches),
                None
            )
        )
        return result

    @staticmethod
    def _wait_for_updates(sentinels, change_notifier, timeout=None):
        wait(sentinels, timeout=timeout)
        while not change_notifier.empty():
            change_notifier.get()

    @classmethod
    def _handle_workers(cls, cache, taskqueue, ctx, Process, processes,
                        pool, inqueue, outqueue, initializer, initargs,
                        maxtasksperchild, wrap_exception, sentinels,
                        change_notifier):
        thread = threading.current_thread()

        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (cache and thread._state != TERMINATE):
            cls._maintain_pool(ctx, Process, processes, pool, inqueue,
                               outqueue, initializer, initargs,
                               maxtasksperchild, wrap_exception)

            current_sentinels = [*cls._get_worker_sentinels(pool), *sentinels]

            cls._wait_for_updates(current_sentinels, change_notifier)
        # send sentinel to stop workers
        taskqueue.put(None)
        util.debug('worker handler exiting')

    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool, cache):
        thread = threading.current_thread()

        for taskseq, set_length in iter(taskqueue.get, None):
            task = None
            try:
                # iterating taskseq cannot fail
                for task in taskseq:
                    if thread._state != RUN:
                        util.debug('task handler found thread._state != RUN')
                        break
                    try:
                        put(task)
                    except Exception as e:
                        job, idx = task[:2]
                        try:
                            cache[job]._set(idx, (False, e))
                        except KeyError:
                            pass
                else:
                    if set_length:
                        util.debug('doing set_length()')
                        idx = task[1] if task else -1
                        set_length(idx + 1)
                    continue
                break
            finally:
                task = taskseq = job = None
        else:
            util.debug('task handler got sentinel')

        try:
            # tell result handler to finish when cache is empty
            util.debug('task handler sending sentinel to result handler')
            outqueue.put(None)

            # tell workers there is no more work
            util.debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except OSError:
            util.debug('task handler got OSError when sending sentinels')

        util.debug('task handler exiting')

    @staticmethod
    def _handle_results(outqueue, get, cache):
        thread = threading.current_thread()

        while 1:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if thread._state != RUN:
                assert thread._state == TERMINATE, "Thread not in TERMINATE"
                util.debug('result handler found thread._state=TERMINATE')
                break

            if task is None:
                util.debug('result handler got sentinel')
                break

            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
            task = job = obj = None

        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (OSError, EOFError):
                util.debug('result handler got EOFError/OSError -- exiting')
                return

            if task is None:
                util.debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
            task = job = obj = None

        if hasattr(outqueue, '_reader'):
            util.debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block.  There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (OSError, EOFError):
                pass

        util.debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        util.debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE
            self._change_notifier.put(None)

    def terminate(self):
        util.debug('terminating pool')
        self._state = TERMINATE
        self._terminate()

    def join(self):
        util.debug('joining pool')
        if self._state == RUN:
            raise ValueError("Pool is still running")
        elif self._state not in (CLOSE, TERMINATE):
            raise ValueError("In unknown state")
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        util.debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool, change_notifier,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        util.debug('finalizing pool')

        # Notify that the worker_handler state has been changed so the
        # _handle_workers loop can be unblocked (and exited) in order to
        # send the finalization sentinel all the workers.
        worker_handler._state = TERMINATE
        change_notifier.put(None)

        task_handler._state = TERMINATE

        util.debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))

        if (not result_handler.is_alive()) and (len(cache) != 0):
            raise AssertionError(
                "Cannot have cache with result_hander not alive")

        result_handler._state = TERMINATE
        change_notifier.put(None)
        outqueue.put(None)                  # sentinel

        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        util.debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()

        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            util.debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()

        util.debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()

        util.debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()

        if pool and hasattr(pool[0], 'terminate'):
            util.debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    util.debug('cleaning up worker %d' % p.pid)
                    p.join()

    def __enter__(self):
        self._check_running()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()

#
# Class whose instances are returned by `Pool.apply_async()`
#

class ApplyResult(object):
    """Result handle returned by ``Pool.apply_async()``.

    The pool's result handler delivers the job's outcome exactly once via
    :meth:`_set`; callers block on :meth:`wait`/:meth:`get` until then.
    """

    def __init__(self, pool, callback, error_callback):
        self._pool = pool
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = pool._cache
        self._callback = callback
        self._error_callback = error_callback
        # Register with the pool's cache so the result handler can route
        # the outcome back to this object.  Must happen last.
        self._cache[self._job] = self

    def ready(self):
        """Return True once the outcome has been delivered."""
        return self._event.is_set()

    def successful(self):
        """Return True if the job completed without raising.

        Raises ValueError if called before the result is ready.
        """
        if not self.ready():
            raise ValueError("{0!r} not ready".format(self))
        return self._success

    def wait(self, timeout=None):
        """Block until the result is ready or *timeout* seconds elapse."""
        self._event.wait(timeout)

    def get(self, timeout=None):
        """Return the job's result, re-raising its exception on failure."""
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if not self._success:
            raise self._value
        return self._value

    def _set(self, i, obj):
        # ``i`` is unused; it exists for signature compatibility with
        # MapResult._set.  ``obj`` is a (success, value-or-exception) pair.
        self._success, self._value = obj
        if self._success:
            if self._callback:
                self._callback(self._value)
        elif self._error_callback:
            self._error_callback(self._value)
        self._event.set()
        del self._cache[self._job]
        self._pool = None

AsyncResult = ApplyResult       # create alias -- see #17805

#
# Class whose instances are returned by `Pool.map_async()`
#

class MapResult(ApplyResult):
    """Result handle returned by ``Pool.map_async()``.

    Receives one chunk of results per :meth:`_set` call and only becomes
    ready once every outstanding chunk has arrived.
    """

    def __init__(self, pool, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, pool, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # No chunks will ever be delivered: become ready immediately.
            self._number_left = 0
            self._event.set()
            del self._cache[self._job]
        else:
            # Chunks still outstanding; the final chunk may be partial.
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        # Record the outcome of chunk ``i`` and finalize after the last one.
        self._number_left -= 1
        success, result = success_result
        if not success and self._success:
            # Keep only the first failure; later chunks are discarded.
            self._success = False
            self._value = result
        elif success and self._success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
        if self._number_left == 0:
            # Every chunk accounted for: fire the appropriate callback,
            # then publish readiness and drop the pool reference.
            if self._success:
                if self._callback:
                    self._callback(self._value)
            elif self._error_callback:
                self._error_callback(self._value)
            del self._cache[self._job]
            self._event.set()
            self._pool = None

#
# Class whose instances are returned by `Pool.imap()`
#

class IMapIterator(object):
    """Ordered result iterator returned by ``Pool.imap()``.

    Results may arrive out of order from the workers; they are buffered in
    ``_unsorted`` until their position is next, so consumers always see
    results in submission order.
    """

    def __init__(self, pool):
        self._pool = pool
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = pool._cache
        self._items = collections.deque()   # in-order results, ready to emit
        self._index = 0                     # next position to hand out
        self._length = None                 # total count; None until known
        self._unsorted = {}                 # results that arrived early
        # Register last so the result handler only sees a fully-built object.
        self._cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        """Return the next result, waiting up to *timeout* seconds.

        Raises StopIteration when the stream is exhausted and TimeoutError
        when nothing arrives in time.
        """
        with self._cond:
            if not self._items:
                if self._index == self._length:
                    self._pool = None
                    raise StopIteration from None
                self._cond.wait(timeout)
                if not self._items:
                    if self._index == self._length:
                        self._pool = None
                        raise StopIteration from None
                    raise TimeoutError from None
            item = self._items.popleft()

        success, value = item
        if not success:
            raise value
        return value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        # Called by the result handler with the outcome of job position ``i``.
        with self._cond:
            if self._index != i:
                # Arrived ahead of its turn; park it until its slot is next.
                self._unsorted[i] = obj
            else:
                self._items.append(obj)
                self._index += 1
                # Release any parked results that are now in order.
                while self._index in self._unsorted:
                    self._items.append(self._unsorted.pop(self._index))
                    self._index += 1
                self._cond.notify()

            if self._index == self._length:
                del self._cache[self._job]
                self._pool = None

    def _set_length(self, length):
        # Total job count becomes known only once the task handler has
        # consumed the whole input iterable.
        with self._cond:
            self._length = length
            if self._index == self._length:
                # Everything already delivered; wake any waiting consumer.
                self._cond.notify()
                del self._cache[self._job]
                self._pool = None

#
# Class whose instances are returned by `Pool.imap_unordered()`
#

class IMapUnorderedIterator(IMapIterator):
    """Iterator returned by ``Pool.imap_unordered()``.

    Results are handed out in arrival order, so the position hint ``i``
    passed to :meth:`_set` is ignored.
    """

    def _set(self, i, obj):
        with self._cond:
            self._index += 1
            self._items.append(obj)
            self._cond.notify()
            finished = self._index == self._length
            if finished:
                del self._cache[self._job]
                self._pool = None

#
#
#

class ThreadPool(Pool):
    """A Pool variant whose workers are threads rather than processes."""

    # Worker exceptions propagate as-is; no pickling boundary to wrap them for.
    _wrap_exception = False

    @staticmethod
    def Process(ctx, *args, **kwds):
        # Thread-backed stand-in for a worker process.  ``ctx`` is accepted
        # for signature compatibility with Pool and deliberately ignored.
        from .dummy import Process
        return Process(*args, **kwds)

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        # Plain thread-safe queues; no pipes or pickling needed between
        # threads, so the "quick" paths are just the bound queue methods.
        self._inqueue = queue.SimpleQueue()
        self._outqueue = queue.SimpleQueue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    def _get_sentinels(self):
        return [self._change_notifier._reader]

    @staticmethod
    def _get_worker_sentinels(workers):
        # Threads expose no waitable sentinel handles.
        return []

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # Discard any pending tasks, then feed one stop sentinel per worker
        # so every worker thread wakes up and exits.
        try:
            while True:
                inqueue.get(block=False)
        except queue.Empty:
            pass
        for _ in range(size):
            inqueue.put(None)

    def _wait_for_updates(self, sentinels, change_notifier, timeout):
        # Thread sentinels are not pollable; just sleep for the requested
        # interval instead of select()-ing on them.
        time.sleep(timeout)
PK{��\�U�DDshared_memory.pynu�[���PK{��\���}j-j-NDheap.pynu�[���PK{��\�0�"�"(�q__pycache__/process.cpython-38.opt-2.pycnu�[���PK{��\��L��a�a�__pycache__/pool.cpython-38.pycnu�[���PK{��\j,"'))*"�__pycache__/reduction.cpython-38.opt-2.pycnu�[���PK{��\;P�vBB-�__pycache__/sharedctypes.cpython-38.opt-2.pycnu�[���PK{��\���h�*�*,D/__pycache__/synchronize.cpython-38.opt-1.pycnu�[���PK{��\����*�*"{Z__pycache__/process.cpython-38.pycnu�[���PK{��\w�5�x)x)%ȅ__pycache__/util.cpython-38.opt-2.pycnu�[���PK{��\Y]NZT%T%!��__pycache__/queues.cpython-38.pycnu�[���PK{��\�}�y�2�2(:�__pycache__/context.cpython-38.opt-1.pycnu�[���PK{��\��A�L�L�#n__pycache__/managers.cpython-38.pycnu�[���PK{��\s=q���
�__pycache__/heap.cpython-38.pycnu�[���PK{��\�Wq(q((3�__pycache__/process.cpython-38.opt-1.pycnu�[���PK{��\�����0��__pycache__/resource_sharer.cpython-38.opt-2.pycnu�[���PK{��\�Q��,,&__pycache__/synchronize.cpython-38.pycnu�[���PK{��\i�+C��1z1__pycache__/resource_tracker.cpython-38.opt-1.pycnu�[���PK{��\������&�E__pycache__/spawn.cpython-38.opt-1.pycnu�[���PK{��\�;z ��)`__pycache__/__init__.cpython-38.opt-2.pycnu�[���PK{��\���,�,�b__pycache__/util.cpython-38.pycnu�[���PK{��\#���',�__pycache__/sharedctypes.cpython-38.pycnu�[���PK{��\��>�OO0�__pycache__/resource_sharer.cpython-38.opt-1.pycnu�[���PK{��\�Y0%8888(��__pycache__/shared_memory.cpython-38.pycnu�[���PK{��\�z��{ { +M�__pycache__/forkserver.cpython-38.opt-1.pycnu�[���PK{��\�1�� � %#__pycache__/forkserver.cpython-38.pycnu�[���PK{��\_��w��,!;__pycache__/popen_spawn_posix.cpython-38.pycnu�[���PK{��\@hr+CC+D__pycache__/resource_tracker.cpython-38.pycnu�[���PK{��\���Fy�y�)�X__pycache__/managers.cpython-38.opt-1.pycnu�[���PK{��\�Uxb
b
2��__pycache__/popen_spawn_win32.cpython-38.opt-1.pycnu�[���PK{��\{�ٿ��%S__pycache__/heap.cpython-38.opt-1.pycnu�[���PK{��\��O�** e&__pycache__/spawn.cpython-38.pycnu�[���PK{��\��hh+�@__pycache__/forkserver.cpython-38.opt-2.pycnu�[���PK{��\W�	%	%'�^__pycache__/queues.cpython-38.opt-1.pycnu�[���PK{��\��0

+�__pycache__/popen_fork.cpython-38.opt-1.pycnu�[���PK{��\�9�s	s	+h�__pycache__/popen_forkserver.cpython-38.pycnu�[���PK{��\����JJ-6�__pycache__/sharedctypes.cpython-38.opt-1.pycnu�[���PK{��\��=(-(-(ݳ__pycache__/context.cpython-38.opt-2.pycnu�[���PK{��\_��w��2]�__pycache__/popen_spawn_posix.cpython-38.opt-2.pycnu�[���PK{��\�w�&a�__pycache__/spawn.cpython-38.opt-2.pycnu�[���PK{��\�c"		%�__pycache__/heap.cpython-38.opt-2.pycnu�[���PK{��\��t�EZEZ%' __pycache__/pool.cpython-38.opt-2.pycnu�[���PK{��\^kL|�b�b%�z__pycache__/connection.cpython-38.pycnu�[���PK{��\g�=�Y�Y+��__pycache__/connection.cpython-38.opt-2.pycnu�[���PK{��\C_�

2"8__pycache__/popen_spawn_win32.cpython-38.opt-2.pycnu�[���PK{��\��N�a�a+�E__pycache__/connection.cpython-38.opt-1.pycnu�[���PK{��\�;z ��#��__pycache__/__init__.cpython-38.pycnu�[���PK{��\���h�*�*,��__pycache__/synchronize.cpython-38.opt-2.pycnu�[���PK{��\�9�s	s	1��__pycache__/popen_forkserver.cpython-38.opt-2.pycnu�[���PK{��\�	de�,�,.��__pycache__/shared_memory.cpython-38.opt-2.pycnu�[���PK{��\{�2
 
 *�__pycache__/reduction.cpython-38.opt-1.pycnu�[���PK{��\�TS���1-__pycache__/resource_tracker.cpython-38.opt-2.pycnu�[���PK{��\ɴ�
�
,@__pycache__/popen_spawn_win32.cpython-38.pycnu�[���PK{��\�;z ��)�M__pycache__/__init__.cpython-38.opt-1.pycnu�[���PK{��\{�2
 
 $�P__pycache__/reduction.cpython-38.pycnu�[���PK{��\!o�BT7T7.5q__pycache__/shared_memory.cpython-38.opt-1.pycnu�[���PK|��\i6���,�,%�__pycache__/util.cpython-38.opt-1.pycnu�[���PK|��\!���o$o$'��__pycache__/queues.cpython-38.opt-2.pycnu�[���PK|��\_��w��2��__pycache__/popen_spawn_posix.cpython-38.opt-1.pycnu�[���PK|��\:F6V
V
%�	__pycache__/popen_fork.cpython-38.pycnu�[���PK|��\��0

+]	__pycache__/popen_fork.cpython-38.opt-2.pycnu�[���PK|��\Q
x7��*�	__pycache__/resource_sharer.cpython-38.pycnu�[���PK|��\�}�y�2�2"�-	__pycache__/context.cpython-38.pycnu�[���PK|��\|�V���)�`	__pycache__/managers.cpython-38.opt-2.pycnu�[���PK|��\5��}�a�a%�	__pycache__/pool.cpython-38.opt-1.pycnu�[���PK|��\�9�s	s	1U
__pycache__/popen_forkserver.cpython-38.opt-1.pycnu�[���PK|��\/TJ���^
popen_forkserver.pynu�[���PK|��\}�����g
sharedctypes.pynu�[���PK|��\��p�+�+
��
context.pynu�[���PK|��\
���!�!�
resource_tracker.pynu�[���PK|��\�!ĭ(%(%��
reduction.pynu�[���PK|��\���.�.
1�
process.pynu�[���PK|��\'�u���P#__init__.pynu�[���PK|��\av�-�-	'queues.pynu�[���PK|��\���Z-Z-Usynchronize.pynu�[���PK|��\d�ej����popen_spawn_win32.pynu�[���PK|��\�>.�


��popen_fork.pynu�[���PK|��\
����Μpopen_spawn_posix.pynu�[���PK|��\D~�jAA/��dummy/__pycache__/__init__.cpython-38.opt-2.pycnu�[���PK|��\�/;��	�	+��dummy/__pycache__/connection.cpython-38.pycnu�[���PK|��\�/;��	�	1�dummy/__pycache__/connection.cpython-38.opt-2.pycnu�[���PK|��\�/;��	�	1G�dummy/__pycache__/connection.cpython-38.opt-1.pycnu�[���PK|��\D~�jAA)��dummy/__pycache__/__init__.cpython-38.pycnu�[���PK|��\D~�jAA/8�dummy/__pycache__/__init__.cpython-38.opt-1.pycnu�[���PK|��\��se����dummy/__init__.pynu�[���PK|��\��_T>>�dummy/connection.pynu�[���PK|��\Ҫ`���resource_sharer.pynu�[���PK|��\�c�p|p|
�connection.pynu�[���PK|��\��F��0�0
f�forkserver.pynu�[���PK|��\,ӓm����managers.pynu�[���PK|��\�HhP$P$�
spawn.pynu�[���PK|��\IL��~6~6n�
util.pynu�[���PK|��\˨��~�~#�
pool.pynu�[���PK\\\#Wb