diff --git a/dsoftbus/dist_executor/__init__.py b/dsoftbus/dist_executor/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..22d08a0525105525c2ee6aad2619303adeb6f1b3 --- /dev/null +++ b/dsoftbus/dist_executor/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. diff --git a/dsoftbus/dist_executor/modules/__init__.py b/dsoftbus/dist_executor/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9c9dd874f24d862a78307ba03a8c60fed6fd6d1a --- /dev/null +++ b/dsoftbus/dist_executor/modules/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/basic/bin/etcd/etcd b/dsoftbus/dist_executor/modules/basic/bin/etcd/etcd new file mode 100644 index 0000000000000000000000000000000000000000..6862be05d4796f4c943e7a1a63c713bdaf3fae43 Binary files /dev/null and b/dsoftbus/dist_executor/modules/basic/bin/etcd/etcd differ diff --git a/dsoftbus/dist_executor/modules/basic/bin/etcd/etcdctl b/dsoftbus/dist_executor/modules/basic/bin/etcd/etcdctl new file mode 100644 index 0000000000000000000000000000000000000000..016a14749edce7e07c767fbd37a052a17bb15a43 Binary files /dev/null and b/dsoftbus/dist_executor/modules/basic/bin/etcd/etcdctl differ diff --git a/dsoftbus/dist_executor/modules/basic/bin/redis/redis-server b/dsoftbus/dist_executor/modules/basic/bin/redis/redis-server new file mode 100644 index 0000000000000000000000000000000000000000..278b2c23b32d724719de17f72634284a1d64d3ff Binary files /dev/null and b/dsoftbus/dist_executor/modules/basic/bin/redis/redis-server differ diff --git a/dsoftbus/dist_executor/modules/basic/config.xml b/dsoftbus/dist_executor/modules/basic/config.xml new file mode 100644 index 0000000000000000000000000000000000000000..4126db19fc715b1f368aca9e2d3d078206e8698e --- /dev/null +++ b/dsoftbus/dist_executor/modules/basic/config.xml @@ -0,0 +1,91 @@ + + + + /opt + + IP + + local + + local + + + IP + + 6379 + + password + + + accessKey + + secretKey + + + IP + + 19002 + + 19001 + + + IP + + Noauth + + 32379 + + 32380 + + /usr/bin + + + + path_to_cert + + path_to_key + + path_to_ca + + + false + + path_to_client_cert + + path_to_client_key + + 0 + + + 23790 + + false + + + 58866 + + + 22770 + + + IP + + 12123 + false + + default + + 20480 + + + INFO + INFO + default + default + + + /home/sn/config/services.yaml + + false + 300 + diff --git 
a/dsoftbus/dist_executor/modules/basic/config/redis/redis.conf b/dsoftbus/dist_executor/modules/basic/config/redis/redis.conf new file mode 100644 index 0000000000000000000000000000000000000000..ab837fcb5ca09c3f965fbc2bdb13adfdf3769741 --- /dev/null +++ b/dsoftbus/dist_executor/modules/basic/config/redis/redis.conf @@ -0,0 +1,1877 @@ +# Redis configuration file example. +# +# Note that in order to read the configuration file, Redis must be +# started with the file path as first argument: +# +# ./redis-server /path/to/redis.conf + +# Note on units: when memory size is needed, it is possible to specify +# it in the usual form of 1k 5GB 4M and so forth: +# +# 1k => 1000 bytes +# 1kb => 1024 bytes +# 1m => 1000000 bytes +# 1mb => 1024*1024 bytes +# 1g => 1000000000 bytes +# 1gb => 1024*1024*1024 bytes +# +# units are case insensitive so 1GB 1Gb 1gB are all the same. + +################################## INCLUDES ################################### + +# Include one or more other config files here. This is useful if you +# have a standard template that goes to all Redis servers but also need +# to customize a few per-server settings. Include files can include +# other files, so use this wisely. +# +# Note that option "include" won't be rewritten by command "CONFIG REWRITE" +# from admin or Redis Sentinel. Since Redis always uses the last processed +# line as value of a configuration directive, you'd better put includes +# at the beginning of this file to avoid overwriting config change at runtime. +# +# If instead you are interested in using includes to override configuration +# options, it is better to use include as the last line. +# +# include /path/to/local.conf +# include /path/to/other.conf + +################################## MODULES ##################################### + +# Load modules at startup. If the server is not able to load modules +# it will abort. It is possible to use multiple loadmodule directives. +# +# loadmodule /path/to/my_module.so +# loadmodule /path/to/other_module.so + +################################## NETWORK ##################################### + +# By default, if no "bind" configuration directive is specified, Redis listens +# for connections from all available network interfaces on the host machine. +# It is possible to listen to just one or multiple selected interfaces using +# the "bind" configuration directive, followed by one or more IP addresses. +# +# Examples: +# +bind core_redis_ip +# bind 127.0.0.1 ::1 +# +# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the +# internet, binding to all the interfaces is dangerous and will expose the +# instance to everybody on the internet. So by default we uncomment the +# following bind directive, that will force Redis to listen only on the +# IPv4 loopback interface address (this means Redis will only be able to +# accept client connections from the same host that it is running on). +# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# JUST COMMENT OUT THE FOLLOWING LINE. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +#bind 127.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and if: +# +# 1) The server is not binding explicitly to a set of addresses using the +# "bind" directive. +# 2) No password is configured. 
+# +# The server only accepts connections from clients connecting from the +# IPv4 and IPv6 loopback addresses 127.0.0.1 and ::1, and from Unix domain +# sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured, nor a specific set of interfaces +# are explicitly listed using the "bind" directive. +protected-mode yes + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port core_redis_port + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /tmp/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. +# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange: +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. 
+# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# Explicitly specify TLS versions to support. Allowed values are case insensitive +# and include "TLSv1", "TLSv1.1", "TLSv1.2", "TLSv1.3" (OpenSSL >= 1.1.1) or +# any combination. To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous pings back to your supervisor. +supervised no + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. Also the empty string can be used to force +# Redis to log on the standard output. 
Note that if you use standard +# output for logging but daemonize, logs will be sent to /dev/null +logfile "" + +# To enable logging to the system logger, just set 'syslog-enabled' to yes, +# and optionally update the other syslog parameters to suit your needs. +# syslog-enabled no + +# Specify the syslog identity. +# syslog-ident redis + +# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7. +# syslog-facility local0 + +# Set the number of databases. The default database is DB 0, you can select +# a different one on a per-connection basis using SELECT <dbid> where +# dbid is a number between 0 and 'databases'-1 +databases 16 + +# By default Redis shows an ASCII art logo only when started to log to the +# standard output and if the standard output is a TTY. Basically this means +# that normally a logo is displayed only in interactive sessions. +# +# However it is possible to force the pre-4.0 behavior and always show an +# ASCII art logo in startup logs by setting the following option to yes. +always-show-logo yes + +################################ SNAPSHOTTING ################################ +# +# Save the DB on disk: +# +# save <seconds> <changes> +# +# Will save the DB if both the given number of seconds and the given +# number of write operations against the DB occurred. +# +# In the example below the behavior will be to save: +# after 900 sec (15 min) if at least 1 key changed +# after 300 sec (5 min) if at least 10 keys changed +# after 60 sec if at least 10000 keys changed +# +# Note: you can disable saving completely by commenting out all "save" lines. +# +# It is also possible to remove all the previously configured save +# points by adding a save directive with a single empty string argument +# like in the following example: +# +# save "" + +save 900 1 +save 300 10 +save 60 10000 + +# By default Redis will stop accepting writes if RDB snapshots are enabled +# (at least one save point) and the latest background save failed. +# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# By default compression is enabled as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# The filename where to dump the DB +dbfilename dump.rdb + +# Remove RDB files used by replication in instances without persistence +# enabled.
By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise it is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replicas instances. However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. +# +# +------------------+ +---------------+ +# | Master | ---> | Replica | +# | (receive writes) | | (exact copy) | +# +------------------+ +---------------+ +# +# 1) Redis replication is asynchronous, but you can configure a master to +# stop accepting writes if it appears to be not connected with at least +# a given number of replicas. +# 2) Redis replicas are able to perform a partial resynchronization with the +# master if the replication link is lost for a relatively small amount of +# time. You may want to configure the replication backlog size (see the next +# sections of this file) with a sensible value depending on your needs. +# 3) Replication is automatic and does not need user intervention. After a +# network partition replicas automatically try to reconnect to masters +# and resynchronize with them. +# +# replicaof <masterip> <masterport> + +# If the master is password protected (using the "requirepass" configuration +# directive below) it is possible to tell the replica to authenticate before +# starting the replication synchronization process, otherwise the master will +# refuse the replica request. +# +# masterauth <master-password> +# +# However this is not enough if you are using Redis ACLs (for Redis version +# 6 or greater), and the default user is not capable of running the PSYNC +# command and/or other commands needed for replication. In this case it's +# better to configure a special user to use with replication, and specify the +# masteruser configuration as such: +# +# masteruser <username> +# +# When masteruser is specified, the replica will authenticate against its +# master using the new AUTH form: AUTH <username> <password>. + +# When a replica loses its connection with the master, or when the replication +# is still in progress, the replica can act in two different ways: +# +# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will +# still reply to client requests, possibly with out of date data, or the +# data set may just be empty if this is the first synchronization. +# +# 2) If replica-serve-stale-data is set to 'no' the replica will reply with +# an error "SYNC with master in progress" to all commands except: +# INFO, REPLICAOF, AUTH, PING, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE, +# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST, +# HOST and LATENCY.
+# +replica-serve-stale-data yes + +# You can configure a replica instance to accept writes or not. Writing against +# a replica instance may be useful to store some ephemeral data (because data +# written on a replica will be easily deleted after resync with the master) but +# may also cause problems if clients are writing to it because of a +# misconfiguration. +# +# Since Redis 2.6 by default replicas are read-only. +# +# Note: read only replicas are not designed to be exposed to untrusted clients +# on the internet. It's just a protection layer against misuse of the instance. +# Still a read only replica exports by default all the administrative commands +# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve +# security of read only replicas using 'rename-command' to shadow all the +# administrative / dangerous commands. +replica-read-only yes + +# Replication SYNC strategy: disk or socket. +# +# New replicas and reconnecting replicas that are not able to continue the +# replication process just receiving differences, need to do what is called a +# "full synchronization". An RDB file is transmitted from the master to the +# replicas. +# +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync no + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# ----------------------------------------------------------------------------- +# WARNING: RDB diskless load is experimental. Since in this setup the replica +# does not immediately store an RDB on disk, it may cause data loss during +# failovers. RDB diskless load + Redis modules not handling I/O reads may also +# cause Redis to abort in case of I/O errors during the initial synchronization +# stage with the master. Use only if you know what you are doing.
+# ----------------------------------------------------------------------------- +# +# Replica can load the RDB it reads from the replication link directly from the +# socket, or store the RDB to a file and read that file after it was completely +# received from the master. +# +# In many cases the disk is slower than the network, and storing and loading +# the RDB file may increase replication time (and even increase the master's +# Copy on Write memory and replica buffers). +# However, parsing the RDB file directly from the socket may mean that we have +# to flush the contents of the current database before the full rdb was +# received. For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "on-empty-db" - Use diskless load only when it is completely safe. +# "swapdb" - Keep a copy of the current db contents in RAM while parsing +# the data directly from the socket. note that this requires +# sufficient memory, if you don't have it, you risk an OOM kill. +repl-diskless-load disabled + +# Replicas send PINGs to server in a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed.
+# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. +# +# However a special priority of 0 marks the replica as not able to perform the +# role of master, so a replica with priority of 0 will never be selected by +# Redis Sentinel for promotion. +# +# By default the priority is 100. +replica-priority 100 + +# It is possible for a master to stop accepting writes if there are less than +# N replicas connected, having a lag less or equal than M seconds. +# +# The N replicas need to be in "online" state. +# +# The lag in seconds, that must be <= the specified value, is calculated from +# the last ping received from the replica, that is usually sent every second. +# +# This option does not GUARANTEE that N replicas will accept the write, but +# will limit the window of exposure for lost writes in case not enough replicas +# are available, to the specified number of seconds. +# +# For example to require at least 3 replicas with a lag <= 10 seconds use: +# +# min-replicas-to-write 3 +# min-replicas-max-lag 10 +# +# Setting one or the other to 0 disables the feature. +# +# By default min-replicas-to-write is set to 0 (feature disabled) and +# min-replicas-max-lag is set to 10. + +# A Redis master is able to list the address and port of the attached +# replicas in different ways. For example the "INFO replication" section +# offers this information, which is used, among other tools, by +# Redis Sentinel in order to discover replica instances. +# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# 16 millions of slots, what clients may have certain subsets of keys. 
In turn +# this is used in order to send invalidation messages to clients. Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values. Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. +# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 + +################################## SECURITY ################################### + +# Warning: since Redis is pretty fast, an outside user can try up to +# 1 million passwords per second against a modern box. This means that you +# should use very strong passwords, otherwise they will be very easy to break. +# Note that because the password is really a shared secret between the client +# and the server, and should not be memorized by any human, the password +# can be easily a long string from /dev/urandom or whatever, so by using a +# long and unguessable password no brute force attack will be possible. + +# Redis ACL users are defined in the following format: +# +# user <username> ... acl rules ... +# +# For example: +# +# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99 +# +# The special username "default" is used for new connections. If this user +# has the "nopass" rule, then new connections will be immediately authenticated +# as the "default" user without the need of any password provided via the +# AUTH command. Otherwise if the "default" user is not flagged with "nopass" +# the connections will start in not authenticated state, and will require +# AUTH (or the HELLO command AUTH option) in order to be authenticated and +# start to work. +# +# The ACL rules that describe what a user can do are the following: +# +# on Enable the user: it is possible to authenticate as this user. +# off Disable the user: it's no longer possible to authenticate +# with this user, however the already authenticated connections +# will still work. +# +<command> Allow the execution of that command +# -<command> Disallow the execution of that command +# +@<category> Allow the execution of all the commands in such category +# with valid categories are like @admin, @set, @sortedset, ... +# and so forth, see the full list in the server.c file where +# the Redis command table is described and defined.
+# The special category @all means all the commands, both the ones currently +# present in the server, and the ones that will be loaded in the future +# via modules. +# +<command>|subcommand Allow a specific subcommand of an otherwise +# disabled command. Note that this form is not +# allowed as negative like -DEBUG|SEGFAULT, but +# only additive starting with "+". +# allcommands Alias for +@all. Note that it implies the ability to execute +# all the future commands loaded via the modules system. +# nocommands Alias for -@all. +# ~<pattern> Add a pattern of keys that can be mentioned as part of +# commands. For instance ~* allows all the keys. The pattern +# is a glob-style pattern like the one of KEYS. +# It is possible to specify multiple patterns. +# allkeys Alias for ~* +# resetkeys Flush the list of allowed keys patterns. +# ><password> Add this password to the list of valid passwords for the user. +# For example >mypass will add "mypass" to the list. +# This directive clears the "nopass" flag (see later). +# <<password> Remove this password from the list of valid passwords. +# nopass All the set passwords of the user are removed, and the user +# is flagged as requiring no password: it means that every +# password will work against this user. If this directive is +# used for the default user, every new connection will be +# immediately authenticated with the default user without +# any explicit AUTH command required. Note that the "resetpass" +# directive will clear this condition. +# resetpass Flush the list of allowed passwords. Moreover removes the +# "nopass" status. After "resetpass" the user has no associated +# passwords and there is no way to authenticate without adding +# some password (or setting it as "nopass" later). +# reset Performs the following actions: resetpass, resetkeys, off, +# -@all. The user returns to the same state it has immediately +# after its creation. +# +# ACL rules can be specified in any order: for instance you can start with +# passwords, then flags, or key patterns. However note that the additive +# and subtractive rules will CHANGE MEANING depending on the ordering. +# For instance see the following example: +# +# user alice on +@all -DEBUG ~* >somepassword +# +# This will allow "alice" to use all the commands with the exception of the +# DEBUG command, since +@all added all the commands to the set of the commands +# alice can use, and later DEBUG was removed. However if we invert the order +# of two ACL rules the result will be different: +# +# user alice on -DEBUG +@all ~* >somepassword +# +# Now DEBUG was removed when alice had yet no commands in the set of allowed +# commands, later all the commands are added, so the user will be able to +# execute everything. +# +# Basically ACL rules are processed left-to-right. +# +# For more information about ACL configuration please refer to +# the Redis web site at https://redis.io/topics/acl + +# ACL LOG +# +# The ACL Log tracks failed commands and authentication events associated +# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked +# by ACLs. The ACL Log is stored in memory. You can reclaim memory with +# ACL LOG RESET. Define the maximum entry length of the ACL Log below. +acllog-max-len 128 + +# Using an external ACL file +# +# Instead of configuring users here in this file, it is possible to use +# a stand-alone file just listing users. The two methods cannot be mixed: +# if you configure users here and at the same time you activate the external +# ACL file, the server will refuse to start.
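The "user worker +@list +@connection ~jobs:* on >ffa9203c493aa99" example above can also be created at runtime through a client's ACL helpers. A minimal sketch, assuming the third-party redis-py client (>= 3.5) and a locally reachable server; none of the values below come from this patch:

    # Illustrative only: host, port and the example password are assumptions.
    import redis

    r = redis.Redis(host="127.0.0.1", port=6379)

    # Mirrors: user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
    r.acl_setuser(
        "worker",
        enabled=True,                    # "on"
        passwords=["+ffa9203c493aa99"],  # leading "+" adds this password
        categories=["+@list", "+@connection"],
        keys=["jobs:*"],                 # redis-py prepends the "~" itself
    )
    print(r.acl_getuser("worker"))       # inspect the resulting rules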
+# +# The format of the external ACL user file is exactly the same as the +# format that is used inside redis.conf to describe users. +# +# aclfile /etc/redis/users.acl + +# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility +# layer on top of the new ACL system. The option effect will be just setting +# the password for the default user. Clients will still authenticate using +# AUTH <password> as usual, or more explicitly with AUTH default <password> +# if they follow the new protocol: both will work. +# +requirepass core_redis_password + +# Command renaming (DEPRECATED). +# +# ------------------------------------------------------------------------ +# WARNING: avoid using this option if possible. Instead use ACLs to remove +# commands from the default user, and put them only in some admin user you +# create for administrative purposes. +# ------------------------------------------------------------------------ +# +# It is possible to change the name of dangerous commands in a shared +# environment. For instance the CONFIG command may be renamed into something +# hard to guess so that it will still be available for internal-use tools +# but not available for general clients. +# +# Example: +# +# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52 +# +# It is also possible to completely kill a command by renaming it into +# an empty string: +# +# rename-command CONFIG "" +# +# Please note that changing the name of commands that are logged into the +# AOF file or transmitted to replicas may cause problems. + +################################### CLIENTS #################################### + +# Set the max number of connected clients at the same time. By default +# this limit is set to 10000 clients, however if the Redis server is not +# able to configure the process file limit to allow for the specified limit +# the max number of allowed clients is set to the current file limit +# minus 32 (as Redis reserves a few file descriptors for internal uses). +# +# Once the limit is reached Redis will close all the new connections sending +# an error 'max number of clients reached'. +# +# IMPORTANT: When Redis Cluster is used, the max number of connections is also +# shared with the cluster bus: every node in the cluster will use two +# connections, one incoming and another outgoing. It is important to size the +# limit accordingly in case of very large clusters. +# +# maxclients 10000 + +############################## MEMORY MANAGEMENT ################################ + +# Set a memory usage limit to the specified amount of bytes. +# When the memory limit is reached Redis will try to remove keys +# according to the eviction policy selected (see maxmemory-policy). +# +# If Redis can't remove keys according to the policy, or if the policy is +# set to 'noeviction', Redis will start to reply with errors to commands +# that would use more memory, like SET, LPUSH, and so on, and will continue +# to reply to read-only commands like GET. +# +# This option is usually useful when using Redis as an LRU or LFU cache, or to +# set a hard memory limit for an instance (using the 'noeviction' policy).
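For illustration, the memory limit and eviction policy covered in this section can also be applied at runtime with CONFIG SET, without editing this file. A sketch under the same redis-py assumption, with the requirepass placeholder already substituted:

    # Illustrative only: assumes redis-py and a substituted core_redis_password.
    import redis

    r = redis.Redis(host="127.0.0.1", port=6379, password="core_redis_password")

    r.config_set("maxmemory", "100mb")               # hard memory ceiling
    r.config_set("maxmemory-policy", "allkeys-lru")  # evict any key, approx. LRU
    print(r.config_get("maxmemory*"))                # verify both settings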
+# +# WARNING: If you have replicas attached to an instance with maxmemory on, +# the size of the output buffers needed to feed the replicas are subtracted +# from the used memory count, so that network problems / resyncs will +# not trigger a loop where keys are evicted, and in turn the output +# buffer of replicas is full with DELs of keys evicted triggering the deletion +# of more keys, and so forth until the database is completely emptied. +# +# In short... if you have replicas attached it is suggested that you set a lower +# limit for maxmemory so that there is some free RAM on the system for replica +# output buffers (but this is not needed if the policy is 'noeviction'). +# +# maxmemory <bytes> + +# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory +# is reached. You can select one from the following behaviors: +# +# volatile-lru -> Evict using approximated LRU, only keys with an expire set. +# allkeys-lru -> Evict any key using approximated LRU. +# volatile-lfu -> Evict using approximated LFU, only keys with an expire set. +# allkeys-lfu -> Evict any key using approximated LFU. +# volatile-random -> Remove a random key having an expire set. +# allkeys-random -> Remove a random key, any key. +# volatile-ttl -> Remove the key with the nearest expire time (minor TTL) +# noeviction -> Don't evict anything, just return an error on write operations. +# +# LRU means Least Recently Used +# LFU means Least Frequently Used +# +# Both LRU, LFU and volatile-ttl are implemented using approximated +# randomized algorithms. +# +# Note: with any of the above policies, Redis will return an error on write +# operations, when there are no suitable keys for eviction. +# +# At the date of writing these commands are: set setnx setex append +# incr decr rpush lpush rpushx lpushx linsert lset rpoplpush sadd +# sinter sinterstore sunion sunionstore sdiff sdiffstore zadd zincrby +# zunionstore zinterstore hset hsetnx hmset hincrby incrby decrby +# getset mset msetnx exec sort +# +# The default is: +# +# maxmemory-policy noeviction + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth).
So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire cycle". The key space is slowly and incrementally scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and adding latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. If the key deleted is associated with a small object, the time needed +# in order to execute the DEL command is very small and comparable to most other +# O(1) or O(log_N) commands in Redis. However if the key is associated with an +# aggregated value containing millions of elements, the server can block for +# a long time (even seconds) in order to complete the operation. +# +# For the above reasons Redis also offers non blocking deletion primitives +# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and +# FLUSHDB commands, in order to reclaim memory in background. Those commands +# are executed in constant time. Another thread will incrementally free the +# object in the background as fast as possible. +# +# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled. +# It's up to the design of the application to understand when it is a good +# idea to use one or the other. However the Redis server sometimes has to +# delete keys or flush the whole database as a side effect of other operations. +# Specifically Redis deletes objects independently of a user call in the +# following scenarios: +# +# 1) On eviction, because of the maxmemory and maxmemory policy configurations, +# in order to make room for new data, without going over the specified +# memory limit. +# 2) Because of expire: when a key with an associated time to live (see the +# EXPIRE command) must be deleted from memory. +# 3) Because of a side effect of a command that stores data on a key that may +# already exist. For example the RENAME command may delete the old key +# content when it is replaced with another one. Similarly SUNIONSTORE +# or SORT with STORE option may delete existing keys. The SET command +# itself removes any old content of the specified key in order to replace +# it with the specified string.
+# 4) During replication, when a replica performs a full resynchronization with +# its master, the content of the whole database is removed in order to +# load the RDB file just transferred. +# +# In all the above cases the default is to delete objects in a blocking way, +# like if DEL was called. However you can configure each case specifically +# in order to instead release memory in a non-blocking way like if UNLINK +# was called, using the following configuration directives. + +lazyfree-lazy-eviction no +lazyfree-lazy-expire no +lazyfree-lazy-server-del no +replica-lazy-flush no + +# It is also possible, for cases where replacing the user code DEL calls +# with UNLINK calls is not easy, to modify the default behavior of the DEL +# command to act exactly like UNLINK, using the following configuration +# directive: + +lazyfree-lazy-user-del no + +################################ THREADED I/O ################################# + +# Redis is mostly single threaded, however there are certain threaded +# operations such as UNLINK, slow I/O accesses and other things that are +# performed on side threads. +# +# Now it is also possible to handle Redis clients socket reads and writes +# in different I/O threads. Since especially writing is so slow, normally +# Redis users use pipelining in order to speed up the Redis performances per +# core, and spawn multiple instances in order to scale more. Using I/O +# threads it is possible to easily speedup two times Redis without resorting +# to pipelining nor sharding of the instance. +# +# By default threading is disabled, we suggest enabling it only in machines +# that have at least 4 or more cores, leaving at least one spare core. +# Using more than 8 threads is unlikely to help much. We also recommend using +# threaded I/O only if you actually have performance problems, with Redis +# instances being able to use a quite big percentage of CPU time, otherwise +# there is no point in using this feature. +# +# So for instance if you have a four core box, try to use 2 or 3 I/O +# threads, if you have 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# io-threads 4 +# +# Setting io-threads to 1 will just use the main thread as usual. +# When I/O threads are enabled, we only use threads for writes, that is +# to thread the write(2) syscall and transfer the client buffers to the +# socket. However it is also possible to enable threading of reads and +# protocol parsing using the following configuration directive, by setting +# it to yes: +# +# io-threads-do-reads no +# +# Usually threading reads doesn't help much. +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. Also this feature currently does not work when SSL is +# enabled. +# +# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make +# sure you also run the benchmark itself in threaded mode, using the +# --threads option to match the number of Redis threads, otherwise you'll not +# be able to notice the improvements. + +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. +# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role.
The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. +# +# Redis supports three options: +# +# no: Don't make changes to oom-score-adj (default). +# yes: Alias to "relative" see below. +# absolute: Values in oom-score-adj-values are written as is to the kernel. +# relative: Values are used relative to the initial value of oom_score_adj when +# the server starts and are then clamped to a range of -1000 to 1000. +# Because typically the initial value is 0, they will often match the +# absolute values. +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -2000 to +# 2000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. This means that setting oom-score-adj to "relative" and setting the +# oom-score-adj-values to positive values will always succeed. +oom-score-adj-values 0 200 800 + +############################## APPEND ONLY MODE ############################### + +# By default Redis asynchronously dumps the dataset on disk. This mode is +# good enough in many applications, but an issue with the Redis process or +# a power outage may result into a few minutes of writes lost (depending on +# the configured save points). +# +# The Append Only File is an alternative persistence mode that provides +# much better durability. For instance using the default data fsync policy +# (see later in the config file) Redis can lose just one second of writes in a +# dramatic event like a server power outage, or a single write if something +# wrong with the Redis process itself happens, but the operating system is +# still running correctly. +# +# AOF and RDB persistence can be enabled at the same time without problems. +# If the AOF is enabled on startup Redis will load the AOF, that is the file +# with the better durability guarantees. +# +# Please check http://redis.io/topics/persistence for more information. + +appendonly no + +# The name of the append only file (default: "appendonly.aof") + +appendfilename "appendonly.aof" + +# The fsync() call tells the Operating System to actually write data on disk +# instead of waiting for more data in the output buffer. Some OS will really flush +# data on disk, some other OS will just try to do it ASAP. +# +# Redis supports three different modes: +# +# no: don't fsync, just let the OS flush the data when it wants. Faster. +# always: fsync after every write to the append only log. Slow, Safest. +# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". 
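To illustrate the trade-off just described, the fsync policy can be inspected and switched at runtime; the change is not written back to this file unless CONFIG REWRITE is issued. A sketch under the same redis-py assumption:

    # Illustrative only: assumes redis-py and a reachable server; host, port
    # and password are placeholders, not values from this patch.
    import redis

    r = redis.Redis(host="127.0.0.1", port=6379, password="core_redis_password")

    print(r.config_get("appendfsync"))       # e.g. {'appendfsync': 'everysec'}
    r.config_set("appendfsync", "always")    # safest, slowest: fsync every write
    r.config_set("appendfsync", "everysec")  # back to the recommended compromise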
+ +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync none". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. +# Otherwise if the option is set to no, the server aborts with an error +# and refuses to start. When the option is set to no, the user requires +# to fix the AOF file using the "redis-check-aof" utility before to restart +# the server. +# +# Note that if the AOF file will be found to be corrupted in the middle +# the server will still exit with an error. This option only applies when +# Redis will try to read more data from the AOF file but not enough bytes +# will be found. +aof-load-truncated yes + +# When rewriting the AOF file, Redis is able to use an RDB preamble in the +# AOF file for faster rewrites and recoveries. 
When this option is turned +# on the rewritten AOF file is composed of two different stanzas: +# +# [RDB file][AOF tail] +# +# When loading, Redis recognizes that the AOF file starts with the "REDIS" +# string and loads the prefixed RDB file, then continues loading the AOF +# tail. +aof-use-rdb-preamble yes + +################################ LUA SCRIPTING ############################### + +# Max execution time of a Lua script in milliseconds. +# +# If the maximum execution time is reached Redis will log that a script is +# still in execution after the maximum allowed time and will start to +# reply to queries with an error. +# +# When a long running script exceeds the maximum execution time only the +# SCRIPT KILL and SHUTDOWN NOSAVE commands are available. The first can be +# used to stop a script that did not yet call any write commands. The second +# is the only way to shut down the server in the case a write command was +# already issued by the script but the user doesn't want to wait for the natural +# termination of the script. +# +# Set it to 0 or a negative value for unlimited execution without warnings. +lua-time-limit 5000 + +################################ REDIS CLUSTER ############################### + +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are a multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. 
Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large cluster-replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the cluster-replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). +# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least a hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents replicas from trying to failover its +# master during master failures. However the master can still perform a +# manual failover, if forced to do so. +# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-replica-no-failover no + +# This option, when set to yes, allows nodes to serve read traffic while the +# the cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful for two cases. 
The first case is for when an application +# doesn't require consistency of data during node failures or network partitions. +# One example of this is a cache, where as long as the node has the data it +# should be able to serve it. +# +# The second use case is for configurations that don't meet the recommended +# three shards but want to enable cluster mode and scale later. A +# master outage in a 1 or 2 shard configuration causes a read/write outage to the +# entire cluster without this option set, with it set there is only a write outage. +# Without a quorum of masters, slot ownership will not change automatically. +# +# cluster-allow-reads-when-down no + +# In order to setup your cluster make sure to read the documentation +# available at http://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. The +# following two options are used for this scope, and are: +# +# * cluster-announce-ip +# * cluster-announce-port +# * cluster-announce-bus-port +# +# Each instructs the node about its address, client port, and cluster message +# bus port. The information is then published in the header of the bus packets +# so that other nodes will be able to correctly map the address of the node +# publishing the information. +# +# If the above options are not used, the normal Redis Cluster auto-detection +# will be used instead. +# +# Note that when remapped, the bus port may not be at the fixed offset of +# clients port + 10000, so you can specify any port and bus-port depending +# on how they get remapped. If the bus-port is not set, a fixed offset of +# 10000 will be used as usual. +# +# Example: +# +# cluster-announce-ip 10.1.1.5 +# cluster-announce-port 6379 +# cluster-announce-bus-port 6380 + +################################## SLOW LOG ################################### + +# The Redis Slow Log is a system to log queries that exceeded a specified +# execution time. The execution time does not include the I/O operations +# like talking with the client, sending the reply and so forth, +# but just the time needed to actually execute the command (this is the only +# stage of command execution where the thread is blocked and can not serve +# other requests in the meantime). +# +# You can configure the slow log with two parameters: one tells Redis +# what is the execution time, in microseconds, to exceed in order for the +# command to get logged, and the other parameter is the length of the +# slow log. When a new command is logged the oldest one is removed from the +# queue of logged commands. + +# The following time is expressed in microseconds, so 1000000 is equivalent +# to one second. Note that a negative number disables the slow log, while +# a value of zero forces the logging of every command. +slowlog-log-slower-than 10000 + +# There is no limit to this length. Just be aware that it will consume memory. +# You can reclaim memory used by the slow log with SLOWLOG RESET. 
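+# Illustrative example (a sketch, assuming redis-cli can reach this
+# instance): the slow log is read and trimmed at runtime rather than from
+# this file, e.g.:
+#
+#   redis-cli SLOWLOG GET 10    # show the 10 most recent slow commands
+#   redis-cli SLOWLOG LEN       # number of entries currently stored
+#   redis-cli SLOWLOG RESET     # drop all entries and reclaim their memory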
+slowlog-max-len 128 + +################################ LATENCY MONITOR ############################## + +# The Redis latency monitoring subsystem samples different operations +# at runtime in order to collect data related to possible sources of +# latency of a Redis instance. +# +# Via the LATENCY command this information is available to the user that can +# print graphs and obtain reports. +# +# The system only logs operations that were performed in a time equal or +# greater than the amount of milliseconds specified via the +# latency-monitor-threshold configuration directive. When its value is set +# to zero, the latency monitor is turned off. +# +# By default latency monitoring is disabled since it is mostly not needed +# if you don't have latency issues, and collecting data has a performance +# impact, that while very small, can be measured under big load. Latency +# monitoring can easily be enabled at runtime using the command +# "CONFIG SET latency-monitor-threshold " if needed. +latency-monitor-threshold 0 + +############################# EVENT NOTIFICATION ############################## + +# Redis can notify Pub/Sub clients about events happening in the key space. +# This feature is documented at http://redis.io/topics/notifications +# +# For instance if keyspace events notification is enabled, and a client +# performs a DEL operation on key "foo" stored in the Database 0, two +# messages will be published via Pub/Sub: +# +# PUBLISH __keyspace@0__:foo del +# PUBLISH __keyevent@0__:del foo +# +# It is possible to select the events that Redis will notify among a set +# of classes. Every class is identified by a single character: +# +# K Keyspace events, published with __keyspace@__ prefix. +# E Keyevent events, published with __keyevent@__ prefix. +# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ... +# $ String commands +# l List commands +# s Set commands +# h Hash commands +# z Sorted set commands +# x Expired events (events generated every time a key expires) +# e Evicted events (events generated when a key is evicted for maxmemory) +# t Stream commands +# m Key-miss events (Note: It is not included in the 'A' class) +# A Alias for g$lshzxet, so that the "AKE" string means all the events +# (Except key-miss events which are excluded from 'A' due to their +# unique nature). +# +# The "notify-keyspace-events" takes as argument a string that is composed +# of zero or multiple characters. The empty string means that notifications +# are disabled. +# +# Example: to enable list and generic events, from the point of view of the +# event name, use: +# +# notify-keyspace-events Elg +# +# Example 2: to get the stream of the expired keys subscribing to channel +# name __keyevent@0__:expired use: +# +# notify-keyspace-events Ex +# +# By default all notifications are disabled because most users don't need +# this feature and the feature has some overhead. Note that if you don't +# specify at least one of K or E, no events will be delivered. +notify-keyspace-events "" + +############################### GOPHER SERVER ################################# + +# Redis contains an implementation of the Gopher protocol, as specified in +# the RFC 1436 (https://www.ietf.org/rfc/rfc1436.txt). +# +# The Gopher protocol was very popular in the late '90s. It is an alternative +# to the web, and the implementation both server and client side is so simple +# that the Redis server has just 100 lines of code in order to implement this +# support. +# +# What do you do with Gopher nowadays? 
Well Gopher never *really* died, and +# lately there is a movement in order for the Gopher more hierarchical content +# composed of just plain text documents to be resurrected. Some want a simpler +# internet, others believe that the mainstream internet became too much +# controlled, and it's cool to create an alternative space for people that +# want a bit of fresh air. +# +# Anyway for the 10nth birthday of the Redis, we gave it the Gopher protocol +# as a gift. +# +# --- HOW IT WORKS? --- +# +# The Redis Gopher support uses the inline protocol of Redis, and specifically +# two kind of inline requests that were anyway illegal: an empty request +# or any request that starts with "/" (there are no Redis commands starting +# with such a slash). Normal RESP2/RESP3 requests are completely out of the +# path of the Gopher protocol implementation and are served as usual as well. +# +# If you open a connection to Redis when Gopher is enabled and send it +# a string like "/foo", if there is a key named "/foo" it is served via the +# Gopher protocol. +# +# In order to create a real Gopher "hole" (the name of a Gopher site in Gopher +# talking), you likely need a script like the following: +# +# https://github.com/antirez/gopher2redis +# +# --- SECURITY WARNING --- +# +# If you plan to put Redis on the internet in a publicly accessible address +# to server Gopher pages MAKE SURE TO SET A PASSWORD to the instance. +# Once a password is set: +# +# 1. The Gopher server (when enabled, not by default) will still serve +# content via Gopher. +# 2. However other commands cannot be called before the client will +# authenticate. +# +# So use the 'requirepass' option to protect your instance. +# +# Note that Gopher is not currently supported when 'io-threads-do-reads' +# is enabled. +# +# To enable Gopher support, uncomment the following line and set the option +# from no (the default) to yes. +# +# gopher-enabled no + +############################### ADVANCED CONFIG ############################### + +# Hashes are encoded using a memory efficient data structure when they have a +# small number of entries, and the biggest entry does not exceed a given +# threshold. These thresholds can be configured using the following directives. +hash-max-ziplist-entries 512 +hash-max-ziplist-value 64 + +# Lists are also encoded in a special way to save a lot of space. +# The number of entries allowed per internal list node can be specified +# as a fixed maximum size or a maximum number of elements. +# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-ziplist-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. 
Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-ziplist-entries 128 +zset-max-ziplist-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entires limit by setting max-bytes to 0 and max-entries to the desired +# value. +stream-node-max-bytes 4096 +stream-node-max-entries 100 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). The hash table implementation Redis uses (see dict.c) +# performs a lazy rehashing: the more operation you run into a hash table +# that is rehashing, the more rehashing "steps" are performed, so if the +# server is idle the rehashing is never complete and some more memory is used +# by the hash table. +# +# The default is to use this millisecond 10 times every second in order to +# actively rehash the main dictionaries, freeing memory when possible. +# +# If unsure: +# use "activerehashing no" if you have hard latency requirements and it is +# not a good thing in your environment that Redis can reply from time to time +# to queries with 2 milliseconds delay. +# +# use "activerehashing yes" if you don't have such hard requirements but +# want to free memory asap when possible. 
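+# Illustrative check (assumes redis-cli is available on this host): before
+# disabling active rehashing for latency reasons, it can help to measure the
+# actual round-trip latency of the instance, e.g.:
+#
+#   redis-cli --latency           # continuous min/max/avg latency sampling
+#   redis-cli --latency-history   # the same, reported in 15 second windows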
+activerehashing yes + +# The client output buffer limits can be used to force disconnection of clients +# that are not reading data from the server fast enough for some reason (a +# common reason is that a Pub/Sub client can't consume messages as fast as the +# publisher can produce them). +# +# The limit can be set differently for the three different classes of clients: +# +# normal -> normal clients including MONITOR clients +# replica -> replica clients +# pubsub -> clients subscribed to at least one pubsub channel or pattern +# +# The syntax of every client-output-buffer-limit directive is the following: +# +# client-output-buffer-limit +# +# A client is immediately disconnected once the hard limit is reached, or if +# the soft limit is reached and remains reached for the specified number of +# seconds (continuously). +# So for instance if the hard limit is 32 megabytes and the soft limit is +# 16 megabytes / 10 seconds, the client will get disconnected immediately +# if the size of the output buffers reach 32 megabytes, but will also get +# disconnected if the client reaches 16 megabytes and continuously overcomes +# the limit for 10 seconds. +# +# By default normal clients are not limited because they don't receive data +# without asking (in a push way), but just after a request, so only +# asynchronous clients may create a scenario where data is requested faster +# than it can read. +# +# Instead there is a default limit for pubsub and replica clients, since +# subscribers and replicas receive data in a push fashion. +# +# Both the hard or the soft limit can be disabled by setting them to zero. +client-output-buffer-limit normal 0 0 0 +client-output-buffer-limit replica 256mb 64mb 60 +client-output-buffer-limit pubsub 32mb 8mb 60 + +# Client query buffers accumulate new commands. They are limited to a fixed +# amount by default in order to avoid that a protocol desynchronization (for +# instance due to a bug in the client) will lead to unbound memory usage in +# the query buffer. However you can configure it here if you have very special +# needs, such us huge multi/exec requests or alike. +# +# client-query-buffer-limit 1gb + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited to 512 mb. However you can change this limit +# here, but must be 1mb or greater +# +# proto-max-bulk-len 512mb + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. 
+# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporarily raise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. +dynamic-hz yes + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 32 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. +# +# There are two tunable parameters in the Redis LFU implementation: the +# counter logarithm factor and the counter decay time. It is important to +# understand what the two parameters mean before changing them. +# +# The LFU counter is just 8 bits per key, it's maximum value is 255, so Redis +# uses a probabilistic increment with logarithmic behavior. Given the value +# of the old counter, when a key is accessed, the counter is incremented in +# this way: +# +# 1. A random number R between 0 and 1 is extracted. +# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1). +# 3. The counter is incremented only if R < P. +# +# The default lfu-log-factor is 10. This is a table of how the frequency +# counter changes with a different number of accesses with different +# logarithmic factors: +# +# +--------+------------+------------+------------+------------+------------+ +# | factor | 100 hits | 1000 hits | 100K hits | 1M hits | 10M hits | +# +--------+------------+------------+------------+------------+------------+ +# | 0 | 104 | 255 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 1 | 18 | 49 | 255 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 10 | 10 | 18 | 142 | 255 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# | 100 | 8 | 11 | 49 | 143 | 255 | +# +--------+------------+------------+------------+------------+------------+ +# +# NOTE: The above table was obtained by running the following commands: +# +# redis-benchmark -n 1000000 incr foo +# redis-cli object freq foo +# +# NOTE 2: The counter initial value is 5 in order to give new objects a chance +# to accumulate hits. +# +# The counter decay time is the time, in minutes, that must elapse in order +# for the key counter to be divided by two (or decremented if it has a value +# less <= 10). +# +# The default value for the lfu-decay-time is 1. A special value of 0 means to +# decay the counter every time it happens to be scanned. 
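+# Worked example (added for illustration): with the default lfu-log-factor
+# of 10, a key whose counter currently sits at 50 is incremented on access
+# with probability P = 1/(50*10+1) ~= 0.002, i.e. roughly one hit in 500,
+# which is why the 8-bit counter saturates only after millions of accesses
+# (see the table above).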
+# +# lfu-log-factor 10 +# lfu-decay-time 1 + +########################### ACTIVE DEFRAGMENTATION ####################### +# +# What is active defragmentation? +# ------------------------------- +# +# Active (online) defragmentation allows a Redis server to compact the +# spaces left between small allocations and deallocations of data in memory, +# thus allowing to reclaim back memory. +# +# Fragmentation is a natural process that happens with every allocator (but +# less so with Jemalloc, fortunately) and certain workloads. Normally a server +# restart is needed in order to lower the fragmentation, or at least to flush +# away all the data and create it again. However thanks to this feature +# implemented by Oran Agra for Redis 4.0 this process can happen at runtime +# in a "hot" way, while the server is running. +# +# Basically when the fragmentation is over a certain level (see the +# configuration options below) Redis will start to create new copies of the +# values in contiguous memory regions by exploiting certain specific Jemalloc +# features (in order to understand if an allocation is causing fragmentation +# and to allocate it in a better place), and at the same time, will release the +# old copies of the data. This process, repeated incrementally for all the keys +# will cause the fragmentation to drop back to normal values. +# +# Important things to understand: +# +# 1. This feature is disabled by default, and only works if you compiled Redis +# to use the copy of Jemalloc we ship with the source code of Redis. +# This is the default with Linux builds. +# +# 2. You never need to enable this feature if you don't have fragmentation +# issues. +# +# 3. Once you experience fragmentation, you can enable this feature when +# needed with the command "CONFIG SET activedefrag yes". +# +# The configuration parameters are able to fine tune the behavior of the +# defragmentation process. If you are not sure about what they mean it is +# a good idea to leave the defaults untouched. + +# Enabled active defragmentation +# activedefrag no + +# Minimum amount of fragmentation waste to start active defrag +# active-defrag-ignore-bytes 100mb + +# Minimum percentage of fragmentation to start active defrag +# active-defrag-threshold-lower 10 + +# Maximum percentage of fragmentation at which we use maximum effort +# active-defrag-threshold-upper 100 + +# Minimal effort for defrag in CPU percentage, to be used when the lower +# threshold is reached +# active-defrag-cycle-min 1 + +# Maximal effort for defrag in CPU percentage, to be used when the upper +# threshold is reached +# active-defrag-cycle-max 25 + +# Maximum number of set/hash/zset/list fields that will be processed from +# the main dictionary scan +# active-defrag-max-scan-fields 1000 + +# Jemalloc background thread for purging will be enabled by default +jemalloc-bg-thread yes + +# It is possible to pin different threads and processes of Redis to specific +# CPUs in your system, in order to maximize the performances of the server. +# This is useful both in order to pin different Redis threads in different +# CPUs, but also in order to make sure that multiple Redis instances running +# in the same host will be pinned to different CPUs. +# +# Normally you can do this using the "taskset" command, however it is also +# possible to this via Redis configuration directly, both in Linux and FreeBSD. +# +# You can pin the server/IO threads, bio threads, aof rewrite child process, and +# the bgsave child process. 
The syntax to specify the cpu list is the same as +# the taskset command: +# +# Set redis server/io threads to cpu affinity 0,2,4,6: +# server_cpulist 0-7:2 +# +# Set bio threads to cpu affinity 1,3: +# bio_cpulist 1,3 +# +# Set aof rewrite child process to cpu affinity 8,9,10,11: +# aof_rewrite_cpulist 8-11 +# +# Set bgsave child process to cpu affinity 1,10,11 +# bgsave_cpulist 1,10-11 + +# In some cases redis will emit warnings and even refuse to start if it detects +# that the system is in bad state, it is possible to suppress these warnings +# by setting the following config which takes a space delimited list of warnings +# to suppress +# +# ignore-warnings ARM64-COW-BUG diff --git a/dsoftbus/dist_executor/modules/basic/deploy.sh b/dsoftbus/dist_executor/modules/basic/deploy.sh new file mode 100644 index 0000000000000000000000000000000000000000..72af2a2be25f75d71a3186a0cf44611497ea6ea8 --- /dev/null +++ b/dsoftbus/dist_executor/modules/basic/deploy.sh @@ -0,0 +1,415 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e +readonly BASE_DIR=$(dirname "$(readlink -f "$0")") +readonly ARCH="$(echo "$(uname -m)")" +CONFIG_DIR="${BASE_DIR}/config" +CONFIG_FILE="${BASE_DIR}/../config/config.xml" +BIN_DIR="${BASE_DIR}/bin" +INSTALL_DIR="/opt" + +[[ ! -f "${BASE_DIR}/utils.sh" ]] && echo "${BASE_DIR}/utils.sh is not exist" && exit 1 +. ${BASE_DIR}/utils.sh + +# identifier assignment +HOST_IP=$(hostname -I | awk '{print $1}') +PORT_MIN=25500 +PORT_MAX=25800 + +unset http_proxy +unset https_proxy + +# help of deploy.sh +function usage() { + echo -e "Usage: bash deploy.sh [-o install/uninstall] [-m component_name/all] [-h help]" + echo -e "Options:" + echo -e " -o install/uninstall, install component or uninstall component." + echo -e " -m component_name/all, such etcd, minio, redis" + echo -e " -n config file" + echo -e " -h usage help" + echo -e " " + echo -e "example:" + echo -e " bash deploy.sh -o install -m all" + echo -e " bash deploy.sh -o uninstall -m all" + echo -e " bash deploy.sh -o install -m etcd" + echo -e " bash deploy.sh -o uninstall -m etcd" + echo -e " bash deploy.sh -h" +} + +function install_etcd() { + log_info "---- install etcd ----" + local etcd_install_dir="${INSTALL_DIR}/basic/etcd" + [ -d "${etcd_install_dir}" ] || mkdir -p "${etcd_install_dir}" + rm -rf "${etcd_install_dir}"/* + chmod 700 "${etcd_install_dir}" + + init_config_var "$CONFIG_FILE" + [ -z "$FUNCTIONCORE_LOG_PATH" ] && FUNCTIONCORE_LOG_PATH="${etcd_install_dir}" + mkdir -p "${FUNCTIONCORE_LOG_PATH}" + log_file="${FUNCTIONCORE_LOG_PATH}"/etcd-run.log + + # load config + local etcd_auth_type="$(xmllint --xpath "string(//config/etcd_auth_type)" "${CONFIG_FILE}")" + ! [[ "${etcd_auth_type}X" == "NoauthX" || "${etcd_auth_type}X" == "TLSX" || "${etcd_auth_type}X" == "PWDX" ]] && log_error "please configure ${CONFIG_FILE} etcd_auth_type first!" 
&& exit 1
+
+    if [ "${etcd_auth_type}X" == "PWDX" ]; then
+        local etcd_password="$(xmllint --xpath "string(//config/etcd_password)" "${CONFIG_FILE}")"
+        [[ "${etcd_password}X" == "passwordX" ]] && log_error "please configure ${CONFIG_FILE} etcd_password first!" && exit 1
+    fi
+    local etcd_ip="$(xmllint --xpath "string(//config/etcd_ip)" "${CONFIG_FILE}")"
+    [[ "${etcd_ip}X" == "IPX" ]] && log_error "please configure ${CONFIG_FILE} etcd_ip first!" && exit 1
+    local etcd_port="$(xmllint --xpath "string(//config/etcd_port)" "${CONFIG_FILE}")"
+    [[ "${etcd_port}X" == "etcd_portX" ]] && log_error "please configure ${CONFIG_FILE} etcd_port first!" && exit 1
+    local etcd_peer_port="$(xmllint --xpath "string(//config/etcd_peer_port)" "${CONFIG_FILE}")"
+    [[ "${etcd_peer_port}X" == "etcd_peer_portX" ]] && log_error "please configure ${CONFIG_FILE} etcd_peer_port first!" && exit 1
+    local etcdctl_install_dir="$(xmllint --xpath "string(//config/etcdctl_install_dir)" "${CONFIG_FILE}")"
+    [[ "${etcdctl_install_dir}X" == "etcdctl_install_dirX" ]] && etcdctl_install_dir=~/bin
+
+    local etcd_ca_file="$(xmllint --xpath "string(//config/etcd_ca_file)" "${CONFIG_FILE}")"
+    local etcd_cert_file="$(xmllint --xpath "string(//config/etcd_cert_file)" "${CONFIG_FILE}")"
+    local etcd_key_file="$(xmllint --xpath "string(//config/etcd_key_file)" "${CONFIG_FILE}")"
+
+    local etcd_proxy_enable="$(xmllint --xpath "string(//config/etcd_proxy_enable)" "${CONFIG_FILE}")"
+    local client_cert_file="$(xmllint --xpath "string(//config/client_cert_file)" "${CONFIG_FILE}")"
+    local client_key_file="$(xmllint --xpath "string(//config/client_key_file)" "${CONFIG_FILE}")"
+    local etcd_proxy_nums="$(xmllint --xpath "string(//config/etcd_proxy_nums)" "${CONFIG_FILE}")"
+    local etcd_proxy_ports="$(xmllint --xpath "string(//config/etcd_proxy_ports)" "${CONFIG_FILE}")"
+    local etcd_no_fsync="$(xmllint --xpath "string(//config/etcd_no_fsync)" "${CONFIG_FILE}")"
+    if ! check_port "${HOST_IP}" "${etcd_port}"; then
+        log_error "${etcd_port} bind: address already in use"
+        return 98
+    fi
+    if ! check_port "${HOST_IP}" "${etcd_peer_port}"; then
+        log_error "${etcd_peer_port} bind: address already in use"
+        return 98
+    fi
+    # start etcd
+    # WARNING: --unsafe-no-fsync improves I/O performance, but may cause data loss if the node crashes.
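+    # Illustrative manual check (a hedged sketch, not executed by this
+    # script): after startup, the same health probe the retry loop below
+    # performs can be run by hand; for the TLS case the flags mirror the
+    # ones used here, e.g.:
+    #
+    #   "${BIN_DIR}"/etcd/etcdctl --cacert=<path_to_ca> \
+    #       --cert=<path_to_client_cert> --key=<path_to_client_key> \
+    #       --endpoints="https://<etcd_ip>:<etcd_port>" endpoint health
+    #
+    # The <...> placeholders stand for the values configured in config.xml.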
+ if [ "${etcd_auth_type}" == "TLS" ]; then + "${BIN_DIR}"/etcd/etcd \ + --name=etcd0 \ + --unsafe-no-fsync="${etcd_no_fsync}" \ + --data-dir="${etcd_install_dir}" \ + --initial-advertise-peer-urls="https://${etcd_ip}:${etcd_peer_port}" \ + --listen-client-urls="https://${etcd_ip}:${etcd_port}" \ + --advertise-client-urls="https://${etcd_ip}:${etcd_port}" \ + --listen-peer-urls="https://${etcd_ip}:${etcd_peer_port}" \ + --client-cert-auth \ + --trusted-ca-file="$etcd_ca_file" \ + --cert-file="$etcd_cert_file" \ + --key-file="$etcd_key_file" \ + --peer-client-cert-auth \ + --peer-trusted-ca-file "$etcd_ca_file" \ + --peer-cert-file "$etcd_cert_file" \ + --peer-key-file "$etcd_key_file" >>$log_file 2>&1 & + else + "${BIN_DIR}"/etcd/etcd \ + --name=etcd0 \ + --unsafe-no-fsync="${etcd_no_fsync}" \ + --data-dir="${etcd_install_dir}" \ + --listen-client-urls="http://${etcd_ip}:${etcd_port}" \ + --advertise-client-urls="http://${etcd_ip}:${etcd_port}" \ + --listen-peer-urls="http://${etcd_ip}:${etcd_peer_port}" >>$log_file 2>&1 & + fi + + set +e + count=0 + max_retry_num=7 + log_info "check etcd status" >>$log_file + while [ "${count}" -lt "${max_retry_num}" ]; do + if [ "${etcd_auth_type}" == "TLS" ]; then + if "${BIN_DIR}"/etcd/etcdctl --cacert=${etcd_ca_file} --cert=${client_cert_file} --key=${client_key_file} --endpoints="https://${etcd_ip}:${etcd_port}" endpoint health; then + log_info "etcd is ready" + break + else + ((count++)) + log_info "number of retries: ${count}" + sleep 2 + fi + else + if "${BIN_DIR}"/etcd/etcdctl --endpoints="${etcd_ip}:${etcd_peer_port}" endpoint health >>$log_file 2>&1; then + log_info "etcd is ready" >>$log_file + break + else + ((count++)) + log_info "number of retries: ${count}" + sleep 2 + fi + fi + done + + if [ "${count}" -ge "${max_retry_num}" ]; then + log_error "etcd is not ready" && exit 1 + fi + set -e + + if [ "${etcd_auth_type}" == "Noauth" ]; then + "${BIN_DIR}"/etcd/etcdctl --endpoints="${etcd_ip}:${etcd_peer_port}" auth disable + elif [ "${etcd_auth_type}" == "PWD" ]; then + # add root user and open auth + "${BIN_DIR}"/etcd/etcdctl --endpoints="${etcd_ip}:${etcd_peer_port}" user add root:"${etcd_password}" >>$log_file 2>&1 & + "${BIN_DIR}"/etcd/etcdctl --endpoints="${etcd_ip}:${etcd_peer_port}" auth enable >>$log_file 2>&1 & + fi + + if [ "${etcd_proxy_enable}" = true ] && [ "$etcd_proxy_nums" -gt 0 ]; then + log_info "etcd_proxy_enable:${etcd_proxy_enable} etcd_proxy_nums:$etcd_proxy_nums etcd_proxy_ports:$etcd_proxy_ports" >>$log_file 2>&1 + for ((i = 1; i <= etcd_proxy_nums; i++)); do + if ! 
check_port "${HOST_IP}" "${etcd_proxy_ports}"; then + log_error "${etcd_proxy_ports} bind: address already in use" + return 98 + fi + if [ "${etcd_auth_type}" == "Noauth" ]; then + log_info "start proxy-$i" + "${BIN_DIR}"/etcd/etcd grpc-proxy start --endpoints="${etcd_ip}:${etcd_port}" \ + --listen-addr="${etcd_ip}:${etcd_proxy_ports}" \ + --advertise-client-url="${etcd_ip}:${etcd_proxy_ports}" \ + --resolver-prefix="___grpc_proxy_endpoint" \ + --resolver-ttl=60 >"${FUNCTIONCORE_LOG_PATH}"/etcd-proxy-"$i".log 2>&1 & + elif [ "${etcd_auth_type}" == "TLS" ]; then + log_info "start proxy-$i" + "${BIN_DIR}"/etcd/etcd grpc-proxy start --endpoints="${etcd_ip}:${etcd_port}" \ + --listen-addr="${etcd_ip}:${etcd_proxy_ports}" \ + --advertise-client-url="${etcd_ip}:${etcd_proxy_ports}" \ + --cert="$etcd_cert_file" --key="$etcd_key_file" --cacert="$etcd_ca_file" \ + --cert-file="$client_cert_file" --key-file="$client_key_file" --trusted-ca-file="$etcd_ca_file" \ + --resolver-prefix="___grpc_proxy_endpoint" \ + --resolver-ttl=60 >"${FUNCTIONCORE_LOG_PATH}"/etcd-proxy-"$i".log 2>&1 & + else + log_error "proxy not support current auth type: ${etcd_auth_type}" + exit 1 + fi + get_free_port "$HOST_IP" "$PORT_MIN" "$PORT_MAX" >/dev/null + etcd_proxy_ports=${ORDERED_PORTS[0]} + done + else + log_info "etcd_proxy_enable:${etcd_proxy_enable} etcd_proxy_nums:$etcd_proxy_nums" >>$log_file 2>&1 + fi + + log_info "---- install etcd success ----" +} + +function uninstall_etcd() { + log_info "---- uninstall etcd ----" + local etcd_install_dir="${INSTALL_DIR}/basic/etcd" + local etcd_ip="$(xmllint --xpath "string(//config/etcd_ip)" "${CONFIG_FILE}")" + local etcd_port="$(xmllint --xpath "string(//config/etcd_port)" "${CONFIG_FILE}")" + local etcd_proxy_ports="$(xmllint --xpath "string(//config/etcd_proxy_ports)" "${CONFIG_FILE}")" + + local etcd_proxy_pid=$(ps -ef | grep "${BIN_DIR}/etcd/etcd" | grep "${etcd_ip}:${etcd_port}" | grep "grpc-proxy" | awk '{ print $2 }') + for i in ${etcd_proxy_pid}; do + if ! kill -9 "$i"; then + log_info "etcd proxy is not running" + fi + done + + local etcd_pid=$(ps -ef | grep "${BIN_DIR}/etcd/etcd" | grep "${etcd_ip}:${etcd_port}" | grep -v grep | grep -v PPID | grep -v deploy.sh | awk '{ print $2 }') + local etcdctl_install_dir="$(xmllint --xpath "string(//config/etcdctl_install_dir)" "${CONFIG_FILE}")" + [[ "${etcdctl_install_dir}X" == "etcdctl_install_dirX" ]] && etcdctl_install_dir=~/bin + if ! 
kill -9 "${etcd_pid}"; then + log_info "etcd is not running" + fi + + [ -d "${etcd_install_dir}" ] && rm -rf "${etcd_install_dir}" + log_info "---- uninstall etcd success ----" +} + +function install_redis() { + local state_storage_type="$(xmllint --xpath "string(//config/state_storage_type)" "${CONFIG_FILE}")" + [[ "${state_storage_type}X" == "localX" ]] && log_info "state storage type is local, no need to start redis" && return 0 + [[ "${state_storage_type}X" != "redisX" ]] && log_error "please configure right ${CONFIG_FILE} state_storage_type" && exit 1 + + log_info "---- install redis ----" + + local redis_install_dir="${INSTALL_DIR}/basic/redis" + [ -d "${redis_install_dir}" ] || mkdir -p "${redis_install_dir}" + init_config_var "$CONFIG_FILE" + [ -z "$FUNCTIONCORE_LOG_PATH" ] && FUNCTIONCORE_LOG_PATH="${redis_install_dir}" + mkdir -p "${FUNCTIONCORE_LOG_PATH}" + rm -rf "${redis_install_dir}"/* + cp -rf "${CONFIG_DIR}"/redis/* "${redis_install_dir}" + + # load config + local redis_password="$(xmllint --xpath "string(//config/redis_password)" "${CONFIG_FILE}")" + [[ "${redis_password}X" == "passwordX" ]] && log_error "please configure ${CONFIG_FILE} redis_password first!" && exit 1 + local redis_ip="$(xmllint --xpath "string(//config/redis_ip)" "${CONFIG_FILE}")" + [[ "${redis_ip}X" == "IPX" ]] && log_error "please configure ${CONFIG_FILE} redis_ip first!" && exit 1 + local redis_port="$(xmllint --xpath "string(//config/redis_port)" "${CONFIG_FILE}")" + [[ "${redis_port}X" == "redis_portX" ]] && log_error "please configure ${CONFIG_FILE} redis_port first!" && exit 1 + sed -i "/^dir/c\dir ${redis_install_dir}" "${redis_install_dir}"/redis.conf + sed -i "s/core_redis_ip/${redis_ip}/g" "${redis_install_dir}"/redis.conf + sed -i "s/core_redis_port/${redis_port}/g" "${redis_install_dir}"/redis.conf + sed -i "s/core_redis_password/${redis_password}/g" "${redis_install_dir}"/redis.conf + + if ! check_port "${HOST_IP}" "${redis_port}"; then + log_error "${redis_port} bind: address already in use" + return 98 + fi + # start redis + "${BIN_DIR}"/redis/redis-server "${redis_install_dir}"/redis.conf >"${FUNCTIONCORE_LOG_PATH}"/redis-run.log 2>&1 & + log_info "---- install redis success ----" +} + +function uninstall_redis() { + local state_storage_type="$(xmllint --xpath "string(//config/state_storage_type)" "${CONFIG_FILE}")" + [[ "${state_storage_type}X" == "localX" ]] && log_info "state storage type is local, no need to uninstall redis" && return 0 + [[ "${state_storage_type}X" != "redisX" ]] && log_error "please configure right ${CONFIG_FILE} state_storage_type" && exit 1 + + log_info "---- uninstall redis ----" + local redis_install_dir="${INSTALL_DIR}/basic/redis" + local redis_port="$(xmllint --xpath "string(//config/redis_port)" "${CONFIG_FILE}")" + local redis_pid=$(ps -ef | grep "${BIN_DIR}"/redis/redis-server | grep "${redis_port}" | grep -v grep | grep -v PPID | grep -v deploy.sh | awk '{ print $2 }') + if ! 
kill -9 "${redis_pid}"; then + log_info "redis-server is not running" + fi + [ -d "${redis_install_dir}" ] && rm -rf "${redis_install_dir}" + log_info "---- uninstall redis success ----" +} + +function install_minio() { + local function_storage_type="$(xmllint --xpath "string(//config/function_storage_type)" "${CONFIG_FILE}")" + [[ "${function_storage_type}X" == "localX" ]] && log_info "function storage type is local, no need to start minio" && return 0 + [[ "${function_storage_type}X" != "s3X" ]] && log_error "please configure right ${CONFIG_FILE} function_storage_type" && exit 1 + + log_info "---- install minio ----" + local minio_install_dir="${INSTALL_DIR}/basic/minio" + [ -d "${minio_install_dir}" ] || mkdir -p "${minio_install_dir}" + init_config_var "$CONFIG_FILE" + [ -z "$FUNCTIONCORE_LOG_PATH" ] && FUNCTIONCORE_LOG_PATH="${minio_install_dir}" + mkdir -p "${FUNCTIONCORE_LOG_PATH}" + rm -rf "${minio_install_dir}"/* + + # load config + local minio_user="$(xmllint --xpath "string(//config/S3_AK)" "${CONFIG_FILE}")" + [[ "${minio_user}X" == "accessKeyX" ]] && log_error "please configure ${CONFIG_FILE} S3_AK first!" && exit 1 + local minio_password="$(xmllint --xpath "string(//config/S3_SK)" "${CONFIG_FILE}")" + [[ "${minio_password}X" == "secretKeyX" ]] && log_error "please configure ${CONFIG_FILE} S3_AK first!" && exit 1 + local minio_ip="$(xmllint --xpath "string(//config/minio_ip)" "${CONFIG_FILE}")" + [[ "${minio_ip}X" == "IPX" ]] && log_error "please configure ${CONFIG_FILE} minio_ip first!" && exit 1 + local minio_port="$(xmllint --xpath "string(//config/minio_port)" "${CONFIG_FILE}")" + [[ "${minio_port}X" == "minio_portX" ]] && log_error "please configure ${CONFIG_FILE} minio_port first!" && exit 1 + local console_port="$(xmllint --xpath "string(//config/minio_console_port)" "${CONFIG_FILE}")" + [[ "${console_port}X" == "minio_console_portX" ]] && log_error "please configure ${CONFIG_FILE} minio_console_portX first!" && exit 1 + + if ! check_port "${HOST_IP}" "${console_port}"; then + log_error "${console_port} bind: address already in use" + return 98 + fi + if ! check_port "${HOST_IP}" "${minio_port}"; then + log_error "${minio_port} bind: address already in use" + return 98 + fi + # start minio + MINIO_ROOT_USER="${minio_user}" MINIO_ROOT_PASSWORD="${minio_password}" "${BIN_DIR}"/minio/minio server \ + "${minio_install_dir}"/data --console-address "${minio_ip}:${console_port}" --address "${minio_ip}:${minio_port}" >"${FUNCTIONCORE_LOG_PATH}"/minio-run.log 2>&1 & + + log_info "---- install minio success ----" +} + +function uninstall_minio() { + local function_storage_type="$(xmllint --xpath "string(//config/function_storage_type)" "${CONFIG_FILE}")" + [[ "${function_storage_type}X" == "localX" ]] && log_info "function storage type is local, no need to uninstall minio" && return 0 + [[ "${function_storage_type}X" != "s3X" ]] && log_error "please configure right ${CONFIG_FILE} function_storage_type" && exit 1 + + log_info "---- uninstall minio ----" + local minio_install_dir="${INSTALL_DIR}/basic/minio" + local minio_port="$(xmllint --xpath "string(//config/minio_port)" "${CONFIG_FILE}")" + local minio_pid=$(ps -ef | grep "${BIN_DIR}"/minio/minio | grep "${minio_port}" | grep -v grep | grep -v PPID | grep -v deploy.sh | awk '{ print $2 }') + if ! 
kill -9 "${minio_pid}"; then + log_info "minio is not running" + fi + rm -rf "${minio_install_dir}" + log_info "---- uninstall minio success ----" +} + +# install module +function install_module() { + case "$1" in + "etcd") + install_etcd + ;; + "redis") + install_redis + ;; + "minio") + install_minio + ;; + "all") + install_etcd + install_redis + install_minio + ;; + esac +} + +# uninstall module +function uninstall_module() { + case "$1" in + "etcd") + uninstall_etcd + ;; + "redis") + uninstall_redis + ;; + "minio") + uninstall_minio + ;; + "all") + uninstall_etcd + uninstall_redis + uninstall_minio + ;; + esac +} + +function main() { + while getopts "o:m:n:h" opt; do + case "$opt" in + o) + OPTION=$OPTARG + ;; + m) + MODULE=$OPTARG + ;; + n) + CONFIG_FILE=$OPTARG + ;; + h) + usage + exit 0 + ;; + *) + log_error "Unknown parameter" + echo -e "" + usage + exit 1 + ;; + esac + done + + [[ ! -f "${CONFIG_FILE}" ]] && log_error "${CONFIG_FILE} is not exist" && exit 1 + [[ ! -d "${CONFIG_DIR}" ]] && log_error "${CONFIG_DIR} is not exist" && exit 1 + [[ "${ARCH}X" != "aarch64X" && "${ARCH}X" != "x86_64X" ]] && log_error "Not supported ${ARCH}" exit 1 + INSTALL_DIR="$(xmllint --xpath "string(//config/deploy_path)" "${CONFIG_FILE}")" + + if [ "${OPTION}X" == "installX" ]; then + install_module "${MODULE}" + fi + + if [ "${OPTION}X" == "uninstallX" ]; then + uninstall_module "${MODULE}" + fi +} + +main $@ diff --git a/dsoftbus/dist_executor/modules/basic/utils.sh b/dsoftbus/dist_executor/modules/basic/utils.sh new file mode 100644 index 0000000000000000000000000000000000000000..8822b60ca3cf75416d3b3e14d75b8b52891fe86e --- /dev/null +++ b/dsoftbus/dist_executor/modules/basic/utils.sh @@ -0,0 +1,228 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +# ---------------------------------------------------------------------- +# funcname: log_info. +# description: Print build info log. +# parameters: NA +# return value: NA +# ---------------------------------------------------------------------- +log_info() { + echo "[BUILD_INFO][$(date +%b\ %d\ %H:%M:%S)]$*" +} + +# ---------------------------------------------------------------------- +# funcname: log_warning. +# description: Print build warning log. +# parameters: NA +# return value: NA +# ---------------------------------------------------------------------- +log_warning() { + echo "[BUILD_WARNING][$(date +%b\ %d\ %H:%M:%S)]$*" +} + +# ---------------------------------------------------------------------- +# funcname: log_error. +# description: Print build error log. +# parameters: NA +# return value: NA +# ---------------------------------------------------------------------- +log_error() { + echo "[BUILD_ERROR][$(date +%b\ %d\ %H:%M:%S)]$*" +} + +# ---------------------------------------------------------------------- +# funcname: die. +# description: Print build error log. 
+# parameters: NA +# return value: NA +# ---------------------------------------------------------------------- +die() { + log_error "$*" + stty echo + exit 1 +} + +# enter node IP manually +function get_ip_manually() { + local_ip="$1" + echo "The IP address entered manually is ${local_ip}" +} + +# get node IP automatically +function get_ip_auto() { + echo "Try to get IP address of this device" + ip_list_len=$(ifconfig | grep inet | grep -v "127.0.0.1" | grep -v "172.17.0.1" | grep -v inet6 | awk '{print $2}' | wc -l) + local_ip=0 + if [ "$ip_list_len" -ge 2 ]; then + echo "Cannot get IP address of this device. Please choose the appropriate one manually" + for i in $(seq 1 "$ip_list_len"); do + ip=$(ifconfig | grep inet | grep -v "127.0.0.1" | grep -v "172.17.0.1" | grep -v inet6 | awk '{print $2}' | head -n "$i" | tail -n 1) + read -rp "Local IP address is ${ip}. Press y to ensure or press Enter to skip:" conf + conf_flag='x'$conf + if [ "$conf_flag" == 'xy' ]; then + local_ip=$ip + break + fi + done + fi + if [ "$ip_list_len" -eq 1 ]; then + local_ip=$(ifconfig | grep inet | grep -v "127.0.0.1" | grep -v "172.17.0.1" | grep -v inet6 | awk '{print $2}') + fi +} + +# please install libxml2 first +function init_config_var() { + local config_file=$1 + # shellcheck disable=SC2155 + export DEPLOY_PATH="$(xmllint --xpath "string(//config/deploy_path)" "${config_file}")" + # shellcheck disable=SC2155 + export LOCAL_IP="$(xmllint --xpath "string(//config/local_ip)" "${config_file}")" + # shellcheck disable=SC2155 + export ETCD_AUTH_TYPE="$(xmllint --xpath "string(//config/etcd_auth_type)" "${config_file}")" + # shellcheck disable=SC2155 + export ETCD_PORT="$(xmllint --xpath "string(//config/etcd_port)" "${config_file}")" + # shellcheck disable=SC2155 + export ETCD_IP="$(xmllint --xpath "string(//config/etcd_ip)" "${config_file}")" + # shellcheck disable=SC2155 + export ETCD_PROXY_PORT="$(xmllint --xpath "string(//config/etcd_proxy_ports)" "${config_file}")" + # shellcheck disable=SC2155 + export ETCD_PROXY_ENABLE="$(xmllint --xpath "string(//config/etcd_proxy_enable)" "${config_file}")" + # shellcheck disable=SC2155 + export MINIO_IP="$(xmllint --xpath "string(//config/minio_ip)" "${config_file}")" + # shellcheck disable=SC2155 + export MINIO_PORT="$(xmllint --xpath "string(//config/minio_port)" "${config_file}")" + # shellcheck disable=SC2155 + export REDIS_PORT="$(xmllint --xpath "string(//config/redis_port)" "${config_file}")" + # shellcheck disable=SC2155 + export WORKERMGR_LISTEN_PORT="$(xmllint --xpath "string(//config/workermgr_listen_port)" "${config_file}")" + # shellcheck disable=SC2155 + export CODE_DIR="$(xmllint --xpath "string(//config/code_dir)" "${config_file}")" + # shellcheck disable=SC2155 + export DS_MASTER_IP="$(xmllint --xpath "string(//config/ds_master_ip)" "${config_file}")" + # shellcheck disable=SC2155 + export DS_MASTER_PORT="$(xmllint --xpath "string(//config/ds_master_port)" "${config_file}")" + # shellcheck disable=SC2155 + export LOG_LEVEL="$(xmllint --xpath "string(//config/log_level)" "${config_file}")" + # shellcheck disable=SC2155 + export RUNTIME_LOG_LEVEL="$(xmllint --xpath "string(//config/runtime_log_level)" "${config_file}")" + # shellcheck disable=SC2155 + export FUNCTIONCORE_LOG_PATH="$(xmllint --xpath "string(//config/functioncore_log_path)" "${config_file}")" + if [ "X$FUNCTIONCORE_LOG_PATH" == "Xdefault" ]; then + unset FUNCTIONCORE_LOG_PATH + fi + # shellcheck disable=SC2155 + export RUNTIME_LOG_PATH="$(xmllint --xpath 
"string(//config/runtime_log_path)" "${config_file}")" + if [ "X$RUNTIME_LOG_PATH" == "Xdefault" ]; then + unset RUNTIME_LOG_PATH + fi + # shellcheck disable=SC2155 + export GLOBAL_SCHEDULER_PORT="$(xmllint --xpath "string(//config/global_scheduler_port)" "${config_file}")" + export LOKI_IP="$(xmllint --xpath "string(//config/loki_ip)" "${config_file}")" + export LOKI_PORT="$(xmllint --xpath "string(//config/loki_port)" "${config_file}")" +} + +declare -A PORT_HASH_MAP +declare -a ORDERED_PORTS=() +function get_free_port() { + local BIND_IP="$1" + local PORT_MIN="$2" + local PORT_MAX="$3" + + if ! command -v nc &>/dev/null; then + PORT=$(shuf -i "${PORT_MIN}"-"${PORT_MAX}" -n 1) + while [[ ${PORT_HASH_MAP[$PORT]} ]]; do + PORT=$(shuf -i "${PORT_MIN}"-"${PORT_MAX}" -n 1) + done + PORT_HASH_MAP[$PORT]=$PORT + ORDERED_PORTS=($PORT "${ORDERED_PORTS[@]}") + echo "$PORT" + return 0 + fi + + CHECK="port not assigned" + PORT="port" + # shellcheck disable=SC2236 + while [[ ! -z $CHECK ]]; do + PORT=$(shuf -i "${PORT_MIN}"-"${PORT_MAX}" -n 1) + if [[ ! ${PORT_HASH_MAP[$PORT]} ]]; then + set +e; CHECK=$(LD_LIBRARY_PATH="" timeout 0.2 nc -l "$BIND_IP" "$PORT" 2>&1 >/dev/null) + fi + done + PORT_HASH_MAP[$PORT]=$PORT + ORDERED_PORTS=($PORT "${ORDERED_PORTS[@]}") + echo "$PORT" + return 0 +} + + +function check_port(){ + if ! command -v nc &>/dev/null; then + return 0 + fi + local BIND_IP="$1" + local PORT="$2" + CHECK=$(LD_LIBRARY_PATH="" timeout 0.2 nc -l "$BIND_IP" "$PORT" 2>&1 >/dev/null) + if [[ ! -z $CHECK ]]; then + return 1 + else + return 0 + fi + +} + +function getJsonValuesByAwk() { + awk -v json="$1" -v key="$2" -v defaultValue="$3" 'BEGIN{ + foundKeyCount = 0 + while (length(json) > 0) { + pos = match(json, "\""key"\"[ \\t]*?:[ \\t]*"); + if (pos == 0) {if (foundKeyCount == 0) {print defaultValue;} exit 0;} + + ++foundKeyCount; + start = 0; stop = 0; layer = 0; + for (i = pos + length(key) + 1; i <= length(json); ++i) { + lastChar = substr(json, i - 1, 1) + currChar = substr(json, i, 1) + + if (start <= 0) { + if (lastChar == ":") { + start = currChar == " " ? i + 1: i; + if (currChar == "{" || currChar == "[") { + layer = 1; + } + } + } else { + if (currChar == "{" || currChar == "[") { + ++layer; + } + if (currChar == "}" || currChar == "]") { + --layer; + } + if ((currChar == "," || currChar == "}" || currChar == "]") && layer <= 0) { + stop = currChar == "," ? i : i + 1 + layer; + break; + } + } + } + + if (start <= 0 || stop <= 0 || start > length(json) || stop > length(json) || start >= stop) { + if (foundKeyCount == 0) {print defaultValue;} exit 0; + } else { + print substr(json, start, stop - start); + } + + json = substr(json, stop + 1, length(json) - stop) + } + }' +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/bin/config.sh b/dsoftbus/dist_executor/modules/bin/config.sh new file mode 100644 index 0000000000000000000000000000000000000000..fa19cb6ce08736b4c5d1663ab2da1fbd05de69a4 --- /dev/null +++ b/dsoftbus/dist_executor/modules/bin/config.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
\ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/bin/config.sh b/dsoftbus/dist_executor/modules/bin/config.sh new file mode 100644 index 0000000000000000000000000000000000000000..fa19cb6ce08736b4c5d1663ab2da1fbd05de69a4 --- /dev/null +++ b/dsoftbus/dist_executor/modules/bin/config.sh @@ -0,0 +1,117 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# Write the node IP address into the config.xml profile of the distributed executor. + +set -e +readonly BASE_DIR=$(dirname "$(readlink -f "$0")") +readonly CONFIG_FILE="${BASE_DIR}/../config/config.xml" + +[[ ! -f "${CONFIG_FILE}" ]] && echo "The ${CONFIG_FILE} does not exist. This program is exiting" && exit 1 +. "${BASE_DIR}"/utils.sh + +# get node IP automatically +function get_ip_auto() +{ + log_info "Try to get IP address of this device" + local ip_list_len=$(ifconfig | grep inet | grep -v "127.0.0.1" | grep -v "172.17.0.1" | grep -v inet6 | awk '{print $2}' | wc -l) + if [ "$ip_list_len" -ge 2 ]; then + log_info "Cannot get IP address of this device. Please choose the appropriate one manually" + for i in $(seq 1 "$ip_list_len"); do + local ip=$(ifconfig | grep inet | grep -v "127.0.0.1" | grep -v "172.17.0.1" | grep -v inet6 | awk '{print $2}' | head -n "$i" | tail -n 1) + read -rp "Local IP address is ${ip}. Press y to confirm or press Enter to skip:" conf + if [ "x$conf" == 'xy' ]; then + local_ip=$ip + break + fi + done + fi + if [ "$ip_list_len" -eq 1 ]; then + local_ip=$(ifconfig | grep inet | grep -v "127.0.0.1" | grep -v "172.17.0.1" | grep -v inet6 | awk '{print $2}') + fi +} + +# enter node IP manually +function get_ip_manually() +{ + local_ip="$1" + log_info "The IP address entered manually is ${local_ip}" +} + +# ---------------------------------------------------------------------- +# funcname: usage +# description: help of this script +# parameters: void +# return value: void +# ---------------------------------------------------------------------- +usage() +{ + echo -e "Usage: bash config.sh [-p local_ip] [-h help]" + echo -e "Options:" + echo -e " -p local_ip, such as 127.0.0.1" + echo -e " -h usage help" + echo -e " " + echo -e "example:" + echo -e " bash config.sh -p \"127.0.0.1\" " + echo -e " bash config.sh -h" + echo -e "" +} + +while getopts 'p:h' opt; do + case "${opt}" in + p) + OPTION="$OPTARG" + get_ip_manually "$OPTION" + ;; + h) + usage + exit 0 + ;; + *) + ;; + esac +done + +# no IP address was passed in with -p, so try to detect one automatically +if [ -z "${OPTION}" ]; then + get_ip_auto + log_info "The selected IP address is ${local_ip}" +fi + +if [ "x${local_ip}" == 'x' ]; then + log_error "The IP address is not configured. This program is exiting" + exit 1 +fi + +if [ "$(echo "$local_ip" | grep ":")" != "" ]; then + log_info "The IP address contains a mark ':', which needs to be deleted" + local_ip=$(echo "$local_ip" | awk -F":" '{print $2}') + log_info "The IP address has been modified to ${local_ip}" +fi + +# synchronize the IP address into the profile +log_info "The IP address written into the profile is ${local_ip}" +if [ "x$local_ip" != "x" ]; then + sed -i "s#.*#${local_ip}#g" "$CONFIG_FILE" + sed -i "s#.*#${local_ip}#g" "$CONFIG_FILE" + sed -i "s#.*#${local_ip}#g" "$CONFIG_FILE" + sed -i "s#.*#${local_ip}#g" "$CONFIG_FILE" + sed -i "s#.*#${local_ip}#g" "$CONFIG_FILE" + log_info "please set password by yourself!!!" +else + log_error "No IP address was obtained!!!" 
+ exit 1 +fi + +exit 0 diff --git a/dsoftbus/dist_executor/modules/bin/distribute-executor b/dsoftbus/dist_executor/modules/bin/distribute-executor new file mode 100644 index 0000000000000000000000000000000000000000..6ff3603115c4e253796f64672c28a9be7a798b0a Binary files /dev/null and b/dsoftbus/dist_executor/modules/bin/distribute-executor differ diff --git a/dsoftbus/dist_executor/modules/bin/encrypt b/dsoftbus/dist_executor/modules/bin/encrypt new file mode 100644 index 0000000000000000000000000000000000000000..2b537e10de7ad5b423fae1252bb930900d4aeb53 Binary files /dev/null and b/dsoftbus/dist_executor/modules/bin/encrypt differ diff --git a/dsoftbus/dist_executor/modules/bin/health-check b/dsoftbus/dist_executor/modules/bin/health-check new file mode 100644 index 0000000000000000000000000000000000000000..9f80f271e45db1e680fe6a83685e4f024b304caa Binary files /dev/null and b/dsoftbus/dist_executor/modules/bin/health-check differ diff --git a/dsoftbus/dist_executor/modules/bin/health_check.py b/dsoftbus/dist_executor/modules/bin/health_check.py new file mode 100644 index 0000000000000000000000000000000000000000..d893bb3f41ac0c12a1490c2a8bdeac87351ed7e3 --- /dev/null +++ b/dsoftbus/dist_executor/modules/bin/health_check.py @@ -0,0 +1,21 @@ +#!/usr/bin/python +# health_check.py + +import urllib.request as urllib2 +import time + +RETRY_TIME = 5 +STATUS_OK = 200 + + +def check(url): + for x in range(RETRY_TIME): + try: + response = urllib2.urlopen(url) + if response.status == STATUS_OK: + return True + else: + time.sleep(1) + except Exception: + time.sleep(1) + return False diff --git a/dsoftbus/dist_executor/modules/bin/monitor.sh b/dsoftbus/dist_executor/modules/bin/monitor.sh new file mode 100644 index 0000000000000000000000000000000000000000..ee37d296d7e68a5d4be4f72f3c7e64676b73d4fa --- /dev/null +++ b/dsoftbus/dist_executor/modules/bin/monitor.sh @@ -0,0 +1,129 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +BASE_DIR=$( + cd "$(dirname "$0")" + pwd +) + +. 
diff --git a/dsoftbus/dist_executor/modules/bin/monitor.sh b/dsoftbus/dist_executor/modules/bin/monitor.sh new file mode 100644 index 0000000000000000000000000000000000000000..ee37d296d7e68a5d4be4f72f3c7e64676b73d4fa --- /dev/null +++ b/dsoftbus/dist_executor/modules/bin/monitor.sh @@ -0,0 +1,129 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + + +BASE_DIR=$( + cd "$(dirname "$0")" + pwd +) + +. "${BASE_DIR}"/utils.sh + +function check_admin_service() { + local pid=$(ps -ef | grep "admin/bin/distribute-executor" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "admin-service is not running" + return 1 + fi + return 0 +} + +function check_function_repo() { + local pid=$(ps -ef | grep "function-repo/bin/distribute-executor" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "function-repo is not running" + return 1 + fi + return 0 +} + +function check_worker_manager() { + local pid=$(ps -ef | grep "worker-manager/bin/distribute-executor" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "worker-manager is not running" + return 1 + fi + return 0 +} + +function check_frontend() { + local pid=$(ps -ef | grep "frontend/bin/distribute-executor" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "frontend is not running" + return 1 + fi + return 0 +} + +function check_dsmaster() { + local pid=$(ps -ef | grep "datasystem/service/master" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "datasystem master is not running" + return 1 + fi + return 0 +} + +function check_ds_agent() { + local pid=$(ps -ef | grep "datasystem/service/agent" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "datasystem agent is not running" + return 1 + fi + return 0 +} + +function check_bus_proxy() { + local pid=$(ps -ef | grep "functiontask/bin/distribute-executor" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "busproxy is not running" + return 1 + fi + return 0 +} + +function check_runtime_mgr() { + local pid=$(ps -ef | grep "runtime-manager/bin/distribute-executor" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "runtime-manager is not running" + return 1 + fi + return 0 +} + +function check_worker() { + local pid=$(ps -ef | grep "worker/bin/distribute-executor" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "worker is not running" + return 1 + fi + return 0 +} + +function check_ds_worker() { + local pid=$(ps -ef | grep "datasystem/service/worker" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + if [ -z "$pid" ]; then + log_warning "datasystem worker is not running" + return 1 + fi + return 0 +} + +function main() { + check_admin_service + check_function_repo + check_worker_manager + check_frontend + check_dsmaster + check_ds_agent + check_bus_proxy + check_runtime_mgr + check_worker + check_ds_worker + + + log_info "check mp success" +} + +main
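+ +# Example (assumption, not part of the original script): run this check periodically from cron, e.g. +# */5 * * * * bash /opt/dist_executor/modules/bin/monitor.sh >> ~/mindpandas/log/monitor.log 2>&1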
diff --git a/dsoftbus/dist_executor/modules/bin/mpctl b/dsoftbus/dist_executor/modules/bin/mpctl new file mode 100644 index 0000000000000000000000000000000000000000..5246d8f9d55368adaad9faa949ed38a9caaac206 Binary files /dev/null and b/dsoftbus/dist_executor/modules/bin/mpctl differ diff --git a/dsoftbus/dist_executor/modules/bin/start_mp b/dsoftbus/dist_executor/modules/bin/start_mp new file mode 100644 index 0000000000000000000000000000000000000000..ce8d89f6607d7ae6394a35026ad4697b7ef6471a --- /dev/null +++ b/dsoftbus/dist_executor/modules/bin/start_mp @@ -0,0 +1,695 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e + +BASE_DIR=$( + cd "$(dirname "$0")" + pwd +) + +ulimit -u 10240 + +export YR_BARE_MENTAL=1 +# limit the fd consumption +export GOMAXPROCS=16 + +[[ ! -f "${BASE_DIR}/utils.sh" ]] && echo "${BASE_DIR}/utils.sh does not exist" && exit 1 +. "${BASE_DIR}"/utils.sh + +unset http_proxy +unset https_proxy + +# installation directory (derived from the script location) +INSTALL_DIR=$(readlink -f "${BASE_DIR}/..") +CONFIG_FILE="${BASE_DIR}/../config/config.xml" +LOG_LEVEL="INFO" +ENABLE_DS_AGENT=OFF +ENABLE_FRONTEND=OFF +REALTIME_LOGS=false +ENABLE_SCHEDULER=false +LOG_DIR=~/mindpandas/log + +# identifier assignment +HOST_IP=$(hostname -I | awk '{print $1}') +NODE_ID="$(hostname)-${RANDOM}" +DATA_PLANE_PORT_MIN=20000 +DATA_PLANE_PORT_MAX=25000 + +# shellcheck disable=SC2002 +TCP_CLIENT_MIN=$(cat /proc/sys/net/ipv4/ip_local_port_range | awk '{print $1}') +RUNTIME_MIN_PORT=$((TCP_CLIENT_MIN - 2000)) +RUNTIME_MAX_PORT=$((TCP_CLIENT_MIN - 1000)) + +# address assignment +PROXY_TCP_PORT=21001 +PROXY_HTTP_PORT=21002 +PROXY_GRPC_PORT=21003 + +RUNTIME_MGR_PORT=21005 +# runtime manager will check whether the port is available +RUNTIME_INIT_PORT=21006 + +WORKER_HTTP_PORT=21007 +WORKER_TCP_PORT=21008 +WORKER_AGENT_PORT=21009 + +FRONTEND_HTTP_PORT=31220 +FRONTEND_HTTP2_PORT=31221 +FRONTEND_GRPC_PORT=21011 + +SPILL_PATH="" +SPILL_SIZE_LIMIT=0 +DS_MASTER_ADDRESS="" # load from config file +DS_WORKER_PORT=31501 +DS_AGENT_PORT=31502 +GLOBAL_SCHEDULER_PORT=22770 + +# resource allocation +# CPU unit: 1/1000 Core, Mem unit: 1 MilliBytes +CPUALL=0 +MEMALL=0 +CPU4COMP=0 +MEM4COMP=0 +MEM4DATA=0 +ETCD_PASSWORD="" +REDIS_PASSWORD="" +MAX_CLIENT_NUM=2000 + +# belonging process pid +# shellcheck disable=SC2034 +# shellcheck disable=SC2116 +OWNER_SHELL_ID="$(echo $$)" +PROXY_PID="" +WORKER_PID="" +RUNTIME_MGR_PID="" +DS_WORKER_PID="" +DS_AGENT_PID="" +SOCKET_DIR=~/.datasystem/socket +TEMPLATE_DIR="${BASE_DIR}"/../config/templates +# shellcheck disable=SC2012 +MODULE_LIST=$(ls -l "${BASE_DIR}"/../functioncore | awk '/^d/ {print $NF}') + +function restore_config() { + # restore functioncore config + for module_name in ${MODULE_LIST} + do + src_dir=${TEMPLATE_DIR}/functioncore/${module_name}/config + dst_dir="${BASE_DIR}"/../functioncore/${module_name} + [ -d "${src_dir}" ] && [ -d "${dst_dir}" ] && cp -rf "${src_dir}" "${dst_dir}" + done + printf "restore config" +} + +function init_config() { + local CONFIG_FILE="${BASE_DIR}/../config/config.xml" + [[ ! -f "${CONFIG_FILE}" ]] && echo "${CONFIG_FILE} does not exist" && exit 1 + + # shellcheck disable=SC2155 + local redis_ip="$(xmllint --xpath "string(//config/redis_ip)" "${CONFIG_FILE}")" + [[ "${redis_ip}X" == "IPX" ]] && log_error "please configure ${CONFIG_FILE} redis_ip first!" 
&& exit 1 + # shellcheck disable=SC2155 + local redis_port="$(xmllint --xpath "string(//config/redis_port)" "${CONFIG_FILE}")" + + # shellcheck disable=SC2155 + local etcd_ip="$(xmllint --xpath "string(//config/etcd_ip)" "${CONFIG_FILE}")" + [[ "${etcd_ip}X" == "IPX" ]] && log_error "please configure ${CONFIG_FILE} etcd_ip first!" && exit 1 + # shellcheck disable=SC2155 + local etcd_port="$(xmllint --xpath "string(//config/etcd_port)" "${CONFIG_FILE}")" + + # shellcheck disable=SC2155 + local master_ip="$(xmllint --xpath "string(//config/master_ip)" "${CONFIG_FILE}")" + [[ "${master_ip}X" == "IPX" ]] && log_error "please configure ${CONFIG_FILE} master_ip first!" && exit 1 + # shellcheck disable=SC2155 + local ds_master_port="$(xmllint --xpath "string(//config/ds_master_port)" "${CONFIG_FILE}")" + + # shellcheck disable=SC2155 + local local_ip="$(xmllint --xpath "string(//config/local_ip)" "${CONFIG_FILE}")" + [[ "${local_ip}X" == "IPX" ]] && log_error "please configure ${CONFIG_FILE} local_ip first!" && exit 1 + # shellcheck disable=SC2155 + # shellcheck disable=SC2155 + local workermgr_listen_port="$(xmllint --xpath "string(//config/workermgr_listen_port)" "${CONFIG_FILE}")" + + # shellcheck disable=SC2155 + local log_level="$(xmllint --xpath "string(//config/log_level)" "${CONFIG_FILE}")" + + # shellcheck disable=SC2155 + GLOBAL_SCHEDULER_PORT="$(xmllint --xpath "string(//config/global_scheduler_port)" "${CONFIG_FILE}")" + PROXY_TCP_PORT=$(xmllint --xpath "string(//config/proxy_tcp_port)" "${CONFIG_FILE}") + PROXY_HTTP_PORT=$(xmllint --xpath "string(//config/proxy_http_port)" "${CONFIG_FILE}") + PROXY_GRPC_PORT=$(xmllint --xpath "string(//config/proxy_grpc_port)" "${CONFIG_FILE}") + RUNTIME_MGR_PORT=$(xmllint --xpath "string(//config/runtime_mgr_port)" "${CONFIG_FILE}") + RUNTIME_INIT_PORT=$(xmllint --xpath "string(//config/runtime_init_port)" "${CONFIG_FILE}") + WORKER_HTTP_PORT=$(xmllint --xpath "string(//config/worker_http_port)" "${CONFIG_FILE}") + WORKER_TCP_PORT=$(xmllint --xpath "string(//config/worker_tcp_port)" "${CONFIG_FILE}") + WORKER_AGENT_PORT=$(xmllint --xpath "string(//config/worker_agent_port)" "${CONFIG_FILE}") + FRONTEND_HTTP_PORT=$(xmllint --xpath "string(//config/frontend_http_port)" "${CONFIG_FILE}") + FRONTEND_HTTP2_PORT=$(xmllint --xpath "string(//config/frontend_http2_port)" "${CONFIG_FILE}") + FRONTEND_GRPC_PORT=$(xmllint --xpath "string(//config/frontend_grpc_port)" "${CONFIG_FILE}") + DS_WORKER_PORT=$(xmllint --xpath "string(//config/ds_worker_port)" "${CONFIG_FILE}") + DS_AGENT_PORT=$(xmllint --xpath "string(//config/ds_agent_port)" "${CONFIG_FILE}") + + export REDIS_ADDR="${redis_ip}:${redis_port}" + export ETCD_ADDR="${etcd_ip}:${etcd_port}" + export WORKERMGR_IP=${master_ip} + export WORKERMGR_SVC_PORT=${workermgr_listen_port} + export DS_MASTER_ADDRESS="${master_ip}:${ds_master_port}" + export LOG_LEVEL=${log_level} + export LOCAL_IP=${local_ip} +} + +function exit_with_msg() { + local err_msg="$1" + local ret="$2" + echo -e "${err_msg}" >&2 + if [ -n "${ret}" ]; then + exit "${ret}" + else + exit 1 + fi +} + +function check_input() { + if [ ${CPU4COMP} -le 0 ]; then + printf "**ERROR** cpu for function instances deployment is less then 0\n" + usage + return 1 + fi + + if [ ${MEM4COMP} -le 0 ]; then + printf "**ERROR** memory for function instances deployment is less then 0\n" + usage + return 1 + fi + + if [ -z "${INSTALL_DIR}" ]; then + printf "**ERROR** installation directory is not specified\n" + usage + return 1 + fi + + if [ ! 
-d "${INSTALL_DIR}" ]; then + printf "**WARN** provided installation directory is not a directory and then create\n" + mkdir -p "${INSTALL_DIR}" + fi + + return 0 +} + +function print_info() { + printf "mp kernel Deployment Info:\n" + printf "%25s %10s\n" "LOG_LEVEL:" "${LOG_LEVEL}" + printf "\n" + printf "%25s %10s\n" "HOST_IP:" "${HOST_IP}" + printf "\n" + + printf "%25s %10s\n" "PROXY_TCP_PORT:" "${PROXY_TCP_PORT}" + printf "%25s %10s\n" "PROXY_HTTP_PORT:" "${PROXY_HTTP_PORT}" + printf "%25s %10s\n" "PROXY_GRPC_PORT:" "${PROXY_GRPC_PORT}" + + printf "%25s %10s\n" "WORKER_HTTP_PORT:" "${WORKER_HTTP_PORT}" + printf "%25s %10s\n" "WORKER_TCP_PORT:" "${WORKER_TCP_PORT}" + printf "%25s %10s\n" "WORKER_AGENT_PORT:" "${WORKER_AGENT_PORT}" + printf "%25s %10s\n" "REALTIME_LOGS:" "${REALTIME_LOGS}" + printf "%25s %10s\n" "ENABLE_SCHEDULER:" "${ENABLE_SCHEDULER}" + + printf "%25s %10s\n" "RUNTIME_MGR_PORT:" "${RUNTIME_MGR_PORT}" + printf "%25s %10s\n" "RUNTIME_INIT_PORT:" "${RUNTIME_INIT_PORT}" + printf "\n" + + printf "%25s %10s\n" "OVERALL CPU:" "${CPUALL}" + printf "%25s %10s\n" "OVERALL MEM:" "${MEMALL}" + printf "%25s %10s\n" "CPU FOR FUNCTIONS:" "${CPU4COMP}" + printf "%25s %10s\n" "MEM FOR FUNCTIONS:" "${MEM4COMP}" + printf "%25s %10s\n" "SHARED MEM FOR DS:" "${MEM4DATA}" + printf "\n" + + if [ "X${ENABLE_FRONTEND}" == "XON" ]; then + printf "%25s %10s\n" "FRONTEND_HTTP_PORT:" "${FRONTEND_HTTP_PORT}" + printf "%25s %10s\n" "FRONTEND_HTTP2_PORT:" "${FRONTEND_HTTP2_PORT}" + printf "%25s %10s\n" "FRONTEND_GRPC_PORT:" "${FRONTEND_GRPC_PORT}" + fi + printf "%25s %10s\n" "DS_WORKER_PORT:" "${DS_WORKER_PORT}" + if [ "X${ENABLE_DS_AGENT}" == "XON" ]; then + printf "%25s %10s\n" "DS_AGENT_PORT:" "${DS_AGENT_PORT}" + fi + printf "%25s %10s\n" "DS_MASTER_ADDRESS:" "${DS_MASTER_ADDRESS}" + + printf "\n\n" +} + +function start_admin_service() { + # load config + init_config_var ${CONFIG_FILE} + [[ "${LOCAL_IP}X" == "IPX" || "${LOCAL_IP}X" == "X" ]] && log_error "please configure ${CONFIG_FILE} local_ip first!" && exit 1 + [[ "${REPO_PORT}X" == "X" ]] && log_error "please config ${CONFIG_FILE} function_repo_port first!" && exit 1 + [[ "${ADMIN_PORT}X" == "X" ]] && log_error "please config ${CONFIG_FILE} admin_port first!" && exit 1 + [[ "${LOG_LEVEL}X" == "X" ]] && log_error "please configure ${LOG_LEVEL} DEPLOY_PATH first!" && exit 1 + [[ "${WORKERMGR_LISTEN_PORT}X" == "X" ]] && log_error "please config ${CONFIG_FILE} workermgr port first!" && exit 1 + DEPLOY_PATH="${INSTALL_DIR}"/functioncore/admin + + CONFIG_INSTALL_DIR="${DEPLOY_PATH}"/config + sed -i "s#{logConfigPath}#${LOG_DIR}#g" "${CONFIG_INSTALL_DIR}"/log.json + sed -i "s#{logLevel}#${LOG_LEVEL}#g" "${CONFIG_INSTALL_DIR}"/log.json + sed -i "s#{install_dir}#${INSTALL_DIR}#g" "${CONFIG_INSTALL_DIR}"/* + sed -i "s#{local_ip}#${LOCAL_IP}#g" "${CONFIG_INSTALL_DIR}"/* + sed -i "s#{repo_port}#${REPO_PORT}#g" "${CONFIG_INSTALL_DIR}"/config.json + sed -i "s#{admin_port}#${ADMIN_PORT}#g" "${CONFIG_INSTALL_DIR}"/config.json + sed -i "s#{workermgr_listen_port}#${WORKERMGR_LISTEN_PORT}#g" "${CONFIG_INSTALL_DIR}"/config.json + + cp -rf "${INSTALL_DIR}/bin/distribute-executor" "${DEPLOY_PATH}/bin" + ResourcePath="${DEPLOY_PATH}"/resource/ "${DEPLOY_PATH}"/bin/distribute-executor --module admin-service \ + --log_config_path="${CONFIG_INSTALL_DIR}"/log.json \ + --config_path="${CONFIG_INSTALL_DIR}"/config.json \ + >"${LOG_DIR}"/admin.log 2>&1 & + + ADMIN_SERVICE_PID=$(echo $!) 
+ return 0 +} + +function start_function_repo() { + # load config + init_config_var ${CONFIG_FILE} + [[ "${LOCAL_IP}X" == "IPX" || "${LOCAL_IP}X" == "X" ]] && log_error "please configure ${CONFIG_FILE} local_ip first!" && exit 1 + [[ "${ETCD_PORT}X" == "X" ]] && log_error "please configure ${CONFIG_FILE} etcd_port first!" && exit 1 + [[ "${ETCD_IP}X" == "IPX" || "${ETCD_IP}X" == "X" ]] && log_error "please configuer ${CONFIG_FILE} etcd_ip first!" && exit 1 + [[ "${REPO_PORT}X" == "X" ]] && log_error "please config ${CONFIG_FILE} function_repo_port first!" && exit 1 + [[ "${LOG_LEVEL}X" == "X" ]] && log_error "please configure ${LOG_LEVEL} DEPLOY_PATH first!" && exit 1 + DEPLOY_PATH="${INSTALL_DIR}"/functioncore/function-repo + + CONFIG_INSTALL_DIR="${DEPLOY_PATH}"/config + sed -i "s#{logConfigPath}#${LOG_DIR}#g" "${CONFIG_INSTALL_DIR}"/log.json + sed -i "s#{logLevel}#${LOG_LEVEL}#g" "${CONFIG_INSTALL_DIR}"/log.json + sed -i "s#{etcd_port}#${ETCD_PORT}#g" "${CONFIG_INSTALL_DIR}"/config.json + sed -i "s#{username}#root#g" "${CONFIG_INSTALL_DIR}"/config.json + sed -i "s#{etcd_password}#${ETCD_PASSWORD}#g" "${CONFIG_INSTALL_DIR}"/config.json + sed -i "s#{etcd_ip}#${ETCD_IP}#g" "${CONFIG_INSTALL_DIR}"/config.json + sed -i "s#{install_dir}/upload#/tmp/upload#g" "${CONFIG_INSTALL_DIR}"/config.json + sed -i "s#{local_ip}#${LOCAL_IP}#g" "${CONFIG_INSTALL_DIR}"/config.json + [ -n "$MINIO_ADDR" ] && sed -i "s#{minio_ip}:{minio_port}#${MINIO_ADDR}#g" "${CONFIG_INSTALL_DIR}"/config.json + [ -z "$MINIO_ADDR" ] && sed -i "s#{minio_ip}:{minio_port}#127.0.0.1:19001#g" "${CONFIG_INSTALL_DIR}"/config.json + sed -i "s#{repo_port}#${REPO_PORT}#g" "${CONFIG_INSTALL_DIR}"/config.json + [ -z "$MINIO_ADDR" ] && sed -i 's#"storageType": "s3"#"storageType": "local"#' "${CONFIG_INSTALL_DIR}"/config.json + + cp -rf "${INSTALL_DIR}/bin/distribute-executor" "${DEPLOY_PATH}/bin" + ResourcePath="${DEPLOY_PATH}"/resource/ POD_IP=${LOCAL_IP} "${DEPLOY_PATH}"/bin/distribute-executor --module function-repo \ + --log_config_path="${CONFIG_INSTALL_DIR}"/log.json \ + --config_path="${CONFIG_INSTALL_DIR}"/config.json \ + >"${LOG_DIR}"/function-repo.log 2>&1 & + + FUNCTION_REPO_PID=$(echo $!) + return 0 +} + +function start_worker_manager() { + + # load config + init_config_var ${CONFIG_FILE} + + [[ "${LOG_LEVEL}X" == "X" ]] && log_error "please configure ${LOG_LEVEL} DEPLOY_PATH first!" && exit 1 + [[ "${LOCAL_IP}X" == "IPX" || "${LOCAL_IP}X" == "X" ]] && log_error "please configure ${CONFIG_FILE} LOCAL_IP first!" && exit 1 + [[ "${ETCD_PORT}X" == "X" ]] && log_error "please configure ${CONFIG_FILE} etcd_port first!" && exit 1 + [[ "${ETCD_IP}X" == "IPX" || "${ETCD_IP}X" == "X" ]] && log_error "please configure ${CONFIG_FILE} etcd_ip first!" && exit 1 + [[ "${WORKERMGR_LISTEN_PORT}X" == "X" || "${WORKERMGR_LISTEN_PORT}X" == "X" ]] && WORKERMGR_LISTEN_PORT="12111" + [[ "${CODE_DIR}X" == "X" ]] && CODE_DIR="/dcache" + [[ "${GLOBAL_SCHEDULER_PORT}X" == "X" ]] && log_error "please configure ${CONFIG_FILE} etcd_ip first!" 
&& exit 1 + local DEPLOY_PATH="${INSTALL_DIR}"/functioncore/worker-manager + + CONFIG_INSTALL_DIR="${DEPLOY_PATH}"/config + sed -i "s#{logConfigPath}#${LOG_DIR}#g" "${CONFIG_INSTALL_DIR}"/log.json + sed -i "s#{logLevel}#${LOG_LEVEL}#g" "${CONFIG_INSTALL_DIR}"/log.json + sed -i "s#{deploy_dir}#${CODE_DIR}#g" ${CONFIG_INSTALL_DIR}/worker-manager.conf + sed -i "s#{etcd_port}#${ETCD_PORT}#g" "${CONFIG_INSTALL_DIR}"/worker-manager.conf + sed -i "s#{username}#root#g" "${CONFIG_INSTALL_DIR}"/worker-manager.conf + sed -i "s#{etcd_password}#${ETCD_PASSWORD}#g" "${CONFIG_INSTALL_DIR}"/worker-manager.conf + sed -i "s#{etcd_ip}#${ETCD_IP}#g" "${CONFIG_INSTALL_DIR}"/worker-manager.conf + sed -i "s#{local_ip}#${LOCAL_IP}#g" "${CONFIG_INSTALL_DIR}"/worker-manager.conf + sed -i "s#{install_dir}#${INSTALL_DIR}#g" "${CONFIG_INSTALL_DIR}"/worker-manager.conf + sed -i "s#{global_scheduler_port}#${GLOBAL_SCHEDULER_PORT}#g" "${CONFIG_INSTALL_DIR}"/worker-manager.conf + sed -i "/httpEnable/s/false/true/g" "${CONFIG_INSTALL_DIR}"/worker-manager.conf + + cp -rf "${INSTALL_DIR}/bin/distribute-executor" "${DEPLOY_PATH}/bin" + ResourcePath="${DEPLOY_PATH}"/resource/ POD_IP=${LOCAL_IP} FUNCTION_ACTIVE_PORT=${WORKERMGR_LISTEN_PORT} "${DEPLOY_PATH}"/bin/distribute-executor --module worker-manager \ + --log_config_path="${CONFIG_INSTALL_DIR}"/log.json \ + --config_path="${CONFIG_INSTALL_DIR}"/worker-manager.conf \ + >"${LOG_DIR}"/worker-manager.log 2>&1 & + + WORKER_MANAGER_PID=$(echo $!) + return 0 +} + +function start_frontend() { + local FRONTEND_INSTALL_DIR="${INSTALL_DIR}/functioncore/frontend" + local FRONTEND_CONFIG_INSTALL_DIR="${INSTALL_DIR}/functioncore/frontend/config" + + sed -i "s|{{logLevel}}|$LOG_LEVEL|g" "${FRONTEND_CONFIG_INSTALL_DIR}/log.json" + sed -i "s|{{logConfigPath}}|$LOG_DIR|g" "${FRONTEND_CONFIG_INSTALL_DIR}/log.json" + sed -i "s|{{ETCD_USER}}|root|g" "${FRONTEND_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{ETCD_PASSWORD}}|$ETCD_PASSWORD|g" "${FRONTEND_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{ETCD_ADDR}}|$ETCD_ADDR|g" "${FRONTEND_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{INSTALL_DIR}}|$FRONTEND_INSTALL_DIR|g" "${FRONTEND_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{FRONTEND_HTTP_PORT}}|$FRONTEND_HTTP_PORT|g" "${FRONTEND_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{FRONTEND_HTTP2_PORT}}|$FRONTEND_HTTP2_PORT|g" "${FRONTEND_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{FRONTEND_GRPC_PORT}}|$FRONTEND_GRPC_PORT|g" "${FRONTEND_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{WORKERMGR_IP}}|$WORKERMGR_IP|g" "${FRONTEND_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{WORKERMGR_SVC_PORT}}|$WORKERMGR_SVC_PORT|g" "${FRONTEND_CONFIG_INSTALL_DIR}/config.json" + bash ${INSTALL_DIR}/functioncore/frontend/bin/init-frontend.sh + + cp -rf "${INSTALL_DIR}/bin/distribute-executor" "${INSTALL_DIR}/functioncore/frontend/bin" + local bin="${INSTALL_DIR}/functioncore/frontend/bin/distribute-executor --module frontend" + + POD_IP=${HOST_IP} ResourcePath=${FRONTEND_INSTALL_DIR}/resource HOST_IP=${HOST_IP} \ + NODE_ID=${NODE_ID} ${bin} \ + --log_config_path="${FRONTEND_CONFIG_INSTALL_DIR}"/log.json \ + --config_path="${FRONTEND_CONFIG_INSTALL_DIR}"/config.json \ + >"${LOG_DIR}"/frontend.log 2>&1 & + + FRONTEND_PID=$(echo $!) 
+ return 0 +} + +function start_bus_proxy() { + local FNTASK_CONFIG_INSTALL_DIR="${INSTALL_DIR}/functioncore/functiontask/config" + sed -i "s|{{ETCD_USER}}|root|g" "${FNTASK_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{ETCD_PASSWORD}}|$ETCD_PASSWORD|g" "${FNTASK_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{ETCD_ADDR}}|$ETCD_ADDR|g" "${FNTASK_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{WORKERMGR_IP}}|$WORKERMGR_IP|g" "${FNTASK_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{WORKERMGR_SVC_PORT}}|$WORKERMGR_SVC_PORT|g" "${FNTASK_CONFIG_INSTALL_DIR}/config.json" + sed -i "s|{{REDIS_ADDR}}|$REDIS_ADDR|g" "${FNTASK_CONFIG_INSTALL_DIR}/conf.json" + sed -i "s|{{S3_ADDR}}|$MINIO_ADDR|g" "${FNTASK_CONFIG_INSTALL_DIR}/conf.json" + sed -i "s|{{REDIS_PASSWORD}}|$REDIS_PASSWORD|g" "${FNTASK_CONFIG_INSTALL_DIR}/conf.json" + sed -i "s|{{STATE_STORAGE_TYPE}}|redis|g" "${FNTASK_CONFIG_INSTALL_DIR}/conf.json" + sed -i "s|{{logConfigPath}}|$LOG_DIR|g" "${FNTASK_CONFIG_INSTALL_DIR}/log.json" + sed -i "s|{{logLevel}}|$LOG_LEVEL|g" "${FNTASK_CONFIG_INSTALL_DIR}/log.json" + mkdir -p ${INSTALL_DIR}/functioncore/dcache + + local DEPLOY_PATH="${INSTALL_DIR}"/functioncore/functiontask/ + cp -rf "${INSTALL_DIR}/bin/distribute-executor" "${DEPLOY_PATH}/bin" + local bin="${INSTALL_DIR}/functioncore/functiontask/bin/distribute-executor --module functiontask" + + HOST_IP=${HOST_IP} POD_IP=${HOST_IP} NODE_ID=${NODE_ID} HOSTNAME=${NODE_ID} ResourcePath=${DEPLOY_PATH}/resource \ + FUNCTION_TASK_CONFIG_PATH=${INSTALL_DIR}/functioncore/functiontask/config/conf.json \ + LD_LIBRARY_PATH=${INSTALL_DIR}/functioncore/functiontask/datasystem/lib:${LD_LIBRARY_PATH} \ + DEPLOY_DIR=${INSTALL_DIR}/functioncore \ + ${bin} --ds_port="${DS_WORKER_PORT}" --tcp_port="${PROXY_TCP_PORT}" --http_port="${PROXY_HTTP_PORT}" \ + --log_config_path="${FNTASK_CONFIG_INSTALL_DIR}"/log.json --config_path="${FNTASK_CONFIG_INSTALL_DIR}"/config.json \ + --storage_config_path="${FNTASK_CONFIG_INSTALL_DIR}"/conf.json \ + --grpc_port="${PROXY_GRPC_PORT}" \ + --global_scheduler_port="${GLOBAL_SCHEDULER_PORT}" --enable_scheduler=${ENABLE_SCHEDULER} \ + --health_enable=true --metrics_enable=false >"${LOG_DIR}"/busproxy.log 2>&1 & + + PROXY_PID=$(echo $!) 
+ return 0 +} + +function start_runtime_mgr() { + local RUNTIMEMGR_CONFIG_INSTALL_DIR="${INSTALL_DIR}/functioncore/runtime-manager/config" + sed -i "s|{{logConfigPath}}|$LOG_DIR|g" "${RUNTIMEMGR_CONFIG_INSTALL_DIR}/log.json" + sed -i "s|{{logLevel}}|$LOG_LEVEL|g" "${RUNTIMEMGR_CONFIG_INSTALL_DIR}/log.json" + sed -i "s|{{initPort}}|$RUNTIME_INIT_PORT|g" "${RUNTIMEMGR_CONFIG_INSTALL_DIR}/runtime-manager-config.json" + sed -i "s|{{serverPort}}|$RUNTIME_MGR_PORT|g" "${RUNTIMEMGR_CONFIG_INSTALL_DIR}/runtime-manager-config.json" + sed -i 's|"useNewRuntimePath": false|"useNewRuntimePath": true|g' "${RUNTIMEMGR_CONFIG_INSTALL_DIR}/runtime-manager-config.json" + sed -i "s|/home/snuser/log|$LOG_DIR/runtime/log|g" "${INSTALL_DIR}/runtime/python/config/python-runtime-log.json" + sed -i "s|DEBUG|$LOG_LEVEL|g" "${INSTALL_DIR}/runtime/python/config/python-runtime-log.json" + local RUNTIME_MGR_DIR=${INSTALL_DIR}/functioncore/runtime-manager + + cp -rf "${INSTALL_DIR}/bin/distribute-executor" "${RUNTIME_MGR_DIR}/bin" + local bin="${RUNTIME_MGR_DIR}/bin/distribute-executor --module runtime-manager" + + HOST_IP=${HOST_IP} POD_IP=${HOST_IP} RUNTIME_PATH=${INSTALL_DIR}/runtime \ + LD_LIBRARY_PATH="${INSTALL_DIR}/sdk/cpp/lib:${LD_LIBRARY_PATH}" ${bin} \ + --proc_metrics_cpu=${CPU4COMP} --proc_metrics_memory=${MEM4COMP} \ + --log_config_path="${RUNTIMEMGR_CONFIG_INSTALL_DIR}"/log.json \ + --runtime_mgr_config_path="${RUNTIMEMGR_CONFIG_INSTALL_DIR}"/runtime-manager-config.json \ + --runtime_dir="${INSTALL_DIR}/runtime" --setCmdCred=false --pythonDependencyPath="" \ + --runtime_config_dir="${RUNTIMEMGR_CONFIG_INSTALL_DIR}/" \ + --runtime_logs_dir="${LOG_DIR}/runtime" >"${LOG_DIR}"/runtime-manager.log 2>&1 & + RUNTIME_MGR_PID=$(echo $!) + return 0 +} + +function start_worker() { + local WORKER_CONFIG_INSTALL_DIR="${INSTALL_DIR}/functioncore/worker/config" + sed -i "s|{{ETCD_USER}}|root|g" "${WORKER_CONFIG_INSTALL_DIR}/config.yaml" + sed -i "s|{{ETCD_PASSWORD}}|$ETCD_PASSWORD|g" "${WORKER_CONFIG_INSTALL_DIR}/config.yaml" + sed -i "s|{{ETCD_ADDR}}|$ETCD_ADDR|g" "${WORKER_CONFIG_INSTALL_DIR}/config.yaml" + sed -i "s|{{REDIS_ADDR}}|$REDIS_ADDR|g" "${WORKER_CONFIG_INSTALL_DIR}/config.yaml" + sed -i "s|{{S3_ADDR}}|$MINIO_ADDR|g" "${WORKER_CONFIG_INSTALL_DIR}/config.yaml" + sed -i "s|{{REDIS_PASSWORD}}|$REDIS_PASSWORD|g" "${WORKER_CONFIG_INSTALL_DIR}/config.yaml" + sed -i "s|{{WORKER_HTTP_PORT}}|$WORKER_HTTP_PORT|g" "${WORKER_CONFIG_INSTALL_DIR}/config.yaml" + sed -i "s|{{WORKER_TCP_PORT}}|$WORKER_TCP_PORT|g" "${WORKER_CONFIG_INSTALL_DIR}/config.yaml" + sed -i "s|{{logConfigPath}}|$LOG_DIR|g" "${WORKER_CONFIG_INSTALL_DIR}/log.json" + sed -i "s|{{logLevel}}|$LOG_LEVEL|g" "${WORKER_CONFIG_INSTALL_DIR}/log.json" + sed -i "s|{{FUNCTION_STORAGE_TYPE}}|s3|g" "${WORKER_CONFIG_INSTALL_DIR}/config.yaml" + + local DEPLOY_PATH="${INSTALL_DIR}"/functioncore/worker + cp -rf "${INSTALL_DIR}/bin/distribute-executor" "${DEPLOY_PATH}/bin" + local bin="${DEPLOY_PATH}/bin/distribute-executor --module worker" + + HOST_IP=${HOST_IP} POD_IP=${HOST_IP} NODE_ID=${NODE_ID} ResourcePath=${DEPLOY_PATH}/resource MULTI_RUNTIME_MODE=true \ + RUNTIME_TYPE=cpp NODE_ID=${NODE_ID} STORAGE_LIMIT="35103210" \ + ${bin} --config="${WORKER_CONFIG_INSTALL_DIR}"/config.yaml \ + --log_config_path="${WORKER_CONFIG_INSTALL_DIR}"/log.json \ + --rm_port=${RUNTIME_MGR_PORT} --tcp_port=${WORKER_TCP_PORT} \ + --local_scheduler_port="${PROXY_GRPC_PORT}" \ + --cpu=${CPU4COMP} --mem=${MEM4COMP} --worker_agent_port=${WORKER_AGENT_PORT} \ + --ds_port="${DS_WORKER_PORT}" 
--print_metrics=${REALTIME_LOGS} --busGrpcPort=${PROXY_GRPC_PORT} >"${LOG_DIR}"/worker.log 2>&1 & + + WORKER_PID=$(echo $!) + return 0 +} + +function start_dsmaster() { + DATASYSTEM_INSTALL_DIR="${INSTALL_DIR}"/datasystem + + # install datasystem and start master + mkdir -p "${DATASYSTEM_INSTALL_DIR}"/rocksdb + [ ! -d "${SOCKET_DIR}" ] && mkdir -p "${SOCKET_DIR}" + LD_LIBRARY_PATH="${DATASYSTEM_INSTALL_DIR}/service/lib:${LD_LIBRARY_PATH}" ${DATASYSTEM_INSTALL_DIR}/service/master -master_address="${DS_MASTER_ADDRESS}" \ + -backend_store_dir="${DATASYSTEM_INSTALL_DIR}"/rocksdb \ + -log_dir="${LOG_DIR}"/ \ + -unix_domain_socket_dir="${SOCKET_DIR}" \ + -v=1 >"${LOG_DIR}"/datasystem-master.log 2>&1 & + DS_MASTER_PID=$(echo $!) + return 0 +} + +function start_ds_worker() { + local DS_DIR="${INSTALL_DIR}/datasystem" + local bin=${DS_DIR}/service/worker + [ ! -d "${LOG_DIR}" ] && mkdir -p "${LOG_DIR}" + [ ! -d "${SOCKET_DIR}" ] && mkdir -p "${SOCKET_DIR}" + + LD_LIBRARY_PATH=${DS_DIR}/service/lib:${LD_LIBRARY_PATH} ${bin} \ + -master_address="${DS_MASTER_ADDRESS}" \ + -log_dir="${LOG_DIR}" \ + -shared_memory_size_mb=${MEM4DATA} \ + -worker_address="${HOST_IP}:${DS_WORKER_PORT}" \ + -unix_domain_socket_dir="${SOCKET_DIR}" \ + -v=1 \ + -sc_regular_socket_num=2 \ + -sc_stream_socket_num=2 \ + -spill_directory="${SPILL_PATH}" \ + -spill_size_limit="${SPILL_SIZE_LIMIT}" \ + -max_client_num="${MAX_CLIENT_NUM}" >"${LOG_DIR}"/datasystem-worker.log 2>&1 & + DS_WORKER_PID=$(echo $!) + return 0 +} + +function start_ds_agent() { + local DS_DIR="${INSTALL_DIR}/datasystem" + local bin=${DS_DIR}/service/agent + [ ! -d "${SOCKET_DIR}" ] && mkdir -p "${SOCKET_DIR}" + + LD_LIBRARY_PATH=${DS_DIR}/service/lib:${LD_LIBRARY_PATH} ${bin} \ + -log_dir="${LOG_DIR}" \ + -worker_address="${HOST_IP}:${DS_WORKER_PORT}" \ + -agent_address="${HOST_IP}:${DS_AGENT_PORT}" \ + -unix_domain_socket_dir="${SOCKET_DIR}" >"${LOG_DIR}"/datasystem-agent.log 2>&1 & + DS_AGENT_PID=$(echo $!) 
+ return 0 +} + +function admin_health_check() { + echo -e "health check: admin" + local admin_url="http://${HOST_IP}:${ADMIN_PORT}/healthz" + admin_status=`python -c 'import health_check; print(health_check.check("'${admin_url}'"))'` + if [ "${admin_status}" = "False" ]; then + echo "admin health check failed" + echo "please check admin port: ${ADMIN_PORT}" + exit 1 + fi +} + +function repo_health_check() { + echo -e "health check: repo" + local repo_url="http://${HOST_IP}:${REPO_PORT}/healthz" + repo_status=`python -c 'import health_check; print(health_check.check("'${repo_url}'"))'` + if [ "${repo_status}" = "False" ]; then + echo "repo health check failed" + echo "please check repo port: ${REPO_PORT}" + exit 1 + fi +} + +function frontend_health_check() { + echo -e "health check: fronend" + local frontend_url="http://${HOST_IP}:${FRONTEND_HTTP_PORT}/healthz" + frontend_status=`python -c 'import health_check; print(health_check.check("'${frontend_url}'"))'` + if [ "${frontend_status}" = "False" ]; then + echo "frontend health check failed" + echo "please check frontend port: ${FRONTEND_HTTP_PORT}" + exit 1 + fi +} + +function worker_manager_health_check() { + echo -e "health check: worker-manager" + local worker_manager_url="http://${HOST_IP}:${WORKERMGR_LISTEN_PORT}/healthz" + worker_manager_status=`python -c 'import health_check; print(health_check.check("'${worker_manager_url}'"))'` + if [ "${worker_manager_status}" = "False" ]; then + echo "worker-manager health check failed" + echo "please check worker-manager port: ${WORKERMGR_LISTEN_PORT}" + exit 1 + fi +} + +function functiontask_health_check() { + echo -e "health check: functiontask" + local functiontask_url="http://${HOST_IP}:8888/healthz" + functiontask_status=`python -c 'import health_check; print(health_check.check("'${functiontask_url}'"))'` + if [ "${functiontask_status}" = "False" ]; then + echo "functiontask health check failed" + echo "please check functiontask port: 8888" + exit 1 + fi +} + +function runtime_manager_health_check() { + echo -e "health check: runtime-manager" + runtime_mgr_listen_num=`lsof -i:${RUNTIME_MGR_PORT} | wc -l` + if [ "$runtime_mgr_listen_num" -le "0" ];then + echo "runtime-manager health check failed" + echo "please check runtime-manager port: ${RUNTIME_MGR_PORT}" + exit 1 + fi +} + +function usage() { + echo -e "Usage: ./start_mp [-c cpu_cores_all] [-m memory_all_mb] [-s ds_shared_memory_mb] [-a ] [-h help]" + echo -e "Options:" + echo -e " -c overall cpu cores (1/1000 core) in current script context" + echo -e " -m overall memory (MB) in current script context" + echo -e " -s data system shared memory (MB) should be reserved in current script context" + echo -e " -a install master module" + echo -e " -p etcd password" + echo -e " -h usage help" + echo -e " " + echo -e "example:" + echo -e " ./start_mp -c 10000 -m 40960 -s 2048 -a" + echo -e "" +} + +function check_opt() { + while getopts "c:m:s:p:l:v:q:ah" opt; do + case "$opt" in + c) + CPUALL=$OPTARG + CPU4COMP=$((${CPUALL} + ${CPU4COMP})) + ;; + m) + MEMALL=$OPTARG + MEM4COMP=$((${MEMALL})) + ;; + s) + MEM4DATA=$OPTARG + MEM4COMP=$((${MEM4COMP} - ${MEM4DATA})) + ;; + p) + ETCD_PASSWORD=$OPTARG + REDIS_PASSWORD=$OPTARG + ;; + l) + HOST_IP=$OPTARG + ;; + a) + ENABLE_HEAD=ON + ;; + v) + SPILL_PATH=$OPTARG + ;; + q) + SPILL_SIZE_LIMIT=$OPTARG + ;; + h) + usage + exit 0 + ;; + *) + log_error "Unknown parameter" + echo -e "" + usage + exit 1 + ;; + esac + done +} + +function main() { + + check_opt "$@" + check_input + # load config + 
+ restore_config + init_config_var "${CONFIG_FILE}" + init_config + print_info + start=$(date +%s) + time=$(echo "$start" "$(date +%s)" | awk '{print $2-$1}') + [ ! -d "${LOG_DIR}" ] && mkdir -p "${LOG_DIR}" + if [ "X${ENABLE_HEAD}" == "XON" ]; then + start_admin_service + start_function_repo + start_worker_manager + start_frontend + start_dsmaster + start_ds_agent + fi + + start_bus_proxy + start_runtime_mgr + start_worker + start_ds_worker + + # collect service PIDs, then run the health checks + PROCESS_LIST="${PROXY_PID} ${WORKER_PID} ${RUNTIME_MGR_PID} ${DS_WORKER_PID}" + if [ "X${ENABLE_HEAD}" == "XON" ]; then + PROCESS_LIST="${PROCESS_LIST} ${DS_MASTER_PID} ${DS_AGENT_PID} ${FRONTEND_PID} ${WORKER_MANAGER_PID} ${ADMIN_SERVICE_PID} ${FUNCTION_REPO_PID}" + fi + + cd "${BASE_DIR}" + if [ "X${ENABLE_HEAD}" == "XON" ]; then + admin_health_check + repo_health_check + frontend_health_check + worker_manager_health_check + fi + functiontask_health_check + runtime_manager_health_check +} + +export PATH=${PATH}:${INSTALL_DIR}/bin +export ResourcePath="${INSTALL_DIR}"/resource + +main "$@" diff --git a/dsoftbus/dist_executor/modules/bin/stop_mp b/dsoftbus/dist_executor/modules/bin/stop_mp new file mode 100644 index 0000000000000000000000000000000000000000..838596d5a3bcb7b388af511876a5dd9c31138fc3 --- /dev/null +++ b/dsoftbus/dist_executor/modules/bin/stop_mp @@ -0,0 +1,186 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e + +BASE_DIR=$( + cd "$(dirname "$0")" + pwd +) + +. "${BASE_DIR}"/utils.sh + +function stop_admin_service() { + local pid=$(ps -ef | grep "admin/bin/distribute-executor --module admin-service" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! kill -9 "${pid}"; then + log_warning "failed to stop admin-service" + fi + return 0 +} + +function stop_function_repo() { + local pid=$(ps -ef | grep "function-repo/bin/distribute-executor --module function-repo" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! kill -9 "${pid}"; then + log_warning "failed to stop function-repo" + fi + return 0 +} + +function stop_worker_manager() { + local pid=$(ps -ef | grep "worker-manager/bin/distribute-executor --module worker-manager" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! kill -9 "${pid}"; then + log_warning "failed to stop worker-manager" + fi + return 0 +} + +function stop_frontend() { + local pid=$(ps -ef | grep "frontend/bin/distribute-executor --module frontend" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! kill -9 "${pid}"; then + log_warning "failed to stop frontend" + fi + return 0 +} + +function stop_dsmaster() { + local pid=$(ps -ef | grep "datasystem/service/master" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! 
kill -9 "${pid}"; then + log_warning "failed to stop datasystem master" + fi + return 0 +} + +function stop_ds_agent() { + local pid=$(ps -ef | grep "datasystem/service/agent" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! kill -9 "${pid}"; then + log_warning "failed to stop datasystem agent" + fi + return 0 +} + +function stop_bus_proxy() { + local pid=$(ps -ef | grep "functiontask/bin/distribute-executor --module functiontask" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! kill -9 ${pid}; then + log_warning "failed to stop busproxy" + fi + return 0 +} + +function stop_runtime_mgr() { + local pid=$(ps -ef | grep "runtime-manager/bin/distribute-executor --module runtime-manager" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + runtime_pid=$(ps -ef | awk -v ppid="$pid" '{if($3==$ppid) print $1}') + if [ -n "$runtime_pid" ]; then + if ! kill -9 ${runtime_pid}; then + log_warning "failed to stop runtime" + fi + fi + if ! kill -9 ${pid}; then + log_warning "failed to stop runtime-manager" + fi + return 0 +} + +function stop_worker() { + local pid=$(ps -ef | grep "worker/bin/distribute-executor --module worker" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! kill -9 "${pid}"; then + log_warning "failed to stop worker" + fi + return 0 +} + +function stop_ds_worker() { + local pid=$(ps -ef | grep "datasystem/service/worker" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! kill -9 "${pid}"; then + log_warning "failed to stop datasystem worker" + fi + return 0 +} + +function stop_runtime() { + local runtime_key="dist_executor/modules/runtime/python/fnruntime/server.py" + local pids=$(ps -ef | grep "$runtime_key" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pids" ] && return 0 + for pid in ${pids[@]} + do + if ! 
kill -9 "${pid}"; then + log_warning "failed to stop runtime" + fi + done + return 0 +} + +function remove_distribute_executor() { + local MODULE_LIST=$(ls -l "${BASE_DIR}"/../functioncore | awk '/^d/ {print $NF}') + for module in ${MODULE_LIST[@]} + do + local path="${BASE_DIR}/../functioncore/${module}/bin/distribute-executor" + [ -f "$path" ] && rm "$path" + done + return 0 +} + +function remove_extra_yrlib_handler() { + local helloworld="${BASE_DIR}/../resource/local-repo/yrlib/service/python3.9/yrlib-hello/helloworld/yrlib_handler.py" + local helloclass="${BASE_DIR}/../resource/local-repo/yrlib/service/python3.9/yrlib-class/helloclass/yrlib_handler.py" + local defaultfunc="${BASE_DIR}/../custom/default/func/yrlib_handler.py" + local demo_pycache="${BASE_DIR}/../custom/default/func/__pycache__/yrlib_handler.cpython-38.pyc" + + [ -f "$helloworld" ] && rm "$helloworld" + [ -f "$helloclass" ] && rm "$helloclass" + [ -f "$defaultfunc" ] && rm "$defaultfunc" + [ -f "$demo_pycache" ] && rm "$demo_pycache" + return 0 +} + +function remove_datasystem_rocksdb() { + local rocksdb="${BASE_DIR}/../datasystem/rocksdb" + [ -d "$rocksdb" ] && rm -r "$rocksdb" + return 0 +} + +function main() { + stop_admin_service + stop_function_repo + stop_worker_manager + stop_frontend + stop_dsmaster + stop_ds_agent + stop_bus_proxy + stop_runtime_mgr + stop_worker + stop_ds_worker + stop_runtime + remove_distribute_executor + remove_extra_yrlib_handler + remove_datasystem_rocksdb + + local pid=$(ps -ef | grep "start_mp" | grep -v grep | grep -v PPID | grep -v clear.sh | grep -v deploy.sh | awk '{ print $2 }') + [ -z "$pid" ] && return 0 + if ! kill -9 "${pid}"; then + log_warning "failed to stop mp" + fi + log_info "stop mp success" +} + +main diff --git a/dsoftbus/dist_executor/modules/bin/sudo_init.sh b/dsoftbus/dist_executor/modules/bin/sudo_init.sh new file mode 100644 index 0000000000000000000000000000000000000000..52aa5597ee89816e01428efa986317ce22d1d4c1 --- /dev/null +++ b/dsoftbus/dist_executor/modules/bin/sudo_init.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e + +chown 1002:1002 /home/sn/log +sed -i '/sudo_init/d' /etc/sudoers \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/bin/utils.sh b/dsoftbus/dist_executor/modules/bin/utils.sh new file mode 100644 index 0000000000000000000000000000000000000000..b42d7c382c636c3eac11cdde7f4de06319497aed --- /dev/null +++ b/dsoftbus/dist_executor/modules/bin/utils.sh @@ -0,0 +1,164 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
+# See the Mulan PSL v2 for more details. + +set -e + +# ---------------------------------------------------------------------- +# funcname: log_info. +# description: Print build info log. +# parameters: NA +# return value: NA +# ---------------------------------------------------------------------- +log_info() +{ + echo "[BUILD_INFO][$(date +%b\ %d\ %H:%M:%S)]$*" +} + +# ---------------------------------------------------------------------- +# funcname: log_warning. +# description: Print build warning log. +# parameters: NA +# return value: NA +# ---------------------------------------------------------------------- +log_warning() +{ + echo "[BUILD_WARNING][$(date +%b\ %d\ %H:%M:%S)]$*" +} + +# ---------------------------------------------------------------------- +# funcname: log_error. +# description: Print build error log. +# parameters: NA +# return value: NA +# ---------------------------------------------------------------------- +log_error() +{ + echo "[BUILD_ERROR][$(date +%b\ %d\ %H:%M:%S)]$*" +} + +# ---------------------------------------------------------------------- +# funcname: die. +# description: Print build error log. +# parameters: NA +# return value: NA +# ---------------------------------------------------------------------- +die() +{ + log_error "$*" + stty echo + exit 1 +} + +# enter node IP manually +function get_ip_manually() +{ + local_ip="$1" + echo "The IP address entered manually is ${local_ip}" +} + +# get node IP automatically +function get_ip_auto() +{ + echo "Try to get IP address of this device" + ip_list_len=$(ifconfig | grep inet | grep -v "127.0.0.1" | grep -v "172.17.0.1" | grep -v inet6 | awk '{print $2}' | wc -l) + local_ip=0 + if [ "$ip_list_len" -ge 2 ]; then + echo "Cannot get IP address of this device. Please choose the appropriate one manually" + for i in $(seq 1 "$ip_list_len"); do + ip=$(ifconfig | grep inet | grep -v "127.0.0.1" | grep -v "172.17.0.1" | grep -v inet6 | awk '{print $2}' | head -n "$i" | tail -n 1) + read -rp "Local IP address is ${ip}. 
Press y to confirm or press Enter to skip:" conf + conf_flag='x'$conf + if [ "$conf_flag" == 'xy' ]; then + local_ip=$ip + break + fi + done + fi + if [ "$ip_list_len" -eq 1 ]; then + local_ip=$(ifconfig | grep inet | grep -v "127.0.0.1" | grep -v "172.17.0.1" | grep -v inet6 | awk '{print $2}') + fi +} + +# read deployment settings from config.xml via xmllint (requires libxml2) +function init_config_var() { + local config_file=$1 + # shellcheck disable=SC2155 + export LOCAL_IP="$(xmllint --xpath "string(//config/local_ip)" "${config_file}")" + # shellcheck disable=SC2155 + export IAM_PORT="$(xmllint --xpath "string(//config/iam_port)" "${config_file}")" + # shellcheck disable=SC2155 + export REPO_PORT="$(xmllint --xpath "string(//config/function_repo_port)" "${config_file}")" + # shellcheck disable=SC2155 + export ADMIN_PORT="$(xmllint --xpath "string(//config/admin_port)" "${config_file}")" + # shellcheck disable=SC2155 + export ETCD_PORT="$(xmllint --xpath "string(//config/etcd_port)" "${config_file}")" + # shellcheck disable=SC2155 + export ETCD_IP="$(xmllint --xpath "string(//config/etcd_ip)" "${config_file}")" + # shellcheck disable=SC2155 + export MINIO_IP="$(xmllint --xpath "string(//config/minio_ip)" "${config_file}")" + # shellcheck disable=SC2155 + export MINIO_PORT="$(xmllint --xpath "string(//config/minio_port)" "${config_file}")" + if [ "X$MINIO_IP" == "X" ] || [ "X$MINIO_IP" == "XIP" ];then + export MINIO_ADDR="" + else + export MINIO_ADDR="${MINIO_IP}:${MINIO_PORT}" + fi + # shellcheck disable=SC2155 + export REDIS_PORT="$(xmllint --xpath "string(//config/redis_port)" "${config_file}")" + # shellcheck disable=SC2155 + export WORKERMGR_LISTEN_PORT="$(xmllint --xpath "string(//config/workermgr_listen_port)" "${config_file}")" + # shellcheck disable=SC2155 + export CODE_DIR="$(xmllint --xpath "string(//config/code_dir)" "${config_file}")" + # shellcheck disable=SC2155 + export MASTER_IP="$(xmllint --xpath "string(//config/master_ip)" "${config_file}")" + # shellcheck disable=SC2155 + export DS_MASTER_PORT="$(xmllint --xpath "string(//config/ds_master_port)" "${config_file}")" + # shellcheck disable=SC2155 + export LOG_LEVEL="$(xmllint --xpath "string(//config/log_level)" "${config_file}")" + # shellcheck disable=SC2155 + export GLOBAL_SCHEDULER_PORT="$(xmllint --xpath "string(//config/global_scheduler_port)" "${config_file}")" +} + +declare -A PORT_HASH_MAP +declare -a ORDERED_PORTS=() +# pick a random unused port in [PORT_MIN, PORT_MAX]; ports already handed out are tracked in PORT_HASH_MAP, and availability is probed with a short nc listen when nc is present +function get_free_port() { + local BIND_IP="$1" + local PORT_MIN="$2" + local PORT_MAX="$3" + + if ! command -v nc &> /dev/null; then + PORT=$(shuf -i "${PORT_MIN}"-"${PORT_MAX}" -n 1) + while [[ ${PORT_HASH_MAP[$PORT]} ]]; do + PORT=$(shuf -i "${PORT_MIN}"-"${PORT_MAX}" -n 1) + done + PORT_HASH_MAP[$PORT]=$PORT + ORDERED_PORTS=($PORT "${ORDERED_PORTS[@]}") + echo "$PORT" + return 0 + fi + + CHECK="port not assigned" + PORT="port" + # shellcheck disable=SC2236 + while [[ ! -z $CHECK ]]; do + PORT=$(shuf -i "${PORT_MIN}"-"${PORT_MAX}" -n 1) + if [[ ! 
${PORT_HASH_MAP[$PORT]} ]]; then + set +e; CHECK=$(timeout 0.2 nc -l "$BIND_IP" "$PORT" 2>&1 >/dev/null) + fi + done + PORT_HASH_MAP[$PORT]=$PORT + ORDERED_PORTS=($PORT "${ORDERED_PORTS[@]}") + echo "$PORT" + return 0 +} diff --git a/dsoftbus/dist_executor/modules/config/config.xml b/dsoftbus/dist_executor/modules/config/config.xml new file mode 100644 index 0000000000000000000000000000000000000000..0391bee83206fafcb55caf60cd09f5d4bc77b54a --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/config.xml @@ -0,0 +1,100 @@ + + + + IP + + + IP + + + redis + + IP + + 6379 + + password + + + accessKey + + secretKey + + + s3 + + IP + + 19002 + + 19001 + + + IP + + PWD + + password + + 32379 + + 32380 + + ~/mindpandas + + + path_to_cert + + path_to_key + + path_to_ca + + path_to_client_cert + + path_to_client_key + + false + + + 12123 + + 31501 + + 31502 + + + 31172 + + 35556 + + 35557 + + 58866 + + 22770 + + 21001 + + 21002 + + 21003 + + 21005 + + 21006 + + 21007 + + 21008 + + 21009 + + 31220 + + 31221 + + 21011 + + + INFO + diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/admin/config/config.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/admin/config/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6be0f09dcc76f7b3d5ac986f99729aa00eeae350 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/admin/config/config.json @@ -0,0 +1,61 @@ +{ + "service": { + "function_accessor": { + "addr": "", + "api_version": "/serverless/v1", + "timeout": 86400 + }, + "function_repo": { + "addr": "http://{local_ip}:{repo_port}", + "api_version": "/function-repository/v1", + "timeout": 86400 + }, + "metrics": { + "addr": "", + "api_version": "/api/v1", + "timeout": 30 + }, + "logger": { + "addr": "http://{loki_ip}:{loki_port}", + "api_version": "/loki/api/v1", + "max_size": 1000, + "timeout": 30, + "tls_enable": false + }, + "instance_manager": { + "addr": "http://{local_ip}:{workermgr_listen_port}", + "api_version": "/worker-manager/v1", + "timeout": 30 + } + }, + "server": { + "port": {admin_port}, + "read_timeout": 86400, + "write_timeout": 86400, + "body_limit": "250MB", + "https_enable": false, + "key_file": "{install_dir}/resource/tls/tls.key", + "cert_file": "{install_dir}/resource/tls/tls.crt" + }, + "rate_limit": { + "login_weight": 0, + "logout_weight": 0, + "create_weight": 0, + "delete_weight": 0, + "update_weight": 0, + "query_weight": 0, + "invoke_weight": 0, + "log_query_weight": 0, + "daily_token_of_tenant": 10000, + "concurrent_token_of_tenant": 10000, + "total_concurrent_token": 100000 + }, + "invokeLimit": { + "enable": false, + "tokenBucketSize": 1000, + "frequency": 1 + }, + "auth_enable": false, + "cli_version": "2.0.0.B183.20220210125433", + "upload_absolute_path": "{upload_absolute_path}" +} diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/admin/config/log.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/admin/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..abdece33f429a084fec8155ba09a44644e090eda --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/admin/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{logConfigPath}", + "level": "{logLevel}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git 
a/dsoftbus/dist_executor/modules/config/templates/functioncore/frontend/config/config.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/frontend/config/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e9ddae40664bcd3358da775a895226390e427b7f --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/frontend/config/config.json @@ -0,0 +1,67 @@ +{ + "etcd": { + "servers": [ + "{{ETCD_ADDR}}" + ], + "user": "{{ETCD_USER}}", + "password": "{{ETCD_PASSWORD}}", + "sslEnable": false + }, + "worker-manager": [ + { + "host": "{{WORKERMGR_IP}}", + "port": "{{WORKERMGR_SVC_PORT}}", + "zone": "defaultaz" + } + ], + "http": { + "resptimeout": 43200, + "workerInstanceReadTimeOut": 43200, + "maxRequestBodySize": 6 + }, + "metricsEnable": false, + "slaQuota": 100, + "backpressureDisable": false, + "trafficLimitDisable": false, + "httpsConfig": { + "httpsEnable": false, + "tlsProtocol": "TLSv1.2", + "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + }, + "retry": { + "instanceExceptionRetry": true + }, + "metricsConfig": { + "metricsport": "9099", + "metricspath": "/metrics" + }, + "runtime": { + "host": "0.0.0.0", + "port": "{{FRONTEND_HTTP_PORT}}", + "http2port": "{{FRONTEND_HTTP2_PORT}}", + "grpcport": "{{FRONTEND_GRPC_PORT}}", + "azkey": "KUBERNETES_IO_AVAILABLEZONE", + "protocol": "http", + "priority": { + "default": 5 + } + }, + "trafficLimitParams": { + "instanceLimitRate": 20000, + "instanceBucketSize": 40000, + "funcLimitRate": 2000, + "funcBucketSize": 4000 + }, + "invokeLimit": { + "enable": false, + "tokenBucketSize": 1000, + "frequency": 1 + }, + "functionNameSeparator": "-", + "functionCapability": 2, + "usrReqAuthTimeout": 5, + "servicesPath": "{{SERVICES_PATH}}", + "schedule": { + "policy": 2 + } +} diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/frontend/config/log.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/frontend/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..78dafcc0b05d2d0b1e1274076c9223511e97e264 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/frontend/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{{logConfigPath}}", + "level": "{{logLevel}}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/function-repo/config/config.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/function-repo/config/config.json new file mode 100644 index 0000000000000000000000000000000000000000..bec9102892ddb3ac3216e122b5f39a510c4b4a0a --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/function-repo/config/config.json @@ -0,0 +1,118 @@ +{ + "etcd": { + "servers": ["{etcd_ip}:{etcd_port}"], + "user": "{username}", + "password": "{etcd_password}", + "sslEnable": false + }, + "function": { + "default": { + "version": "$latest", + "envPrefix": "func-", + "pageIndex": 1, + "pageSize": 1000, + "cpuList": [ + 500, + 1500, + 4000 + ], + "memoryList": [ + 500, + 3000, + 16000 + ], + "timeout": 86400, + "defaultMinInstance": "0", + "defaultMaxInstance": "100", + "defaultConcurrentNum": "100", + "maxInstanceUpperLimit": "10000", + "concurrentNumUpperLimit": "100" + }, 
+ "package": { + "uploadTempPath": "{install_dir}/upload", + "zipFileSizeMaxMB": 500, + "unzipFileSizeMaxMB": 1000, + "fileCountsMax": 30000, + "dirDepthMax": 20, + "ioReadTimeout": 100000 + }, + "versionMax": 8, + "aliasMax": 8, + "layerMax": 5 + }, + "bucket": [ + { + "bucketId": "bucket-test-log1", + "businessId": "yrk", + "appId": "61022", + "appSecret": "E8305B6B5802595E198B82985F61014E:85D87E1102D94A246C58F8A0DCC829B99A88DCA6F5DDAA658CEE0C27D60385F984FC2F3035067A038280C7F62BE57AE9", + "url": "http://{minio_ip}:{minio_port}", + "writable": 1, + "description": "11", + "createTime": "2019-04-24 18:15:53", + "updateTime": "2019-04-24 18:15:53" + } + ], + "runtimeType": [ + "nodejs10.15.2", + "java1.8", + "python3.7", + "python3.9", + "python3.9", + "cpp11", + "cpp11-bin", + "custom-runtime", + "go1.13" + ], + "fileServer": { + "storageType": "s3", + "s3": { + "endpoint": "{minio_ip}:{minio_port}", + "accessKey": "d06b003f7d8d60c8ac3d7fa8:5cb9bbf58a0b306a7a8c57b0b933f1b08522c41c", + "secretKey": "da701b304eb4a523aa13becc:fb120bfb517a8916b0608165f154d166b9adc07e32f59ad864699af7", + "secure": false, + "caFile": "/home/sn/module/ca.crt", + "presignedUrlExpires": 3600, + "timeout": 40, + "trustedCA": false + } + }, + "trigger": { + "urlPrefix": "http://{local_ip}/service/", + "type": [ + { + "sourceProvider": "aa", + "effect": "bb", + "action": "cc" + } + ] + }, + "compatibleRuntimeType": [ + "nodejs10.15.2", + "java1.8", + "python3.7", + "python3.9", + "cpp11", + "cpp11-bin", + "custom-runtime", + "go1.13" + ], + "urn": { + "prefix": "sn", + "zone": "cn", + "resourceType": "function" + }, + "env": { + "timeZone": "Asia/Shanghai", + "nodejsLdPath": "/lib64:/usr/lib64", + "nodejsPath": "/home/snuser/runtime/node_modules/:/node_modules:/usr/local/lib/nodejs/node-v10.16.0-linux-x64/lib/node_modules", + "javaLdPath": "/lib64:/usr/lib64", + "javaPath": "/opt/huawei/jre1.8.0_252/bin", + "cppLdPath": "/usr/local/lib", + "pythonLdPath": "/lib64:/usr/lib64", + "pythonPath": "/usr/lib/python3.7/lib-dynload:/usr/local/lib/python3.7/dist-packages:/usr/local/lib/python3.7/dist-packages/pip-20.1.1-py3.7.egg:/usr/lib/python3/dist-packages" + }, + "server": { + "port": {repo_port} + } +} diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/function-repo/config/log.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/function-repo/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..abdece33f429a084fec8155ba09a44644e090eda --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/function-repo/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{logConfigPath}", + "level": "{logLevel}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/functiontask/config/conf.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/functiontask/config/conf.json new file mode 100644 index 0000000000000000000000000000000000000000..0447d222f9d166e6ea9c84bd8a730b4217842558 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/functiontask/config/conf.json @@ -0,0 +1,41 @@ +{ + "state_server_addr": "{{REDIS_ADDR}}", + "state_server_enable_tls": false, + "state_server_mode": "single", + "state_server_password": "{{REDIS_PASSWORD}}", + "http_invoke_port": 8080, + "p2p_invoke_port": 8017, + "call_timeout": 15, + 
"init_call_timeout": 300, + "max_wait_future_cnt": 1000, + "resolve_deadlock": true, + "persistent_state": true, + "persistent_future": true, + "init_state_client": true, + "max_nums_of_state_id": 10000, + "busstorageconfig": { + "storagetype": "{{STATE_STORAGE_TYPE}}", + "redisclientconfig": { + "server_addr": "{{REDIS_ADDR}}", + "server_enable_tls": false, + "server_mode": "single", + "server_password": "{{REDIS_PASSWORD}}" + }, + "s3clientconfig": { + "endpoint": "{{S3_ADDR}}", + "accessKey": "d06b003f7d8d60c8ac3d7fa8:5cb9bbf58a0b306a7a8c57b0b933f1b08522c41c", + "secretKey": "418213bcfc5c5d10e046fbfc:48f48e7562a65ad14aa3031c2af432b289a4a837239bb571801b", + "secure": false, + "caFile": "/home/sn/module/ca.crt", + "presignedUrlExpires": 3600, + "timeout": 40, + "trustedCA": false + } + }, + "future_ttl": 43200, + "external_future_ttl": 43200, + "redis_max_idle": 16, + "redis_max_active": 16, + "functionNameSeparator": "-", + "functionCapability": 2 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/functiontask/config/config.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/functiontask/config/config.json new file mode 100644 index 0000000000000000000000000000000000000000..4309098721412d2a2b2bbc00d5b92ce2c310a67c --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/functiontask/config/config.json @@ -0,0 +1,50 @@ +{ + "invokeLimit": { + "enable": false, + "tokenBucketSize": 1000, + "frequency": 1 + }, + "metrics": { + "metricsport": "9098", + "metricspath": "/metrics", + "prometheus_server_address": "http://prometheus-server.monitor:80" + }, + "httpsConfig": { + "httpsEnable": false, + "tlsProtocol": "TLSv1.2", + "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + }, + "retry": { + "instanceExceptionRetry": true + }, + "requestConfig": { + "requestWorkerTimeout": 36 + }, + "etcd": { + "servers": ["{{ETCD_ADDR}}"], + "user": "{{ETCD_USER}}", + "password": "{{ETCD_PASSWORD}}", + "sslEnable": false + }, + "workerManager": { + "serviceName": "{{WORKERMGR_IP}}", + "port": "{{WORKERMGR_SVC_PORT}}" + }, + "dataSystem": { + "enable": true, + "workerPort": "31501" + }, + "schedule": { + "policy": 2, + "forwardScheduleResourceNotEnough": true, + "sleepingMemoryThreshold": 0.8, + "selectInstanceToSleepingPolicy": "LRU" + }, + "functionCapability": 2, + "upgradingTimeout": 900, + "forwardLoadBalancePolicy": "roundRobin", + "recoverEnable": false, + "runtimeHeartbeatInterval": 86400000, + "runtimeHeartbeatEnable": true, + "servicesPath": "{{SERVICES_PATH}}" +} diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/functiontask/config/log.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/functiontask/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..280899119e57ac07d8887bfd1ed89cb79eac2ddc --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/functiontask/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{{logConfigPath}}", + "level": "{{logLevel}}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/custom-runtime-config.yaml 
b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/custom-runtime-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75a781208113a31c991b0ae7acfc38caa20fa7a0 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/custom-runtime-config.yaml @@ -0,0 +1,27 @@ +{ + "serverUrl": { + "nextPath": "/v1/runtime/invocation/request", + "responsePath": "/v1/runtime/invocation/response/{request_id}", + "errorPath": "/v1/runtime/invocation/error/{request_id}" + }, + "serverHeaders": { + "headerInvokeIDKey": "X-Cff-Request-Id", + "headerAccessKey": "X-CFF-Access-Key", + "headerSecretKey ": "X-CFF-Secret-Key", + "headerAuthToken": "X-CFF-Auth-Token", + "headerSecurityToken": "X-CFF-Security-Token" + }, + "serverEnvKey": { + "runtimeProject": "RUNTIME_PROJECT_ID", + "runtimeFuncName": "RUNTIME_FUNC_NAME", + "runtimeFuncVersion ": "RUNTIME_FUNC_VERSION", + "runtimePackage": "RUNTIME_PACKAGE", + "runtimeTime": "RUNTIME_TIMEOUT", + "runtimeHandler": "RUNTIME_HANDLER", + "runtimeUserData": "RUNTIME_USERDATA", + "runtimeCPU ": "RUNTIME_CPU", + "runtimeMemory": "RUNTIME_MEMORY", + "runtimeCodeRoot": "RUNTIME_CODE_ROOT", + "runtimeApiAddr": "RUNTIME_API_ADDR" + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/log.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..280899119e57ac07d8887bfd1ed89cb79eac2ddc --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{{logConfigPath}}", + "level": "{{logLevel}}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/python-runtime-log.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/python-runtime-log.json new file mode 100644 index 0000000000000000000000000000000000000000..2b4fb6671eb06cfbce55d27d597d3a1c8c77079d --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/python-runtime-log.json @@ -0,0 +1,26 @@ +{ + "version": 1, + "disable_existing_loggers": false, + "formatters": { + "extra": { + "format": "[%(asctime)s %(levelname)s %(filename)s:%(lineno)d] [%(podname)s %(thread)d] %(message)s" + } + }, + "handlers": { + "file": { + "class": "logging.handlers.RotatingFileHandler", + "filename": "{{LOG_PATH}}", + "formatter": "extra", + "maxBytes": 419430400, + "backupCount": 1 + } + }, + "loggers": { + "FileLogger": { + "handlers": [ + "file" + ], + "level": "{{logLevel}}" + } + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/runtime-manager-config.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/runtime-manager-config.json new file mode 100644 index 0000000000000000000000000000000000000000..0e472e3e154a8ae1327c736f6580c7d3666296a9 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/runtime-manager-config.json @@ -0,0 +1,38 @@ +{ + "LokiPath": "", + "ServerPort": {{serverPort}}, + "useNewRuntimePath": 
false, + "portNum": 10000, + "retryTimes": 2, + "recycleLog": 604800, + "metricsCollectorType": "proc", + "runtimeConfigs": { + "initial": { + "port": {{initPort}} + }, + "java8": { + "port": 31537, + "enable": false + }, + "python3.8": { + "port": 31539, + "enable": false + }, + "nodejs10.15.2": { + "port": 31540, + "enable": false + }, + "cpp11": { + "port": 31541, + "enable": false + }, + "custom-runtime": { + "port": 31538, + "enable": false + }, + "posix-custom-runtime": { + "port": 31542, + "enable": false + } + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/runtime.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/runtime.json new file mode 100644 index 0000000000000000000000000000000000000000..f85dbf047d74a6a0b56ebf9fcf0ef815022aa694 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/runtime-manager/config/runtime.json @@ -0,0 +1,5 @@ +{ + "maxRequestBodySize": "6", + "maxFdNum": 1024, + "dataSystemConnectionTimeout": "1" +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/worker-manager/config/log.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker-manager/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..2982a56a8ebcf379a776d3859c939de0fae1eebd --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker-manager/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{logConfigPath}", + "level": "{logLevel}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/worker-manager/config/worker-manager.conf b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker-manager/config/worker-manager.conf new file mode 100644 index 0000000000000000000000000000000000000000..2cd6c11ccfc786875a1d9096c410899006bf703d --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker-manager/config/worker-manager.conf @@ -0,0 +1,92 @@ +{ + "etcd": { + "servers": [ + "{etcd_ip}:{etcd_port}" + ], + "user": "{username}", + "password": "{etcd_password}", + "sslEnable": false, + "limitRate": 500, + "limitBurst": 500, + "limitTimeout": 5 + }, + "globalScheduler": { + "levels": [ + 1000, + 2 + ], + "deployDomainMode": "process", + "domainSchedulerPort": 22771, + "domainSchedulerPoolSize": 200, + "globalSchedulerPort": {global_scheduler_port}, + "kubernetes": { + "namespace": "default", + "limitMem": "1024Mi", + "limitCPU": "1", + "requestMem": "256Mi", + "requestCPU": "0.5", + "domainSchedulerImage": "domain-scheduler:latest" + } + }, + "etcdcluster": { + "electionttl": 5, + "forwardrequest": true + }, + "deployDir": "{deploy_dir}", + "httpsConfig": { + "httpsEnable": false, + "tlsProtocol": "TLSv1.2", + "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + }, + "kubeCfgTimeout": 30, + "resourceManagementType": "etcd", + "podGeneralizeTimeout": 33, + "idleWorkerTimeout": 300, + "timeout": 900, + "poolMgrConfig": { + "dynamic": { + "workerImage": "runtime-manager:testcpp0224", + "resourcePercent": 30 + }, + "enableLocalCache": false, + "enableMinio": true, + "runtimeConfigs": {}, + 
"poolList": { + "podgctime": 5, + "hostaliaseshostname": [ + { + "ip": "127.0.0.1", + "hostnames": [ + "bucket-test-log1.hwcloudtest.cn", + "sn-repository-test-cn.hwcloudtest.cn" + ] + } + ], + "pool": [] + }, + "gcConfig": { + "terminationGracePeriodSeconds": 900 + }, + "volumeConfig": { + "logMountPath": "/home/sn/log", + "runtimeLogMountPath": "/home/snuser/log", + "userOutputLogMountPath": "/home/snuser/logs", + "packageHostPath": "", + "userHostVolumeMap": {}, + "userStorageConfig": { + "userStorageEnable": false, + "userStoragePvcName": "pvc-yuanrong", + "userStoragePvcMountPath": "/tmp/sfsData" + } + }, + "customCAConfig": { + "customCAEnable": false, + "caFilePath": "/home/sn/certs/ca" + }, + "workerAuthEnable": false + }, + "functionNameSeparator": "-", + "functionCapability": 2, + "functionbootstrapEnable": false, + "httpEnable": false +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/config.yaml b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e4464a40acc68100de38602440f77d7ae1067b74 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/config.yaml @@ -0,0 +1,58 @@ +{ + "runtime": { + "timeout": 86400, + "loadtimeout": 30, + "rpcPort": 31530, + "connectType": "unix", + "runtimeLogDir": "/home/snuser/log" + }, + "deployment": { + "storageType": "{{FUNCTION_STORAGE_TYPE}}", + "codePackage": { + "unzipFileSizeMaxMB": 1000, + "zipFileSizeMaxMB": 500, + "fileCountsMax": 30000, + "dirDepthMax": 20 + }, + "s3Config": { + "accessKey": "d06b003f7d8d60c8ac3d7fa8:5cb9bbf58a0b306a7a8c57b0b933f1b08522c41c", + "secretKey": "da701b304eb4a523aa13becc:fb120bfb517a8916b0608165f154d166b9adc07e32f59ad864699af7", + "useSSL" : false, + "endpoint": "{{S3_ADDR}}", + "trustedCA" : false + }, + "httpsConfig": { + "httpsEnable": false, + "tlsProtocol": "TLSv1.2", + "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + } + }, + "storage":{ + "addr": "{{REDIS_ADDR}}", + "password": "{{REDIS_PASSWORD}}" + }, + "server": { + "httpsConfig": { + "httpsEnable": false, + "tlsProtocol": "TLSv1.2", + "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + }, + "readTimeout": 33, + "writeTimeout": 33, + "port": {{WORKER_HTTP_PORT}}, + "maxHeaderBytes": 1048576, + "ioReadTimeout": 100000, + "httpServerDisabled": true + }, + "invokeServer": { + "protocol": "tcp", + "port": 8889 + }, + "functionNameSeparator": "-", + "functionCapability": 2, + "backgroundProcessLimit": { + "enable": false, + "idleTime": 3, + "retryTimes": 3 + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/custom-runtime-config.yaml b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/custom-runtime-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75a781208113a31c991b0ae7acfc38caa20fa7a0 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/custom-runtime-config.yaml @@ -0,0 +1,27 @@ +{ + "serverUrl": { + "nextPath": "/v1/runtime/invocation/request", + "responsePath": "/v1/runtime/invocation/response/{request_id}", + "errorPath": 
"/v1/runtime/invocation/error/{request_id}" + }, + "serverHeaders": { + "headerInvokeIDKey": "X-Cff-Request-Id", + "headerAccessKey": "X-CFF-Access-Key", + "headerSecretKey ": "X-CFF-Secret-Key", + "headerAuthToken": "X-CFF-Auth-Token", + "headerSecurityToken": "X-CFF-Security-Token" + }, + "serverEnvKey": { + "runtimeProject": "RUNTIME_PROJECT_ID", + "runtimeFuncName": "RUNTIME_FUNC_NAME", + "runtimeFuncVersion ": "RUNTIME_FUNC_VERSION", + "runtimePackage": "RUNTIME_PACKAGE", + "runtimeTime": "RUNTIME_TIMEOUT", + "runtimeHandler": "RUNTIME_HANDLER", + "runtimeUserData": "RUNTIME_USERDATA", + "runtimeCPU ": "RUNTIME_CPU", + "runtimeMemory": "RUNTIME_MEMORY", + "runtimeCodeRoot": "RUNTIME_CODE_ROOT", + "runtimeApiAddr": "RUNTIME_API_ADDR" + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/log.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..280899119e57ac07d8887bfd1ed89cb79eac2ddc --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{{logConfigPath}}", + "level": "{{logLevel}}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/runtime-manager-config.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/runtime-manager-config.json new file mode 100644 index 0000000000000000000000000000000000000000..eff4c1e30a0ed42778f4161905dddfdc6a9f5884 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/runtime-manager-config.json @@ -0,0 +1,11 @@ +{ + "ServerPort": 31530, + "portNum": 10000, + "retryTimes": 2, + "recycleLog": 604800, + "runtimeConfigs": { + "initial": { + "port": 31537 + } + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/runtime.json b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/runtime.json new file mode 100644 index 0000000000000000000000000000000000000000..3dd93bdd35c7e72f7628ca44b4ab4e0ff44b59e6 --- /dev/null +++ b/dsoftbus/dist_executor/modules/config/templates/functioncore/worker/config/runtime.json @@ -0,0 +1,5 @@ +{ + "maxRequestBodySize": "6", + "maxFdNum": 65535, + "dataSystemConnectionTimeout": "1" +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/custom/__init__.py b/dsoftbus/dist_executor/modules/custom/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9c9dd874f24d862a78307ba03a8c60fed6fd6d1a --- /dev/null +++ b/dsoftbus/dist_executor/modules/custom/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
\ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/custom/default/func/handler.py b/dsoftbus/dist_executor/modules/custom/default/func/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..d2ac64920f0caff3c44f835227ca622a37d2100d --- /dev/null +++ b/dsoftbus/dist_executor/modules/custom/default/func/handler.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" handler example """ +import sys + +import yr + + +@yr.invoke +def hello_world(): + """ example """ + return "hello world" + + +def usage(): + """ print usage """ + print(""" + Usage: + python handler.py {MASTER_IP_ADDRESS} + """) + + +if __name__ == '__main__': + if len(sys.argv) != 2: + print(f"invalid args: {sys.argv}") + usage() + sys.exit(1) + + ip_addr = sys.argv[-1] + + conf = yr.Config( + function_id="sn:cn:yrk:12345678901234561234567890123456:function:0-default-func:$latest", + server_address=f"{ip_addr}:31220", + ds_address=f"{ip_addr}:31502", + log_level="INFO" + ) + + yr.init(conf) + + res = [hello_world.invoke() for i in range(1)] + print(yr.get(res)) + yr.finalize() diff --git a/dsoftbus/dist_executor/modules/custom/default/service.yaml b/dsoftbus/dist_executor/modules/custom/default/service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..1423709725dad46a1dcdbb5b1d58641f77de9652 --- /dev/null +++ b/dsoftbus/dist_executor/modules/custom/default/service.yaml @@ -0,0 +1,12 @@ +service: default +kind: yrlib +description: this is a func service +functions: + func: + storageType: local + codePath: /home + handler: "" + runtime: python3.9 + memory: 500 + timeout: 900 + cpu: 1000 diff --git a/dsoftbus/dist_executor/modules/custom/path_init.sh b/dsoftbus/dist_executor/modules/custom/path_init.sh new file mode 100644 index 0000000000000000000000000000000000000000..76cdb3fefe27069a378c68a17fbcf2bd906b61e2 --- /dev/null +++ b/dsoftbus/dist_executor/modules/custom/path_init.sh @@ -0,0 +1,32 @@ +#!/bin/bash +# usage example: bash path_init.sh /xxxxtest/lib/python3.9/site-packages/dist_executor/xxx +# This shell script changes the /usr/bin/python3.9 link target to the python3.9 under which dist_executor is installed. +# This shell script should be placed in the directory */mindpandas/de/custom + +# match lib/python3.9 to /xxxxxx/bin/python3.9 +path_left=${1%/lib/python3.9*} +PY_VERSION=`python -V 2>&1 | awk '{print$2}'|awk -F '.'
'{print $1"."$2 }'` + +if [ "$PY_VERSION" != "3.9" ];then + echo "please use python3.9" && exit 1 +fi + +SCRIPT_FILE=$(readlink -f $0) +CUSTOM_DIR=$(dirname $SCRIPT_FILE) +MODULES_DIR=$(dirname $CUSTOM_DIR ) +yamlList=$(find $MODULES_DIR -path "*/service.yaml") +for yamlPath in ${yamlList[@]} +do + yamlDir=${yamlPath%%"/service.yaml"} + subDirs=$(ls -d $yamlDir/*/ 2> /dev/null ) + for subDir in ${subDirs[@]} + do + if ls ${subDir}*.py >/dev/null 2>&1; then + realCodePath=$subDir + cp ${MODULES_DIR}/resource/local-repo/sdk/yrlib/python3.9/yrlib_handler.py $realCodePath + codePathDoubleSlash=${realCodePath//"/"/"\\/"} + sed -i 's/codePath:.*/codePath: '"$codePathDoubleSlash"'/g' $yamlPath + break + fi + done +done \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/datasystem/service/agent b/dsoftbus/dist_executor/modules/datasystem/service/agent new file mode 100644 index 0000000000000000000000000000000000000000..87040148c5551512e47946599d673b7e0e23760f Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/agent differ diff --git a/dsoftbus/dist_executor/modules/datasystem/service/deploy/agent-launcher.sh b/dsoftbus/dist_executor/modules/datasystem/service/deploy/agent-launcher.sh new file mode 100644 index 0000000000000000000000000000000000000000..9be6dc9a2ca4e6577b4d6668ff31f095c0672935 --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/deploy/agent-launcher.sh @@ -0,0 +1,164 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +set -o pipefail + +readonly USAGE="Usage: agent-launcher.sh command..." +readonly BASE_DIR=$(dirname "$(readlink -f "$0")") +readonly CONF_DIR=${BASE_DIR}/conf + +. "${BASE_DIR}/deploy-common.sh" + +IP_LIST=() + +function set_default_address() +{ + case "agent" in + master) + export MASTER_ADDRESS="127.0.0.1:9089" + ;; + worker) + export WORKER_ADDRESS="127.0.0.1:9088" + export WORKER_MASTER_ADDRESS="127.0.0.1:9089" + ;; + agent) + export AGENT_ADDRESS="127.0.0.1:9087" + export AGENT_WORKER_ADDRESS="127.0.0.1:9088" + ;; + gcs) + export GCS_ADDRESS="127.0.0.1:9090" + export GCS_MASTER_ADDRESS="127.0.0.1:9089" + export GCS_NODE_ID="NODE-ID" + ;; + *) + echo -e "Error: agent not found, No such component for deployment!" + ;; + esac +} + +function valid_ip() +{ + local ip=$1 + local ret=1 + + if [[ "$ip" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}:[0-9]{1,5}$ ]]; then + ret=0 + fi + return $ret +} + +function parse_ip_list() +{ + . "${CONF_DIR}/agent-env.sh" + if is_array "AGENT_ADDRESS"; then + for((i=0;i<${#AGENT_ADDRESS[@]};i++)) + do + if ! valid_ip "${AGENT_ADDRESS[i]}"; then + echo -e "[$(date '+%F %T')] Invalid agent ip at $i: \"${AGENT_ADDRESS[i]}\"" >>"${deploy_log}" + return 1 + fi + IP_LIST+=("$(echo "${AGENT_ADDRESS[i]}" | cut -d : -f 1)") + done + else + if [[ -z "${AGENT_ADDRESS}" ]]; then + set_default_address + fi + if ! 
valid_ip "${AGENT_ADDRESS}"; then + echo -e "[$(date '+%F %T')] Invalid agent ip: \"${AGENT_ADDRESS}\"" >>"${deploy_log}" + return 1 + fi + IP_LIST+=("$(echo "${AGENT_ADDRESS}" | cut -d : -f 1)") + fi +} + +function main() +{ + local launch_prefix="bash" + # Add -x if debugging is enable. + if [[ "$-" == *x* ]]; then + launch_prefix="${launch_prefix} -x" + fi + + if [[ -z "$@" ]]; then + echo -e "${USAGE}" >&2 + exit 1 + fi + + local cmd_list=("$@") + + deploy_log="${BASE_DIR}/deploy.log" + + if ! parse_ip_list ; then + echo -e "---- ${RED}[ FAILED ]${NC} The ${CYAN}agent${NC} service parse ip list failed." + echo -e "---- Task(s) on agent nodes failed! look at ${deploy_log} for details." + exit 1 + fi + + # We split a large number of deployment requests into multiple batches to avoid very perverted deployment requests. + local batch_num=5 + local ip_num="${#IP_LIST[@]}" + local rounds="$[(ip_num+batch_num-1)/batch_num]" + + local pids=() + local err_flag=0 + + for((k=0;k<"${rounds}";k++)) + do + local start="$[k*batch_num]" + local end="$[(k+1)*batch_num]" + if [[ "${end}" -gt "${ip_num}" ]]; then + end="${ip_num}" + fi + for((i="${start}";i<"${end}";i++)) + do + local ip="${IP_LIST[$i]}" + local cmd=("${cmd_list[@]}") + cmd+=("-n" "$i") + echo -e "[$(date '+%F %T')] Execute the command: ${cmd[@]} on agent node [${ip}]." >>"${deploy_log}" + if is_local_ip "${ip}"; then + echo -e "[$(date '+%F %T')][${ip}] Running in local machine..." >>"${deploy_log}" + nohup $"${cmd[@]// /\\ }" 2>&1 | \ + while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${deploy_log}" & + else + echo -e "[$(date '+%F %T')][${ip}] Connecting as ${USER}..." >>"${deploy_log}" + nohup ssh -o NumberOfPasswordPrompts=0 -o ConnectTimeout=5 -o StrictHostKeyChecking=no -tt "${ip}" \ + "${launch_prefix}" $"${cmd[@]// /\\ }" 2>&1 | while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${deploy_log}" & + fi + pids[${#pids[@]}]=$! + done + + echo -e "[$(date '+%F %T')] Waiting for all tasks finish..." >>"${deploy_log}" + + for ((i=0; i<${#pids[@]}; i++)); + do + wait ${pids[$i]} + ret_code=$? + if [[ ${ret_code} -ne 0 ]]; then + err_flag=1 + echo -e "---- ${RED}[ FAILED ]${NC} The ${CYAN}agent${NC} service @ ${PURPLE}${IP_LIST[$i]}${NC} failed." + echo -e "[$(date '+%F %T')][${IP_LIST[$i]}] Task on agent node '${IP_LIST[$i]}' failed, exit code: ${ret_code}" >>"${deploy_log}" + else + echo -e "---- ${GREEN}[ OK ]${NC} The ${CYAN}agent${NC} service @ ${PURPLE}${IP_LIST[$i]}${NC} success." + echo -e "[$(date '+%F %T')][${IP_LIST[$i]}] Task on agent node '${IP_LIST[$i]}' success!" >>"${deploy_log}" + fi + done + done + + if [[ ${err_flag} -eq 0 ]]; then + echo -e "[$(date '+%F %T')] All tasks run on agent nodes success!" >>"${deploy_log}" + else + echo -e "---- Task(s) on agent nodes failed! look at ${deploy_log} for details." + fi +} + +main "$@" diff --git a/dsoftbus/dist_executor/modules/datasystem/service/deploy/conf/agent-env.sh b/dsoftbus/dist_executor/modules/datasystem/service/deploy/conf/agent-env.sh new file mode 100644 index 0000000000000000000000000000000000000000..36fc4706c68ba700bf9f624a0a47d015edfe8668 --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/deploy/conf/agent-env.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +# Edit this file to configure startup parameters, it is sourced to launch components. + +# Address of worker and the value cannot be empty. (Default: "127.0.0.1:9088") +# AGENT_WORKER_ADDRESS="127.0.0.1:9088" + +# Address of agent and the value cannot be empty. (Default: "127.0.0.1:9087") +# AGENT_ADDRESS="127.0.0.1:9087" + +# Maximum time interval before a client is considered lost. (Default: "60") +# AGENT_CLIENT_TIMEOUT_S="60" + +# Config rpc server thread number, must be greater than 0. (Default: "16") +# AGENT_RPC_THREAD_NUM="16" + +# The directory where log files are stored. (Default: "~/.datasystem/logs") +# AGENT_LOG_DIR="~/.datasystem/logs" + +# Prefix of log filename, default is program invocation short name. Use standard characters only. (Default: "") +# AGENT_LOG_FILENAME="" + +# Async log buffer, unit is MB. (Default: "2") +# AGENT_LOG_ASYNC_BUFFER_MB="2" + +# Maximum log file size (in MB), must be greater than 0. (Default: "400") +# AGENT_MAX_LOG_SIZE="400" + +# All log files max size (in MB), must be greater than 400MB. Log will roll if the size is exceeded. (Default: "25") +# AGENT_MAX_LOG_FILE_NUM="25" + +# If log_retention_day is greater than 0, any log file from your project whose last modified time is greater than log_retention_day days will be unlinked. If log_retention_day is equal 0, will not unlink log file by time. (Default: "0") +# AGENT_LOG_RETENTION_DAY="0" + +# Flush log files with async mode. (Default: "false") +# AGENT_LOG_ASYNC="false" + +# Compress old log files in .gz format. This parameter takes effect only when the size of the generated log is greater than max log size. (Default: "true") +# AGENT_LOG_COMPRESS="true" + +# vlog level. (Default: "0") +# AGENT_V="0" + +# Whether to enable the authentication function between components(agent, worker, master). (Default: "false") +# AGENT_ENABLE_COMPONENT_AUTH="false" + +# Optimize the performance of the customer. Default server 5. The higher the throughput, the higher the value, but should be in range [1, 32]. (Default: "5") +# AGENT_ZMQ_SERVER_IO_CONTEXT="5" + +# The directory to find ZMQ curve key files. This path must be specified when zmq authentication is enabled. (Default: "") +# AGENT_CURVE_KEY_DIR="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# AGENT_SECRET_KEY1="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# AGENT_SECRET_KEY2="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# AGENT_SECRET_KEY3="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# AGENT_SECRET_SALT="" diff --git a/dsoftbus/dist_executor/modules/datasystem/service/deploy/conf/master-env.sh b/dsoftbus/dist_executor/modules/datasystem/service/deploy/conf/master-env.sh new file mode 100644 index 0000000000000000000000000000000000000000..4a88c73f06d93340157282504723df776b9bb9fd --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/deploy/conf/master-env.sh @@ -0,0 +1,102 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd. 
+# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +# Edit this file to configure startup parameters, it is sourced to launch components. + +# Address of master and the value cannot be empty. (Default: "127.0.0.1:9089") +# MASTER_ADDRESS="127.0.0.1:9089" + +# Config MASTER back store directory and must specify in rocksdb scenario. The rocksdb database is used to persistently store the metadata stored in the master so that the metadata before the restart can be re-obtained when the master restarts. (Default: "~/.datasystem/rocksdb") +# MASTER_BACKEND_STORE_DIR="~/.datasystem/rocksdb" + +# Controls whether rocksdb sets sync to true when writing data. (Default: "false") +# MASTER_ROCKSDB_SYNC_WRITE="false" + +# The redis IP address host:port. If the redis service is required, the address must be set and match the worker's redis address. ex: 127.0.0.1:6379. (Default: "") +# MASTER_REDIS_ADDRESS="" + +# Maximum time interval before a node is considered lost. (Default: "60") +# MASTER_NODE_TIMEOUT_S="60" + +# Maximum time interval for the master to determine node death. (Default: "7200") +# MASTER_NODE_DEAD_TIMEOUT_S="7200" + +# Interval in milliseconds at which master check heartbeat status. (Default: "30000") +# MASTER_CHECK_HEARTBEAT_INTERVAL_MS="30000" + +# File path of trusted CA/ca bundle file, optional. Use standard characters only. (Default: "") +# MASTER_REDIS_CA="" + +# File path of client certificate file, optional. Use standard characters only. (Default: "") +# MASTER_REDIS_CERT="" + +# File path of client private key, optional. Use standard characters only. (Default: "") +# MASTER_REDIS_KEY="" + +# The redis username for auth. (Default: "") +# MASTER_REDIS_USERNAME="" + +# The redis password for auth. (Default: "") +# MASTER_REDIS_PASSWD="" + +# Config rpc server thread number, must be greater than 0. (Default: "16") +# MASTER_RPC_THREAD_NUM="16" + +# The directory where log files are stored. (Default: "~/.datasystem/logs") +# MASTER_LOG_DIR="~/.datasystem/logs" + +# Prefix of log filename, default is program invocation short name. Use standard characters only. (Default: "") +# MASTER_LOG_FILENAME="" + +# Async log buffer, unit is MB. (Default: "2") +# MASTER_LOG_ASYNC_BUFFER_MB="2" + +# Maximum log file size (in MB), must be greater than 0. (Default: "400") +# MASTER_MAX_LOG_SIZE="400" + +# All log files max size (in MB), must be greater than 400MB. Log will roll if the size is exceeded. (Default: "25") +# MASTER_MAX_LOG_FILE_NUM="25" + +# If log_retention_day is greater than 0, any log file from your project whose last modified time is greater than log_retention_day days will be unlinked. If log_retention_day is equal 0, will not unlink log file by time. (Default: "0") +# MASTER_LOG_RETENTION_DAY="0" + +# Flush log files with async mode. (Default: "false") +# MASTER_LOG_ASYNC="false" + +# Compress old log files in .gz format. This parameter takes effect only when the size of the generated log is greater than max log size. (Default: "true") +# MASTER_LOG_COMPRESS="true" + +# vlog level. 
(Default: "0") +# MASTER_V="0" + +# Whether to enable the authentication function between components(agent, worker, master). (Default: "false") +# MASTER_ENABLE_COMPONENT_AUTH="false" + +# Optimize the performance of the customer. Default server 5. The higher the throughput, the higher the value, but should be in range [1, 32]. (Default: "5") +# MASTER_ZMQ_SERVER_IO_CONTEXT="5" + +# The directory to find ZMQ curve key files. This path must be specified when zmq authentication is enabled. (Default: "") +# MASTER_CURVE_KEY_DIR="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# MASTER_SECRET_KEY1="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# MASTER_SECRET_KEY2="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# MASTER_SECRET_KEY3="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# MASTER_SECRET_SALT="" diff --git a/dsoftbus/dist_executor/modules/datasystem/service/deploy/conf/worker-env.sh b/dsoftbus/dist_executor/modules/datasystem/service/deploy/conf/worker-env.sh new file mode 100644 index 0000000000000000000000000000000000000000..a5f63d462b520c6faf6a712442e840b9cc5a70bc --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/deploy/conf/worker-env.sh @@ -0,0 +1,138 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +# +# Edit this file to configure startup parameters, it is sourced to launch components. + +# Address of worker and the value cannot be empty. Multiple nodes can be configured, such as ("127.0.0.1:18482" "127.0.0.2:18482"). (Default: "127.0.0.1:9088") +# WORKER_ADDRESS="127.0.0.1:9088" + +# Address of master and the value cannot be empty. (Default: "127.0.0.1:9089") +# WORKER_MASTER_ADDRESS="127.0.0.1:9089" + +# Indicates whether the worker allows using the shared memory. (Default: "true") +# WORKER_ENABLE_SHARED_MEMORY="true" + +# Upper limit of the shared memory, the unit is mb, must be greater than 0. (Default: "1024") +# WORKER_SHARED_MEMORY_SIZE_MB="1024" + +# Time interval between worker and master heartbeats. (Default: "1000") +# WORKER_HEARTBEAT_INTERVAL_MS="1000" + +# Indicates whether to enable the tenant authentication, default is false. (Default: "false") +# WORKER_AUTHORIZATION_ENABLE="false" + +# Enable unix domain socket. (Default: "true") +# WORKER_ENABLE_UDS="true" + +# The directory to store unix domain socket file. The UDS generates temporary files in this path. (Default: "~/.datasystem/unix_domain_socket_dir") +# WORKER_UNIX_DOMAIN_SOCKET_DIR="~/.datasystem/unix_domain_socket_dir" + +# The number of regular backend socket for stream cache. (Default: "32") +# WORKER_SC_REGULAR_SOCKET_NUM="32" + +# The number of stream backend socket for stream cache. (Default: "32") +# WORKER_SC_STREAM_SOCKET_NUM="32" + +# The number of worker service for object cache. 
(Default: "32") +# WORKER_OC_THREAD_NUM="32" + +# Thread number of eviction for object cache. (Default: "1") +# WORKER_EVICTION_THREAD_NUM="1" + +# Client reconnect wait seconds, default is 5. (Default: "5") +# WORKER_CLIENT_RECONNECT_WAIT_S="5" + +# Size of the page used for caching worker files. The valid range is 4096-1073741824. (Default: "1048576") +# WORKER_PAGE_SIZE="1048576" + +# The num of threads used to send elements to remote worker. (Default: "8") +# WORKER_REMOTE_SEND_THREAD_NUM="8" + +# The redis IP address host:port. If the redis service is required, the address must be set. ex: 127.0.0.1:6379. (Default: "") +# WORKER_REDIS_ADDRESS="" + +# The path and file name prefix of the spilling, empty means spill disabled. (Default: "") +# WORKER_SPILL_DIRECTORY="" + +# The size limit of spilled data, 0 means unlimited. (Default: "0") +# WORKER_SPILL_SIZE_LIMIT="0" + +# The redis username for auth. (Default: "") +# WORKER_REDIS_USERNAME="" + +# The redis password for auth. (Default: "") +# WORKER_REDIS_PASSWD="" + +# File path of trusted CA/ca bundle file, optional. Use standard characters only. (Default: "") +# WORKER_REDIS_CA="" + +# File path of client certificate file, optional. Use standard characters only. (Default: "") +# WORKER_REDIS_CERT="" + +# File path of client private key, optional. Use standard characters only. (Default: "") +# WORKER_REDIS_KEY="" + +# Maximum number of clients that can be connected to a worker. Value range: [1, 10000]. (Default: "200") +# WORKER_MAX_CLIENT_NUM="200" + +# Config rpc server thread number, must be greater than 0. (Default: "16") +# WORKER_RPC_THREAD_NUM="16" + +# The directory where log files are stored. (Default: "~/.datasystem/logs") +# WORKER_LOG_DIR="~/.datasystem/logs" + +# Prefix of log filename, default is program invocation short name. Use standard characters only. (Default: "") +# WORKER_LOG_FILENAME="" + +# Async log buffer, unit is MB. (Default: "2") +# WORKER_LOG_ASYNC_BUFFER_MB="2" + +# Maximum log file size (in MB), must be greater than 0. (Default: "400") +# WORKER_MAX_LOG_SIZE="400" + +# All log files max size (in MB), must be greater than 400MB. Log will roll if the size is exceeded. (Default: "25") +# WORKER_MAX_LOG_FILE_NUM="25" + +# If log_retention_day is greater than 0, any log file from your project whose last modified time is greater than log_retention_day days will be unlinked. If log_retention_day is equal 0, will not unlink log file by time. (Default: "0") +# WORKER_LOG_RETENTION_DAY="0" + +# Flush log files with async mode. (Default: "false") +# WORKER_LOG_ASYNC="false" + +# Compress old log files in .gz format. This parameter takes effect only when the size of the generated log is greater than max log size. (Default: "true") +# WORKER_LOG_COMPRESS="true" + +# vlog level. (Default: "0") +# WORKER_V="0" + +# Whether to enable the authentication function between components(agent, worker, master). (Default: "false") +# WORKER_ENABLE_COMPONENT_AUTH="false" + +# Optimize the performance of the customer. Default server 5. The higher the throughput, the higher the value, but should be in range [1, 32]. (Default: "5") +# WORKER_ZMQ_SERVER_IO_CONTEXT="5" + +# The directory to find ZMQ curve key files. This path must be specified when zmq authentication is enabled. (Default: "") +# WORKER_CURVE_KEY_DIR="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# WORKER_SECRET_KEY1="" + +# One of key component file path. 
Key components make up rootkey to encrypt or decrypt. (Default: "") +# WORKER_SECRET_KEY2="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# WORKER_SECRET_KEY3="" + +# One of key component file path. Key components make up rootkey to encrypt or decrypt. (Default: "") +# WORKER_SECRET_SALT="" diff --git a/dsoftbus/dist_executor/modules/datasystem/service/deploy/deploy-common.sh b/dsoftbus/dist_executor/modules/datasystem/service/deploy/deploy-common.sh new file mode 100644 index 0000000000000000000000000000000000000000..f1279262e6f7aa8c46a80a3b43ab0bfdad44b4ea --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/deploy/deploy-common.sh @@ -0,0 +1,87 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +# ANSI escape codes for colorful echo. +# Usage: echo -e "${RED}LoL${NC} !" will print 'LoL' in red. +readonly BLACK='\033[0;30m' +readonly DARK_GRAY='\033[1;30m' +readonly RED='\033[0;31m' +readonly LIGHT_RED='\033[1;31m' +readonly GREEN='\033[0;32m' +readonly LIGHT_GREEN='\033[1;32m' +readonly BROWN='\033[0;33m' +readonly YELLOW='\033[1;33m' +readonly BLUE='\033[0;34m' +readonly LIGHT_BLUE='\033[1;34m' +readonly PURPLE='\033[0;35m' +readonly LIGHT_PURPLE='\033[1;35m' +readonly CYAN='\033[0;36m' +readonly LIGHT_CYAN='\033[1;36m' +readonly LIGHT_GRAY='\033[0;37m' +readonly WHITE='\033[1;37m' +readonly NC='\033[0m' # No Color + +readonly SIG_TERMINATE="15" + +LOCAL_IP_LIST=($(ifconfig -a|grep inet|grep -v inet6|awk '{print $2}'|tr -d "addr:"|tr -d "地址"|tr "\n" "\n")) + +function print_ok_msg() +{ + local action="$1" + local component="$2" + local ip="$3" + echo -e "---- ${GREEN}[ OK ]${NC} The ${CYAN}${component}${NC} service @ ${PURPLE}${ip}${NC} ${action} success." +} + +function print_fail_msg() +{ + local action="$1" + local component="$2" + local ip="$3" + local log_dir="$4" + echo -e "---- ${RED}[ FAILED ]${NC} The ${CYAN}${component}${NC} service @ ${PURPLE}${ip}${NC} ${action} failed." +} + +function is_array() +{ + local var_name="$1" + local val + val=$(eval "echo -e \$${var_name}") + if [[ -z "${val}" ]]; then + return 1 + fi + if [[ "$(declare -p "${var_name}")" =~ "typeset -a" ]] || [[ "$(declare -p "${var_name}")" =~ "declare -a" ]]; then + return 0 + else + return 1 + fi +} + +function is_local_ip() +{ + local ip="$1" + for local_ip in "${LOCAL_IP_LIST[@]}"; + do + if [[ "${ip}" = "${local_ip}" ]]; then + return 0 + fi + done + return 1 +} + +function is_num() +{ + local re='^[0-9]+$' + [[ "$1" =~ $re ]] && return 0 || return 1 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/datasystem/service/deploy/deploy-datasystem.sh b/dsoftbus/dist_executor/modules/datasystem/service/deploy/deploy-datasystem.sh new file mode 100644 index 0000000000000000000000000000000000000000..039aa6aa7aa2b535454c443dcd5ae64dd8d24f52 --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/deploy/deploy-datasystem.sh @@ -0,0 +1,598 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd. 
+# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e + +readonly USAGE="Usage: deploy-datasystem.sh [-a ACTION] [-c COMPONENT] [-d DATASYSTEM_HOME] [-p CONFIG_PATH] [-h] + +Where 'ACTION' is one of: + pass \tPass packages to the hosts. + pass_conf \tUpdate configures to hosts. + start \tStart the datasystem component(s), default ACTION. + stop \tStop the datasystem component(s). + +Where 'COMPONENT' is one of: + all \tStart/Stop all masters, workers and gcs nodes, default COMPONENT. + master \tStart/Stop masters on master nodes. + worker \tStart/Stop workers on worker nodes. + gcs \tStart/Stop gcs on gcs nodes. + agent \tStart/Stop agent on agent nodes. + clusterfs \tStart/Stop fuse mount dir on clusterfs nodes. + +-a Action for datasystem components. +-c Specify datasystem component. +-d Datasystem home, default is $(realpath "../"). +-n Component number. +-p Specify the config directory. +-w Number of seconds to wait for result, default is 2s. +-h Show usage for help." + +readonly BASE_DIR=$(dirname "$(readlink -f "$0")") +readonly DEFAULT_CONF_DIR="${BASE_DIR}/../conf" +readonly DEPLOY_LOG="${BASE_DIR}/deploy.log" + +function init_default_opts() { + export ACTION="start" + export COMPONENT="all" + export DATASYSTEM_HOME="$(realpath "${BASE_DIR}/..")" + export BIN_DIR="$(realpath "${BASE_DIR}/..")" + export CONF_DIR=$(realpath "${BASE_DIR}/conf") + export ALL_IP_LIST=() + export COMPONENT_NUM="" + export HANDLE_CLUSTERFS="No" + export LAUNCHER=("bash") +} + +function get_env() +{ + if [[ -f "${CONF_DIR}/master-env.sh" ]]; then . "${CONF_DIR}/master-env.sh"; fi + if [[ -f "${CONF_DIR}/worker-env.sh" ]]; then . "${CONF_DIR}/worker-env.sh"; fi + if [[ -f "${CONF_DIR}/gcs-env.sh" ]]; then . "${CONF_DIR}/gcs-env.sh"; fi + if [[ -f "${CONF_DIR}/agent-env.sh" ]]; then . "${CONF_DIR}/agent-env.sh"; fi + + # get all ip addresses and remove deduplicate ip address + local all_addresses + all_addresses=("${MASTER_ADDRESS[@]}" "${WORKER_ADDRESS[@]}" "${GCS_ADDRESS[@]}" "${AGENT_ADDRESS[@]}") + for((i=0;i<"${#all_addresses[@]}";i++)) + do + ALL_IP_LIST+=("$(echo "${all_addresses[i]}" | cut -d : -f 1)") + done + ALL_IP_LIST=($(awk -v RS=' ' '!a[$1]++' <<< "${ALL_IP_LIST[@]}")) +} + +function pass_package_remote() +{ + local ip="$1" + local pkg="$2" + local dst_dir="$3" + + ssh -o NumberOfPasswordPrompts=0 -o ConnectTimeout=5 -o StrictHostKeyChecking=no -tt "${ip}" \ + "mkdir -p ${dst_dir}" 2>&1 | while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${DEPLOY_LOG}" + if [ $? -ne 0 ]; then + return 1 + fi + + scp -o NumberOfPasswordPrompts=0 -o ConnectTimeout=5 -o StrictHostKeyChecking=no -r "${pkg}"/* "${ip}":"${dst_dir}" 2>&1 | + while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${DEPLOY_LOG}" + if [[ $? -ne 0 ]]; then + return 1 + fi + return 0 +} + +function pass_package_local() +{ + local ip="$1" + local pkg="$2" + local dst_dir="$3" + + if [[ "${pkg}" == "${dst_dir}" ]]; then + return 0 + fi + + if [[ ! 
-d "${dst_dir}" ]]; then + mkdir -p "${dst_dir}" 2>&1 | while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${DEPLOY_LOG}" + if [[ $? -ne 0 ]]; then + return 1 + fi + fi + + cp -r "${pkg}"/* "${dst_dir}" 2>&1 | while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${DEPLOY_LOG}" + if [[ $? -ne 0 ]]; then + return 1 + fi + return 0 +} + +function pass_packages() +{ + # if conf dir is not default dir, pass it as well. + local deafult_path + if [[ -d "${BASE_DIR}/../conf" ]]; then + deafult_path=$(realpath "${BASE_DIR}") + fi + local dst_path + if [[ -d "${CONF_DIR}" ]]; then + dst_path=$(realpath "${CONF_DIR}") + fi + if [[ "x${deafult_path}" != "x${dst_path}" ]]; then + pass_conf + fi + echo -e "-- ${CYAN}Starting${NC} to pass the datasystem packages" + local real_path + for ip in "${ALL_IP_LIST[@]}" + do + real_path=$(realpath "${BASE_DIR}/..") + if is_local_ip "${ip}"; then + pass_package_local "${ip}" "${real_path}" "${DATASYSTEM_HOME}" + else + pass_package_remote "${ip}" "${real_path}" "${DATASYSTEM_HOME}" + fi + + if [[ $? -eq 0 ]]; then + print_ok_msg "pass package" "server" "${ip}" + else + print_fail_msg "pass package" "server" "${ip}" + echo -e "${CYAN}---- $(tail -1 "${DEPLOY_LOG}")${NC}" + fi + done + echo -e "-- Pass the datasystem packages ${GREEN}Complete${NC}!\n" +} + +function pass_conf() +{ + echo -e "-- ${CYAN}Starting${NC} to pass the datasystem configure items" + local real_path + for ip in "${ALL_IP_LIST[@]}" + do + real_path=$(realpath "${CONF_DIR}") + if is_local_ip "${ip}"; then + pass_package_local "${ip}" "${real_path}" "${DEFAULT_CONF_DIR}" + else + pass_package_remote "${ip}" "${real_path}" "${DEFAULT_CONF_DIR}" + fi + + if [[ $? -eq 0 ]]; then + print_ok_msg "pass configure items" "server" "${ip}" + else + print_fail_msg "pass configure items" "server" "${ip}" + echo -e "${CYAN}---- $(tail -1 "${DEPLOY_LOG}")${NC}" + fi + done + echo -e "-- Pass the datasystem configure items ${GREEN}Complete${NC}!\n" +} + +# start one master/worker/gcs generate place. 
+ +function start_one_master() +{ + local argv_list + is_array "MASTER_ADDRESS" && master_address="${MASTER_ADDRESS[$COMPONENT_NUM]}" || master_address="${MASTER_ADDRESS}" + [[ -n "${master_address}" ]] && argv_list+=("-master_address" "${master_address}") + is_array "MASTER_BACKEND_STORE_DIR" && backend_store_dir="${MASTER_BACKEND_STORE_DIR[$COMPONENT_NUM]}" || backend_store_dir="${MASTER_BACKEND_STORE_DIR}" + [[ -n "${backend_store_dir}" ]] && argv_list+=("-backend_store_dir" "${backend_store_dir}") + is_array "MASTER_ROCKSDB_SYNC_WRITE" && rocksdb_sync_write="${MASTER_ROCKSDB_SYNC_WRITE[$COMPONENT_NUM]}" || rocksdb_sync_write="${MASTER_ROCKSDB_SYNC_WRITE}" + [[ -n "${rocksdb_sync_write}" ]] && argv_list+=("-rocksdb_sync_write" "${rocksdb_sync_write}") + is_array "MASTER_REDIS_ADDRESS" && redis_address="${MASTER_REDIS_ADDRESS[$COMPONENT_NUM]}" || redis_address="${MASTER_REDIS_ADDRESS}" + [[ -n "${redis_address}" ]] && argv_list+=("-redis_address" "${redis_address}") + is_array "MASTER_NODE_TIMEOUT_S" && node_timeout_s="${MASTER_NODE_TIMEOUT_S[$COMPONENT_NUM]}" || node_timeout_s="${MASTER_NODE_TIMEOUT_S}" + [[ -n "${node_timeout_s}" ]] && argv_list+=("-node_timeout_s" "${node_timeout_s}") + is_array "MASTER_NODE_DEAD_TIMEOUT_S" && node_dead_timeout_s="${MASTER_NODE_DEAD_TIMEOUT_S[$COMPONENT_NUM]}" || node_dead_timeout_s="${MASTER_NODE_DEAD_TIMEOUT_S}" + [[ -n "${node_dead_timeout_s}" ]] && argv_list+=("-node_dead_timeout_s" "${node_dead_timeout_s}") + is_array "MASTER_CHECK_HEARTBEAT_INTERVAL_MS" && check_heartbeat_interval_ms="${MASTER_CHECK_HEARTBEAT_INTERVAL_MS[$COMPONENT_NUM]}" || check_heartbeat_interval_ms="${MASTER_CHECK_HEARTBEAT_INTERVAL_MS}" + [[ -n "${check_heartbeat_interval_ms}" ]] && argv_list+=("-check_heartbeat_interval_ms" "${check_heartbeat_interval_ms}") + is_array "MASTER_REDIS_CA" && redis_ca="${MASTER_REDIS_CA[$COMPONENT_NUM]}" || redis_ca="${MASTER_REDIS_CA}" + [[ -n "${redis_ca}" ]] && argv_list+=("-redis_ca" "${redis_ca}") + is_array "MASTER_REDIS_CERT" && redis_cert="${MASTER_REDIS_CERT[$COMPONENT_NUM]}" || redis_cert="${MASTER_REDIS_CERT}" + [[ -n "${redis_cert}" ]] && argv_list+=("-redis_cert" "${redis_cert}") + is_array "MASTER_REDIS_KEY" && redis_key="${MASTER_REDIS_KEY[$COMPONENT_NUM]}" || redis_key="${MASTER_REDIS_KEY}" + [[ -n "${redis_key}" ]] && argv_list+=("-redis_key" "${redis_key}") + is_array "MASTER_REDIS_USERNAME" && redis_username="${MASTER_REDIS_USERNAME[$COMPONENT_NUM]}" || redis_username="${MASTER_REDIS_USERNAME}" + [[ -n "${redis_username}" ]] && argv_list+=("-redis_username" "${redis_username}") + is_array "MASTER_REDIS_PASSWD" && redis_passwd="${MASTER_REDIS_PASSWD[$COMPONENT_NUM]}" || redis_passwd="${MASTER_REDIS_PASSWD}" + [[ -n "${redis_passwd}" ]] && argv_list+=("-redis_passwd" "${redis_passwd}") + is_array "MASTER_RPC_THREAD_NUM" && rpc_thread_num="${MASTER_RPC_THREAD_NUM[$COMPONENT_NUM]}" || rpc_thread_num="${MASTER_RPC_THREAD_NUM}" + [[ -n "${rpc_thread_num}" ]] && argv_list+=("-rpc_thread_num" "${rpc_thread_num}") + is_array "MASTER_LOG_DIR" && log_dir="${MASTER_LOG_DIR[$COMPONENT_NUM]}" || log_dir="${MASTER_LOG_DIR}" + [[ -n "${log_dir}" ]] && argv_list+=("-log_dir" "${log_dir}") + is_array "MASTER_LOG_FILENAME" && log_filename="${MASTER_LOG_FILENAME[$COMPONENT_NUM]}" || log_filename="${MASTER_LOG_FILENAME}" + [[ -n "${log_filename}" ]] && argv_list+=("-log_filename" "${log_filename}") + is_array "MASTER_LOG_ASYNC_BUFFER_MB" && log_async_buffer_mb="${MASTER_LOG_ASYNC_BUFFER_MB[$COMPONENT_NUM]}" || 
log_async_buffer_mb="${MASTER_LOG_ASYNC_BUFFER_MB}" + [[ -n "${log_async_buffer_mb}" ]] && argv_list+=("-log_async_buffer_mb" "${log_async_buffer_mb}") + is_array "MASTER_MAX_LOG_SIZE" && max_log_size="${MASTER_MAX_LOG_SIZE[$COMPONENT_NUM]}" || max_log_size="${MASTER_MAX_LOG_SIZE}" + [[ -n "${max_log_size}" ]] && argv_list+=("-max_log_size" "${max_log_size}") + is_array "MASTER_MAX_LOG_FILE_NUM" && max_log_file_num="${MASTER_MAX_LOG_FILE_NUM[$COMPONENT_NUM]}" || max_log_file_num="${MASTER_MAX_LOG_FILE_NUM}" + [[ -n "${max_log_file_num}" ]] && argv_list+=("-max_log_file_num" "${max_log_file_num}") + is_array "MASTER_LOG_RETENTION_DAY" && log_retention_day="${MASTER_LOG_RETENTION_DAY[$COMPONENT_NUM]}" || log_retention_day="${MASTER_LOG_RETENTION_DAY}" + [[ -n "${log_retention_day}" ]] && argv_list+=("-log_retention_day" "${log_retention_day}") + is_array "MASTER_LOG_ASYNC" && log_async="${MASTER_LOG_ASYNC[$COMPONENT_NUM]}" || log_async="${MASTER_LOG_ASYNC}" + [[ -n "${log_async}" ]] && argv_list+=("-log_async" "${log_async}") + is_array "MASTER_LOG_COMPRESS" && log_compress="${MASTER_LOG_COMPRESS[$COMPONENT_NUM]}" || log_compress="${MASTER_LOG_COMPRESS}" + [[ -n "${log_compress}" ]] && argv_list+=("-log_compress" "${log_compress}") + is_array "MASTER_V" && v="${MASTER_V[$COMPONENT_NUM]}" || v="${MASTER_V}" + [[ -n "${v}" ]] && argv_list+=("-v" "${v}") + is_array "MASTER_ENABLE_COMPONENT_AUTH" && enable_component_auth="${MASTER_ENABLE_COMPONENT_AUTH[$COMPONENT_NUM]}" || enable_component_auth="${MASTER_ENABLE_COMPONENT_AUTH}" + [[ -n "${enable_component_auth}" ]] && argv_list+=("-enable_component_auth" "${enable_component_auth}") + is_array "MASTER_ZMQ_SERVER_IO_CONTEXT" && zmq_server_io_context="${MASTER_ZMQ_SERVER_IO_CONTEXT[$COMPONENT_NUM]}" || zmq_server_io_context="${MASTER_ZMQ_SERVER_IO_CONTEXT}" + [[ -n "${zmq_server_io_context}" ]] && argv_list+=("-zmq_server_io_context" "${zmq_server_io_context}") + is_array "MASTER_CURVE_KEY_DIR" && curve_key_dir="${MASTER_CURVE_KEY_DIR[$COMPONENT_NUM]}" || curve_key_dir="${MASTER_CURVE_KEY_DIR}" + [[ -n "${curve_key_dir}" ]] && argv_list+=("-curve_key_dir" "${curve_key_dir}") + is_array "MASTER_SECRET_KEY1" && secret_key1="${MASTER_SECRET_KEY1[$COMPONENT_NUM]}" || secret_key1="${MASTER_SECRET_KEY1}" + [[ -n "${secret_key1}" ]] && argv_list+=("-secret_key1" "${secret_key1}") + is_array "MASTER_SECRET_KEY2" && secret_key2="${MASTER_SECRET_KEY2[$COMPONENT_NUM]}" || secret_key2="${MASTER_SECRET_KEY2}" + [[ -n "${secret_key2}" ]] && argv_list+=("-secret_key2" "${secret_key2}") + is_array "MASTER_SECRET_KEY3" && secret_key3="${MASTER_SECRET_KEY3[$COMPONENT_NUM]}" || secret_key3="${MASTER_SECRET_KEY3}" + [[ -n "${secret_key3}" ]] && argv_list+=("-secret_key3" "${secret_key3}") + is_array "MASTER_SECRET_SALT" && secret_salt="${MASTER_SECRET_SALT[$COMPONENT_NUM]}" || secret_salt="${MASTER_SECRET_SALT}" + [[ -n "${secret_salt}" ]] && argv_list+=("-secret_salt" "${secret_salt}") + export LD_LIBRARY_PATH="${BIN_DIR}/lib:$LD_LIBRARY_PATH" + (nohup "${BIN_DIR}/master" "${argv_list[@]}" >${BASE_DIR}/master.out 2>&1) & + local pid=$! + sleep 5 + [[ -n $(ps -p "$pid" | grep "$pid") ]] && ps -p "$pid" -o args || ret_code=1 + if [[ $ret_code -ne 0 ]]; then + cat ${BASE_DIR}/master.out + fi + return $ret_code +} + +function stop_one_master() +{ + is_array "MASTER_ADDRESS" && master_address="${MASTER_ADDRESS[$COMPONENT_NUM]}" || master_address="${MASTER_ADDRESS}" + local pid="$(ps ux | grep /master | grep ${master_address} | grep -v grep | awk '{print $2}')" + if ! 
is_num "${pid}"; then + echo -e "Cannot found the master we want: ${master_address}" >&2 + exit 1 + fi + kill -15 "$pid" + while [[ -n $(ps -p "$pid" | grep "$pid") ]]; do sleep 0.5; done +} + +function deploy_one_master() +{ + . "${CONF_DIR}/master-env.sh" + if [ x"${ACTION}" = "xstart" ]; then + start_one_master + else + stop_one_master + fi +} + +function deploy_master() +{ + if [ x"${ACTION}" = "xstart" ]; then + "${LAUNCHER[@]}" "${BASE_DIR}/master-launcher.sh" "${BASE_DIR}/deploy-datasystem.sh" "-a" "start" "-c" "master" "-p" "${CONF_DIR}" + else + "${LAUNCHER[@]}" "${BASE_DIR}/master-launcher.sh" "${BASE_DIR}/deploy-datasystem.sh" "-a" "stop" "-c" "master" "-p" "${CONF_DIR}" + fi +} + + +function start_one_worker() +{ + local argv_list + is_array "WORKER_ADDRESS" && worker_address="${WORKER_ADDRESS[$COMPONENT_NUM]}" || worker_address="${WORKER_ADDRESS}" + [[ -n "${worker_address}" ]] && argv_list+=("-worker_address" "${worker_address}") + is_array "WORKER_MASTER_ADDRESS" && master_address="${WORKER_MASTER_ADDRESS[$COMPONENT_NUM]}" || master_address="${WORKER_MASTER_ADDRESS}" + [[ -n "${master_address}" ]] && argv_list+=("-master_address" "${master_address}") + is_array "WORKER_ENABLE_SHARED_MEMORY" && enable_shared_memory="${WORKER_ENABLE_SHARED_MEMORY[$COMPONENT_NUM]}" || enable_shared_memory="${WORKER_ENABLE_SHARED_MEMORY}" + [[ -n "${enable_shared_memory}" ]] && argv_list+=("-enable_shared_memory" "${enable_shared_memory}") + is_array "WORKER_SHARED_MEMORY_SIZE_MB" && shared_memory_size_mb="${WORKER_SHARED_MEMORY_SIZE_MB[$COMPONENT_NUM]}" || shared_memory_size_mb="${WORKER_SHARED_MEMORY_SIZE_MB}" + [[ -n "${shared_memory_size_mb}" ]] && argv_list+=("-shared_memory_size_mb" "${shared_memory_size_mb}") + is_array "WORKER_HEARTBEAT_INTERVAL_MS" && heartbeat_interval_ms="${WORKER_HEARTBEAT_INTERVAL_MS[$COMPONENT_NUM]}" || heartbeat_interval_ms="${WORKER_HEARTBEAT_INTERVAL_MS}" + [[ -n "${heartbeat_interval_ms}" ]] && argv_list+=("-heartbeat_interval_ms" "${heartbeat_interval_ms}") + is_array "WORKER_AUTHORIZATION_ENABLE" && authorization_enable="${WORKER_AUTHORIZATION_ENABLE[$COMPONENT_NUM]}" || authorization_enable="${WORKER_AUTHORIZATION_ENABLE}" + [[ -n "${authorization_enable}" ]] && argv_list+=("-authorization_enable" "${authorization_enable}") + is_array "WORKER_ENABLE_UDS" && enable_uds="${WORKER_ENABLE_UDS[$COMPONENT_NUM]}" || enable_uds="${WORKER_ENABLE_UDS}" + [[ -n "${enable_uds}" ]] && argv_list+=("-enable_uds" "${enable_uds}") + is_array "WORKER_UNIX_DOMAIN_SOCKET_DIR" && unix_domain_socket_dir="${WORKER_UNIX_DOMAIN_SOCKET_DIR[$COMPONENT_NUM]}" || unix_domain_socket_dir="${WORKER_UNIX_DOMAIN_SOCKET_DIR}" + [[ -n "${unix_domain_socket_dir}" ]] && argv_list+=("-unix_domain_socket_dir" "${unix_domain_socket_dir}") + is_array "WORKER_SC_REGULAR_SOCKET_NUM" && sc_regular_socket_num="${WORKER_SC_REGULAR_SOCKET_NUM[$COMPONENT_NUM]}" || sc_regular_socket_num="${WORKER_SC_REGULAR_SOCKET_NUM}" + [[ -n "${sc_regular_socket_num}" ]] && argv_list+=("-sc_regular_socket_num" "${sc_regular_socket_num}") + is_array "WORKER_SC_STREAM_SOCKET_NUM" && sc_stream_socket_num="${WORKER_SC_STREAM_SOCKET_NUM[$COMPONENT_NUM]}" || sc_stream_socket_num="${WORKER_SC_STREAM_SOCKET_NUM}" + [[ -n "${sc_stream_socket_num}" ]] && argv_list+=("-sc_stream_socket_num" "${sc_stream_socket_num}") + is_array "WORKER_OC_THREAD_NUM" && oc_thread_num="${WORKER_OC_THREAD_NUM[$COMPONENT_NUM]}" || oc_thread_num="${WORKER_OC_THREAD_NUM}" + [[ -n "${oc_thread_num}" ]] && argv_list+=("-oc_thread_num" "${oc_thread_num}") 
+ is_array "WORKER_EVICTION_THREAD_NUM" && eviction_thread_num="${WORKER_EVICTION_THREAD_NUM[$COMPONENT_NUM]}" || eviction_thread_num="${WORKER_EVICTION_THREAD_NUM}" + [[ -n "${eviction_thread_num}" ]] && argv_list+=("-eviction_thread_num" "${eviction_thread_num}") + is_array "WORKER_CLIENT_RECONNECT_WAIT_S" && client_reconnect_wait_s="${WORKER_CLIENT_RECONNECT_WAIT_S[$COMPONENT_NUM]}" || client_reconnect_wait_s="${WORKER_CLIENT_RECONNECT_WAIT_S}" + [[ -n "${client_reconnect_wait_s}" ]] && argv_list+=("-client_reconnect_wait_s" "${client_reconnect_wait_s}") + is_array "WORKER_PAGE_SIZE" && page_size="${WORKER_PAGE_SIZE[$COMPONENT_NUM]}" || page_size="${WORKER_PAGE_SIZE}" + [[ -n "${page_size}" ]] && argv_list+=("-page_size" "${page_size}") + is_array "WORKER_REMOTE_SEND_THREAD_NUM" && remote_send_thread_num="${WORKER_REMOTE_SEND_THREAD_NUM[$COMPONENT_NUM]}" || remote_send_thread_num="${WORKER_REMOTE_SEND_THREAD_NUM}" + [[ -n "${remote_send_thread_num}" ]] && argv_list+=("-remote_send_thread_num" "${remote_send_thread_num}") + is_array "WORKER_REDIS_ADDRESS" && redis_address="${WORKER_REDIS_ADDRESS[$COMPONENT_NUM]}" || redis_address="${WORKER_REDIS_ADDRESS}" + [[ -n "${redis_address}" ]] && argv_list+=("-redis_address" "${redis_address}") + is_array "WORKER_SPILL_DIRECTORY" && spill_directory="${WORKER_SPILL_DIRECTORY[$COMPONENT_NUM]}" || spill_directory="${WORKER_SPILL_DIRECTORY}" + [[ -n "${spill_directory}" ]] && argv_list+=("-spill_directory" "${spill_directory}") + is_array "WORKER_SPILL_SIZE_LIMIT" && spill_size_limit="${WORKER_SPILL_SIZE_LIMIT[$COMPONENT_NUM]}" || spill_size_limit="${WORKER_SPILL_SIZE_LIMIT}" + [[ -n "${spill_size_limit}" ]] && argv_list+=("-spill_size_limit" "${spill_size_limit}") + is_array "WORKER_REDIS_USERNAME" && redis_username="${WORKER_REDIS_USERNAME[$COMPONENT_NUM]}" || redis_username="${WORKER_REDIS_USERNAME}" + [[ -n "${redis_username}" ]] && argv_list+=("-redis_username" "${redis_username}") + is_array "WORKER_REDIS_PASSWD" && redis_passwd="${WORKER_REDIS_PASSWD[$COMPONENT_NUM]}" || redis_passwd="${WORKER_REDIS_PASSWD}" + [[ -n "${redis_passwd}" ]] && argv_list+=("-redis_passwd" "${redis_passwd}") + is_array "WORKER_REDIS_CA" && redis_ca="${WORKER_REDIS_CA[$COMPONENT_NUM]}" || redis_ca="${WORKER_REDIS_CA}" + [[ -n "${redis_ca}" ]] && argv_list+=("-redis_ca" "${redis_ca}") + is_array "WORKER_REDIS_CERT" && redis_cert="${WORKER_REDIS_CERT[$COMPONENT_NUM]}" || redis_cert="${WORKER_REDIS_CERT}" + [[ -n "${redis_cert}" ]] && argv_list+=("-redis_cert" "${redis_cert}") + is_array "WORKER_REDIS_KEY" && redis_key="${WORKER_REDIS_KEY[$COMPONENT_NUM]}" || redis_key="${WORKER_REDIS_KEY}" + [[ -n "${redis_key}" ]] && argv_list+=("-redis_key" "${redis_key}") + is_array "WORKER_MAX_CLIENT_NUM" && max_client_num="${WORKER_MAX_CLIENT_NUM[$COMPONENT_NUM]}" || max_client_num="${WORKER_MAX_CLIENT_NUM}" + [[ -n "${max_client_num}" ]] && argv_list+=("-max_client_num" "${max_client_num}") + is_array "WORKER_RPC_THREAD_NUM" && rpc_thread_num="${WORKER_RPC_THREAD_NUM[$COMPONENT_NUM]}" || rpc_thread_num="${WORKER_RPC_THREAD_NUM}" + [[ -n "${rpc_thread_num}" ]] && argv_list+=("-rpc_thread_num" "${rpc_thread_num}") + is_array "WORKER_LOG_DIR" && log_dir="${WORKER_LOG_DIR[$COMPONENT_NUM]}" || log_dir="${WORKER_LOG_DIR}" + [[ -n "${log_dir}" ]] && argv_list+=("-log_dir" "${log_dir}") + is_array "WORKER_LOG_FILENAME" && log_filename="${WORKER_LOG_FILENAME[$COMPONENT_NUM]}" || log_filename="${WORKER_LOG_FILENAME}" + [[ -n "${log_filename}" ]] && argv_list+=("-log_filename" 
"${log_filename}") + is_array "WORKER_LOG_ASYNC_BUFFER_MB" && log_async_buffer_mb="${WORKER_LOG_ASYNC_BUFFER_MB[$COMPONENT_NUM]}" || log_async_buffer_mb="${WORKER_LOG_ASYNC_BUFFER_MB}" + [[ -n "${log_async_buffer_mb}" ]] && argv_list+=("-log_async_buffer_mb" "${log_async_buffer_mb}") + is_array "WORKER_MAX_LOG_SIZE" && max_log_size="${WORKER_MAX_LOG_SIZE[$COMPONENT_NUM]}" || max_log_size="${WORKER_MAX_LOG_SIZE}" + [[ -n "${max_log_size}" ]] && argv_list+=("-max_log_size" "${max_log_size}") + is_array "WORKER_MAX_LOG_FILE_NUM" && max_log_file_num="${WORKER_MAX_LOG_FILE_NUM[$COMPONENT_NUM]}" || max_log_file_num="${WORKER_MAX_LOG_FILE_NUM}" + [[ -n "${max_log_file_num}" ]] && argv_list+=("-max_log_file_num" "${max_log_file_num}") + is_array "WORKER_LOG_RETENTION_DAY" && log_retention_day="${WORKER_LOG_RETENTION_DAY[$COMPONENT_NUM]}" || log_retention_day="${WORKER_LOG_RETENTION_DAY}" + [[ -n "${log_retention_day}" ]] && argv_list+=("-log_retention_day" "${log_retention_day}") + is_array "WORKER_LOG_ASYNC" && log_async="${WORKER_LOG_ASYNC[$COMPONENT_NUM]}" || log_async="${WORKER_LOG_ASYNC}" + [[ -n "${log_async}" ]] && argv_list+=("-log_async" "${log_async}") + is_array "WORKER_LOG_COMPRESS" && log_compress="${WORKER_LOG_COMPRESS[$COMPONENT_NUM]}" || log_compress="${WORKER_LOG_COMPRESS}" + [[ -n "${log_compress}" ]] && argv_list+=("-log_compress" "${log_compress}") + is_array "WORKER_V" && v="${WORKER_V[$COMPONENT_NUM]}" || v="${WORKER_V}" + [[ -n "${v}" ]] && argv_list+=("-v" "${v}") + is_array "WORKER_ENABLE_COMPONENT_AUTH" && enable_component_auth="${WORKER_ENABLE_COMPONENT_AUTH[$COMPONENT_NUM]}" || enable_component_auth="${WORKER_ENABLE_COMPONENT_AUTH}" + [[ -n "${enable_component_auth}" ]] && argv_list+=("-enable_component_auth" "${enable_component_auth}") + is_array "WORKER_ZMQ_SERVER_IO_CONTEXT" && zmq_server_io_context="${WORKER_ZMQ_SERVER_IO_CONTEXT[$COMPONENT_NUM]}" || zmq_server_io_context="${WORKER_ZMQ_SERVER_IO_CONTEXT}" + [[ -n "${zmq_server_io_context}" ]] && argv_list+=("-zmq_server_io_context" "${zmq_server_io_context}") + is_array "WORKER_CURVE_KEY_DIR" && curve_key_dir="${WORKER_CURVE_KEY_DIR[$COMPONENT_NUM]}" || curve_key_dir="${WORKER_CURVE_KEY_DIR}" + [[ -n "${curve_key_dir}" ]] && argv_list+=("-curve_key_dir" "${curve_key_dir}") + is_array "WORKER_SECRET_KEY1" && secret_key1="${WORKER_SECRET_KEY1[$COMPONENT_NUM]}" || secret_key1="${WORKER_SECRET_KEY1}" + [[ -n "${secret_key1}" ]] && argv_list+=("-secret_key1" "${secret_key1}") + is_array "WORKER_SECRET_KEY2" && secret_key2="${WORKER_SECRET_KEY2[$COMPONENT_NUM]}" || secret_key2="${WORKER_SECRET_KEY2}" + [[ -n "${secret_key2}" ]] && argv_list+=("-secret_key2" "${secret_key2}") + is_array "WORKER_SECRET_KEY3" && secret_key3="${WORKER_SECRET_KEY3[$COMPONENT_NUM]}" || secret_key3="${WORKER_SECRET_KEY3}" + [[ -n "${secret_key3}" ]] && argv_list+=("-secret_key3" "${secret_key3}") + is_array "WORKER_SECRET_SALT" && secret_salt="${WORKER_SECRET_SALT[$COMPONENT_NUM]}" || secret_salt="${WORKER_SECRET_SALT}" + [[ -n "${secret_salt}" ]] && argv_list+=("-secret_salt" "${secret_salt}") + export LD_LIBRARY_PATH="${BIN_DIR}/lib:$LD_LIBRARY_PATH" + (nohup "${BIN_DIR}/worker" "${argv_list[@]}" >${BASE_DIR}/worker.out 2>&1) & + local pid=$! 
+    local ret_code=0
+    sleep 5
+    [[ -n $(ps -p "$pid" | grep "$pid") ]] && ps -p "$pid" -o args || ret_code=1
+    if [[ $ret_code -ne 0 ]]; then
+        cat ${BASE_DIR}/worker.out
+    fi
+    return $ret_code
+}
+
+function stop_one_worker()
+{
+    is_array "WORKER_ADDRESS" && worker_address="${WORKER_ADDRESS[$COMPONENT_NUM]}" || worker_address="${WORKER_ADDRESS}"
+    local pid="$(ps ux | grep /worker | grep ${worker_address} | grep -v grep | awk '{print $2}')"
+    if ! is_num "${pid}"; then
+        echo -e "Cannot find the worker we want: ${worker_address}" >&2
+        exit 1
+    fi
+    kill -15 "$pid"
+    while [[ -n $(ps -p "$pid" | grep "$pid") ]]; do sleep 0.5; done
+}
+
+function deploy_one_worker()
+{
+    . "${CONF_DIR}/worker-env.sh"
+    if [ x"${ACTION}" = "xstart" ]; then
+        start_one_worker
+    else
+        stop_one_worker
+    fi
+}
+
+function deploy_worker()
+{
+    if [ x"${ACTION}" = "xstart" ]; then
+        "${LAUNCHER[@]}" "${BASE_DIR}/worker-launcher.sh" "${BASE_DIR}/deploy-datasystem.sh" "-a" "start" "-c" "worker" "-p" "${CONF_DIR}"
+    else
+        "${LAUNCHER[@]}" "${BASE_DIR}/worker-launcher.sh" "${BASE_DIR}/deploy-datasystem.sh" "-a" "stop" "-c" "worker" "-p" "${CONF_DIR}"
+    fi
+}
+
+
+function start_one_agent()
+{
+    local argv_list
+    is_array "AGENT_WORKER_ADDRESS" && worker_address="${AGENT_WORKER_ADDRESS[$COMPONENT_NUM]}" || worker_address="${AGENT_WORKER_ADDRESS}"
+    [[ -n "${worker_address}" ]] && argv_list+=("-worker_address" "${worker_address}")
+    is_array "AGENT_ADDRESS" && agent_address="${AGENT_ADDRESS[$COMPONENT_NUM]}" || agent_address="${AGENT_ADDRESS}"
+    [[ -n "${agent_address}" ]] && argv_list+=("-agent_address" "${agent_address}")
+    is_array "AGENT_CLIENT_TIMEOUT_S" && client_timeout_s="${AGENT_CLIENT_TIMEOUT_S[$COMPONENT_NUM]}" || client_timeout_s="${AGENT_CLIENT_TIMEOUT_S}"
+    [[ -n "${client_timeout_s}" ]] && argv_list+=("-client_timeout_s" "${client_timeout_s}")
+    is_array "AGENT_RPC_THREAD_NUM" && rpc_thread_num="${AGENT_RPC_THREAD_NUM[$COMPONENT_NUM]}" || rpc_thread_num="${AGENT_RPC_THREAD_NUM}"
+    [[ -n "${rpc_thread_num}" ]] && argv_list+=("-rpc_thread_num" "${rpc_thread_num}")
+    is_array "AGENT_LOG_DIR" && log_dir="${AGENT_LOG_DIR[$COMPONENT_NUM]}" || log_dir="${AGENT_LOG_DIR}"
+    [[ -n "${log_dir}" ]] && argv_list+=("-log_dir" "${log_dir}")
+    is_array "AGENT_LOG_FILENAME" && log_filename="${AGENT_LOG_FILENAME[$COMPONENT_NUM]}" || log_filename="${AGENT_LOG_FILENAME}"
+    [[ -n "${log_filename}" ]] && argv_list+=("-log_filename" "${log_filename}")
+    is_array "AGENT_LOG_ASYNC_BUFFER_MB" && log_async_buffer_mb="${AGENT_LOG_ASYNC_BUFFER_MB[$COMPONENT_NUM]}" || log_async_buffer_mb="${AGENT_LOG_ASYNC_BUFFER_MB}"
+    [[ -n "${log_async_buffer_mb}" ]] && argv_list+=("-log_async_buffer_mb" "${log_async_buffer_mb}")
+    is_array "AGENT_MAX_LOG_SIZE" && max_log_size="${AGENT_MAX_LOG_SIZE[$COMPONENT_NUM]}" || max_log_size="${AGENT_MAX_LOG_SIZE}"
+    [[ -n "${max_log_size}" ]] && argv_list+=("-max_log_size" "${max_log_size}")
+    is_array "AGENT_MAX_LOG_FILE_NUM" && max_log_file_num="${AGENT_MAX_LOG_FILE_NUM[$COMPONENT_NUM]}" || max_log_file_num="${AGENT_MAX_LOG_FILE_NUM}"
+    [[ -n "${max_log_file_num}" ]] && argv_list+=("-max_log_file_num" "${max_log_file_num}")
+    is_array "AGENT_LOG_RETENTION_DAY" && log_retention_day="${AGENT_LOG_RETENTION_DAY[$COMPONENT_NUM]}" || log_retention_day="${AGENT_LOG_RETENTION_DAY}"
+    [[ -n "${log_retention_day}" ]] && argv_list+=("-log_retention_day" "${log_retention_day}")
+    is_array "AGENT_LOG_ASYNC" && log_async="${AGENT_LOG_ASYNC[$COMPONENT_NUM]}" || log_async="${AGENT_LOG_ASYNC}"
+    [[ -n "${log_async}" ]] && argv_list+=("-log_async" "${log_async}")
argv_list+=("-log_async" "${log_async}") + is_array "AGENT_LOG_COMPRESS" && log_compress="${AGENT_LOG_COMPRESS[$COMPONENT_NUM]}" || log_compress="${AGENT_LOG_COMPRESS}" + [[ -n "${log_compress}" ]] && argv_list+=("-log_compress" "${log_compress}") + is_array "AGENT_V" && v="${AGENT_V[$COMPONENT_NUM]}" || v="${AGENT_V}" + [[ -n "${v}" ]] && argv_list+=("-v" "${v}") + is_array "AGENT_ENABLE_COMPONENT_AUTH" && enable_component_auth="${AGENT_ENABLE_COMPONENT_AUTH[$COMPONENT_NUM]}" || enable_component_auth="${AGENT_ENABLE_COMPONENT_AUTH}" + [[ -n "${enable_component_auth}" ]] && argv_list+=("-enable_component_auth" "${enable_component_auth}") + is_array "AGENT_ZMQ_SERVER_IO_CONTEXT" && zmq_server_io_context="${AGENT_ZMQ_SERVER_IO_CONTEXT[$COMPONENT_NUM]}" || zmq_server_io_context="${AGENT_ZMQ_SERVER_IO_CONTEXT}" + [[ -n "${zmq_server_io_context}" ]] && argv_list+=("-zmq_server_io_context" "${zmq_server_io_context}") + is_array "AGENT_CURVE_KEY_DIR" && curve_key_dir="${AGENT_CURVE_KEY_DIR[$COMPONENT_NUM]}" || curve_key_dir="${AGENT_CURVE_KEY_DIR}" + [[ -n "${curve_key_dir}" ]] && argv_list+=("-curve_key_dir" "${curve_key_dir}") + is_array "AGENT_SECRET_KEY1" && secret_key1="${AGENT_SECRET_KEY1[$COMPONENT_NUM]}" || secret_key1="${AGENT_SECRET_KEY1}" + [[ -n "${secret_key1}" ]] && argv_list+=("-secret_key1" "${secret_key1}") + is_array "AGENT_SECRET_KEY2" && secret_key2="${AGENT_SECRET_KEY2[$COMPONENT_NUM]}" || secret_key2="${AGENT_SECRET_KEY2}" + [[ -n "${secret_key2}" ]] && argv_list+=("-secret_key2" "${secret_key2}") + is_array "AGENT_SECRET_KEY3" && secret_key3="${AGENT_SECRET_KEY3[$COMPONENT_NUM]}" || secret_key3="${AGENT_SECRET_KEY3}" + [[ -n "${secret_key3}" ]] && argv_list+=("-secret_key3" "${secret_key3}") + is_array "AGENT_SECRET_SALT" && secret_salt="${AGENT_SECRET_SALT[$COMPONENT_NUM]}" || secret_salt="${AGENT_SECRET_SALT}" + [[ -n "${secret_salt}" ]] && argv_list+=("-secret_salt" "${secret_salt}") + export LD_LIBRARY_PATH="${BIN_DIR}/lib:$LD_LIBRARY_PATH" + (nohup "${BIN_DIR}/agent" "${argv_list[@]}" >${BASE_DIR}/agent.out 2>&1) & + local pid=$! + sleep 5 + [[ -n $(ps -p "$pid" | grep "$pid") ]] && ps -p "$pid" -o args || ret_code=1 + if [[ $ret_code -ne 0 ]]; then + cat ${BASE_DIR}/agent.out + fi + return $ret_code +} + +function stop_one_agent() +{ + is_array "AGENT_ADDRESS" && agent_address="${AGENT_ADDRESS[$COMPONENT_NUM]}" || agent_address="${AGENT_ADDRESS}" + local pid="$(ps ux | grep /agent | grep ${agent_address} | grep -v grep | awk '{print $2}')" + if ! is_num "${pid}"; then + echo -e "Cannot found the agent we want: ${agent_address}" >&2 + exit 1 + fi + kill -15 "$pid" + while [[ -n $(ps -p "$pid" | grep "$pid") ]]; do sleep 0.5; done +} + +function deploy_one_agent() +{ + . 
"${CONF_DIR}/agent-env.sh" + if [ x"${ACTION}" = "xstart" ]; then + start_one_agent + else + stop_one_agent + fi +} + +function deploy_agent() +{ + if [ x"${ACTION}" = "xstart" ]; then + "${LAUNCHER[@]}" "${BASE_DIR}/agent-launcher.sh" "${BASE_DIR}/deploy-datasystem.sh" "-a" "start" "-c" "agent" "-p" "${CONF_DIR}" + else + "${LAUNCHER[@]}" "${BASE_DIR}/agent-launcher.sh" "${BASE_DIR}/deploy-datasystem.sh" "-a" "stop" "-c" "agent" "-p" "${CONF_DIR}" + fi +} + + +function deploy_all() +{ + if [[ x"${ACTION}" = "xstart" ]]; then + deploy_master + deploy_worker + deploy_agent + else + deploy_agent + deploy_worker + deploy_master + fi +} + +function deploy_components() +{ + echo -e "-- ${CYAN}Starting${NC} to ${ACTION} the datasystem cluster" + case "${COMPONENT}" in + all|master|worker|gcs|agent|clusterfs) + deploy_${COMPONENT} + ;; + *) + echo -e "This command requires a component be specified, your component: \"${COMPONENT}\"." >&2 + echo -e "${USAGE}" >&2 + exit 1 + esac + echo -e "-- ${ACTION} the datasystem cluster ${GREEN}Complete${NC}!" +} + +function main() +{ + # Add -x if debugging is enable. + if [[ "$-" == *x* ]]; then + LAUNCHER+=("-x") + fi + . "${BASE_DIR}/deploy-common.sh" + init_default_opts + while getopts "hfa:c:d:n:p:w:" OPT; do + case "${OPT}" in + a) + ACTION="${OPTARG}" + ;; + c) + COMPONENT="${OPTARG}" + ;; + d) + DATASYSTEM_HOME="${OPTARG}" + ;; + f) + HANDLE_CLUSTERFS="Yes" + ;; + n) + COMPONENT_NUM="${OPTARG}" + ;; + p) + CONF_DIR=$(realpath "${OPTARG}") + if [[ ! -d "${CONF_DIR}" ]]; then + echo -e "No such conf directory: ${OPTARG}" >&2 + exit 1 + fi + ;; + h) + echo -e "${USAGE}" + exit 0 + ;; + w) + echo -e "Deprecated options, no longer used!" >&2 + ;; + *) + echo -e "Error: OPTION \"${OPT}\" not recognized\n${USAGE}" >&2 + exit 1 + ;; + esac + done + + if [[ -z "${ACTION}" ]]; then + echo -e "Error: no ACTION specified\n${USAGE}" >&2 + exit 1 + fi + + # deploy or pass packages according to the action. + case "${ACTION}" in + pass) + get_env + pass_packages + ;; + pass_conf) + get_env + pass_conf + ;; + start|stop) + if [[ -z "${COMPONENT_NUM}" ]] ; then + deploy_components + else + deploy_one_${COMPONENT} + fi + ;; + *) + echo -e "Error: ACTION \"${ACTION}\" not recognized\n${USAGE}" >&2 + ;; + esac +} + +main "$@" \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/datasystem/service/deploy/master-launcher.sh b/dsoftbus/dist_executor/modules/datasystem/service/deploy/master-launcher.sh new file mode 100644 index 0000000000000000000000000000000000000000..a47bb2fc0996ffbd3424f28214caab800f7ef343 --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/deploy/master-launcher.sh @@ -0,0 +1,164 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +set -o pipefail + +readonly USAGE="Usage: master-launcher.sh command..." +readonly BASE_DIR=$(dirname "$(readlink -f "$0")") +readonly CONF_DIR=${BASE_DIR}/conf + +. 
"${BASE_DIR}/deploy-common.sh" + +IP_LIST=() + +function set_default_address() +{ + case "master" in + master) + export MASTER_ADDRESS="127.0.0.1:9089" + ;; + worker) + export WORKER_ADDRESS="127.0.0.1:9088" + export WORKER_MASTER_ADDRESS="127.0.0.1:9089" + ;; + agent) + export AGENT_ADDRESS="127.0.0.1:9087" + export AGENT_WORKER_ADDRESS="127.0.0.1:9088" + ;; + gcs) + export GCS_ADDRESS="127.0.0.1:9090" + export GCS_MASTER_ADDRESS="127.0.0.1:9089" + export GCS_NODE_ID="NODE-ID" + ;; + *) + echo -e "Error: master not found, No such component for deployment!" + ;; + esac +} + +function valid_ip() +{ + local ip=$1 + local ret=1 + + if [[ "$ip" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}:[0-9]{1,5}$ ]]; then + ret=0 + fi + return $ret +} + +function parse_ip_list() +{ + . "${CONF_DIR}/master-env.sh" + if is_array "MASTER_ADDRESS"; then + for((i=0;i<${#MASTER_ADDRESS[@]};i++)) + do + if ! valid_ip "${MASTER_ADDRESS[i]}"; then + echo -e "[$(date '+%F %T')] Invalid master ip at $i: \"${MASTER_ADDRESS[i]}\"" >>"${deploy_log}" + return 1 + fi + IP_LIST+=("$(echo "${MASTER_ADDRESS[i]}" | cut -d : -f 1)") + done + else + if [[ -z "${MASTER_ADDRESS}" ]]; then + set_default_address + fi + if ! valid_ip "${MASTER_ADDRESS}"; then + echo -e "[$(date '+%F %T')] Invalid master ip: \"${MASTER_ADDRESS}\"" >>"${deploy_log}" + return 1 + fi + IP_LIST+=("$(echo "${MASTER_ADDRESS}" | cut -d : -f 1)") + fi +} + +function main() +{ + local launch_prefix="bash" + # Add -x if debugging is enable. + if [[ "$-" == *x* ]]; then + launch_prefix="${launch_prefix} -x" + fi + + if [[ -z "$@" ]]; then + echo -e "${USAGE}" >&2 + exit 1 + fi + + local cmd_list=("$@") + + deploy_log="${BASE_DIR}/deploy.log" + + if ! parse_ip_list ; then + echo -e "---- ${RED}[ FAILED ]${NC} The ${CYAN}master${NC} service parse ip list failed." + echo -e "---- Task(s) on master nodes failed! look at ${deploy_log} for details." + exit 1 + fi + + # We split a large number of deployment requests into multiple batches to avoid very perverted deployment requests. + local batch_num=5 + local ip_num="${#IP_LIST[@]}" + local rounds="$[(ip_num+batch_num-1)/batch_num]" + + local pids=() + local err_flag=0 + + for((k=0;k<"${rounds}";k++)) + do + local start="$[k*batch_num]" + local end="$[(k+1)*batch_num]" + if [[ "${end}" -gt "${ip_num}" ]]; then + end="${ip_num}" + fi + for((i="${start}";i<"${end}";i++)) + do + local ip="${IP_LIST[$i]}" + local cmd=("${cmd_list[@]}") + cmd+=("-n" "$i") + echo -e "[$(date '+%F %T')] Execute the command: ${cmd[@]} on master node [${ip}]." >>"${deploy_log}" + if is_local_ip "${ip}"; then + echo -e "[$(date '+%F %T')][${ip}] Running in local machine..." >>"${deploy_log}" + nohup $"${cmd[@]// /\\ }" 2>&1 | \ + while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${deploy_log}" & + else + echo -e "[$(date '+%F %T')][${ip}] Connecting as ${USER}..." >>"${deploy_log}" + nohup ssh -o NumberOfPasswordPrompts=0 -o ConnectTimeout=5 -o StrictHostKeyChecking=no -tt "${ip}" \ + "${launch_prefix}" $"${cmd[@]// /\\ }" 2>&1 | while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${deploy_log}" & + fi + pids[${#pids[@]}]=$! + done + + echo -e "[$(date '+%F %T')] Waiting for all tasks finish..." >>"${deploy_log}" + + for ((i=0; i<${#pids[@]}; i++)); + do + wait ${pids[$i]} + ret_code=$? + if [[ ${ret_code} -ne 0 ]]; then + err_flag=1 + echo -e "---- ${RED}[ FAILED ]${NC} The ${CYAN}master${NC} service @ ${PURPLE}${IP_LIST[$i]}${NC} failed." 
+ echo -e "[$(date '+%F %T')][${IP_LIST[$i]}] Task on master node '${IP_LIST[$i]}' failed, exit code: ${ret_code}" >>"${deploy_log}" + else + echo -e "---- ${GREEN}[ OK ]${NC} The ${CYAN}master${NC} service @ ${PURPLE}${IP_LIST[$i]}${NC} success." + echo -e "[$(date '+%F %T')][${IP_LIST[$i]}] Task on master node '${IP_LIST[$i]}' success!" >>"${deploy_log}" + fi + done + done + + if [[ ${err_flag} -eq 0 ]]; then + echo -e "[$(date '+%F %T')] All tasks run on master nodes success!" >>"${deploy_log}" + else + echo -e "---- Task(s) on master nodes failed! look at ${deploy_log} for details." + fi +} + +main "$@" diff --git a/dsoftbus/dist_executor/modules/datasystem/service/deploy/worker-launcher.sh b/dsoftbus/dist_executor/modules/datasystem/service/deploy/worker-launcher.sh new file mode 100644 index 0000000000000000000000000000000000000000..5a676726793ea7c822211b9ce0ca09339871f834 --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/deploy/worker-launcher.sh @@ -0,0 +1,164 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +set -o pipefail + +readonly USAGE="Usage: worker-launcher.sh command..." +readonly BASE_DIR=$(dirname "$(readlink -f "$0")") +readonly CONF_DIR=${BASE_DIR}/conf + +. "${BASE_DIR}/deploy-common.sh" + +IP_LIST=() + +function set_default_address() +{ + case "worker" in + master) + export MASTER_ADDRESS="127.0.0.1:9089" + ;; + worker) + export WORKER_ADDRESS="127.0.0.1:9088" + export WORKER_MASTER_ADDRESS="127.0.0.1:9089" + ;; + agent) + export AGENT_ADDRESS="127.0.0.1:9087" + export AGENT_WORKER_ADDRESS="127.0.0.1:9088" + ;; + gcs) + export GCS_ADDRESS="127.0.0.1:9090" + export GCS_MASTER_ADDRESS="127.0.0.1:9089" + export GCS_NODE_ID="NODE-ID" + ;; + *) + echo -e "Error: worker not found, No such component for deployment!" + ;; + esac +} + +function valid_ip() +{ + local ip=$1 + local ret=1 + + if [[ "$ip" =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}:[0-9]{1,5}$ ]]; then + ret=0 + fi + return $ret +} + +function parse_ip_list() +{ + . "${CONF_DIR}/worker-env.sh" + if is_array "WORKER_ADDRESS"; then + for((i=0;i<${#WORKER_ADDRESS[@]};i++)) + do + if ! valid_ip "${WORKER_ADDRESS[i]}"; then + echo -e "[$(date '+%F %T')] Invalid worker ip at $i: \"${WORKER_ADDRESS[i]}\"" >>"${deploy_log}" + return 1 + fi + IP_LIST+=("$(echo "${WORKER_ADDRESS[i]}" | cut -d : -f 1)") + done + else + if [[ -z "${WORKER_ADDRESS}" ]]; then + set_default_address + fi + if ! valid_ip "${WORKER_ADDRESS}"; then + echo -e "[$(date '+%F %T')] Invalid worker ip: \"${WORKER_ADDRESS}\"" >>"${deploy_log}" + return 1 + fi + IP_LIST+=("$(echo "${WORKER_ADDRESS}" | cut -d : -f 1)") + fi +} + +function main() +{ + local launch_prefix="bash" + # Add -x if debugging is enable. + if [[ "$-" == *x* ]]; then + launch_prefix="${launch_prefix} -x" + fi + + if [[ -z "$@" ]]; then + echo -e "${USAGE}" >&2 + exit 1 + fi + + local cmd_list=("$@") + + deploy_log="${BASE_DIR}/deploy.log" + + if ! parse_ip_list ; then + echo -e "---- ${RED}[ FAILED ]${NC} The ${CYAN}worker${NC} service parse ip list failed." 
+ echo -e "---- Task(s) on worker nodes failed! look at ${deploy_log} for details." + exit 1 + fi + + # We split a large number of deployment requests into multiple batches to avoid very perverted deployment requests. + local batch_num=5 + local ip_num="${#IP_LIST[@]}" + local rounds="$[(ip_num+batch_num-1)/batch_num]" + + local pids=() + local err_flag=0 + + for((k=0;k<"${rounds}";k++)) + do + local start="$[k*batch_num]" + local end="$[(k+1)*batch_num]" + if [[ "${end}" -gt "${ip_num}" ]]; then + end="${ip_num}" + fi + for((i="${start}";i<"${end}";i++)) + do + local ip="${IP_LIST[$i]}" + local cmd=("${cmd_list[@]}") + cmd+=("-n" "$i") + echo -e "[$(date '+%F %T')] Execute the command: ${cmd[@]} on worker node [${ip}]." >>"${deploy_log}" + if is_local_ip "${ip}"; then + echo -e "[$(date '+%F %T')][${ip}] Running in local machine..." >>"${deploy_log}" + nohup $"${cmd[@]// /\\ }" 2>&1 | \ + while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${deploy_log}" & + else + echo -e "[$(date '+%F %T')][${ip}] Connecting as ${USER}..." >>"${deploy_log}" + nohup ssh -o NumberOfPasswordPrompts=0 -o ConnectTimeout=5 -o StrictHostKeyChecking=no -tt "${ip}" \ + "${launch_prefix}" $"${cmd[@]// /\\ }" 2>&1 | while read -r line; do echo "[$(date '+%F %T')][${ip}] ${line}"; done >> "${deploy_log}" & + fi + pids[${#pids[@]}]=$! + done + + echo -e "[$(date '+%F %T')] Waiting for all tasks finish..." >>"${deploy_log}" + + for ((i=0; i<${#pids[@]}; i++)); + do + wait ${pids[$i]} + ret_code=$? + if [[ ${ret_code} -ne 0 ]]; then + err_flag=1 + echo -e "---- ${RED}[ FAILED ]${NC} The ${CYAN}worker${NC} service @ ${PURPLE}${IP_LIST[$i]}${NC} failed." + echo -e "[$(date '+%F %T')][${IP_LIST[$i]}] Task on worker node '${IP_LIST[$i]}' failed, exit code: ${ret_code}" >>"${deploy_log}" + else + echo -e "---- ${GREEN}[ OK ]${NC} The ${CYAN}worker${NC} service @ ${PURPLE}${IP_LIST[$i]}${NC} success." + echo -e "[$(date '+%F %T')][${IP_LIST[$i]}] Task on worker node '${IP_LIST[$i]}' success!" >>"${deploy_log}" + fi + done + done + + if [[ ${err_flag} -eq 0 ]]; then + echo -e "[$(date '+%F %T')] All tasks run on worker nodes success!" >>"${deploy_log}" + else + echo -e "---- Task(s) on worker nodes failed! look at ${deploy_log} for details." 
+ fi +} + +main "$@" diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libcrypto.so.1.1 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libcrypto.so.1.1 new file mode 100644 index 0000000000000000000000000000000000000000..d400acff15912d341b51188c83ad0e5b7a27b2d9 Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libcrypto.so.1.1 differ diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libgflags.so.2.2 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libgflags.so.2.2 new file mode 120000 index 0000000000000000000000000000000000000000..3d6c2ad5008d343cda622206e5a8d3653c8507ba --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/lib/libgflags.so.2.2 @@ -0,0 +1 @@ +libgflags.so.2.2.2 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libgflags.so.2.2.2 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libgflags.so.2.2.2 new file mode 100644 index 0000000000000000000000000000000000000000..dd3127d4f83c0aa4f3a5980adb83f1da0d0966b3 Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libgflags.so.2.2.2 differ diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libglog.so.0 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libglog.so.0 new file mode 120000 index 0000000000000000000000000000000000000000..915d5bd127819811a4afadf3750b96e851097b5c --- /dev/null +++ b/dsoftbus/dist_executor/modules/datasystem/service/lib/libglog.so.0 @@ -0,0 +1 @@ +libglog.so.0.5.0 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libglog.so.0.5.0 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libglog.so.0.5.0 new file mode 100644 index 0000000000000000000000000000000000000000..118d59529990d15365ec0ae0f0896abef2c51a54 Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libglog.so.0.5.0 differ diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libhiredis.so.1.0.0 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libhiredis.so.1.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..a93a914de7e2ebd2d559dfe2811265a26cc6d49c Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libhiredis.so.1.0.0 differ diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libhiredis_ssl.so.1.0.0 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libhiredis_ssl.so.1.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..8ca9ea5bfe33313e98d529c64e5bcb4eb502d05b Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libhiredis_ssl.so.1.0.0 differ diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libprotobuf.so.3.13.0.0 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libprotobuf.so.3.13.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..6de235c52e46f1ccdc759aa0a91902a2a2e2f780 Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libprotobuf.so.3.13.0.0 differ diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libprotoc.so.3.13.0.0 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libprotoc.so.3.13.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..5644dfdb629de9c0db29a197a8d6b49074fc5a60 Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libprotoc.so.3.13.0.0 differ diff --git 
a/dsoftbus/dist_executor/modules/datasystem/service/lib/libssl.so.1.1 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libssl.so.1.1
new file mode 100644
index 0000000000000000000000000000000000000000..c6b6cd951a80761b013cc5c0e58820fe38bb30e8
Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libssl.so.1.1 differ
diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libtbb.so.2 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libtbb.so.2
new file mode 100644
index 0000000000000000000000000000000000000000..935ae89a92056be6c100594764c0358acc75b033
Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libtbb.so.2 differ
diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libzmq.so.5 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libzmq.so.5
new file mode 120000
index 0000000000000000000000000000000000000000..6ccc986c067d93460c11612db6dc176224d1e707
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/datasystem/service/lib/libzmq.so.5
@@ -0,0 +1 @@
+libzmq.so.5.2.4
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/datasystem/service/lib/libzmq.so.5.2.4 b/dsoftbus/dist_executor/modules/datasystem/service/lib/libzmq.so.5.2.4
new file mode 100644
index 0000000000000000000000000000000000000000..2f67e03597c36cdfa5c28ed367e03b6fc69d4713
Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/lib/libzmq.so.5.2.4 differ
diff --git a/dsoftbus/dist_executor/modules/datasystem/service/master b/dsoftbus/dist_executor/modules/datasystem/service/master
new file mode 100644
index 0000000000000000000000000000000000000000..d6cd0e1842f6ae7b88715c5df88d0ba9a7fc36b9
Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/master differ
diff --git a/dsoftbus/dist_executor/modules/datasystem/service/worker b/dsoftbus/dist_executor/modules/datasystem/service/worker
new file mode 100644
index 0000000000000000000000000000000000000000..a9f98d28dca0d27e529eb1d411a46f97d24c1736
Binary files /dev/null and b/dsoftbus/dist_executor/modules/datasystem/service/worker differ
diff --git a/dsoftbus/dist_executor/modules/functioncore/admin/bin/entrypoint b/dsoftbus/dist_executor/modules/functioncore/admin/bin/entrypoint
new file mode 100644
index 0000000000000000000000000000000000000000..58dadbd490fe7dacb0bbe2f78ae83479ff5ac5f2
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/functioncore/admin/bin/entrypoint
@@ -0,0 +1,30 @@
+#!/bin/bash
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+set -e
+
+if [ "$(whoami)" != "${USER_NAME}" ]; then
+    if [ -w /etc/passwd ]; then
+        echo "${USER_NAME}:x:$(id -u):$(id -g):${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd
+    fi
+fi
+
+# Prevent creating files with incorrect permissions.
+umask 0027
+
+
+if ! exec "${ADMIN}" --module admin-service "$@"; then
+    echo "failed to start admin service $?"
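+    # Note: exec replaces this shell on success, so this branch is reached only
+    # when launching the admin binary itself fails.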
+ exit 1 +fi \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/admin/config/config.json b/dsoftbus/dist_executor/modules/functioncore/admin/config/config.json new file mode 100644 index 0000000000000000000000000000000000000000..6be0f09dcc76f7b3d5ac986f99729aa00eeae350 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/admin/config/config.json @@ -0,0 +1,61 @@ +{ + "service": { + "function_accessor": { + "addr": "", + "api_version": "/serverless/v1", + "timeout": 86400 + }, + "function_repo": { + "addr": "http://{local_ip}:{repo_port}", + "api_version": "/function-repository/v1", + "timeout": 86400 + }, + "metrics": { + "addr": "", + "api_version": "/api/v1", + "timeout": 30 + }, + "logger": { + "addr": "http://{loki_ip}:{loki_port}", + "api_version": "/loki/api/v1", + "max_size": 1000, + "timeout": 30, + "tls_enable": false + }, + "instance_manager": { + "addr": "http://{local_ip}:{workermgr_listen_port}", + "api_version": "/worker-manager/v1", + "timeout": 30 + } + }, + "server": { + "port": {admin_port}, + "read_timeout": 86400, + "write_timeout": 86400, + "body_limit": "250MB", + "https_enable": false, + "key_file": "{install_dir}/resource/tls/tls.key", + "cert_file": "{install_dir}/resource/tls/tls.crt" + }, + "rate_limit": { + "login_weight": 0, + "logout_weight": 0, + "create_weight": 0, + "delete_weight": 0, + "update_weight": 0, + "query_weight": 0, + "invoke_weight": 0, + "log_query_weight": 0, + "daily_token_of_tenant": 10000, + "concurrent_token_of_tenant": 10000, + "total_concurrent_token": 100000 + }, + "invokeLimit": { + "enable": false, + "tokenBucketSize": 1000, + "frequency": 1 + }, + "auth_enable": false, + "cli_version": "2.0.0.B183.20220210125433", + "upload_absolute_path": "{upload_absolute_path}" +} diff --git a/dsoftbus/dist_executor/modules/functioncore/admin/config/log.json b/dsoftbus/dist_executor/modules/functioncore/admin/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..abdece33f429a084fec8155ba09a44644e090eda --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/admin/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{logConfigPath}", + "level": "{logLevel}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/apple/a.txt b/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/apple/a.txt new file mode 100644 index 0000000000000000000000000000000000000000..3709ccd9afd30c21d59bfae320a74b80daa14f41 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/apple/a.txt @@ -0,0 +1 @@ +f48f9d5a9706088947ac438ebe005aa26c9370579f2231c538b28894a315562182da0eb18002c86728c4cdc0df5efb19e1c2060e93370fd891d4f3d9e5b2b61376643f86d0210ce996446a985759b15112037a5a2f6463cf5fd6afc7ff30fe814bf960eb0c16c5059407c74d6a93a8b3110405cbc935dff672da3b648d62e0d5cecd91bc7063211e6b33210afb6899e8322eabffe167318a5ac5d591aa7579efd37e9e4c7fcf390e97c1151b7c1bf00b4a18764a1a0cac1fda1ea6389b39d755127f0e5bc072e6d5936738be1585535dc63b71ad58686f71c821325009de36bdbac31c1c044845bd1bb41230ec9815695ef3f9e7143a16410113ff3286147a76 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/boy/b.txt b/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/boy/b.txt new file mode 100644 index 
0000000000000000000000000000000000000000..ccf997f96126e67f30cdf76f1cb9dc8ef5bff9dd --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/boy/b.txt @@ -0,0 +1 @@ +5d3da9f432be72b34951c737053eb2c816aaccae2b390d092046288aa5ce2cc5b16529f8197de316303735fbc0c041ccc3885b9be5fef4933b6806febb940b6bb609b3bf1d1501110e3ba62c6d8b2cf4388a08a8e123a3cea96daec619fbca177bdf092461f5701b02e5af83ddf0f6ce40deb279cda3ec7d6805237d229e26e30555f3dd890b7306b42bdef0ca1f963dbe25cd00d75018ab3216fcd3b7002b8a493d015306bf264cca12718890ef11c8d9e54721ebd6bdecab6c7084442f45611f249d9b5d703414770a46380d0b97c0187185241e9b6187c8168414370649fe6e7afef83a0df645424c4b6c0631dc3ef50c30af37eda905a1886ca12474c68a \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/cat/c.txt b/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/cat/c.txt new file mode 100644 index 0000000000000000000000000000000000000000..10995ca71de7c44692a6f781fad7c1de77af08ae --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/cat/c.txt @@ -0,0 +1 @@ +df04ed142bf01fb1fe6c3c85944d71b77041b1f9f364ae09280cda4cdd65de76a4ce0459f10890eefd36708f3d154aa8af202301a215679207f17c472f04f7d19e219edb5028e18613378b3296e0e8f9940d1785d1daf1aed07dfad3a12c7d129bfc54635d0c8a5bc10c55ea116de90765d687e6cb6d657fe36a654eb8393e657b77b0916f291fccbddbbaeb9a43d4418458970e7b7ee257b0c54a1b4466b5dba393e7934b398874bec060407e9bac139c8d00937c8e738f4e88401e0950fb968658c9e0cfd639b4a530da92f1abd6c2e06953d7f5d4bdf4f5b8385edc8aab8b8ec7e897d24bd8d736955c8e19676c0037ce5347f6997f7c2e32ba71e995fc5d \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/dog/d.txt b/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/dog/d.txt new file mode 100644 index 0000000000000000000000000000000000000000..bff402ff7bfa253116d0d3b089d44517d60e3127 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/admin/resource/rdo/v1/dog/d.txt @@ -0,0 +1 @@ +37a1b37efbb9bb6beadb4446f40aa2c4bcaeb298192fa390ed03ee65bfcd54e55da39bae9961b9fa0d4b89591e41eed835ed01cca315eab75ebaf8a9e7b02287a468ec6d0c61f9f8e4d58dad90fb8a6a13bee7fe4685dbb535bfdb7e76b328d66b4d4bc7aa48791b205d1d2f2ef176f2b5b80a8ddc34ed9514372130eb896bc18745facf059a7fa37ef5e2ef413d0030f5bca581055eb3b3565dca642651cb802530e2e4964ab3c8a37370adfd65c80483398a1a8668caed455deabae0dbae7fb2bcdeeee4c2a2d9431ed93c6527985ef684127691904c799e13f37daeb1cb7ebfb0904d61796362514e521ac0fed682fd952ca3e9ce9a7a4407aaaa44f8aab6 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/admin/resource/wdo/wdo.json b/dsoftbus/dist_executor/modules/functioncore/admin/resource/wdo/wdo.json new file mode 100644 index 0000000000000000000000000000000000000000..b0a932b3f5e3082766dd859c1c311eaa824758e2 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/admin/resource/wdo/wdo.json @@ -0,0 +1,23 @@ +{ + "secretEncryptKey": { + "keys": { + "key": "da1352391c6d54eed9de4d02:1a5cfba5caed9b96114834c3dbd760192acc82af4d2aa462768ddfd5da129311f27d9e34f097086736a6802f9937b10e9cf9c58da9105e76ca6f72ddd903ee34d7de0b385fb9166a03a2666525dced74", + "mac": "cc299f4651f30fc24235c88ae8a041a8c2972a1d0070c030ed89fc552ede1c09" + }, + "description": "secretEncryptKey" + }, + "app-key": { + "keys": { + "key": "37acf134aaee66d19213c31a:cda713aa2eb2f80deb63c96d69c15b3b6c7276683a3b55cfe1d15a375d2b1cafbd3cd2024eabf11684a8c8b14091972a824838a1ae122af3a3c141397c55c1d0061cb2eef98faaaa67d6bdcdeb72664b", 
+ "mac": "dfaaf5a186aca5603b20be97e8540d336c35fe775546841244cded6da1638efd" + }, + "description": "app-key" + }, + "config-key": { + "keys": { + "key": "7014223f0a628977b983c4b4:10516a0fa6f4df5d1884ccb61404a6439dc9024a97dd070e2e26dbaafd07ba8fcf87ed2f49cf4a8bd5a191a3d50db45cfb2fbc0c0524f85f2a6ab4c6074294b2ac299bc63526f8a943c1629618f94ed6", + "mac": "24b8a400b978f712ba6a7805fae67d99725c7df2a77707d0b7e3decd862fb3bc" + }, + "description": "config-key" + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/domain-scheduler/bin/entrypoint b/dsoftbus/dist_executor/modules/functioncore/domain-scheduler/bin/entrypoint new file mode 100644 index 0000000000000000000000000000000000000000..e0db8e4cc5b03e6eb815200d9b36a464d39b3d1d --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/domain-scheduler/bin/entrypoint @@ -0,0 +1,30 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e + +if ! whoami &>/dev/null; then + if [ -w /etc/passwd ]; then + echo "${USER_NAME:-faas-operator}:x:$(id -u):$(id -g):${USER_NAME:-faas-operator} user:${HOME}:/sbin/nologin" >>/etc/passwd + fi +fi + +# prevent from creating files that have incorrect permission +umask 0027 + +if ! exec "${DOMAINSCHEDULER}" --module domain-scheduler "$@"; then + echo "failed to execute domain scheduler, error code: $?" +fi + +exit 0 diff --git a/dsoftbus/dist_executor/modules/functioncore/frontend/bin/entrypoint b/dsoftbus/dist_executor/modules/functioncore/frontend/bin/entrypoint new file mode 100644 index 0000000000000000000000000000000000000000..c665f851e7f997e56a2907ee32cfce1be15274c3 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/frontend/bin/entrypoint @@ -0,0 +1,32 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e + +if [ whoami != "${USER_NAME}" ]; then + if [ -w /etc/passwd ]; then + echo "${USER_NAME}:x:$(id -u):$(id -g):${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd + fi +fi + +# prevent from creating files that have incorrect permission +umask 0027 + +sudo bash "${HOME}"/bin/sudo_init.sh + + +if ! exec "${FRONTEND}" --module frontend "$@" ;then + echo "failed to start frontend $?" 
+ exit 1 +fi \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/frontend/bin/init-frontend.sh b/dsoftbus/dist_executor/modules/functioncore/frontend/bin/init-frontend.sh new file mode 100644 index 0000000000000000000000000000000000000000..523534e16a6f69737c49064a4082ade182be5faa --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/frontend/bin/init-frontend.sh @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +# Added the capability of Linux to receive TCP connections. + + +file="/proc/sys/net/core/somaxconn" +if [ -f "$file" ]; then + sysctl -w net.core.somaxconn=10240 +fi +file="/proc/sys/net/ipv4/tcp_max_syn_backlog" +if [ -f "$file" ]; then + sysctl -w net.ipv4.tcp_max_syn_backlog=10240 +fi +echo "config finished" +exit 0 diff --git a/dsoftbus/dist_executor/modules/functioncore/frontend/config/config.json b/dsoftbus/dist_executor/modules/functioncore/frontend/config/config.json new file mode 100644 index 0000000000000000000000000000000000000000..e9ddae40664bcd3358da775a895226390e427b7f --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/frontend/config/config.json @@ -0,0 +1,67 @@ +{ + "etcd": { + "servers": [ + "{{ETCD_ADDR}}" + ], + "user": "{{ETCD_USER}}", + "password": "{{ETCD_PASSWORD}}", + "sslEnable": false + }, + "worker-manager": [ + { + "host": "{{WORKERMGR_IP}}", + "port": "{{WORKERMGR_SVC_PORT}}", + "zone": "defaultaz" + } + ], + "http": { + "resptimeout": 43200, + "workerInstanceReadTimeOut": 43200, + "maxRequestBodySize": 6 + }, + "metricsEnable": false, + "slaQuota": 100, + "backpressureDisable": false, + "trafficLimitDisable": false, + "httpsConfig": { + "httpsEnable": false, + "tlsProtocol": "TLSv1.2", + "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + }, + "retry": { + "instanceExceptionRetry": true + }, + "metricsConfig": { + "metricsport": "9099", + "metricspath": "/metrics" + }, + "runtime": { + "host": "0.0.0.0", + "port": "{{FRONTEND_HTTP_PORT}}", + "http2port": "{{FRONTEND_HTTP2_PORT}}", + "grpcport": "{{FRONTEND_GRPC_PORT}}", + "azkey": "KUBERNETES_IO_AVAILABLEZONE", + "protocol": "http", + "priority": { + "default": 5 + } + }, + "trafficLimitParams": { + "instanceLimitRate": 20000, + "instanceBucketSize": 40000, + "funcLimitRate": 2000, + "funcBucketSize": 4000 + }, + "invokeLimit": { + "enable": false, + "tokenBucketSize": 1000, + "frequency": 1 + }, + "functionNameSeparator": "-", + "functionCapability": 2, + "usrReqAuthTimeout": 5, + "servicesPath": "{{SERVICES_PATH}}", + "schedule": { + "policy": 2 + } +} diff --git a/dsoftbus/dist_executor/modules/functioncore/frontend/config/log.json b/dsoftbus/dist_executor/modules/functioncore/frontend/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..78dafcc0b05d2d0b1e1274076c9223511e97e264 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/frontend/config/log.json @@ -0,0 +1,13 @@ +{ 
+ "filepath": "{{logConfigPath}}", + "level": "{{logLevel}}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/apple/a.txt b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/apple/a.txt new file mode 100644 index 0000000000000000000000000000000000000000..3709ccd9afd30c21d59bfae320a74b80daa14f41 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/apple/a.txt @@ -0,0 +1 @@ +f48f9d5a9706088947ac438ebe005aa26c9370579f2231c538b28894a315562182da0eb18002c86728c4cdc0df5efb19e1c2060e93370fd891d4f3d9e5b2b61376643f86d0210ce996446a985759b15112037a5a2f6463cf5fd6afc7ff30fe814bf960eb0c16c5059407c74d6a93a8b3110405cbc935dff672da3b648d62e0d5cecd91bc7063211e6b33210afb6899e8322eabffe167318a5ac5d591aa7579efd37e9e4c7fcf390e97c1151b7c1bf00b4a18764a1a0cac1fda1ea6389b39d755127f0e5bc072e6d5936738be1585535dc63b71ad58686f71c821325009de36bdbac31c1c044845bd1bb41230ec9815695ef3f9e7143a16410113ff3286147a76 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/boy/b.txt b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/boy/b.txt new file mode 100644 index 0000000000000000000000000000000000000000..ccf997f96126e67f30cdf76f1cb9dc8ef5bff9dd --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/boy/b.txt @@ -0,0 +1 @@ +5d3da9f432be72b34951c737053eb2c816aaccae2b390d092046288aa5ce2cc5b16529f8197de316303735fbc0c041ccc3885b9be5fef4933b6806febb940b6bb609b3bf1d1501110e3ba62c6d8b2cf4388a08a8e123a3cea96daec619fbca177bdf092461f5701b02e5af83ddf0f6ce40deb279cda3ec7d6805237d229e26e30555f3dd890b7306b42bdef0ca1f963dbe25cd00d75018ab3216fcd3b7002b8a493d015306bf264cca12718890ef11c8d9e54721ebd6bdecab6c7084442f45611f249d9b5d703414770a46380d0b97c0187185241e9b6187c8168414370649fe6e7afef83a0df645424c4b6c0631dc3ef50c30af37eda905a1886ca12474c68a \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/cat/c.txt b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/cat/c.txt new file mode 100644 index 0000000000000000000000000000000000000000..10995ca71de7c44692a6f781fad7c1de77af08ae --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/cat/c.txt @@ -0,0 +1 @@ +df04ed142bf01fb1fe6c3c85944d71b77041b1f9f364ae09280cda4cdd65de76a4ce0459f10890eefd36708f3d154aa8af202301a215679207f17c472f04f7d19e219edb5028e18613378b3296e0e8f9940d1785d1daf1aed07dfad3a12c7d129bfc54635d0c8a5bc10c55ea116de90765d687e6cb6d657fe36a654eb8393e657b77b0916f291fccbddbbaeb9a43d4418458970e7b7ee257b0c54a1b4466b5dba393e7934b398874bec060407e9bac139c8d00937c8e738f4e88401e0950fb968658c9e0cfd639b4a530da92f1abd6c2e06953d7f5d4bdf4f5b8385edc8aab8b8ec7e897d24bd8d736955c8e19676c0037ce5347f6997f7c2e32ba71e995fc5d \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/dog/d.txt b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/dog/d.txt new file mode 100644 index 0000000000000000000000000000000000000000..bff402ff7bfa253116d0d3b089d44517d60e3127 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/dog/d.txt @@ -0,0 +1 @@ 
+37a1b37efbb9bb6beadb4446f40aa2c4bcaeb298192fa390ed03ee65bfcd54e55da39bae9961b9fa0d4b89591e41eed835ed01cca315eab75ebaf8a9e7b02287a468ec6d0c61f9f8e4d58dad90fb8a6a13bee7fe4685dbb535bfdb7e76b328d66b4d4bc7aa48791b205d1d2f2ef176f2b5b80a8ddc34ed9514372130eb896bc18745facf059a7fa37ef5e2ef413d0030f5bca581055eb3b3565dca642651cb802530e2e4964ab3c8a37370adfd65c80483398a1a8668caed455deabae0dbae7fb2bcdeeee4c2a2d9431ed93c6527985ef684127691904c799e13f37daeb1cb7ebfb0904d61796362514e521ac0fed682fd952ca3e9ce9a7a4407aaaa44f8aab6 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/egg/e.txt b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/egg/e.txt new file mode 100644 index 0000000000000000000000000000000000000000..445c90b71075e95ba8d3fc248099078ee2fa1bce --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/frontend/resource/rdo/v1/egg/e.txt @@ -0,0 +1 @@ +43b0d158d9dcf4ffd416eb4e6a89d1b7a66d595c43329bb5c1c66d5befe33c37f31da53aaf539e43238457c46e1f28339cb9dda461c71c0ea2dba3dc8006684ff0d8d59ee2192582983c155e400d5b7cadcb65bbe682e61d175af54549796e447f3174b95f1f50998ae7785b5c0c359746e1ee6eeb989284fbe9e0f801ce5a7267285afbab7694c0e8434d6b86991298a46039de4d1fbfd824b8337b11c2d0b2f30ed4d46312e315cd9042abddc09ea73169f9e1f5baa496d44ed5cac9659cab076212499ef09a56db69e7444d665195a0562a7c82d176d027b0ecc7f4a26215e003fd463bf3911633baf85ee98f9187357a65ee2869b3d93a3871d830b4034e \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/function-repo/bin/entrypoint b/dsoftbus/dist_executor/modules/functioncore/function-repo/bin/entrypoint new file mode 100644 index 0000000000000000000000000000000000000000..31adc526fb61fed890d167ea5dc1859d66fa04d6 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/function-repo/bin/entrypoint @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e + +if [ "$(whoami)X" != "${USER_NAME}X" ]; then + if [ -w /etc/passwd ]; then + echo "${USER_NAME:-sn}:x:$(id -u):$(id -g):${USER_NAME:-sn} user:${HOME}:/sbin/nologin" >> /etc/passwd + fi +fi + +cd "${HOME}"/bin || (echo "failed to change directory" && exit 1) +if ! tar -xf node_modules.tar; then + echo "tar node_modules error, trace chain of nodeJS is not functional!" +fi + +# prevent from creating files that have incorrect permission +umask 0027 + +if ! exec "${FUNCTIONREPO}" --module function-repo "$@" ;then + echo "failed to start function repo $?" 
+ exit 1 +fi +exit 0 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/function-repo/config/config.json b/dsoftbus/dist_executor/modules/functioncore/function-repo/config/config.json new file mode 100644 index 0000000000000000000000000000000000000000..19baa03e76b37253f88d55f68013d9106bd1e621 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/function-repo/config/config.json @@ -0,0 +1,118 @@ +{ + "etcd": { + "servers": ["{etcd_ip}:{etcd_port}"], + "user": "{username}", + "password": "{etcd_password}", + "sslEnable": false + }, + "function": { + "default": { + "version": "$latest", + "envPrefix": "func-", + "pageIndex": 1, + "pageSize": 1000, + "cpuList": [ + 500, + 1500, + 4000 + ], + "memoryList": [ + 500, + 3000, + 16000 + ], + "timeout": 86400, + "defaultMinInstance": "0", + "defaultMaxInstance": "100", + "defaultConcurrentNum": "100", + "maxInstanceUpperLimit": "10000", + "concurrentNumUpperLimit": "100" + }, + "package": { + "uploadTempPath": "{install_dir}/upload", + "zipFileSizeMaxMB": 500, + "unzipFileSizeMaxMB": 1000, + "fileCountsMax": 30000, + "dirDepthMax": 20, + "ioReadTimeout": 100000 + }, + "versionMax": 8, + "aliasMax": 8, + "layerMax": 5 + }, + "bucket": [ + { + "bucketId": "bucket-test-log1", + "businessId": "yrk", + "appId": "61022", + "appSecret": "E8305B6B5802595E198B82985F61014E:85D87E1102D94A246C58F8A0DCC829B99A88DCA6F5DDAA658CEE0C27D60385F984FC2F3035067A038280C7F62BE57AE9", + "url": "http://{minio_ip}:{minio_port}", + "writable": 1, + "description": "11", + "createTime": "2019-04-24 18:15:53", + "updateTime": "2019-04-24 18:15:53" + } + ], + "runtimeType": [ + "nodejs10.15.2", + "java1.8", + "python3.7", + "python3.8", + "python3.9", + "cpp11", + "cpp11-bin", + "custom-runtime", + "go1.13" + ], + "fileServer": { + "storageType": "s3", + "s3": { + "endpoint": "{minio_ip}:{minio_port}", + "accessKey": "d06b003f7d8d60c8ac3d7fa8:5cb9bbf58a0b306a7a8c57b0b933f1b08522c41c", + "secretKey": "da701b304eb4a523aa13becc:fb120bfb517a8916b0608165f154d166b9adc07e32f59ad864699af7", + "secure": false, + "caFile": "/home/sn/module/ca.crt", + "presignedUrlExpires": 3600, + "timeout": 40, + "trustedCA": false + } + }, + "trigger": { + "urlPrefix": "http://{local_ip}/service/", + "type": [ + { + "sourceProvider": "aa", + "effect": "bb", + "action": "cc" + } + ] + }, + "compatibleRuntimeType": [ + "nodejs10.15.2", + "java1.8", + "python3.7", + "python3.8", + "cpp11", + "cpp11-bin", + "custom-runtime", + "go1.13" + ], + "urn": { + "prefix": "sn", + "zone": "cn", + "resourceType": "function" + }, + "env": { + "timeZone": "Asia/Shanghai", + "nodejsLdPath": "/lib64:/usr/lib64", + "nodejsPath": "/home/snuser/runtime/node_modules/:/node_modules:/usr/local/lib/nodejs/node-v10.16.0-linux-x64/lib/node_modules", + "javaLdPath": "/lib64:/usr/lib64", + "javaPath": "/opt/huawei/jre1.8.0_252/bin", + "cppLdPath": "/usr/local/lib", + "pythonLdPath": "/lib64:/usr/lib64", + "pythonPath": "/usr/lib/python3.7/lib-dynload:/usr/local/lib/python3.7/dist-packages:/usr/local/lib/python3.7/dist-packages/pip-20.1.1-py3.7.egg:/usr/lib/python3/dist-packages" + }, + "server": { + "port": {repo_port} + } +} diff --git a/dsoftbus/dist_executor/modules/functioncore/function-repo/config/log.json b/dsoftbus/dist_executor/modules/functioncore/function-repo/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..abdece33f429a084fec8155ba09a44644e090eda --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/function-repo/config/log.json @@ 
-0,0 +1,13 @@ +{ + "filepath": "{logConfigPath}", + "level": "{logLevel}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/apple/a.txt b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/apple/a.txt new file mode 100644 index 0000000000000000000000000000000000000000..3709ccd9afd30c21d59bfae320a74b80daa14f41 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/apple/a.txt @@ -0,0 +1 @@ +f48f9d5a9706088947ac438ebe005aa26c9370579f2231c538b28894a315562182da0eb18002c86728c4cdc0df5efb19e1c2060e93370fd891d4f3d9e5b2b61376643f86d0210ce996446a985759b15112037a5a2f6463cf5fd6afc7ff30fe814bf960eb0c16c5059407c74d6a93a8b3110405cbc935dff672da3b648d62e0d5cecd91bc7063211e6b33210afb6899e8322eabffe167318a5ac5d591aa7579efd37e9e4c7fcf390e97c1151b7c1bf00b4a18764a1a0cac1fda1ea6389b39d755127f0e5bc072e6d5936738be1585535dc63b71ad58686f71c821325009de36bdbac31c1c044845bd1bb41230ec9815695ef3f9e7143a16410113ff3286147a76 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/boy/b.txt b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/boy/b.txt new file mode 100644 index 0000000000000000000000000000000000000000..ccf997f96126e67f30cdf76f1cb9dc8ef5bff9dd --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/boy/b.txt @@ -0,0 +1 @@ +5d3da9f432be72b34951c737053eb2c816aaccae2b390d092046288aa5ce2cc5b16529f8197de316303735fbc0c041ccc3885b9be5fef4933b6806febb940b6bb609b3bf1d1501110e3ba62c6d8b2cf4388a08a8e123a3cea96daec619fbca177bdf092461f5701b02e5af83ddf0f6ce40deb279cda3ec7d6805237d229e26e30555f3dd890b7306b42bdef0ca1f963dbe25cd00d75018ab3216fcd3b7002b8a493d015306bf264cca12718890ef11c8d9e54721ebd6bdecab6c7084442f45611f249d9b5d703414770a46380d0b97c0187185241e9b6187c8168414370649fe6e7afef83a0df645424c4b6c0631dc3ef50c30af37eda905a1886ca12474c68a \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/cat/c.txt b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/cat/c.txt new file mode 100644 index 0000000000000000000000000000000000000000..10995ca71de7c44692a6f781fad7c1de77af08ae --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/cat/c.txt @@ -0,0 +1 @@ +df04ed142bf01fb1fe6c3c85944d71b77041b1f9f364ae09280cda4cdd65de76a4ce0459f10890eefd36708f3d154aa8af202301a215679207f17c472f04f7d19e219edb5028e18613378b3296e0e8f9940d1785d1daf1aed07dfad3a12c7d129bfc54635d0c8a5bc10c55ea116de90765d687e6cb6d657fe36a654eb8393e657b77b0916f291fccbddbbaeb9a43d4418458970e7b7ee257b0c54a1b4466b5dba393e7934b398874bec060407e9bac139c8d00937c8e738f4e88401e0950fb968658c9e0cfd639b4a530da92f1abd6c2e06953d7f5d4bdf4f5b8385edc8aab8b8ec7e897d24bd8d736955c8e19676c0037ce5347f6997f7c2e32ba71e995fc5d \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/dog/d.txt b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/dog/d.txt new file mode 100644 index 0000000000000000000000000000000000000000..bff402ff7bfa253116d0d3b089d44517d60e3127 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/dog/d.txt @@ -0,0 +1 @@ 
+37a1b37efbb9bb6beadb4446f40aa2c4bcaeb298192fa390ed03ee65bfcd54e55da39bae9961b9fa0d4b89591e41eed835ed01cca315eab75ebaf8a9e7b02287a468ec6d0c61f9f8e4d58dad90fb8a6a13bee7fe4685dbb535bfdb7e76b328d66b4d4bc7aa48791b205d1d2f2ef176f2b5b80a8ddc34ed9514372130eb896bc18745facf059a7fa37ef5e2ef413d0030f5bca581055eb3b3565dca642651cb802530e2e4964ab3c8a37370adfd65c80483398a1a8668caed455deabae0dbae7fb2bcdeeee4c2a2d9431ed93c6527985ef684127691904c799e13f37daeb1cb7ebfb0904d61796362514e521ac0fed682fd952ca3e9ce9a7a4407aaaa44f8aab6 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/egg/e.txt b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/egg/e.txt new file mode 100644 index 0000000000000000000000000000000000000000..445c90b71075e95ba8d3fc248099078ee2fa1bce --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/function-repo/resource/rdo/v1/egg/e.txt @@ -0,0 +1 @@ +43b0d158d9dcf4ffd416eb4e6a89d1b7a66d595c43329bb5c1c66d5befe33c37f31da53aaf539e43238457c46e1f28339cb9dda461c71c0ea2dba3dc8006684ff0d8d59ee2192582983c155e400d5b7cadcb65bbe682e61d175af54549796e447f3174b95f1f50998ae7785b5c0c359746e1ee6eeb989284fbe9e0f801ce5a7267285afbab7694c0e8434d6b86991298a46039de4d1fbfd824b8337b11c2d0b2f30ed4d46312e315cd9042abddc09ea73169f9e1f5baa496d44ed5cac9659cab076212499ef09a56db69e7444d665195a0562a7c82d176d027b0ecc7f4a26215e003fd463bf3911633baf85ee98f9187357a65ee2869b3d93a3871d830b4034e \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/bin/entrypoint b/dsoftbus/dist_executor/modules/functioncore/functiontask/bin/entrypoint new file mode 100644 index 0000000000000000000000000000000000000000..ea4579615a7a9e7bdf9ba2dc745d8acd491add83 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/bin/entrypoint @@ -0,0 +1,28 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
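+
+# Entrypoint for the functiontask module: it registers the runtime user in
+# /etc/passwd when the container starts under an arbitrary UID, tightens the
+# umask, runs the privileged init script, and then execs the service binary.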
+
+set -e
+
+if [ "$(whoami)" != "${USER_NAME}" ]; then
+    if [ -w /etc/passwd ]; then
+        echo "${USER_NAME}:x:$(id -u):$(id -g):${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd
+    fi
+fi
+
+# prevent from creating files that have incorrect permission
+umask 0027
+
+sudo bash "${HOME}"/bin/sudo_init.sh
+
+exec "${FUNCTIONTASK}" --module functiontask "$@"
diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/config/conf.json b/dsoftbus/dist_executor/modules/functioncore/functiontask/config/conf.json
new file mode 100644
index 0000000000000000000000000000000000000000..0447d222f9d166e6ea9c84bd8a730b4217842558
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/config/conf.json
@@ -0,0 +1,41 @@
+{
+    "state_server_addr": "{{REDIS_ADDR}}",
+    "state_server_enable_tls": false,
+    "state_server_mode": "single",
+    "state_server_password": "{{REDIS_PASSWORD}}",
+    "http_invoke_port": 8080,
+    "p2p_invoke_port": 8017,
+    "call_timeout": 15,
+    "init_call_timeout": 300,
+    "max_wait_future_cnt": 1000,
+    "resolve_deadlock": true,
+    "persistent_state": true,
+    "persistent_future": true,
+    "init_state_client": true,
+    "max_nums_of_state_id": 10000,
+    "busstorageconfig": {
+        "storagetype": "{{STATE_STORAGE_TYPE}}",
+        "redisclientconfig": {
+            "server_addr": "{{REDIS_ADDR}}",
+            "server_enable_tls": false,
+            "server_mode": "single",
+            "server_password": "{{REDIS_PASSWORD}}"
+        },
+        "s3clientconfig": {
+            "endpoint": "{{S3_ADDR}}",
+            "accessKey": "d06b003f7d8d60c8ac3d7fa8:5cb9bbf58a0b306a7a8c57b0b933f1b08522c41c",
+            "secretKey": "418213bcfc5c5d10e046fbfc:48f48e7562a65ad14aa3031c2af432b289a4a837239bb571801b",
+            "secure": false,
+            "caFile": "/home/sn/module/ca.crt",
+            "presignedUrlExpires": 3600,
+            "timeout": 40,
+            "trustedCA": false
+        }
+    },
+    "future_ttl": 43200,
+    "external_future_ttl": 43200,
+    "redis_max_idle": 16,
+    "redis_max_active": 16,
+    "functionNameSeparator": "-",
+    "functionCapability": 2
+}
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/config/config.json b/dsoftbus/dist_executor/modules/functioncore/functiontask/config/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..1e949d7fd40b7ae73a75b13ebc9ae206df47caaf
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/config/config.json
@@ -0,0 +1,50 @@
+{
+    "invokeLimit": {
+        "enable": false,
+        "tokenBucketSize": 1000,
+        "frequency": 1
+    },
+    "metrics": {
+        "metricsport": "9098",
+        "metricspath": "/metrics",
+        "prometheus_server_address": "http://prometheus-server.monitor:80"
+    },
+    "httpsConfig": {
+        "httpsEnable": false,
+        "tlsProtocol": "TLSv1.2",
+        "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+    },
+    "retry": {
+        "instanceExceptionRetry": true
+    },
+    "requestConfig": {
+        "requestWorkerTimeout": 36
+    },
+    "etcd": {
+        "servers": ["{{ETCD_ADDR}}"],
+        "user": "{{ETCD_USER}}",
+        "password": "{{ETCD_PASSWORD}}",
+        "sslEnable": false
+    },
+    "workerManager": {
+        "serviceName": "{{WORKERMGR_IP}}",
+        "port": "{{WORKERMGR_SVC_PORT}}"
+    },
+    "dataSystem": {
+        "enable": true,
+        "workerPort": "31501"
+    },
+    "schedule": {
+        "policy": 2,
+        "forwardScheduleResourceNotEnough": true,
+        "sleepingMemoryThreshold": 0.8,
+        "selectInstanceToSleepingPolicy": "LRU"
+    },
+    "functionCapability": 2,
+    "upgradingTimeout": 900,
+    "forwardLoadBalancePolicy": "roundRobin",
+    "recoverEnable": false,
+
"runtimeHeartbeatInterval": 1000, + "runtimeHeartbeatEnable": true, + "servicesPath": "{{SERVICES_PATH}}" +} diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/config/log.json b/dsoftbus/dist_executor/modules/functioncore/functiontask/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..280899119e57ac07d8887bfd1ed89cb79eac2ddc --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{{logConfigPath}}", + "level": "{{logLevel}}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/apple/a.txt b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/apple/a.txt new file mode 100644 index 0000000000000000000000000000000000000000..3709ccd9afd30c21d59bfae320a74b80daa14f41 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/apple/a.txt @@ -0,0 +1 @@ +f48f9d5a9706088947ac438ebe005aa26c9370579f2231c538b28894a315562182da0eb18002c86728c4cdc0df5efb19e1c2060e93370fd891d4f3d9e5b2b61376643f86d0210ce996446a985759b15112037a5a2f6463cf5fd6afc7ff30fe814bf960eb0c16c5059407c74d6a93a8b3110405cbc935dff672da3b648d62e0d5cecd91bc7063211e6b33210afb6899e8322eabffe167318a5ac5d591aa7579efd37e9e4c7fcf390e97c1151b7c1bf00b4a18764a1a0cac1fda1ea6389b39d755127f0e5bc072e6d5936738be1585535dc63b71ad58686f71c821325009de36bdbac31c1c044845bd1bb41230ec9815695ef3f9e7143a16410113ff3286147a76 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/boy/b.txt b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/boy/b.txt new file mode 100644 index 0000000000000000000000000000000000000000..ccf997f96126e67f30cdf76f1cb9dc8ef5bff9dd --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/boy/b.txt @@ -0,0 +1 @@ +5d3da9f432be72b34951c737053eb2c816aaccae2b390d092046288aa5ce2cc5b16529f8197de316303735fbc0c041ccc3885b9be5fef4933b6806febb940b6bb609b3bf1d1501110e3ba62c6d8b2cf4388a08a8e123a3cea96daec619fbca177bdf092461f5701b02e5af83ddf0f6ce40deb279cda3ec7d6805237d229e26e30555f3dd890b7306b42bdef0ca1f963dbe25cd00d75018ab3216fcd3b7002b8a493d015306bf264cca12718890ef11c8d9e54721ebd6bdecab6c7084442f45611f249d9b5d703414770a46380d0b97c0187185241e9b6187c8168414370649fe6e7afef83a0df645424c4b6c0631dc3ef50c30af37eda905a1886ca12474c68a \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/cat/c.txt b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/cat/c.txt new file mode 100644 index 0000000000000000000000000000000000000000..10995ca71de7c44692a6f781fad7c1de77af08ae --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/cat/c.txt @@ -0,0 +1 @@ +df04ed142bf01fb1fe6c3c85944d71b77041b1f9f364ae09280cda4cdd65de76a4ce0459f10890eefd36708f3d154aa8af202301a215679207f17c472f04f7d19e219edb5028e18613378b3296e0e8f9940d1785d1daf1aed07dfad3a12c7d129bfc54635d0c8a5bc10c55ea116de90765d687e6cb6d657fe36a654eb8393e657b77b0916f291fccbddbbaeb9a43d4418458970e7b7ee257b0c54a1b4466b5dba393e7934b398874bec060407e9bac139c8d00937c8e738f4e88401e0950fb968658c9e0cfd639b4a530da92f1abd6c2e06953d7f5d4bdf4f5b8385edc8aab8b8ec7e897d24bd8d736955c8e19676c0037ce5347f6997f7c2e32ba71e995fc5d \ 
No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/dog/d.txt b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/dog/d.txt new file mode 100644 index 0000000000000000000000000000000000000000..bff402ff7bfa253116d0d3b089d44517d60e3127 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/dog/d.txt @@ -0,0 +1 @@ +37a1b37efbb9bb6beadb4446f40aa2c4bcaeb298192fa390ed03ee65bfcd54e55da39bae9961b9fa0d4b89591e41eed835ed01cca315eab75ebaf8a9e7b02287a468ec6d0c61f9f8e4d58dad90fb8a6a13bee7fe4685dbb535bfdb7e76b328d66b4d4bc7aa48791b205d1d2f2ef176f2b5b80a8ddc34ed9514372130eb896bc18745facf059a7fa37ef5e2ef413d0030f5bca581055eb3b3565dca642651cb802530e2e4964ab3c8a37370adfd65c80483398a1a8668caed455deabae0dbae7fb2bcdeeee4c2a2d9431ed93c6527985ef684127691904c799e13f37daeb1cb7ebfb0904d61796362514e521ac0fed682fd952ca3e9ce9a7a4407aaaa44f8aab6 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/egg/e.txt b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/egg/e.txt new file mode 100644 index 0000000000000000000000000000000000000000..445c90b71075e95ba8d3fc248099078ee2fa1bce --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/rdo/v1/egg/e.txt @@ -0,0 +1 @@ +43b0d158d9dcf4ffd416eb4e6a89d1b7a66d595c43329bb5c1c66d5befe33c37f31da53aaf539e43238457c46e1f28339cb9dda461c71c0ea2dba3dc8006684ff0d8d59ee2192582983c155e400d5b7cadcb65bbe682e61d175af54549796e447f3174b95f1f50998ae7785b5c0c359746e1ee6eeb989284fbe9e0f801ce5a7267285afbab7694c0e8434d6b86991298a46039de4d1fbfd824b8337b11c2d0b2f30ed4d46312e315cd9042abddc09ea73169f9e1f5baa496d44ed5cac9659cab076212499ef09a56db69e7444d665195a0562a7c82d176d027b0ecc7f4a26215e003fd463bf3911633baf85ee98f9187357a65ee2869b3d93a3871d830b4034e \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/wdo/wdo.json b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/wdo/wdo.json new file mode 100644 index 0000000000000000000000000000000000000000..2c915dd611ee9740f4238540e4d9c408580fe1d9 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/functiontask/resource/wdo/wdo.json @@ -0,0 +1,37 @@ +{ + "config-key": { + "keys": { + "key": "64af9454204aae70f5376c17:6b1a86c83003bc10399c1a654144c7613c62a4c4c4cd92b2db3573fc0e71b18f5908b6edb44231d55a253701cdaf6f75ae1759486aa1666f546349db362ce9f62610b6c7ba8309e913fef5be3ad3507f", + "mac": "24b8a400b978f712ba6a7805fae67d99725c7df2a77707d0b7e3decd862fb3bc" + }, + "description": "config-key" + }, + "etcd-key": { + "keys": { + "key": "acaf1180283050d2287cad29:c9b96e9f6a0624110545f29c579d8bb6033893a22e2b9f6650ebd4bf86974b200b80f89ab7a5050de0aa8b4cede01898280d2fdbdcc108939a74348618b0804e50389e590ec15eb79d27a6ba09a44a5b", + "mac": "ec16d9a41faf725c0b859ce603a70b3eb3c7495ad37436df068627ec6ce83e9a" + }, + "description": "etcd-key" + }, + "yaml-key": { + "keys": { + "key": "d3be30262a1226447eccd996:ef9216ba0dce3365438d26f8359ac491c0917c4f05b7002bec9e3f82d09004046e8521e8bda08420377a931ba220014b0df950caa8906a7f2e429cbd84197045bdd90c16c235bea1939c9e3a685ba565", + "mac": "bc7914d82045ec0da9c69733cfdfb50ac2e7ea1e858df4072e04a247e1b07807" + }, + "description": "yaml-key" + }, + "secretEncryptKey": { + "keys": { + "key": 
"0555a5d99b2d35d7ece65e58:928ba6da9c987943747f8a52e2d7d2777027aa5dff85adb15e3a5ff9f8b03448f00079e5176a1ace878c5fe51ed03861a78952d40e4de537f2d263c520f858789c5dbe8284bc9a6a8ff227f2647136cc", + "mac": "95103141eb8c257b1e973bf4e5458346427bf84236ee2da93b56bf1327add930" + }, + "description": "secretEncryptKey" + }, + "workerProxyKey": { + "keys": { + "key": "f0b24a4a8e575da9483a4d18:9cd21ad679bb46bc54c4b2879f2f9e168ec1d67e4eaf048fd24cc6f2de4031d4cee7f50f325f0406744578c7b1fddb39d981b403e6193af537ddf33860411036aefb1a29f1decd960fc42a4cc048454c", + "mac": "0be0fb206c134657852080188b67118ee0b98cf42434ef9fa25580959e24c46c" + }, + "description": "workerProxyKey" + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/runtime-manager/bin/entrypoint b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/bin/entrypoint new file mode 100644 index 0000000000000000000000000000000000000000..653f339353e4d71683fac2afe103c1521619b360 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/bin/entrypoint @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e +RUNTIMECONFIG_PATH="/home/sn/config/runtime-manager-config.json" + +function getCfgValByKey() { + echo $(cat ${RUNTIMECONFIG_PATH} | sed 's/,/\n/g' | grep $1 | sed 's/:/\n/g' | sed '1d' | sed 's/}//g') +} + +# limiting the number of file handles +ulimit -n $(getCfgValByKey "maxFdNum") + +# prevent from creating files that have incorrect permission +umask 0027 + +exec ${RUNTIMEMANAGER} --module runtime-manager $@ +exit 0 diff --git a/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/custom-runtime-config.yaml b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/custom-runtime-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75a781208113a31c991b0ae7acfc38caa20fa7a0 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/custom-runtime-config.yaml @@ -0,0 +1,27 @@ +{ + "serverUrl": { + "nextPath": "/v1/runtime/invocation/request", + "responsePath": "/v1/runtime/invocation/response/{request_id}", + "errorPath": "/v1/runtime/invocation/error/{request_id}" + }, + "serverHeaders": { + "headerInvokeIDKey": "X-Cff-Request-Id", + "headerAccessKey": "X-CFF-Access-Key", + "headerSecretKey ": "X-CFF-Secret-Key", + "headerAuthToken": "X-CFF-Auth-Token", + "headerSecurityToken": "X-CFF-Security-Token" + }, + "serverEnvKey": { + "runtimeProject": "RUNTIME_PROJECT_ID", + "runtimeFuncName": "RUNTIME_FUNC_NAME", + "runtimeFuncVersion ": "RUNTIME_FUNC_VERSION", + "runtimePackage": "RUNTIME_PACKAGE", + "runtimeTime": "RUNTIME_TIMEOUT", + "runtimeHandler": "RUNTIME_HANDLER", + "runtimeUserData": "RUNTIME_USERDATA", + "runtimeCPU ": "RUNTIME_CPU", + "runtimeMemory": "RUNTIME_MEMORY", + "runtimeCodeRoot": "RUNTIME_CODE_ROOT", + "runtimeApiAddr": "RUNTIME_API_ADDR" + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/log.json 
b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..280899119e57ac07d8887bfd1ed89cb79eac2ddc --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{{logConfigPath}}", + "level": "{{logLevel}}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/python-runtime-log.json b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/python-runtime-log.json new file mode 100644 index 0000000000000000000000000000000000000000..2b4fb6671eb06cfbce55d27d597d3a1c8c77079d --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/python-runtime-log.json @@ -0,0 +1,26 @@ +{ + "version": 1, + "disable_existing_loggers": false, + "formatters": { + "extra": { + "format": "[%(asctime)s %(levelname)s %(filename)s:%(lineno)d] [%(podname)s %(thread)d] %(message)s" + } + }, + "handlers": { + "file": { + "class": "logging.handlers.RotatingFileHandler", + "filename": "{{LOG_PATH}}", + "formatter": "extra", + "maxBytes": 419430400, + "backupCount": 1 + } + }, + "loggers": { + "FileLogger": { + "handlers": [ + "file" + ], + "level": "{{logLevel}}" + } + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/runtime-manager-config.json b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/runtime-manager-config.json new file mode 100644 index 0000000000000000000000000000000000000000..0e472e3e154a8ae1327c736f6580c7d3666296a9 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/runtime-manager-config.json @@ -0,0 +1,38 @@ +{ + "LokiPath": "", + "ServerPort": {{serverPort}}, + "useNewRuntimePath": false, + "portNum": 10000, + "retryTimes": 2, + "recycleLog": 604800, + "metricsCollectorType": "proc", + "runtimeConfigs": { + "initial": { + "port": {{initPort}} + }, + "java8": { + "port": 31537, + "enable": false + }, + "python3.8": { + "port": 31539, + "enable": false + }, + "nodejs10.15.2": { + "port": 31540, + "enable": false + }, + "cpp11": { + "port": 31541, + "enable": false + }, + "custom-runtime": { + "port": 31538, + "enable": false + }, + "posix-custom-runtime": { + "port": 31542, + "enable": false + } + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/runtime.json b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/runtime.json new file mode 100644 index 0000000000000000000000000000000000000000..f85dbf047d74a6a0b56ebf9fcf0ef815022aa694 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/runtime-manager/config/runtime.json @@ -0,0 +1,5 @@ +{ + "maxRequestBodySize": "6", + "maxFdNum": 1024, + "dataSystemConnectionTimeout": "1" +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker-manager/bin/entrypoint b/dsoftbus/dist_executor/modules/functioncore/worker-manager/bin/entrypoint new file mode 100644 index 0000000000000000000000000000000000000000..d77e140a600d87efc5091f082218aad3dcbe6d00 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker-manager/bin/entrypoint @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is 
licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+set -e
+
+if [ "$(whoami)" != "${USER_NAME}" ]; then
+    if [ -w /etc/passwd ]; then
+        echo "${USER_NAME}:x:$(id -u):$(id -g):${USER_NAME} user:${HOME}:/sbin/nologin" >> /etc/passwd
+    fi
+fi
+
+[[ -f /home/sn/config/stunnel.conf ]] && stunnel /home/sn/config/stunnel.conf
+# prevent from creating files that have incorrect permission
+umask 0027
+
+sudo bash "${HOME}"/bin/sudo_init.sh
+
+exec "${WORKERMANAGER}" --module worker-manager "$@"
diff --git a/dsoftbus/dist_executor/modules/functioncore/worker-manager/config/log.json b/dsoftbus/dist_executor/modules/functioncore/worker-manager/config/log.json
new file mode 100644
index 0000000000000000000000000000000000000000..2982a56a8ebcf379a776d3859c939de0fae1eebd
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/functioncore/worker-manager/config/log.json
@@ -0,0 +1,13 @@
+{
+    "filepath": "{logConfigPath}",
+    "level": "{logLevel}",
+    "rolling": {
+        "maxsize": 400,
+        "maxbackups": 1,
+        "maxage": 1,
+        "compress": true
+    },
+    "tick": 10,
+    "first": 10,
+    "thereafter": 5
+}
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/functioncore/worker-manager/config/worker-manager.conf b/dsoftbus/dist_executor/modules/functioncore/worker-manager/config/worker-manager.conf
new file mode 100644
index 0000000000000000000000000000000000000000..2cd6c11ccfc786875a1d9096c410899006bf703d
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/functioncore/worker-manager/config/worker-manager.conf
@@ -0,0 +1,92 @@
+{
+    "etcd": {
+        "servers": [
+            "{etcd_ip}:{etcd_port}"
+        ],
+        "user": "{username}",
+        "password": "{etcd_password}",
+        "sslEnable": false,
+        "limitRate": 500,
+        "limitBurst": 500,
+        "limitTimeout": 5
+    },
+    "globalScheduler": {
+        "levels": [
+            1000,
+            2
+        ],
+        "deployDomainMode": "process",
+        "domainSchedulerPort": 22771,
+        "domainSchedulerPoolSize": 200,
+        "globalSchedulerPort": {global_scheduler_port},
+        "kubernetes": {
+            "namespace": "default",
+            "limitMem": "1024Mi",
+            "limitCPU": "1",
+            "requestMem": "256Mi",
+            "requestCPU": "0.5",
+            "domainSchedulerImage": "domain-scheduler:latest"
+        }
+    },
+    "etcdcluster": {
+        "electionttl": 5,
+        "forwardrequest": true
+    },
+    "deployDir": "{deploy_dir}",
+    "httpsConfig": {
+        "httpsEnable": false,
+        "tlsProtocol": "TLSv1.2",
+        "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384"
+    },
+    "kubeCfgTimeout": 30,
+    "resourceManagementType": "etcd",
+    "podGeneralizeTimeout": 33,
+    "idleWorkerTimeout": 300,
+    "timeout": 900,
+    "poolMgrConfig": {
+        "dynamic": {
+            "workerImage": "runtime-manager:testcpp0224",
+            "resourcePercent": 30
+        },
+        "enableLocalCache": false,
+        "enableMinio": true,
+        "runtimeConfigs": {},
+        "poolList": {
+            "podgctime": 5,
+            "hostaliaseshostname": [
+                {
+                    "ip": "127.0.0.1",
+                    "hostnames": [
+                        "bucket-test-log1.hwcloudtest.cn",
+                        "sn-repository-test-cn.hwcloudtest.cn"
+                    ]
+                }
+            ],
+            "pool": []
+        },
+        "gcConfig": {
+            "terminationGracePeriodSeconds": 900
+        },
+        "volumeConfig": {
+            "logMountPath": "/home/sn/log",
+
"runtimeLogMountPath": "/home/snuser/log", + "userOutputLogMountPath": "/home/snuser/logs", + "packageHostPath": "", + "userHostVolumeMap": {}, + "userStorageConfig": { + "userStorageEnable": false, + "userStoragePvcName": "pvc-yuanrong", + "userStoragePvcMountPath": "/tmp/sfsData" + } + }, + "customCAConfig": { + "customCAEnable": false, + "caFilePath": "/home/sn/certs/ca" + }, + "workerAuthEnable": false + }, + "functionNameSeparator": "-", + "functionCapability": 2, + "functionbootstrapEnable": false, + "httpEnable": false +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/apple/a.txt b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/apple/a.txt new file mode 100644 index 0000000000000000000000000000000000000000..3709ccd9afd30c21d59bfae320a74b80daa14f41 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/apple/a.txt @@ -0,0 +1 @@ +f48f9d5a9706088947ac438ebe005aa26c9370579f2231c538b28894a315562182da0eb18002c86728c4cdc0df5efb19e1c2060e93370fd891d4f3d9e5b2b61376643f86d0210ce996446a985759b15112037a5a2f6463cf5fd6afc7ff30fe814bf960eb0c16c5059407c74d6a93a8b3110405cbc935dff672da3b648d62e0d5cecd91bc7063211e6b33210afb6899e8322eabffe167318a5ac5d591aa7579efd37e9e4c7fcf390e97c1151b7c1bf00b4a18764a1a0cac1fda1ea6389b39d755127f0e5bc072e6d5936738be1585535dc63b71ad58686f71c821325009de36bdbac31c1c044845bd1bb41230ec9815695ef3f9e7143a16410113ff3286147a76 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/boy/b.txt b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/boy/b.txt new file mode 100644 index 0000000000000000000000000000000000000000..ccf997f96126e67f30cdf76f1cb9dc8ef5bff9dd --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/boy/b.txt @@ -0,0 +1 @@ +5d3da9f432be72b34951c737053eb2c816aaccae2b390d092046288aa5ce2cc5b16529f8197de316303735fbc0c041ccc3885b9be5fef4933b6806febb940b6bb609b3bf1d1501110e3ba62c6d8b2cf4388a08a8e123a3cea96daec619fbca177bdf092461f5701b02e5af83ddf0f6ce40deb279cda3ec7d6805237d229e26e30555f3dd890b7306b42bdef0ca1f963dbe25cd00d75018ab3216fcd3b7002b8a493d015306bf264cca12718890ef11c8d9e54721ebd6bdecab6c7084442f45611f249d9b5d703414770a46380d0b97c0187185241e9b6187c8168414370649fe6e7afef83a0df645424c4b6c0631dc3ef50c30af37eda905a1886ca12474c68a \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/cat/c.txt b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/cat/c.txt new file mode 100644 index 0000000000000000000000000000000000000000..10995ca71de7c44692a6f781fad7c1de77af08ae --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/cat/c.txt @@ -0,0 +1 @@ +df04ed142bf01fb1fe6c3c85944d71b77041b1f9f364ae09280cda4cdd65de76a4ce0459f10890eefd36708f3d154aa8af202301a215679207f17c472f04f7d19e219edb5028e18613378b3296e0e8f9940d1785d1daf1aed07dfad3a12c7d129bfc54635d0c8a5bc10c55ea116de90765d687e6cb6d657fe36a654eb8393e657b77b0916f291fccbddbbaeb9a43d4418458970e7b7ee257b0c54a1b4466b5dba393e7934b398874bec060407e9bac139c8d00937c8e738f4e88401e0950fb968658c9e0cfd639b4a530da92f1abd6c2e06953d7f5d4bdf4f5b8385edc8aab8b8ec7e897d24bd8d736955c8e19676c0037ce5347f6997f7c2e32ba71e995fc5d \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/dog/d.txt 
b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/dog/d.txt new file mode 100644 index 0000000000000000000000000000000000000000..bff402ff7bfa253116d0d3b089d44517d60e3127 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/dog/d.txt @@ -0,0 +1 @@ +37a1b37efbb9bb6beadb4446f40aa2c4bcaeb298192fa390ed03ee65bfcd54e55da39bae9961b9fa0d4b89591e41eed835ed01cca315eab75ebaf8a9e7b02287a468ec6d0c61f9f8e4d58dad90fb8a6a13bee7fe4685dbb535bfdb7e76b328d66b4d4bc7aa48791b205d1d2f2ef176f2b5b80a8ddc34ed9514372130eb896bc18745facf059a7fa37ef5e2ef413d0030f5bca581055eb3b3565dca642651cb802530e2e4964ab3c8a37370adfd65c80483398a1a8668caed455deabae0dbae7fb2bcdeeee4c2a2d9431ed93c6527985ef684127691904c799e13f37daeb1cb7ebfb0904d61796362514e521ac0fed682fd952ca3e9ce9a7a4407aaaa44f8aab6 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/egg/e.txt b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/egg/e.txt new file mode 100644 index 0000000000000000000000000000000000000000..445c90b71075e95ba8d3fc248099078ee2fa1bce --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker-manager/resource/rdo/v1/egg/e.txt @@ -0,0 +1 @@ +43b0d158d9dcf4ffd416eb4e6a89d1b7a66d595c43329bb5c1c66d5befe33c37f31da53aaf539e43238457c46e1f28339cb9dda461c71c0ea2dba3dc8006684ff0d8d59ee2192582983c155e400d5b7cadcb65bbe682e61d175af54549796e447f3174b95f1f50998ae7785b5c0c359746e1ee6eeb989284fbe9e0f801ce5a7267285afbab7694c0e8434d6b86991298a46039de4d1fbfd824b8337b11c2d0b2f30ed4d46312e315cd9042abddc09ea73169f9e1f5baa496d44ed5cac9659cab076212499ef09a56db69e7444d665195a0562a7c82d176d027b0ecc7f4a26215e003fd463bf3911633baf85ee98f9187357a65ee2869b3d93a3871d830b4034e \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/bin/entrypoint b/dsoftbus/dist_executor/modules/functioncore/worker/bin/entrypoint new file mode 100644 index 0000000000000000000000000000000000000000..822118fa9367f7ec888b2709a903fdd7a2330e5b --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/bin/entrypoint @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e + +# limiting the number of file handles +ulimit -n 1024 + +# prevent from creating files that have incorrect permission +umask 0027 + +if ! exec ${WORKER} --module worker $@ ;then + echo "failed to start worker" + exit 1 +fi +exit 0 diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/bin/user_setup b/dsoftbus/dist_executor/modules/functioncore/worker/bin/user_setup new file mode 100644 index 0000000000000000000000000000000000000000..69380c24e1760dcf2bdc922fa9228a6207e48741 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/bin/user_setup @@ -0,0 +1,27 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +set -e +# ensure $HOME exists and is accessible by group 0 (we don't know what the runtime UID will be) +chown -R "${USER_UID}":"${GROUP_ID}" "${HOME}" + +# runtime user will need to be able to self-insert in /etc/passwd +if [ -w /etc/passwd ]; then + echo "${USER_NAME}:x:${USER_UID}:${GROUP_ID}:${USER_NAME}:${HOME}:/sbin/nologin" >> /etc/passwd +fi + +# no need for this script to remain in the image after running +rm "$0" + +exit 0 diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/config/config.yaml b/dsoftbus/dist_executor/modules/functioncore/worker/config/config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..e4464a40acc68100de38602440f77d7ae1067b74 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/config/config.yaml @@ -0,0 +1,58 @@ +{ + "runtime": { + "timeout": 86400, + "loadtimeout": 30, + "rpcPort": 31530, + "connectType": "unix", + "runtimeLogDir": "/home/snuser/log" + }, + "deployment": { + "storageType": "{{FUNCTION_STORAGE_TYPE}}", + "codePackage": { + "unzipFileSizeMaxMB": 1000, + "zipFileSizeMaxMB": 500, + "fileCountsMax": 30000, + "dirDepthMax": 20 + }, + "s3Config": { + "accessKey": "d06b003f7d8d60c8ac3d7fa8:5cb9bbf58a0b306a7a8c57b0b933f1b08522c41c", + "secretKey": "da701b304eb4a523aa13becc:fb120bfb517a8916b0608165f154d166b9adc07e32f59ad864699af7", + "useSSL" : false, + "endpoint": "{{S3_ADDR}}", + "trustedCA" : false + }, + "httpsConfig": { + "httpsEnable": false, + "tlsProtocol": "TLSv1.2", + "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + } + }, + "storage":{ + "addr": "{{REDIS_ADDR}}", + "password": "{{REDIS_PASSWORD}}" + }, + "server": { + "httpsConfig": { + "httpsEnable": false, + "tlsProtocol": "TLSv1.2", + "tlsCiphers": "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384" + }, + "readTimeout": 33, + "writeTimeout": 33, + "port": {{WORKER_HTTP_PORT}}, + "maxHeaderBytes": 1048576, + "ioReadTimeout": 100000, + "httpServerDisabled": true + }, + "invokeServer": { + "protocol": "tcp", + "port": 8889 + }, + "functionNameSeparator": "-", + "functionCapability": 2, + "backgroundProcessLimit": { + "enable": false, + "idleTime": 3, + "retryTimes": 3 + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/config/custom-runtime-config.yaml b/dsoftbus/dist_executor/modules/functioncore/worker/config/custom-runtime-config.yaml new file mode 100644 index 0000000000000000000000000000000000000000..75a781208113a31c991b0ae7acfc38caa20fa7a0 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/config/custom-runtime-config.yaml @@ -0,0 +1,27 @@ +{ + "serverUrl": { + "nextPath": "/v1/runtime/invocation/request", + "responsePath": "/v1/runtime/invocation/response/{request_id}", + "errorPath": "/v1/runtime/invocation/error/{request_id}" + }, + "serverHeaders": { + "headerInvokeIDKey": "X-Cff-Request-Id", + "headerAccessKey": "X-CFF-Access-Key", + "headerSecretKey ": "X-CFF-Secret-Key", + 
"headerAuthToken": "X-CFF-Auth-Token", + "headerSecurityToken": "X-CFF-Security-Token" + }, + "serverEnvKey": { + "runtimeProject": "RUNTIME_PROJECT_ID", + "runtimeFuncName": "RUNTIME_FUNC_NAME", + "runtimeFuncVersion ": "RUNTIME_FUNC_VERSION", + "runtimePackage": "RUNTIME_PACKAGE", + "runtimeTime": "RUNTIME_TIMEOUT", + "runtimeHandler": "RUNTIME_HANDLER", + "runtimeUserData": "RUNTIME_USERDATA", + "runtimeCPU ": "RUNTIME_CPU", + "runtimeMemory": "RUNTIME_MEMORY", + "runtimeCodeRoot": "RUNTIME_CODE_ROOT", + "runtimeApiAddr": "RUNTIME_API_ADDR" + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/config/log.json b/dsoftbus/dist_executor/modules/functioncore/worker/config/log.json new file mode 100644 index 0000000000000000000000000000000000000000..280899119e57ac07d8887bfd1ed89cb79eac2ddc --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/config/log.json @@ -0,0 +1,13 @@ +{ + "filepath": "{{logConfigPath}}", + "level": "{{logLevel}}", + "rolling": { + "maxsize": 400, + "maxbackups": 1, + "maxage": 1, + "compress": true + }, + "tick": 10, + "first": 10, + "thereafter": 5 +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/config/runtime-manager-config.json b/dsoftbus/dist_executor/modules/functioncore/worker/config/runtime-manager-config.json new file mode 100644 index 0000000000000000000000000000000000000000..eff4c1e30a0ed42778f4161905dddfdc6a9f5884 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/config/runtime-manager-config.json @@ -0,0 +1,11 @@ +{ + "ServerPort": 31530, + "portNum": 10000, + "retryTimes": 2, + "recycleLog": 604800, + "runtimeConfigs": { + "initial": { + "port": 31537 + } + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/config/runtime.json b/dsoftbus/dist_executor/modules/functioncore/worker/config/runtime.json new file mode 100644 index 0000000000000000000000000000000000000000..3dd93bdd35c7e72f7628ca44b4ab4e0ff44b59e6 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/config/runtime.json @@ -0,0 +1,5 @@ +{ + "maxRequestBodySize": "6", + "maxFdNum": 65535, + "dataSystemConnectionTimeout": "1" +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/apple/a.txt b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/apple/a.txt new file mode 100644 index 0000000000000000000000000000000000000000..3709ccd9afd30c21d59bfae320a74b80daa14f41 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/apple/a.txt @@ -0,0 +1 @@ +f48f9d5a9706088947ac438ebe005aa26c9370579f2231c538b28894a315562182da0eb18002c86728c4cdc0df5efb19e1c2060e93370fd891d4f3d9e5b2b61376643f86d0210ce996446a985759b15112037a5a2f6463cf5fd6afc7ff30fe814bf960eb0c16c5059407c74d6a93a8b3110405cbc935dff672da3b648d62e0d5cecd91bc7063211e6b33210afb6899e8322eabffe167318a5ac5d591aa7579efd37e9e4c7fcf390e97c1151b7c1bf00b4a18764a1a0cac1fda1ea6389b39d755127f0e5bc072e6d5936738be1585535dc63b71ad58686f71c821325009de36bdbac31c1c044845bd1bb41230ec9815695ef3f9e7143a16410113ff3286147a76 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/boy/b.txt b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/boy/b.txt new file mode 100644 index 0000000000000000000000000000000000000000..ccf997f96126e67f30cdf76f1cb9dc8ef5bff9dd --- /dev/null +++ 
b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/boy/b.txt @@ -0,0 +1 @@ +5d3da9f432be72b34951c737053eb2c816aaccae2b390d092046288aa5ce2cc5b16529f8197de316303735fbc0c041ccc3885b9be5fef4933b6806febb940b6bb609b3bf1d1501110e3ba62c6d8b2cf4388a08a8e123a3cea96daec619fbca177bdf092461f5701b02e5af83ddf0f6ce40deb279cda3ec7d6805237d229e26e30555f3dd890b7306b42bdef0ca1f963dbe25cd00d75018ab3216fcd3b7002b8a493d015306bf264cca12718890ef11c8d9e54721ebd6bdecab6c7084442f45611f249d9b5d703414770a46380d0b97c0187185241e9b6187c8168414370649fe6e7afef83a0df645424c4b6c0631dc3ef50c30af37eda905a1886ca12474c68a \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/cat/c.txt b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/cat/c.txt new file mode 100644 index 0000000000000000000000000000000000000000..10995ca71de7c44692a6f781fad7c1de77af08ae --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/cat/c.txt @@ -0,0 +1 @@ +df04ed142bf01fb1fe6c3c85944d71b77041b1f9f364ae09280cda4cdd65de76a4ce0459f10890eefd36708f3d154aa8af202301a215679207f17c472f04f7d19e219edb5028e18613378b3296e0e8f9940d1785d1daf1aed07dfad3a12c7d129bfc54635d0c8a5bc10c55ea116de90765d687e6cb6d657fe36a654eb8393e657b77b0916f291fccbddbbaeb9a43d4418458970e7b7ee257b0c54a1b4466b5dba393e7934b398874bec060407e9bac139c8d00937c8e738f4e88401e0950fb968658c9e0cfd639b4a530da92f1abd6c2e06953d7f5d4bdf4f5b8385edc8aab8b8ec7e897d24bd8d736955c8e19676c0037ce5347f6997f7c2e32ba71e995fc5d \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/dog/d.txt b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/dog/d.txt new file mode 100644 index 0000000000000000000000000000000000000000..bff402ff7bfa253116d0d3b089d44517d60e3127 --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/dog/d.txt @@ -0,0 +1 @@ +37a1b37efbb9bb6beadb4446f40aa2c4bcaeb298192fa390ed03ee65bfcd54e55da39bae9961b9fa0d4b89591e41eed835ed01cca315eab75ebaf8a9e7b02287a468ec6d0c61f9f8e4d58dad90fb8a6a13bee7fe4685dbb535bfdb7e76b328d66b4d4bc7aa48791b205d1d2f2ef176f2b5b80a8ddc34ed9514372130eb896bc18745facf059a7fa37ef5e2ef413d0030f5bca581055eb3b3565dca642651cb802530e2e4964ab3c8a37370adfd65c80483398a1a8668caed455deabae0dbae7fb2bcdeeee4c2a2d9431ed93c6527985ef684127691904c799e13f37daeb1cb7ebfb0904d61796362514e521ac0fed682fd952ca3e9ce9a7a4407aaaa44f8aab6 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/egg/e.txt b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/egg/e.txt new file mode 100644 index 0000000000000000000000000000000000000000..445c90b71075e95ba8d3fc248099078ee2fa1bce --- /dev/null +++ b/dsoftbus/dist_executor/modules/functioncore/worker/resource/rdo/v1/egg/e.txt @@ -0,0 +1 @@ +43b0d158d9dcf4ffd416eb4e6a89d1b7a66d595c43329bb5c1c66d5befe33c37f31da53aaf539e43238457c46e1f28339cb9dda461c71c0ea2dba3dc8006684ff0d8d59ee2192582983c155e400d5b7cadcb65bbe682e61d175af54549796e447f3174b95f1f50998ae7785b5c0c359746e1ee6eeb989284fbe9e0f801ce5a7267285afbab7694c0e8434d6b86991298a46039de4d1fbfd824b8337b11c2d0b2f30ed4d46312e315cd9042abddc09ea73169f9e1f5baa496d44ed5cac9659cab076212499ef09a56db69e7444d665195a0562a7c82d176d027b0ecc7f4a26215e003fd463bf3911633baf85ee98f9187357a65ee2869b3d93a3871d830b4034e \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/resource/s3-storage-ak 
b/dsoftbus/dist_executor/modules/functioncore/worker/resource/s3-storage-ak
new file mode 100644
index 0000000000000000000000000000000000000000..35f8f46571976e2e9f1d505c5f753758fd575b4d
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/functioncore/worker/resource/s3-storage-ak
@@ -0,0 +1 @@
+4eeeb4fc2c1c52c525a84f96:f2baacc8a7a46e840ac9956ef60273c15fcc3630
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/resource/s3-storage-sk b/dsoftbus/dist_executor/modules/functioncore/worker/resource/s3-storage-sk
new file mode 100644
index 0000000000000000000000000000000000000000..b5f1b5b012a95bc0d5a225ee571ff4bc1fced878
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/functioncore/worker/resource/s3-storage-sk
@@ -0,0 +1 @@
+da701b304eb4a523aa13becc:fb120bfb517a8916b0608165f154d166b9adc07e32f59ad864699af7
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/scripts/copy_secret.sh b/dsoftbus/dist_executor/modules/functioncore/worker/scripts/copy_secret.sh
new file mode 100644
index 0000000000000000000000000000000000000000..81d058f4dfa3298a51c8c7c7ddc2d861f2634513
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/functioncore/worker/scripts/copy_secret.sh
@@ -0,0 +1,39 @@
+#!/bin/bash
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+set -ex
+
+DST_RESOURCE_PATH="/home/sn/temp-resource"
+SRC_RESOURCE_PATH="/home/sn/resource"
+
+# copy the source tree into the destination without overwriting existing entries
+function recursive_copy_file()
+{
+    cd "$1"
+    for name in *
+    do
+        if [ -f "$1"/"$name" ]; then
+            if [ ! -f "$2"/"$name" ]; then
+                cp "$1"/"$name" "$2"/"$name"
+            fi
+        elif [ -d "$1"/"$name" ]; then
+            if [ ! -d "$2"/"$name" ]; then
+                mkdir -m 700 "$2"/"$name"
+            fi
+            recursive_copy_file "$1"/"$name" "$2"/"$name"
+        fi
+    done
+}
+
+recursive_copy_file "$SRC_RESOURCE_PATH" "$DST_RESOURCE_PATH"
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/functioncore/worker/scripts/iptables.sh b/dsoftbus/dist_executor/modules/functioncore/worker/scripts/iptables.sh
new file mode 100644
index 0000000000000000000000000000000000000000..8319d8eb0f5e47a9c14d835d0ae88036405726a8
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/functioncore/worker/scripts/iptables.sh
@@ -0,0 +1,90 @@
+#!/bin/bash
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+set -e
+
+# retry a command up to max_attempts times, doubling the wait between attempts
+function with_backoff() {
+    local max_attempts=9
+    local timeout=1
+    local attempt=0
+
+    while [[ "$attempt" -lt "$max_attempts" ]]; do
+        if "$@"; then
+            return 0
+        fi
+        echo "failure! retrying in ${timeout}s..." 1>&2
+        sleep "$timeout"
+        timeout=$((timeout * 2))
+        attempt=$((attempt + 1))
+    done
+    echo "failed to execute command '$*' after ${max_attempts} attempts!" 1>&2
+    return 1
+}
+
+function lookup_ip() {
+    with_backoff nslookup "$1" | grep Address | grep -v "#53" | awk '{print $2}'
+}
+
+dns_server=$(grep nameserver /etc/resolv.conf | awk 'NR==1{print $2}')
+if [ "${ENABLE_MINIO}" == "true" ]; then
+    minio_ip=$(lookup_ip "minio")
+fi
+
+iptables -P INPUT ACCEPT
+iptables -P FORWARD ACCEPT
+iptables -P OUTPUT ACCEPT
+
+ETCD_INFO=$(grep "etcdAddr" /home/sn/config/init.conf | awk -F '=' '{print $2}')
+etcd_ip=$(echo "${ETCD_INFO}" | awk -F ':' '{print $1}')
+etcd_port=$(echo "${ETCD_INFO}" | awk -F ':' '{print $2}')
+if [ -n "${etcd_ip}" ] && [ -n "${etcd_port}" ]; then
+    iptables -A OUTPUT -d "${etcd_ip}"/32 -p tcp -m tcp --dport "${etcd_port}" -j ACCEPT
+fi
+
+iptables -A OUTPUT -d "${dns_server}"/32 -p udp -j ACCEPT
+iptables -A OUTPUT -d "${dns_server}"/32 -p tcp -j ACCEPT
+if [ -n "${minio_ip}" ]; then
+    iptables -A OUTPUT -d "${minio_ip}"/32 -p tcp -m tcp --dport 9000 -j ACCEPT
+fi
+
+# 31501 is the dataSystem port, used for storage traffic
+iptables -A OUTPUT -p tcp -m tcp --dport 31501 -j ACCEPT
+
+if [ -n "${SVC_CIDR}" ]; then
+    iptables -A OUTPUT -d "${SVC_CIDR}" -p icmp --icmp-type 8 -j DROP
+    iptables -A OUTPUT -d "${SVC_CIDR}" -p udp -j DROP
+    iptables -A OUTPUT -d "${SVC_CIDR}" -p tcp -m tcp --tcp-flags SYN SYN -m state --state NEW -j DROP
+    iptables -A OUTPUT -d "${SVC_CIDR}" -j ACCEPT
+fi
+
+if [ -n "${POD_CIDR}" ]; then
+    iptables -A OUTPUT -d "${POD_CIDR}" -p icmp --icmp-type 8 -j DROP
+    iptables -A OUTPUT -d "${POD_CIDR}" -p udp -j DROP
+    iptables -A OUTPUT -d "${POD_CIDR}" -p tcp -m tcp --tcp-flags SYN SYN -m state --state NEW -j DROP
+    iptables -A OUTPUT -d "${POD_CIDR}" -j ACCEPT
+fi
+
+if [ -n "${HOST_CIDR}" ]; then
+    arr=$(echo "${HOST_CIDR}" | tr "," "\n")
+    for value in $arr; do
+        iptables -A OUTPUT -d "${value}" -p icmp --icmp-type 8 -j DROP
+        iptables -A OUTPUT -d "${value}" -p udp -j DROP
+        iptables -A OUTPUT -d "${value}" -p tcp -m tcp --tcp-flags SYN SYN -m state --state NEW -j DROP
+        iptables -A OUTPUT -d "${value}" -j ACCEPT
+    done
+fi
+iptables-save
+
+exit 0
diff --git a/dsoftbus/dist_executor/modules/resource/config.json b/dsoftbus/dist_executor/modules/resource/config.json
new file mode 100644
index 0000000000000000000000000000000000000000..8705caaaa0b7b83e4c6b652d67e2532f02a40ec9
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/config.json
@@ -0,0 +1,21 @@
+{
+    "cliConfig":{
+        "adminService.host":"http://serverlessnative.huawei.com:31172",
+        "userId":"ic4062b8333240acb996cb3cacf99b36",
+        "tenantId": "i1fe539427b24702acc11fbb4e134e17",
+        "caPath": "",
+        "zipFileSizeMaxMB": 300,
+        "unzipFileSizeMaxMB": 600,
+        "mutualTLSConfig":{
+            "tlsEnable": false,
+            "rootCAFile": "",
+            "moduleCertFile": "",
+            "moduleKeyFile": "",
+            "serverName": ""
+        },
+        "securityEnable": true
+    },
+    "cliLimit":{
+        "eventSize": 3670016
+    }
+}
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/resource/ddo/ddo.json b/dsoftbus/dist_executor/modules/resource/ddo/ddo.json
new file mode 100644
index 0000000000000000000000000000000000000000..5ba0b5f97c912d7b5856c458462fd0544402c702
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/ddo/ddo.json
@@ -0,0 +1,16 @@
+{
+    "signdata": {
+        "keys": {
+            "key": "47a75e8cd6ae4284ce804a8e86fb87bd:b5773bb634322c1a739d4318cf7aef6c51ce8fca25441d88af70c765bf3a1111a0367c81f01362988553ae621c750df6",
+            "mac":
"2c845b3da46c87532a8da01cfc359e9e8d9d02710348b8b22b0b4e98db0c940c" + }, + "description": "signdata" + }, + "paaswddata": { + "keys": { + "key": "66b49543ea2b46862d5a662cb6232cf7:b7c33750cda1bc9b9aebdb7a3aa17d7276b3c2882d596dc8e1a54e3f75eb7a3f43d9aa9cf7fcc410d432727b84f4f62f", + "mac": "802e2fa9336dc7b42237ae730a7cc2649ccf0e643bbbd4d921ec535f6f4307fb" + }, + "description": "paaswddata" + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/function.json b/dsoftbus/dist_executor/modules/resource/function.json new file mode 100644 index 0000000000000000000000000000000000000000..7a94a8d0d308816019c849cf7b3712bbda3360f7 --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/function.json @@ -0,0 +1,29 @@ +{ + "runtime": [ + "nodejs10.15.2", + "java1.8", + "python3.7", + "python3.8", + "python3.9", + "cpp11", + "custom-runtime", + "posix-custom-runtime", + "go1.13" + ], + "deploymentInfo": [ + { + "cpu": 500, + "memory": 500 + }, + { + "cpu": 1500, + "memory": 3000 + }, + { + "cpu": 4000, + "memory": 16000 + } + ], + "maxMaxInstance": 1000, + "maxLayersSize": 5 +} diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/README.md b/dsoftbus/dist_executor/modules/resource/local-repo/README.md new file mode 100644 index 0000000000000000000000000000000000000000..32adeddc5eb9d4464b61c86c2a35bc13faa2f807 --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/README.md @@ -0,0 +1 @@ +Hello sn-template \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/faas/function/python3.8/faas-empty/function.yaml b/dsoftbus/dist_executor/modules/resource/local-repo/faas/function/python3.8/faas-empty/function.yaml new file mode 100644 index 0000000000000000000000000000000000000000..98823f6fe7a92f6fb229f7efe13f6fc15763d5af --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/faas/function/python3.8/faas-empty/function.yaml @@ -0,0 +1,7 @@ +runtime: python3.8 +handler: handler.my_handler +initializer: handler.init +description: empty function +memory: 500 +cpu: 500 +timeout: 900 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/faas/function/python3.8/faas-empty/handler.py b/dsoftbus/dist_executor/modules/resource/local-repo/faas/function/python3.8/faas-empty/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..d80be751f8ad1ddca482f6f998b1a05a25773872 --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/faas/function/python3.8/faas-empty/handler.py @@ -0,0 +1,20 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+
+def my_handler(event, context):
+    """Handle one request: called with the decoded event and a context object."""
+    # TODO
+    pass
+
+
+def init():
+    """Optional initializer: called once before the first request."""
+    # TODO
+    pass
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/faas/service/python3.9/faas-empty/service.yaml b/dsoftbus/dist_executor/modules/resource/local-repo/faas/service/python3.9/faas-empty/service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..850c433e2d559f6f361c191d49f0421644ddaf06
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/faas/service/python3.9/faas-empty/service.yaml
@@ -0,0 +1,3 @@
+service: empty
+kind: faas
+description: this is an empty service
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/manifest.json b/dsoftbus/dist_executor/modules/resource/local-repo/manifest.json
new file mode 100644
index 0000000000000000000000000000000000000000..687471b9ed0998529467768499279444c06779fa
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/manifest.json
@@ -0,0 +1,158 @@
+{
+    "faas": {
+        "service": {
+            "python3.8": [
+                {
+                    "directory": "faas/service/python3.8/faas-empty",
+                    "description": "Empty Example",
+                    "templateName": "empty"
+                }
+            ],
+            "java1.8": [
+                {
+                    "directory": "faas/service/java1.8/faas-empty",
+                    "description": "Empty Example",
+                    "templateName": "empty"
+                }
+            ]
+        },
+        "function": {
+            "python3.8": [
+                {
+                    "directory": "faas/function/python3.8/faas-empty",
+                    "description": "Empty Example",
+                    "templateName": "empty"
+                }
+            ],
+            "java1.8": [
+                {
+                    "directory": "faas/function/java1.8/faas-empty",
+                    "description": "Empty Example",
+                    "templateName": "empty"
+                }
+            ]
+        }
+    },
+    "yrlib": {
+        "service": {
+            "python3.8": [
+                {
+                    "directory": "yrlib/service/python3.8/yrlib-empty",
+                    "description": "Empty Example",
+                    "templateName": "empty"
+                },
+                {
+                    "directory": "yrlib/service/python3.8/yrlib-hello",
+                    "description": "Hello World Example",
+                    "templateName": "hello-world"
+                },
+                {
+                    "directory": "yrlib/service/python3.8/yrlib-class",
+                    "description": "Class Example",
+                    "templateName": "class"
+                }
+            ],
+            "python3.9": [
+                {
+                    "directory": "yrlib/service/python3.9/yrlib-empty",
+                    "description": "Empty Example",
+                    "templateName": "empty"
+                },
+                {
+                    "directory": "yrlib/service/python3.9/yrlib-hello",
+                    "description": "Hello World Example",
+                    "templateName": "hello-world"
+                },
+                {
+                    "directory": "yrlib/service/python3.9/yrlib-class",
+                    "description": "Class Example",
+                    "templateName": "class"
+                }
+            ],
+            "cpp11": [
+                {
+                    "directory": "yrlib/service/cpp11/yrlib-empty",
+                    "description": "Empty Example",
+                    "templateName": "empty"
+                },
+                {
+                    "directory": "yrlib/service/cpp11/yrlib-hello",
+                    "description": "Hello World Example",
+                    "templateName": "hello-world"
+                },
+                {
+                    "directory": "yrlib/service/cpp11/yrlib-class",
+                    "description": "Class Example",
+                    "templateName": "class"
+                }
+            ],
+            "java1.8": [
+                {
+                    "directory": "yrlib/service/java1.8/huawei-yrlib-empty",
+                    "description": "Empty Example",
+                    "templateName": "empty"
+                },
+                {
+                    "directory": "yrlib/service/java1.8/huawei-yrlib-hello",
+                    "description": "Hello World Example",
+                    "templateName": "hello-world"
+                },
+                {
+                    "directory": "yrlib/service/java1.8/huawei-yrlib-class",
+                    "description": "Class Example",
+                    "templateName": "class"
+                }
+            ]
+        },
+        "function": {
+            "python3.8": [
+                {
+                    "directory": "yrlib/function/python3.8/yrlib-empty",
+                    "description": "Empty Example",
+                    "templateName": "empty"
+                }
+            ],
+            "python3.9": [
+                {
+                    "directory": "yrlib/function/python3.9/yrlib-empty",
+                    "description": "Empty Example",
+                    "templateName":
"empty" + } + ], + "cpp11": [ + { + "directory": "yrlib/function/cpp11/yrlib-empty", + "description": "Empty Example", + "templateName": "empty" + } + ], + "java1.8": [ + { + "directory": "yrlib/function/java1.8/huawei-yrlib-empty", + "description": "Empty Example", + "templateName": "empty" + } + ] + } + }, + "posix-runtime-custom": { + "service": { + "posix-custom-runtime": [ + { + "directory": "posix-runtime-custom/service/posix-empty", + "description": "Empty Example", + "templateName": "empty" + } + ] + }, + "function": { + "posix-custom-runtime": [ + { + "directory": "posix-runtime-custom/function/posix-empty", + "description": "Empty Example", + "templateName": "empty" + } + ] + } + } +} \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/bootstrap b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/bootstrap new file mode 100644 index 0000000000000000000000000000000000000000..de334855495a70a6581da2594cb5a706623a2424 --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/bootstrap @@ -0,0 +1,14 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/function.yaml b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/function.yaml new file mode 100644 index 0000000000000000000000000000000000000000..660e919dd3ae1e8e3a41643920feb4e5fdeb4f92 --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/function.yaml @@ -0,0 +1,5 @@ +runtime: posix-custom-runtime +description: empty function +memory: 500 +cpu: 500 +timeout: 900 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/common.proto b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/common.proto new file mode 100644 index 0000000000000000000000000000000000000000..7edb026f96eb1071dfa871281d2d01e097b195f4 --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/common.proto @@ -0,0 +1,44 @@ +/* + * Copyright (c) 2022 Huawei Technologies Co., Ltd + * + * This software is licensed under Mulan PSL v2. + * You can use this software according to the terms and conditions of the Mulan PSL v2. + * You may obtain a copy of Mulan PSL v2 at: + * + * http://license.coscl.org.cn/MulanPSL2 + * + * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, + * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, + * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. + * See the Mulan PSL v2 for more details. 
+ */
+
+syntax = "proto3";
+
+package common;
+
+option go_package = "grpc/pb/common;common";
+
+message Arg {
+    enum ArgType {
+        VALUE = 0;
+        OBJECT_REF = 1;
+    }
+    ArgType type = 1;
+    bytes value = 2;
+    repeated string nested_refs = 3;
+}
+
+enum ErrorCode {
+    ERR_NONE = 0;
+    ERR_PARAM_INVALID = 1001;
+    ERR_RESOURCE_NOT_ENOUGH = 1002;
+    ERR_INSTANCE_NOT_FOUND = 1003;
+    ERR_INSTANCE_DUPLICATED = 1004;
+    ERR_INVOKE_RATE_LIMITED = 1005;
+    ERR_USER_CODE_LOAD = 2001;
+    ERR_USER_FUNCTION_EXCEPTION = 2002;
+    ERR_REQUEST_BETWEEN_RUNTIME_BUS = 3001;
+    ERR_INNER_COMMUNICATION = 3002;
+    ERR_INNER_SYSTEM_ERROR = 3003;
+}
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/core_service.proto b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/core_service.proto
new file mode 100644
index 0000000000000000000000000000000000000000..ec1a4e7c059bdb6935c16befd1445b18451d0707
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/core_service.proto
@@ -0,0 +1,151 @@
+/*
+ * Copyright (c) 2022 Huawei Technologies Co., Ltd
+ *
+ * This software is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+syntax = "proto3";
+
+package core_service;
+
+import "common.proto";
+
+option go_package = "grpc/pb/core;core";
+
+// Core service provides APIs to the runtime.
+service CoreService {
+    // Create an instance for a specified function
+    rpc Create (CreateRequest) returns (CreateResponse) {}
+    // invoke the created instance
+    rpc Invoke (InvokeRequest) returns (InvokeResponse) {}
+    // terminate the created instance
+    rpc Terminate (TerminateRequest) returns (TerminateResponse) {}
+    // exit the created instance
+    rpc Exit (ExitRequest) returns (ExitResponse) {}
+    // save state of the created instance
+    rpc SaveState (StateSaveRequest) returns (StateSaveResponse) {}
+    // load state of the created instance
+    rpc LoadState (StateLoadRequest) returns (StateLoadResponse) {}
+    // send a kill signal to the instance
+    rpc Kill (KillRequest) returns (KillResponse) {}
+}
+
+enum AffinityType {
+    PreferredAffinity = 0;
+    PreferredAntiAffinity = 1;
+    RequiredAffinity = 2;
+    RequiredAntiAffinity = 3;
+}
+
+message SchedulingOptions {
+    int32 priority = 1;
+    map<string, float> resources = 2;
+    map<string, string> extension = 3;
+    map<string, AffinityType> affinity = 4;
+}
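+
+// Example (added for illustration; the map value types above are assumptions,
+// since the generic parameters were not preserved): a CreateRequest could ask
+// for a CPU-heavy placement near "dataset-a" with, in proto text format:
+//     schedulingOps {
+//         priority: 1
+//         resources { key: "cpu" value: 4000 }
+//         affinity { key: "dataset-a" value: PreferredAffinity }
+//     }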
+
+message CreateRequest {
+    string function = 1;
+    repeated common.Arg args = 2;
+    SchedulingOptions schedulingOps = 3;
+    string requestID = 4;
+    string traceID = 5;
+    repeated string labels = 6;
+    // optional: if designatedInstanceID is not empty, the created instance will be assigned this ID
+    string designatedInstanceID = 7;
+    map<string, string> createOptions = 8;
+}
+
+message CreateResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+    string instanceID = 3;
+}
+
+message InvokeRequest {
+    string function = 1;
+    repeated common.Arg args = 2;
+    string instanceID = 3;
+    string requestID = 4;
+    string traceID = 5;
+    repeated string returnObjectIDs = 6;
+}
+
+message InvokeResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+    string returnObjectID = 3;
+}
+
+message CallResult {
+    common.ErrorCode code = 1;
+    string message = 2;
+    string instanceID = 3;
+    string requestID = 4;
+    HttpTriggerResponse triggerResponse = 5;
+}
+
+message CallResultAck {
+    common.ErrorCode code = 1;
+    string message = 2;
+}
+
+message TerminateRequest {
+    string instanceID = 1;
+}
+
+message TerminateResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+}
+
+message ExitRequest {}
+
+message ExitResponse {}
+
+message StateSaveRequest {
+    bytes state = 1;
+}
+
+message StateSaveResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+    string checkpointID = 3;
+}
+
+message StateLoadRequest {
+    string checkpointID = 1;
+}
+
+message StateLoadResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+    bytes state = 3;
+}
+
+message KillRequest {
+    string instanceID = 1;
+    int32 signal = 2;
+    bytes payload = 3;
+}
+
+message KillResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+}
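+
+// Typical lifecycle (added for illustration): Create returns an instanceID,
+// Invoke(instanceID, args) returns a returnObjectID whose value can then be
+// fetched from the data system, and Terminate(instanceID) releases the
+// instance.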
+
+message HttpTriggerResponse {
+    int32 statusCode = 1;
+    string contentType = 2;
+    bytes body = 3;
+    map<string, string> headers = 4;
+}
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/runtime_rpc.proto b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/runtime_rpc.proto
new file mode 100644
index 0000000000000000000000000000000000000000..3fedc6aebab89af1bbd4d252f5966d5bd0fa10fd
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/runtime_rpc.proto
@@ -0,0 +1,111 @@
+/*
+ * Copyright (c) 2022 Huawei Technologies Co., Ltd
+ *
+ * This software is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+syntax = "proto3";
+
+package runtime_rpc;
+
+import "core_service.proto";
+import "runtime_service.proto";
+
+option go_package = "grpc/pb;api";
+
+// RuntimeRPC provides a bidirectional streaming RPC interface
+service RuntimeRPC {
+    // Build a bidirectional gRPC communication channel; different message body types are dispatched to different API handlers
+    rpc MessageStream (stream StreamingMessage) returns (stream StreamingMessage) {}
+}
+
+message StreamingMessage {
+    string messageID = 1;
+    oneof body {
+
+        // Create an instance for a specified function
+        // handled by core
+        core_service.CreateRequest createReq = 2;
+        core_service.CreateResponse createRsp = 3;
+
+        // invoke the created instance
+        // handled by core
+        core_service.InvokeRequest invokeReq = 4;
+        core_service.InvokeResponse invokeRsp = 5;
+
+        // exit the created instance
+        // may only be called by the instance itself
+        // handled by core
+        core_service.ExitRequest exitReq = 6;
+        core_service.ExitResponse exitRsp = 7;
+
+        // save state of the created instance
+        // handled by core
+        core_service.StateSaveRequest saveReq = 8;
+        core_service.StateSaveResponse saveRsp = 9;
+
+        // load state of the created instance
+        // handled by core
+        core_service.StateLoadRequest loadReq = 10;
+        core_service.StateLoadResponse loadRsp = 11;
+
+        // send a signal to the instance or the core
+        // 1 ~ 63: core-defined signals
+        // 64 ~ 1024: custom runtime-defined signals
+        // handled by core
+        core_service.KillRequest killReq = 12;
+        core_service.KillResponse killRsp = 13;
+
+        // send the result of a call request back to the sender
+        // handled by core
+        core_service.CallResult callResultReq = 14;
+        core_service.CallResultAck callResultAck = 15;
+
+        // call a method or initialize the state of an instance
+        // handled by runtime
+        runtime_service.CallRequest callReq = 16;
+        runtime_service.CallResponse callRsp = 17;
+
+        // NotifyResult asynchronously notifies the result of a create or invoke request issued by the runtime
+        // handled by runtime
+        runtime_service.NotifyRequest notifyReq = 18;
+        runtime_service.NotifyResponse notifyRsp = 19;
+
+        // Checkpoint requests a state save for failure recovery and state migration
+        // handled by runtime
+        runtime_service.CheckpointRequest checkpointReq = 20;
+        runtime_service.CheckpointResponse checkpointRsp = 21;
+
+        // recover state
+        // handled by runtime
+        runtime_service.RecoverRequest recoverReq = 22;
+        runtime_service.RecoverResponse recoverRsp = 23;
+
+        // request an instance to shut down
+        // handled by runtime
+        runtime_service.ShutdownRequest shutdownReq = 24;
+        runtime_service.ShutdownResponse shutdownRsp = 25;
+
+        // receive a signal sent by another runtime or a driver
+        // handled by runtime
+        runtime_service.SignalRequest signalReq = 26;
+        runtime_service.SignalResponse signalRsp = 27;
+
+        // check whether the runtime is alive
+        // handled by runtime
+        runtime_service.HeartbeatRequest heartbeatReq = 28;
+        runtime_service.HeartbeatResponse heartbeatRsp = 29;
+    }
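+
+    // Note (added for illustration; correlation behavior is an assumption
+    // drawn from the streaming design): requests and responses travelling
+    // over MessageStream are matched by messageID, so a peer that sends
+    // StreamingMessage{messageID: "42", createReq: ...} should expect the
+    // matching createRsp to arrive in a StreamingMessage with messageID "42".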
+
+    // the message is sent from functiontask(0) or the frontend; for now this is only used for runtime call requests
+    int32 messageFrom = 30;
+}
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/runtime_service.proto b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/runtime_service.proto
new file mode 100644
index 0000000000000000000000000000000000000000..689e32e15f1513e42a485c050b8992466a3d3da5
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/function/posix-empty/proto/runtime_service.proto
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2022 Huawei Technologies Co., Ltd
+ *
+ * This software is licensed under Mulan PSL v2.
+ * You can use this software according to the terms and conditions of the Mulan PSL v2.
+ * You may obtain a copy of Mulan PSL v2 at:
+ *
+ * http://license.coscl.org.cn/MulanPSL2
+ *
+ * THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+ * EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+ * MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+ * See the Mulan PSL v2 for more details.
+ */
+
+syntax = "proto3";
+
+package runtime_service;
+
+import "common.proto";
+
+option go_package = "grpc/pb/runtime;runtime";
+
+// Runtime service provides APIs to the core.
+service RuntimeService {
+    // Call a method or initialize the state of an instance
+    rpc Call (CallRequest) returns (CallResponse) {}
+    // NotifyResult asynchronously notifies the result of a create or invoke request issued by the runtime
+    rpc NotifyResult (NotifyRequest) returns (NotifyResponse) {}
+    // Checkpoint requests a state save for failure recovery and state migration
+    rpc Checkpoint (CheckpointRequest) returns (CheckpointResponse) {}
+    // Recover state
+    rpc Recover (RecoverRequest) returns (RecoverResponse) {}
+    // GracefulExit requests that an instance exit gracefully
+    rpc GracefulExit (GracefulExitRequest) returns (GracefulExitResponse) {}
+    // Shutdown requests that an instance shut down
+    rpc Shutdown (ShutdownRequest) returns (ShutdownResponse) {}
+    // check whether the runtime is alive
+    rpc Heartbeat (HeartbeatRequest) returns (HeartbeatResponse) {}
+    // Signal sends a signal to the instance
+    rpc Signal (SignalRequest) returns (SignalResponse) {}
+}
+
+message CallRequest {
+    string function = 1;
+    repeated common.Arg args = 2;
+    string traceID = 3;
+    string returnObjectID = 4;
+    // isCreate specifies whether the request is an initialization or a runtime invoke
+    bool isCreate = 5;
+    // senderID specifies the caller identity;
+    // when processing is done, it is sent back to the core as CallResult.instanceID
+    string senderID = 6;
+    // when processing is done, it is sent back to the core as CallResult.requestID
+    string requestID = 7;
+    repeated string returnObjectIDs = 8;
+    map<string, string> createOptions = 9;
+    HttpTriggerEvent triggerRequest = 10;
+}
+
+message CallResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+}
+
+message CheckpointRequest {
+    string checkpointID = 1;
+}
+
+message CheckpointResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+    bytes state = 3;
+}
+
+message RecoverRequest {
+    bytes state = 1;
+    map<string, string> createOptions = 2;
+}
+
+message RecoverResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+}
+
+message GracefulExitRequest {
+    uint64 gracePeriodSecond = 1;
+}
+
+message GracefulExitResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+}
+
+message ShutdownRequest {
+    uint64 gracePeriodSecond = 1;
+}
+
+message ShutdownResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+}
+
+message NotifyRequest {
+    string requestID = 1;
+    common.ErrorCode code = 2;
+    string message = 3;
+}
+
+message NotifyResponse {}
+
+message HeartbeatRequest {}
+
+message HeartbeatResponse {}
+
+message SignalRequest {
+    int32 signal = 1;
+    bytes payload = 2;
+}
+
+message SignalResponse {
+    common.ErrorCode code = 1;
+    string message = 2;
+}
+
+message HttpTriggerEvent {
+    string path = 1;
+    string queryStringParameters = 2;
+    string httpMethod = 3;
+    bytes body = 4;
+    map<string, string> headers = 5;
+}
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/service/posix-empty/service.yaml b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/service/posix-empty/service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..42a7a96569d901d3ba044c0023d9f606bd8e8907
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/posix-runtime-custom/service/posix-empty/service.yaml
@@ -0,0 +1,3 @@
+service: empty
+kind: posix-runtime-custom
+description: this is an empty service
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/sdk/faas/python3.7/sdk.py b/dsoftbus/dist_executor/modules/resource/local-repo/sdk/faas/python3.7/sdk.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/sdk/faas/python3.8/faas_executor.py b/dsoftbus/dist_executor/modules/resource/local-repo/sdk/faas/python3.8/faas_executor.py
new file mode 100644
index 0000000000000000000000000000000000000000..01210014f428c0718ddb36cdedf3d747a91f78e7
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/sdk/faas/python3.8/faas_executor.py
@@ -0,0 +1,175 @@
+#!/usr/bin/env python3
+# coding=UTF-8
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+"""Faas executor, an adapter between posix and faas"""
+from dataclasses import dataclass
+import json
+import traceback
+
+from fnruntime import log
+from fnruntime.handlers.utils import CodeManager
+
+_KEY_USER_INIT_ENTRY = "userInitEntry"
+_KEY_USER_CALL_ENTRY = "userCallEntry"
+
+# posix_args layout: at init time, index 0 carries a JSON map of user entry
+# points ({"userInitEntry": ..., "userCallEntry": ...}); at call time, index 0
+# is the context slot and index 1 carries the JSON-encoded user event.
+_INDEX_INIT_USER_ENTRIES = 0
+_INDEX_CALL_CONTEXT = 0
+_INDEX_CALL_USER_EVENT = 1
+
+
+@dataclass
+class FaasContext:
+    """Faas context"""
+    logger: object
+    request_id: str
+
+
+def faasInitHandler(posix_args, code_dir: str, *args, **kwargs) -> None:
+    """
+    Raises an error on failure.
+    """
+    log.get_logger().debug("Faas init handler called.")
+    user_handlers = json.loads(posix_args[_INDEX_INIT_USER_ENTRIES].value)
+    log.get_logger().debug("Faas init handler extracted user_handler: %s.", user_handlers)
+
+    user_init_hook = user_handlers.get(_KEY_USER_INIT_ENTRY, None)
+    user_call_hook = user_handlers.get(_KEY_USER_CALL_ENTRY, None)
+
+    # Load and run user init code
+    if user_init_hook is not None and len(user_init_hook) != 0:
+        try:
+            user_init_code = _load_module_and_entry(user_init_hook, code_dir)
+        except (ValueError, ImportError, RuntimeError) as err:
+            log.get_logger().error("faas failed to import user code. err: %s, traceback: %s",
+                                   err, traceback.format_exc())
+            raise RuntimeError(f"faas failed to import user code. err: {err}") from err
+        else:
+            CodeManager().register(_KEY_USER_INIT_ENTRY, user_init_code)
+            try:
+                user_init_code()
+            except Exception as err:
+                log.get_logger().exception("Fail to run user init handler. err: %s. "
+                                           "traceback: %s", err, traceback.format_exc())
+                raise
+
+    # Load the user call code without running it
+    if user_call_hook is not None and len(user_call_hook) != 0:
+        user_call_code = _load_module_and_entry(user_call_hook, code_dir)
+        CodeManager().register(_KEY_USER_CALL_ENTRY, user_call_code)
+
+
+def faasCallHandler(posix_args, *args, **kwargs) -> str:
+    """faas call handler"""
+    user_code = CodeManager().load(_KEY_USER_CALL_ENTRY)
+    if user_code is None:
+        err_msg = "Faas executor found empty user call code."
+        log.get_logger().error(err_msg)
+        raise RuntimeError(err_msg)
+
+    event = json.loads(posix_args[_INDEX_CALL_USER_EVENT].value)
+
+    # This is a "placeholder" context; a full implementation will come later
+    context = FaasContext(logger=log.get_logger(), request_id="")
+
+    try:
+        result = user_code(event, context)
+    except Exception as err:
+        err_msg = f"Fail to run user call handler. err: {err}. traceback: {traceback.format_exc()}"
+        log.get_logger().exception(err_msg)
+        raise
+
+    try:
+        result_str = transform_response_to_str(result)
+    except Exception as err:
+        # Can be RecursionError, RuntimeError, UnicodeError, MemoryError, etc...
+        err_msg = f"Fail to stringify user call result. " \
+                  f"err: {err}. traceback: {traceback.format_exc()}"
+        log.get_logger().exception(err_msg)
+        raise RuntimeError(err_msg) from err
+
+    return result_str
+
+
+# Under the bundled-packaging approach, faas does not need the following
+# methods yet; they are intentionally left empty.
+
+
+def faasCheckPointHandler(check_point_id: str = "") -> bytes:
+    """faas checkpoint handler, left empty"""
+    return bytes()
+
+
+def faasRecoverHandler(state: bytes = None):
+    """faas recover handler, left empty"""
+
+
+def faasShutDownHandler(grace_period_second: int = -1):
+    """faas shutdown handler, left empty"""
+
+
+def faasSignalHandler(signal_num: int = -1, payload: bytes = None):
+    """faas signal handler, left empty"""
+
+
+# Helpers
+def transform_response_to_str(response):
+    """Method transform_response_to_str"""
+    result = None
+    if response is None:
+        result = ""
+    elif isinstance(response, dict) or is_instance_type(response):
+        result = to_json_string(response)
+    else:
+        result = str(response)
+    return result
+
+
+def convert_obj_to_json(obj):
+    """Method convert_obj_to_json"""
+    return obj.__dict__
+
+
+def is_instance_type(obj):
+    """Method is_instance_type"""
+    return hasattr(obj, '__dict__')
+
+
+def to_json_string(obj, indent=None, sort_keys=False):
+    """Method to_json_string"""
+    if isinstance(obj, dict):
+        return json.dumps(obj, indent=indent, sort_keys=sort_keys)
+    return json.dumps(obj, indent=indent, default=convert_obj_to_json, sort_keys=sort_keys)
+
+
+def _load_module_and_entry(user_hook, code_dir):
+    """Load the module and the entry code; raise RuntimeError on failure."""
+    log.get_logger().debug("Faas load module and entry [%s] from [%s]", user_hook, code_dir)
+    user_hook_splits = user_hook.rsplit(".", maxsplit=1) if isinstance(user_hook, str) else None
+    if user_hook_splits is None or len(user_hook_splits) != 2:
+        raise RuntimeError("User hook does not satisfy the requirement, expected format: xxx.xxx")
+
+    user_module, user_entry = user_hook_splits[0], user_hook_splits[1]
+    log.get_logger().debug("User module: %s, entry: %s", user_module, user_entry)
+
+    try:
+        user_code = CodeManager().get_code_from_local(code_dir, user_module, user_entry)
+    except ValueError as err:
+        log.get_logger().error(f"Missing user module. {user_hook}")
+        raise RuntimeError(f"Missing user module. {user_hook}") from err
+
+    if user_code is None:
+        log.get_logger().error(f"Missing user entry. {user_hook}")
+        raise RuntimeError(f"Missing user entry. 
{user_hook}") + + return user_code diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/sdk/yrlib/nodejs10.15.2/sdk.js b/dsoftbus/dist_executor/modules/resource/local-repo/sdk/yrlib/nodejs10.15.2/sdk.js new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/sdk/yrlib/python3.7/sdk.py b/dsoftbus/dist_executor/modules/resource/local-repo/sdk/yrlib/python3.7/sdk.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/sdk/yrlib/python3.9/yrlib_handler.py b/dsoftbus/dist_executor/modules/resource/local-repo/sdk/yrlib/python3.9/yrlib_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..f7bf241b0c3eae4d93861bf92406745c3a7b759b --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/sdk/yrlib/python3.9/yrlib_handler.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""default handler for yr api""" +import traceback +from typing import List + +import yr.utils +import yr.runtime.runtime +import yr.storage.reference_count +from fnruntime import log +from fnruntime.common import utils +from fnruntime.common.utils import dependency_objref_process +from fnruntime.handlers.utils import InstanceManager, CodeManager +from yr import apis +from yr import config +from yr.config import ConfigManager +from yr.exception import YRInvokeError +from yr.runtime.task_spec import InvokeType +from yr.runtime.task_spec import TaskMetadata +from yr.serialization import Serialization +from yr.rpc.common_pb2 import Arg + +CUSTOM_SIGNAL_EXIT = 64 + + +def _get_serialized_code(code_id): + code = CodeManager().load(code_id) + if code is not None: + return code + code = utils.get_code(code_id) + if code is not None: + CodeManager().register(code_id, code) + return code + + +def _instance_create_function(posix_args: List[Arg], metadata: TaskMetadata): + if metadata.invoke_type == InvokeType.INVALID and metadata.code_id == yr.utils.NORMAL_FUNCTION: + return False + if metadata.invoke_type == InvokeType.CREATE_NORMAL_FUNCTION_INSTANCE: + return False + + log.get_logger().debug( + f"start to create instance: {metadata.object_descriptor.module_name}.{metadata.object_descriptor.class_name}") + class_code, use_msgpack = _parse_code(metadata, is_class=True) + if class_code is None: + raise RuntimeError("Not found code") + InstanceManager().class_code = class_code + + args, kwargs = utils.get_param(posix_args[utils.ARGS_INDEX:], use_msgpack) + dependency_objref_process(posix_args=posix_args, is_increase=False) + instance = class_code(*args, **kwargs) + InstanceManager().init(instance) + return use_msgpack + + +def _parse_code(metadata: TaskMetadata, is_class=False): + is_cross_language = False + if metadata.object_descriptor.src_language != yr.utils.LANGUAGE_PYTHON: + is_cross_language = True + + if 
is_cross_language: + if is_class: + code_name = metadata.object_descriptor.class_name + else: + code_name = metadata.object_descriptor.function_name + + src_language = metadata.object_descriptor.src_language + if src_language == yr.utils.LANGUAGE_CPP: + code = CodeManager().get_code_from_local(ConfigManager().code_dir, + metadata.object_descriptor.module_name, + code_name) + if is_class: + local_code = code.get_original_cls() + else: + local_code = code.get_original_func() + return local_code, True + raise RuntimeError(f'invalid srcLanguage value, expect srcLanguage= {yr.utils.LANGUAGE_CPP}, ' + f'actual {src_language}') + + code = _get_serialized_code(metadata.code_id) + return code, False + + +def _invoke_function(posix_args: List[Arg]): + if len(posix_args) == 0: + return None, False + + metadata = TaskMetadata.parse(posix_args[utils.METADATA_INDEX].value) + log.get_logger().debug( + f"start to create/invoke: {metadata.object_descriptor.module_name}.{metadata.object_descriptor.function_name}") + + if metadata.invoke_type == InvokeType.INVALID and metadata.code_id == yr.utils.MEMBER_FUNCTION: + return _instance_function(posix_args, metadata) + if metadata.invoke_type == InvokeType.INVOKE_MEMBER_FUNCTION: + return _instance_function(posix_args, metadata) + if metadata.invoke_type == InvokeType.GET_NAMED_INSTANCE_METADATA: + return _get_instance_class_code() + return _normal_function(posix_args, metadata) + + +def _normal_function(posix_args: List[Arg], metadata: TaskMetadata): + code, use_msgpack = _parse_code(metadata) + if code is None: + raise RuntimeError("Not found code") + + args, kwargs = utils.get_param(posix_args[utils.ARGS_INDEX:], use_msgpack) + dependency_objref_process(posix_args=posix_args, is_increase=False) + result = code(*args, **kwargs) + return result, use_msgpack + + +def _instance_function(posix_args: List[Arg], metadata: TaskMetadata): + instance_function_name = metadata.object_descriptor.function_name + use_msgpack = metadata.object_descriptor.src_language == yr.utils.LANGUAGE_CPP + args, kwargs = utils.get_param(posix_args[utils.ARGS_INDEX:], use_msgpack) + + instance = InstanceManager().instance() + if instance is None: + raise RuntimeError("No init class instance") + + dependency_objref_process(posix_args=posix_args, is_increase=False) + result = getattr(instance, instance_function_name)(*args, **kwargs) + return result, use_msgpack + + +def _get_instance_class_code(): + class_code = InstanceManager().class_code + return class_code, False + + +def init(posix_args: List[Arg], code_dir: str, cross_param=None): + """init call request""" + result = None + if len(posix_args) == 0: + return result + + metadata = TaskMetadata.parse(posix_args[utils.METADATA_INDEX].value) + if config.ConfigManager().is_init is not True: + cfg = config.Config(code_dir=code_dir, + on_cloud=True, + in_cluster=True, + job_id=metadata.job_id, + log_level=metadata.log_level, + recycle_time=metadata.recycle_time, + function_id=metadata.function_id_python, + cpp_function_id=metadata.function_id_cpp) + InstanceManager().config = cfg + apis.init(cfg) + + use_msgpack = False + try: + use_msgpack = _instance_create_function(posix_args, metadata) + except Exception as err: + if isinstance(err, YRInvokeError): + result = YRInvokeError(err.cause, traceback.format_exc()) + else: + result = YRInvokeError(err, traceback.format_exc()) + log.get_logger().error(f"failed to init, err: {repr(err)} {traceback.format_exc()}") + + if cross_param is not None: + cross_param.use_msgpack = use_msgpack + return 
result + + +def call(posix_args: List[Arg], cross_param=None): + """call request""" + if len(posix_args) == 0: + log.get_logger().debug("yrlib call, get 0 args from request.") + return None + use_msgpack = False + try: + result, use_msgpack = _invoke_function(posix_args) + except Exception as err: + if isinstance(err, YRInvokeError): + result = YRInvokeError(err.cause, traceback.format_exc()) + else: + result = YRInvokeError(err, traceback.format_exc()) + log.get_logger().error(f"failed to call, err: {repr(err)} {traceback.format_exc()}") + + if cross_param is not None: + cross_param.use_msgpack = use_msgpack + return result + + +def checkpoint(checkpoint_id: str) -> bytes: + """check point""" + log.get_logger().info("start to checkpoint") + instance = InstanceManager().instance() + try: + result = Serialization().serialize((instance, InstanceManager().config), "", False) + except TypeError as e: + log.get_logger().exception(e) + return bytes() + return result.data + + +def recover(state: bytes): + """recover state""" + log.get_logger().info('start to recover state') + if len(state) == 0: + log.get_logger().error("no instance when try to recover") + raise RuntimeError("No recover state") + instance, cfg = Serialization().deserialize(state) + InstanceManager().init(instance) + apis.init(cfg) + + +def shutdown(grace_period_second: int): + """shutdown""" + apis.finalize() + + +def signal(signal_num: int): + """ + signal + yr-api custom signal handler + """ + if signal_num == CUSTOM_SIGNAL_EXIT: + apis.finalize() + apis.exit() diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/function/python3.9/yrlib-empty/function.yaml b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/function/python3.9/yrlib-empty/function.yaml new file mode 100644 index 0000000000000000000000000000000000000000..f11e678979dc5858959656f1bc7bb9e4d6d1d4fd --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/function/python3.9/yrlib-empty/function.yaml @@ -0,0 +1,4 @@ +runtime: python3.9 +memory: 500 +cpu: 500 +timeout: 900 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/function/python3.9/yrlib-empty/handler.py b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/function/python3.9/yrlib-empty/handler.py new file mode 100644 index 0000000000000000000000000000000000000000..0f0246bfce05f85665acfb569c05148834818a1b --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/function/python3.9/yrlib-empty/handler.py @@ -0,0 +1,29 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
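+
+# Note (added for illustration): with the yr API, yr.init() connects to the
+# cluster, @yr.invoke marks the function for remote invocation,
+# my_handler.invoke() returns an object reference, and yr.get(ref) blocks
+# until the remote result is available.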
+
+import yr
+
+
+yr.init()
+
+
+@yr.invoke
+def my_handler():
+    # TODO
+    pass
+
+
+res = my_handler.invoke()
+yr.get(res)
+
+yr.finalize()
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-class/helloclass/helloclass.py b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-class/helloclass/helloclass.py
new file mode 100644
index 0000000000000000000000000000000000000000..b170eb6b6b476c85097fd2453917c31702325d79
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-class/helloclass/helloclass.py
@@ -0,0 +1,38 @@
+#!/usr/bin/env python3
+# coding=UTF-8
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+import yr
+
+yr.init()
+
+
+@yr.instance
+class Instance:
+    sum = 0
+
+    def add(self, a):
+        self.sum += a
+
+    def get(self):
+        return self.sum
+
+
+ins = [Instance.invoke() for i in range(1)]
+yr.get([i.add.invoke(1) for i in ins])
+res = [i.get.invoke() for i in ins]
+yr.get(res)
+[i.terminate() for i in ins]
+yr.finalize()
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-class/service.yaml b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-class/service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..dbc721e3c3872a1ef205ab02f6870c153ff3ac59
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-class/service.yaml
@@ -0,0 +1,9 @@
+service: class
+kind: yrlib
+description: this is a class service
+functions:
+  class:
+    runtime: python3.9
+    memory: 500
+    cpu: 500
+    timeout: 900
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-empty/service.yaml b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-empty/service.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bef06b9432bc60691405cccca971d5e57767ece4
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-empty/service.yaml
@@ -0,0 +1,3 @@
+service: empty
+kind: yrlib
+description: this is an empty service
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-hello/helloworld/helloworld.py b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-hello/helloworld/helloworld.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e2c07ebb2d6ed6058bf47fb35adb319162c06a8
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-hello/helloworld/helloworld.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python3
+# coding=UTF-8
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +import yr + + +yr.init() + + +@yr.invoke +def hello_world(): + return "hello world" + + +res = hello_world.invoke() +print(yr.get(res)) +yr.finalize() diff --git a/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-hello/service.yaml b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-hello/service.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4e103efac5088f1d8e5bf55f22905157aa4bc112 --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/local-repo/yrlib/service/python3.9/yrlib-hello/service.yaml @@ -0,0 +1,9 @@ +service: helloworld +kind: yrlib +description: this is a helloworld service +functions: + helloworld: + runtime: python3.9 + memory: 500 + cpu: 500 + timeout: 900 diff --git a/dsoftbus/dist_executor/modules/resource/rdo/v1/apple/a.txt b/dsoftbus/dist_executor/modules/resource/rdo/v1/apple/a.txt new file mode 100644 index 0000000000000000000000000000000000000000..3709ccd9afd30c21d59bfae320a74b80daa14f41 --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/rdo/v1/apple/a.txt @@ -0,0 +1 @@ +f48f9d5a9706088947ac438ebe005aa26c9370579f2231c538b28894a315562182da0eb18002c86728c4cdc0df5efb19e1c2060e93370fd891d4f3d9e5b2b61376643f86d0210ce996446a985759b15112037a5a2f6463cf5fd6afc7ff30fe814bf960eb0c16c5059407c74d6a93a8b3110405cbc935dff672da3b648d62e0d5cecd91bc7063211e6b33210afb6899e8322eabffe167318a5ac5d591aa7579efd37e9e4c7fcf390e97c1151b7c1bf00b4a18764a1a0cac1fda1ea6389b39d755127f0e5bc072e6d5936738be1585535dc63b71ad58686f71c821325009de36bdbac31c1c044845bd1bb41230ec9815695ef3f9e7143a16410113ff3286147a76 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/rdo/v1/boy/b.txt b/dsoftbus/dist_executor/modules/resource/rdo/v1/boy/b.txt new file mode 100644 index 0000000000000000000000000000000000000000..ccf997f96126e67f30cdf76f1cb9dc8ef5bff9dd --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/rdo/v1/boy/b.txt @@ -0,0 +1 @@ +5d3da9f432be72b34951c737053eb2c816aaccae2b390d092046288aa5ce2cc5b16529f8197de316303735fbc0c041ccc3885b9be5fef4933b6806febb940b6bb609b3bf1d1501110e3ba62c6d8b2cf4388a08a8e123a3cea96daec619fbca177bdf092461f5701b02e5af83ddf0f6ce40deb279cda3ec7d6805237d229e26e30555f3dd890b7306b42bdef0ca1f963dbe25cd00d75018ab3216fcd3b7002b8a493d015306bf264cca12718890ef11c8d9e54721ebd6bdecab6c7084442f45611f249d9b5d703414770a46380d0b97c0187185241e9b6187c8168414370649fe6e7afef83a0df645424c4b6c0631dc3ef50c30af37eda905a1886ca12474c68a \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/rdo/v1/cat/c.txt b/dsoftbus/dist_executor/modules/resource/rdo/v1/cat/c.txt new file mode 100644 index 0000000000000000000000000000000000000000..10995ca71de7c44692a6f781fad7c1de77af08ae --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/rdo/v1/cat/c.txt @@ -0,0 +1 @@ 
+df04ed142bf01fb1fe6c3c85944d71b77041b1f9f364ae09280cda4cdd65de76a4ce0459f10890eefd36708f3d154aa8af202301a215679207f17c472f04f7d19e219edb5028e18613378b3296e0e8f9940d1785d1daf1aed07dfad3a12c7d129bfc54635d0c8a5bc10c55ea116de90765d687e6cb6d657fe36a654eb8393e657b77b0916f291fccbddbbaeb9a43d4418458970e7b7ee257b0c54a1b4466b5dba393e7934b398874bec060407e9bac139c8d00937c8e738f4e88401e0950fb968658c9e0cfd639b4a530da92f1abd6c2e06953d7f5d4bdf4f5b8385edc8aab8b8ec7e897d24bd8d736955c8e19676c0037ce5347f6997f7c2e32ba71e995fc5d \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/rdo/v1/dog/d.txt b/dsoftbus/dist_executor/modules/resource/rdo/v1/dog/d.txt new file mode 100644 index 0000000000000000000000000000000000000000..bff402ff7bfa253116d0d3b089d44517d60e3127 --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/rdo/v1/dog/d.txt @@ -0,0 +1 @@ +37a1b37efbb9bb6beadb4446f40aa2c4bcaeb298192fa390ed03ee65bfcd54e55da39bae9961b9fa0d4b89591e41eed835ed01cca315eab75ebaf8a9e7b02287a468ec6d0c61f9f8e4d58dad90fb8a6a13bee7fe4685dbb535bfdb7e76b328d66b4d4bc7aa48791b205d1d2f2ef176f2b5b80a8ddc34ed9514372130eb896bc18745facf059a7fa37ef5e2ef413d0030f5bca581055eb3b3565dca642651cb802530e2e4964ab3c8a37370adfd65c80483398a1a8668caed455deabae0dbae7fb2bcdeeee4c2a2d9431ed93c6527985ef684127691904c799e13f37daeb1cb7ebfb0904d61796362514e521ac0fed682fd952ca3e9ce9a7a4407aaaa44f8aab6 \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/rdo/v1/egg/e.txt b/dsoftbus/dist_executor/modules/resource/rdo/v1/egg/e.txt new file mode 100644 index 0000000000000000000000000000000000000000..445c90b71075e95ba8d3fc248099078ee2fa1bce --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/rdo/v1/egg/e.txt @@ -0,0 +1 @@ +43b0d158d9dcf4ffd416eb4e6a89d1b7a66d595c43329bb5c1c66d5befe33c37f31da53aaf539e43238457c46e1f28339cb9dda461c71c0ea2dba3dc8006684ff0d8d59ee2192582983c155e400d5b7cadcb65bbe682e61d175af54549796e447f3174b95f1f50998ae7785b5c0c359746e1ee6eeb989284fbe9e0f801ce5a7267285afbab7694c0e8434d6b86991298a46039de4d1fbfd824b8337b11c2d0b2f30ed4d46312e315cd9042abddc09ea73169f9e1f5baa496d44ed5cac9659cab076212499ef09a56db69e7444d665195a0562a7c82d176d027b0ecc7f4a26215e003fd463bf3911633baf85ee98f9187357a65ee2869b3d93a3871d830b4034e \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/resource/version.yaml b/dsoftbus/dist_executor/modules/resource/version.yaml new file mode 100644 index 0000000000000000000000000000000000000000..4f7f0bfed8448ccc29e3e24e9138b39e4bb81a0f --- /dev/null +++ b/dsoftbus/dist_executor/modules/resource/version.yaml @@ -0,0 +1,3 @@ +last_check_time: 2020-12-09T19:53:38.9173835+08:00 +latest_release: + version: version-info \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/runtime/python/bin/entrypoint_runtime_python b/dsoftbus/dist_executor/modules/runtime/python/bin/entrypoint_runtime_python new file mode 100644 index 0000000000000000000000000000000000000000..416b5c66c512c818be3751b07bbaedbfeda7a74e --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/bin/entrypoint_runtime_python @@ -0,0 +1,36 @@ +#!/bin/bash +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+set -e
+WRAPPER="/home/snuser/runtime/fnruntime/server.py"
+CLASSPATH="/dcache/layer"
+ADDRESS="${POD_IP}:31530"
+RPC_ADDRESS="${POD_IP}:31531"
+HandlerFilePath=""
+HandlerName=""
+RUNTIMECONFIG_PATH="/home/snuser/config/runtime.json"
+
+function getCfgValByKey() {
+    cat "${RUNTIMECONFIG_PATH}" | sed 's/,/\n/g' | grep "$1" | sed 's/:/\n/g' | sed '1d' | sed 's/}//g'
+}
+
+# Limit the number of open file handles
+ulimit -n "$(getCfgValByKey "maxHandlerNum")"
+
+# Prevent newly created files from having overly broad permissions
+umask 0027
+
+echo "${RUNTIME_LANGUAGE}"
+exec -c ${RUNTIME_LANGUAGE} ${WRAPPER} ${ADDRESS} ${RPC_ADDRESS} ${HandlerFilePath} ${HandlerName} ${CLASSPATH}
+exit 0
diff --git a/dsoftbus/dist_executor/modules/runtime/python/config/python-runtime-log.json b/dsoftbus/dist_executor/modules/runtime/python/config/python-runtime-log.json
new file mode 100644
index 0000000000000000000000000000000000000000..94302b73f9f443a20a596cf555f90b90abb25223
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/config/python-runtime-log.json
@@ -0,0 +1,24 @@
+{
+    "version": 1,
+    "disable_existing_loggers": false,
+    "formatters": {
+        "extra": {
+            "format": "[%(asctime)s %(levelname)s %(filename)s:%(lineno)d] [%(podname)s %(thread)d] %(message)s"
+        }
+    },
+    "handlers": {
+        "file": {
+            "class": "logging.FileHandler",
+            "filename": "/home/snuser/log",
+            "formatter": "extra"
+        }
+    },
+    "loggers": {
+        "FileLogger": {
+            "handlers": [
+                "file"
+            ],
+            "level": "DEBUG"
+        }
+    }
+}
diff --git a/dsoftbus/dist_executor/modules/runtime/python/config/runtime.json b/dsoftbus/dist_executor/modules/runtime/python/config/runtime.json
new file mode 100644
index 0000000000000000000000000000000000000000..9600d7109b0ecfa6189253fbbb42eb043858c43d
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/config/runtime.json
@@ -0,0 +1,6 @@
+{
+    "maxRequestBodySize": 6,
+    "maxHandlerNum": 1024,
+    "maxProcessNum": 1024,
+    "dataSystemConnectionTimeout": 0.15
+}
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/.commit_id b/dsoftbus/dist_executor/modules/runtime/python/datasystem/.commit_id
new file mode 100644
index 0000000000000000000000000000000000000000..9db6f6712e36fcf2a88aee7576cffd13cd63c1a1
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/datasystem/.commit_id
@@ -0,0 +1 @@
+__commit_id__ = '[7a2c877c5787b13e01afd368bb51c44b74455b82] [2022-12-30 14:55:03 +0800] [qianlong 00455322]'
diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/datasystem/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8512a4a5871cbfb57638a179b6e6f46f669b41e
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/datasystem/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2022 Huawei Technologies Co., Ltd.
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+"""
+Python module init.
+"""
+
+__all__ = ["AgentClient", "Buffer", "ConsistencyType", "ObjectClient", "StateCacheClient", "Status", "StreamClient",
+           "SubconfigType", "WriteMode"]
+
+from .agent_client import AgentClient
+from .object_cache import Buffer, ConsistencyType, ObjectClient, WriteMode
+from .stream_client import SubconfigType, StreamClient
+from .state_cache_client import StateCacheClient
+from .util import Status
diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/agent_client.py b/dsoftbus/dist_executor/modules/runtime/python/datasystem/agent_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..fddd01068d154cb24a9064f056bcedd6208998ff
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/datasystem/agent_client.py
@@ -0,0 +1,153 @@
+# Copyright (c) 2022 Huawei Technologies Co., Ltd.
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+"""
+Object cache agent client python interface.
+"""
+from __future__ import absolute_import
+
+from datasystem import libds_client_py as ds
+from datasystem.util import Validator as validator
+
+
+class AgentClient:
+    """
+    Features: Data system object cache agent client management for python.
+    """
+
+    def __init__(self, host, port, timeout_ms=60000, client_public_key="", client_private_key="", server_public_key=""):
+        """ Init a client to connect to a worker.
+
+        Args:
+            host(str): The host of the worker address.
+            port(int): The port of the worker address.
+            timeout_ms: The timeout of the init operation.
+            client_public_key(str): The client's public key, for curve authentication.
+            client_private_key(str): The client's private key, for curve authentication.
+            server_public_key(str): The agent server's public key, for curve authentication.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if the client fails to connect to the worker.
+        """
+        args = [
+            ["host", host, str],
+            ["port", port, int],
+            ["timeout_ms", timeout_ms, int],
+            ["client_public_key", client_public_key, str],
+            ["client_private_key", client_private_key, str],
+            ["server_public_key", server_public_key, str]
+        ]
+        validator.check_args_types(args)
+        self._client = ds.AgentClient(host, port, timeout_ms, client_public_key, client_private_key, server_public_key)
+
+    def init(self):
+        """ Init an agent client.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the client fails to connect to the server.
+        """
+        init_status = self._client.init()
+        if init_status.is_error():
+            raise RuntimeError(init_status.to_string())
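+
+    # Illustrative usage (added; host and port below are placeholders, not
+    # part of the original file):
+    #     client = AgentClient(host="127.0.0.1", port=31501)
+    #     client.init()
+    #     client.put("object-1", b"hello")
+    #     values = client.get(["object-1"], timeout_ms=1000)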
+
+        Args:
+            object_id: The ID of the object to store.
+            value: The value of the object.
+            nested_object_ids(list): An optional list of nested object ids that object_id depends on.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if it fails to put an object in the storage.
+        """
+        if nested_object_ids is None:
+            nested_object_ids = []
+        args = [
+            ["object_id", object_id, str],
+            ["nested_object_ids", nested_object_ids, list],
+            ["value", value, bytes]
+        ]
+        validator.check_args_types(args)
+        if not all(map(lambda id: isinstance(id, str), nested_object_ids)):
+            raise TypeError(r"The input of nested_object_ids should be a list of strings; an element has the wrong type.")
+        status = self._client.put(object_id, value, nested_object_ids)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+    def get(self, object_ids, timeout_ms):
+        """ Get objects from the storage.
+
+        Args:
+            object_ids: The IDs of the objects to get.
+            timeout_ms: The timeout of the get operation.
+
+        Returns:
+            values: The list of values for the data associated with the object_ids.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if the client fails to get an object.
+        """
+        args = [["object_ids", object_ids, list], ["timeout_ms", timeout_ms, int]]
+        validator.check_args_types(args)
+        if not object_ids:
+            raise RuntimeError(r"The input of object_ids list should not be empty")
+        values = []
+        status, values = self._client.get(object_ids, timeout_ms, values)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return values
+
+    def g_increase_ref(self, object_ids: list):
+        """ Increase the global reference count of the given object(s) by 1.
+
+        Args:
+            object_ids(list): The ids of the objects whose global references are increased.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if the client fails to increase the reference count.
+        """
+        args = [["object_ids", object_ids, list]]
+        validator.check_args_types(args)
+        if not object_ids:
+            raise RuntimeError(r"The input of object_ids list should not be empty")
+        status, failed_object_ids = self._client.g_increase_ref(object_ids)
+        if status.is_error():
+            raise RuntimeError(status.to_string() + r", failed objects: {obj_ids}".format(
+                obj_ids=failed_object_ids))
+
+    def g_decrease_ref(self, object_ids: list):
+        """ Decrease the global reference count of the given object(s) by 1.
+
+        Args:
+            object_ids(list): The ids of the objects whose global references are decreased.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if the client fails to decrease the reference count.
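+
+        Examples:
+            An illustrative round trip through this client; the worker address
+            and object id below are placeholders, not real deployment values.
+            >>> from datasystem.agent_client import AgentClient
+            >>> client = AgentClient('127.0.0.1', 18482)
+            >>> client.init()
+            >>> client.put('obj-a', b'hello')
+            >>> client.g_increase_ref(['obj-a'])
+            >>> client.get(['obj-a'], 5000)
+            [b'hello']
+            >>> client.g_decrease_ref(['obj-a'])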
+ """ + args = [["object_ids", object_ids, list]] + validator.check_args_types(args) + if not object_ids: + raise RuntimeError(r"The input of object_ids list should not be empty") + status, failed_object_ids = self._client.g_decrease_ref(object_ids) + if status.is_error(): + raise RuntimeError(status.to_string() + r", failed objects: {obj_ids}".format( + obj_ids=failed_object_ids)) diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/libgflags.so.2.2 b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libgflags.so.2.2 new file mode 100644 index 0000000000000000000000000000000000000000..dd3127d4f83c0aa4f3a5980adb83f1da0d0966b3 Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libgflags.so.2.2 differ diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/libgflags.so.2.2.2 b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libgflags.so.2.2.2 new file mode 100644 index 0000000000000000000000000000000000000000..dd3127d4f83c0aa4f3a5980adb83f1da0d0966b3 Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libgflags.so.2.2.2 differ diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/libglog.so.0 b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libglog.so.0 new file mode 100644 index 0000000000000000000000000000000000000000..118d59529990d15365ec0ae0f0896abef2c51a54 Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libglog.so.0 differ diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/libglog.so.0.5.0 b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libglog.so.0.5.0 new file mode 100644 index 0000000000000000000000000000000000000000..118d59529990d15365ec0ae0f0896abef2c51a54 Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libglog.so.0.5.0 differ diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/libprotobuf.so.3.13.0.0 b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libprotobuf.so.3.13.0.0 new file mode 100644 index 0000000000000000000000000000000000000000..6de235c52e46f1ccdc759aa0a91902a2a2e2f780 Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libprotobuf.so.3.13.0.0 differ diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/libtbb.so.2 b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libtbb.so.2 new file mode 100644 index 0000000000000000000000000000000000000000..935ae89a92056be6c100594764c0358acc75b033 Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libtbb.so.2 differ diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/libzmq.so.5 b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libzmq.so.5 new file mode 100644 index 0000000000000000000000000000000000000000..2f67e03597c36cdfa5c28ed367e03b6fc69d4713 Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libzmq.so.5 differ diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/libzmq.so.5.2.4 b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libzmq.so.5.2.4 new file mode 100644 index 0000000000000000000000000000000000000000..2f67e03597c36cdfa5c28ed367e03b6fc69d4713 Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/datasystem/libzmq.so.5.2.4 differ diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/object_cache.py 
b/dsoftbus/dist_executor/modules/runtime/python/datasystem/object_cache.py new file mode 100644 index 0000000000000000000000000000000000000000..0a2310d3d4d793a6e97e2c8327befcb8fbb650ef --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/datasystem/object_cache.py @@ -0,0 +1,429 @@ +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +Object cache client python interface. +""" +from __future__ import absolute_import + +from enum import Enum +from datasystem import libds_client_py as ds +from datasystem.util import Validator as validator + + +class WriteMode(Enum): + """ + Features: Wrapping the write modes + """ + NONE_L2_CACHE = ds.WriteMode.NONE_L2_CACHE + WRITE_THROUGH_L2_CACHE = ds.WriteMode.WRITE_THROUGH_L2_CACHE + + +class ConsistencyType(Enum): + """ + Features: Wrapping the consistency types + """ + PRAM = ds.ConsistencyType.PRAM + CAUSAL = ds.ConsistencyType.CAUSAL + + +class Buffer: + """ + Features: Wrapping the Buffer objects + """ + + def __init__(self): + self._buffer = None + + def wlatch(self, timeout=60): + """ Acquire the write-lock to protect the buffer from concurrent reads and writes. + + Args: + timeout(int): The try-lock timeout, the default value is 60 seconds. + + Raises: + TypeError: Raise a type error if the input parameter is invalid. + RuntimeError: Raise a runtime error if acquire write latch fails. + """ + args = [["timeout", timeout, int]] + validator.check_args_types(args) + self._check_buffer() + latch_status = self._buffer.wlatch(timeout) + if latch_status.is_error(): + raise RuntimeError(latch_status.to_string()) + + def unwlatch(self): + """ Release the write-lock. + + Raises: + RuntimeError: Raise a runtime error if release write latch fails. + """ + self._check_buffer() + unlatch_status = self._buffer.unwlatch() + if unlatch_status.is_error(): + raise RuntimeError(unlatch_status.to_string()) + + def rlatch(self, timeout=60): + """ Acquire the read-lock to protect the buffer from concurrent writes. + + Args: + timeout(int): The try-lock timeout, the default value is 60 seconds. + + Raises: + TypeError: Raise a type error if the input parameter is invalid. + RuntimeError: Raise a runtime error if acquire read latch fails. + """ + args = [["timeout", timeout, int]] + validator.check_args_types(args) + self._check_buffer() + latch_status = self._buffer.rlatch(timeout) + if latch_status.is_error(): + raise RuntimeError(latch_status.to_string()) + + def unrlatch(self): + """ Release the read-lock. + + Raises: + RuntimeError: Raise a runtime error if release read latch fails. + """ + self._check_buffer() + unlatch_status = self._buffer.unrlatch() + if unlatch_status.is_error(): + raise RuntimeError(unlatch_status.to_string()) + + def mutable_data(self): + """ Get a mutable data memory view. + + Returns: + The mutable memory view of the buffer. + """ + self._check_buffer() + return self._buffer.mutable_data() + + def immutable_data(self): + """ Get an immutable data memory view. + + Returns: + The immutable memory view of the buffer. 
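+
+        Examples:
+            An illustrative latch pairing; `buf` is assumed to come from
+            ObjectClient.create() or ObjectClient.get(), defined later in this
+            file. try/finally keeps the latch from leaking when the read raises.
+            >>> buf.rlatch(timeout=10)
+            >>> try:
+            ...     data = bytes(buf.immutable_data())
+            ... finally:
+            ...     buf.unrlatch()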
+        """
+        self._check_buffer()
+        return memoryview(self._buffer.immutable_data())
+
+    def memory_copy(self, value):
+        """ Write data to the buffer.
+
+        Args:
+            value(memoryview, bytes or bytearray): The data to be copied to the buffer.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if the copy fails.
+        """
+        args = [["value", value, memoryview, bytes, bytearray]]
+        validator.check_args_types(args)
+        self._check_buffer()
+        copy_status = self._buffer.memory_copy(value)
+        if copy_status.is_error():
+            raise RuntimeError(copy_status.to_string())
+
+    def publish(self, nested_object_ids=None):
+        """ Publish mutable data to the server.
+
+        Args:
+            nested_object_ids(list): The ids of nested objects that this buffer's object depends on.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if the publish fails.
+        """
+        if nested_object_ids is None:
+            nested_object_ids = []
+        args = [["nested_object_ids", nested_object_ids, list]]
+        validator.check_args_types(args)
+        if not all(map(lambda id: isinstance(id, str), nested_object_ids)):
+            raise TypeError(r"The input of nested_object_ids should be a list of strings; an element has the wrong type.")
+        self._check_buffer()
+        pub_status = self._buffer.publish(nested_object_ids)
+        if pub_status.is_error():
+            raise RuntimeError(pub_status.to_string())
+
+    def seal(self, nested_object_ids=None):
+        """ Publish immutable data to the server.
+
+        Args:
+            nested_object_ids(list): The ids of nested objects that this buffer's object depends on.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if the seal fails.
+        """
+        if nested_object_ids is None:
+            nested_object_ids = []
+        args = [["nested_object_ids", nested_object_ids, list]]
+        validator.check_args_types(args)
+        if not all(map(lambda id: isinstance(id, str), nested_object_ids)):
+            raise TypeError(r"The input of nested_object_ids should be a list of strings; an element has the wrong type.")
+        self._check_buffer()
+        seal_status = self._buffer.seal(nested_object_ids)
+        if seal_status.is_error():
+            raise RuntimeError(seal_status.to_string())
+
+    def invalidate_buffer(self):
+        """ Invalidate the data on the current host.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the invalidate fails.
+        """
+        self._check_buffer()
+        invalidate_status = self._buffer.invalidate_buffer()
+        if invalidate_status.is_error():
+            raise RuntimeError(invalidate_status.to_string())
+
+    def get_size(self):
+        """ Get the size of the buffer.
+
+        Returns:
+            size(int): The data size of the buffer.
+        """
+        self._check_buffer()
+        return self._buffer.get_size()
+
+    def set_buffer(self, buffer):
+        """ Set the underlying buffer.
+
+        Args:
+            buffer: The buffer created by the client.
+        """
+        self._buffer = buffer
+
+    def _check_buffer(self):
+        """ Check that self._buffer is not None.
+
+        Raises:
+            RuntimeError: Raise a runtime error if self._buffer is None.
+        """
+        if self._buffer is None:
+            raise RuntimeError(r"The buffer is None, please create it first.")
+
+
+class ObjectClient:
+    """
+    Features: Data system Object Client management for python.
+    """
+
+    def __init__(
+            self,
+            host,
+            port,
+            connect_timeout=60000,
+            token="",
+            client_public_key="",
+            client_private_key="",
+            server_public_key=""
+    ):
+        """ Constructor of the ObjectClient class.
+
+        Args:
+            host(str): The host of the worker address.
+            port(int): The port of the worker address.
+            connect_timeout(int): The timeout interval for the connection between the client and worker.
+            token(str): A string used for authentication.
+            client_public_key(str): The client's public key, for curve authentication.
+            client_private_key(str): The client's private key, for curve authentication.
+            server_public_key(str): The worker server's public key, for curve authentication.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+        """
+        args = [["host", host, str],
+                ["port", port, int],
+                ["connect_timeout", connect_timeout, int],
+                ["token", token, str],
+                ["client_public_key", client_public_key, str],
+                ["client_private_key", client_private_key, str],
+                ["server_public_key", server_public_key, str]]
+        validator.check_args_types(args)
+        self.client = ds.ObjectClient(host, port, connect_timeout, token,
+                                      client_public_key, client_private_key, server_public_key)
+
+    def init(self):
+        """ Init a client to connect to a worker.
+
+        Returns:
+            status: The init status.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the client fails to connect to the worker.
+        """
+        init_status = self.client.init()
+        if init_status.is_error():
+            raise RuntimeError(init_status.to_string())
+
+    def create(self, object_id, size, param=None):
+        """ Create an object buffer.
+
+        Args:
+            object_id(str): The id of the object to be created.
+            size(int): The size in bytes of the object.
+            param(dict): A dict containing the following two "key: value" pairs:
+                (1) "write_mode", write_mode(Enum): Indicating whether the object will be written through L2 cache.
+                    There are 2 options:
+                    1) WriteMode.NONE_L2_CACHE;
+                    2) WriteMode.WRITE_THROUGH_L2_CACHE;
+                (2) "consistency_type", consistency_type(Enum): Indicating which consistency type will be used.
+                    There are 2 options:
+                    1) ConsistencyType.PRAM;
+                    2) ConsistencyType.CAUSAL;
+
+        Returns:
+            buffer: The object buffer.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if the buffer creation fails.
+        """
+        if param is None:
+            param = {
+                "write_mode": WriteMode.NONE_L2_CACHE,
+                "consistency_type": ConsistencyType.PRAM
+            }
+        validator.check_args_types([["param", param, dict]])
+
+        params = validator.check_key_exists(param, ["write_mode", "consistency_type"])
+        write_mode, consistency_type = params[0], params[1]
+
+        args = [
+            ["object_id", object_id, str],
+            ["size", size, int],
+            ["write_mode", write_mode, type(WriteMode.NONE_L2_CACHE), type(WriteMode.WRITE_THROUGH_L2_CACHE)],
+            ["consistency_type", consistency_type, type(ConsistencyType.PRAM), type(ConsistencyType.CAUSAL)]
+        ]
+        validator.check_args_types(args)
+
+        create_status, buffer = self.client.create(object_id, size, write_mode.value, consistency_type.value)
+        if create_status.is_error():
+            raise RuntimeError(create_status.to_string())
+        buf = Buffer()
+        buf.set_buffer(buffer)
+        return buf
+
+    def put(self, object_id, value, param=None, nested_object_ids=None):
+        """ Put the object data to the data system.
+
+        Args:
+            object_id(str): The id of the object to be put.
+            value(memoryview, bytes or bytearray): The data to be put.
+            param(dict): A dict containing the following two "key: value" pairs:
+                (1) "write_mode", write_mode(Enum): Indicating whether the object will be written through L2 cache.
+                    There are 2 options:
+                    1) WriteMode.NONE_L2_CACHE;
+                    2) WriteMode.WRITE_THROUGH_L2_CACHE;
+                (2) "consistency_type", consistency_type(Enum): Indicating which consistency type will be used.
+ There are 2 options: + 1) ConsistencyType.PRAM; + 2) ConsistencyType.CAUSAL; + nested_object_ids(list): Objects that depend on objectId. + + Raises: + TypeError: Raise a type error if the input parameter is invalid. + RuntimeError: Raise a runtime error if the put fails. + """ + if param is None: + param = { + "write_mode": WriteMode.NONE_L2_CACHE, + "consistency_type": ConsistencyType.PRAM + } + validator.check_args_types([["param", param, dict]]) + params = validator.check_key_exists(param, ["write_mode", "consistency_type"]) + write_mode, consistency_type = params[0], params[1] + + if nested_object_ids is None: + nested_object_ids = [] + + args = [["object_id", object_id, str], + ["value", value, memoryview, bytes, bytearray], + ["write_mode", write_mode, type(WriteMode.NONE_L2_CACHE), type(WriteMode.WRITE_THROUGH_L2_CACHE)], + ["consistency_type", consistency_type, type(ConsistencyType.PRAM), type(ConsistencyType.CAUSAL)], + ["nested_object_ids", nested_object_ids, list]] + validator.check_args_types(args) + put_status = self.client.put(object_id, value, write_mode.value, consistency_type.value, + nested_object_ids) + if put_status.is_error(): + raise RuntimeError(put_status.to_string()) + + def get(self, object_ids, timeout): + """ Get the buffers corresponding to the designated object ids + + Args: + object_ids(list): The ids of the objects to get. + timeout(int): The timeout of the get operation. + + Returns: + buffers(list): list of buffers for the given object_ids. + + Raises: + TypeError: Raise a type error if the input parameter is invalid. + RuntimeError: Raise a runtime error if the RLatch fails. + """ + buffer_list = [] + args = [["object_ids", object_ids, list], ["timeout", timeout, int]] + validator.check_args_types(args) + if not object_ids: + raise RuntimeError(r"The input of object_ids list should not be empty") + get_status, buffer_array = self.client.get(object_ids, timeout) + if get_status.is_error(): + raise RuntimeError(get_status.to_string()) + for buffer in buffer_array: + buf = Buffer() + if not buffer.is_empty(): + buf.set_buffer(buffer) + buffer_list.append(buf) + return buffer_list + + def g_increase_ref(self, object_ids): + """ Increase the global reference of the given objects. + + Args: + object_ids(list): The ids of the objects to be increased. It cannot be empty. + + Raises: + TypeError: Raise a type error if the input parameter is invalid. + RuntimeError: Raise a runtime error if the increase fails. + """ + args = [["object_ids", object_ids, list]] + validator.check_args_types(args) + if not object_ids: + raise RuntimeError(r"The input of object_ids list should not be empty") + g_inc_ref_status, failed_object_ids = self.client.g_increase_ref(object_ids) + if g_inc_ref_status.is_error(): + raise RuntimeError(g_inc_ref_status.to_string() + r", failed objects: {obj_ids}".format( + obj_ids=failed_object_ids)) + + def g_decrease_ref(self, object_ids): + """ Decrease the global reference of the given objects. + + Args: + object_ids(list): The ids of the objects to be decreased. It cannot be empty. + + Raises: + TypeError: Raise a type error if the input parameter is invalid. + RuntimeError: Raise a runtime error if the decrease fails. 
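+
+        Examples:
+            An illustrative flow contrasting the two write paths of this client;
+            the address, object ids and sizes below are placeholders.
+            >>> from datasystem.object_cache import ObjectClient
+            >>> client = ObjectClient('127.0.0.1', 18482)
+            >>> client.init()
+            >>> buf = client.create('obj-a', 5)   # zero-copy path: create, fill, seal
+            >>> buf.memory_copy(b'hello')
+            >>> buf.seal()
+            >>> client.put('obj-b', b'world')     # one-shot path
+            >>> [bytes(b.immutable_data()) for b in client.get(['obj-a', 'obj-b'], 5000)]
+            [b'hello', b'world']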
+ """ + args = [["object_ids", object_ids, list]] + validator.check_args_types(args) + if not object_ids: + raise RuntimeError(r"The input of object_ids list should not be empty") + g_dec_ref_status, failed_object_ids = self.client.g_decrease_ref(object_ids) + if g_dec_ref_status.is_error(): + raise RuntimeError(g_dec_ref_status.to_string() + r", failed objects: {obj_ids}".format( + obj_ids=failed_object_ids)) diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/state_cache_client.py b/dsoftbus/dist_executor/modules/runtime/python/datasystem/state_cache_client.py new file mode 100644 index 0000000000000000000000000000000000000000..fb71a1fb521a0c24ececb0395abf6c904d01904e --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/datasystem/state_cache_client.py @@ -0,0 +1,314 @@ +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +State cache client python interface. +""" +from datasystem import libds_client_py as ds +from datasystem.object_cache import WriteMode +from datasystem.util import Validator as validator + + +class StateCacheClient: + """ + Features: Data system State Cache Client management for python. + """ + + def __init__(self): + self._client = None + + def init(self, host: str, port: int, timeout_ms=60000, token: str = '', client_public_key: str = '', + client_private_key: str = '', server_public_key: str = ""): + """ Init a client to connect to a worker. + + Args: + host(str): The host of the worker. + port(int): The port of the worker. + timeout_ms(int): The timeout interval for the connection between the client and worker. + token(str): A string used for authentication. + client_public_key(str): The client's public key, for curve authentication. + client_private_key(str): The client's private key, for curve authentication. + server_public_key(str): The worker server's public key, for curve authentication. + Raises: + RuntimeError: Raise a runtime error if the client fails to connect to the worker. + TypeError: Raise a type error if the input parameter is invalid. + """ + args = [["host", host, str], ["timeout_ms", timeout_ms, int], ["client_public_key", client_public_key, str], + ["client_private_key", client_private_key, str], ["server_public_key", server_public_key, str]] + validator.check_args_types(args) + self._client = ds.StateCacheClient(host, port, timeout_ms, token, + client_public_key, client_private_key, server_public_key) + status = self._client.Init() + if status.is_error(): + raise RuntimeError(status.to_string()) + + def set(self, key, val, write_mode=WriteMode.NONE_L2_CACHE, ttl_second=0): + """ Invoke worker client to set the value of a key. + + Args: + key(str): The key of string data. + val(memoryview, bytes, bytearray, str): The data to be set. + write_mode(WriteMode): controls whether data is written to the L2 cache to enhance data reliability. 
+                The options are as follows:
+                WriteMode.NONE_L2_CACHE: indicates that data reliability is not required.
+                WriteMode.WRITE_THROUGH_L2_CACHE: indicates that data is synchronously written to the L2 cache
+                    to improve data reliability.
+            ttl_second(int): controls the expire time of the data:
+                If the value is greater than 0, the data will be deleted automatically after it expires.
+                If set to 0, the data needs to be deleted manually.
+        Raises:
+            RuntimeError: Raise a runtime error if it fails to set the value of the key.
+            TypeError: Raise a type error if the input parameter is invalid.
+        """
+        args = [["key", key, str],
+                ["val", val, memoryview, bytes, bytearray, str],
+                ["write_mode", write_mode, type(WriteMode.NONE_L2_CACHE), type(WriteMode.WRITE_THROUGH_L2_CACHE)],
+                ["ttl_second", ttl_second, int]]
+        validator.check_args_types(args)
+
+        if isinstance(val, str):
+            val = str.encode(val)
+        status = self._client.Set(key, val, write_mode.value, ttl_second)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+    def get(self, keys: list = None, convert_to_str: bool = False):
+        """ Get the values of all given keys.
+
+        Args:
+            keys(list): The key list of string type.
+            convert_to_str(bool): True: convert the return values to strings; False: return bytes directly.
+
+        Returns:
+            values(list): The value list of the keys. If a key is not found, a RuntimeError is raised.
+
+        Raises:
+            RuntimeError: Raise a runtime error if it fails to get the value of a key.
+            TypeError: Raise a type error if the input parameter is invalid.
+        """
+        args = [["keys", keys, list]]
+        validator.check_args_types(args)
+        if keys is None:
+            raise RuntimeError(r"The input of keys list should not be empty")
+        status, values = self._client.Get(keys)  # returned values are of bytes type
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        if convert_to_str is False:
+            return values
+        ret_vals = []
+        for val in values:
+            ret_vals.append(val.decode())
+        return ret_vals
+
+    def delete(self, keys: list = None):
+        """ Delete the values of all given keys.
+
+        Args:
+            keys(list): The key list of string type.
+
+        Returns:
+            success_count(int): The number of deleted entries. If no key is found, 0 is returned.
+
+        Raises:
+            RuntimeError: Raise a runtime error if it fails to delete the value of a key.
+            TypeError: Raise a type error if the input parameter is invalid.
+        """
+        args = [["keys", keys, list]]
+        validator.check_args_types(args)
+        if keys is None:
+            raise RuntimeError(r"The input of keys list should not be empty")
+        status, success_count = self._client.Del(keys)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return success_count
+
+    def lpush(self, key: str, values: list):
+        """ Prepend one or multiple elements to a list.
+
+        Args:
+            key(str): The key of the list object.
+            values(list): The values to lpush onto the list.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the lpush fails.
+            TypeError: Raise a type error if the input parameter is invalid.
+        """
+        args = [["key", key, str], ["values", values, list]]
+        validator.check_args_types(args)
+        status = self._client.Lpush(key, values)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+    def lpop(self, key: str):
+        """ Remove and get the first element of a list.
+
+        Args:
+            key(str): The key of the list object.
+
+        Returns:
+            value(str): The first element of the list.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the lpop fails.
+            TypeError: Raise a type error if the input parameter is invalid.
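+
+        Examples:
+            Illustrative list usage; the worker address is a placeholder, and
+            Redis-style LPUSH ordering is assumed (the last pushed value
+            becomes the head).
+            >>> from datasystem.state_cache_client import StateCacheClient
+            >>> client = StateCacheClient()
+            >>> client.init('127.0.0.1', 18482)
+            >>> client.lpush('jobs', ['a', 'b'])
+            >>> value = client.lpop('jobs')  # head of the list, i.e. 'b'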
+        """
+        args = [["key", key, str]]
+        validator.check_args_types(args)
+        status, value = self._client.Lpop(key)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return value
+
+    def lindex(self, key: str, index: int):
+        """ Get an element from a list by its index.
+
+        Args:
+            key(str): The key of the list object.
+            index(int): The index of the element to get.
+
+        Returns:
+            value(str): The element at the given index.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the lindex fails.
+            TypeError: Raise a type error if the input parameter is invalid.
+        """
+        args = [["key", key, str], ["index", index, int]]
+        validator.check_args_types(args)
+        status, value = self._client.Lindex(key, index)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return value
+
+    def llen(self, key: str):
+        """ Get the length of the list.
+
+        Args:
+            key(str): The key of the list object.
+
+        Returns:
+            value(int): The length of the list.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the llen fails.
+            TypeError: Raise a type error if the input parameter is invalid.
+        """
+        args = [["key", key, str]]
+        validator.check_args_types(args)
+        status, value = self._client.Llen(key)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return value
+
+    def hget(self, key: str, field: str):
+        """ Get the value of a hash field.
+
+        Args:
+            key(str): The key of the hash object.
+            field(str): The field of the hash object to get the value of.
+
+        Returns:
+            value(str): The value of the hash field.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the hget fails.
+            TypeError: Raise a type error if the input parameter is invalid.
+
+        Examples:
+            >>> from datasystem.state_cache_client import StateCacheClient
+            >>> client = StateCacheClient()
+            >>> client.init('127.0.0.1', 18482)
+            >>> client.hget('key', 'a')
+            '1'
+        """
+        args = [["key", key, str], ["field", field, str]]
+        validator.check_args_types(args)
+        status, value = self._client.Hget(key, field)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return value
+
+    def hset(self, key: str, field: str, value: str):
+        """ Set the string value of a hash field.
+
+        Args:
+            key(str): The key of the hash object.
+            field(str): The field of the hash object to set.
+            value(str): The value of the field.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the hset fails.
+            TypeError: Raise a type error if the input parameter is invalid.
+
+        Examples:
+            >>> from datasystem.state_cache_client import StateCacheClient
+            >>> client = StateCacheClient()
+            >>> client.init('127.0.0.1', 18482)
+            >>> client.hset('key', 'a', '1')
+            >>> client.hset('key', 'b', '2')
+        """
+        args = [["key", key, str], ["field", field, str], ["value", value, str]]
+        validator.check_args_types(args)
+        status = self._client.Hset(key, field, value)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+    def hdel(self, key: str, field: str):
+        """ Delete one hash field.
+
+        Args:
+            key(str): The key of the hash object.
+            field(str): The field to delete from the hash object.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the hdel fails.
+            TypeError: Raise a type error if the input parameter is invalid.
+
+        Examples:
+            >>> from datasystem.state_cache_client import StateCacheClient
+            >>> client = StateCacheClient()
+            >>> client.init('127.0.0.1', 18482)
+            >>> client.hdel('key', 'c')
+        """
+        args = [["key", key, str], ["field", field, str]]
+        validator.check_args_types(args)
+        status = self._client.Hdel(key, field)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+    def hgetall(self, key: str):
+        """ Get all the fields and values in a hash.
+
+        Args:
+            key(str): The key of the hash object.
+
+        Returns:
+            dict: The fields and values of the hashmap.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the hgetall fails.
+            TypeError: Raise a type error if the input parameter is invalid.
+
+        Examples:
+            >>> from datasystem.state_cache_client import StateCacheClient
+            >>> client = StateCacheClient()
+            >>> client.init('127.0.0.1', 18482)
+            >>> client.hgetall('key')
+            {'a': '1', 'b': '2'}
+        """
+        args = [["key", key, str]]
+        validator.check_args_types(args)
+        status, values = self._client.Hgetall(key)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return values
diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/stream_client.py b/dsoftbus/dist_executor/modules/runtime/python/datasystem/stream_client.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e55cfbbfff41b4ead7141fa44462c73fe52fa7e
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/datasystem/stream_client.py
@@ -0,0 +1,267 @@
+# Copyright (c) 2022 Huawei Technologies Co., Ltd.
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+"""
+Stream cache client python interface.
+"""
+
+from enum import Enum
+import datasystem.libds_client_py as ds
+
+
+class SubconfigType(Enum):
+    """The type of stream subscription."""
+    STREAM = 0
+    ROUND_ROBIN = 1
+    KEY_PARTITIONS = 2
+
+
+class StreamClient:
+    """The stream client."""
+
+    def __init__(self, host: str, port: int, client_public_key: str = "",
+                 client_private_key: str = "", server_public_key: str = ""):
+        """ Constructor of the StreamClient class.
+
+        Args:
+            host(str): The worker address host.
+            port(int): The worker address port.
+            client_public_key(str): The client's public key, for curve authentication.
+            client_private_key(str): The client's private key, for curve authentication.
+            server_public_key(str): The worker server's public key, for curve authentication.
+        """
+        self._client = ds.StreamClient(host, port, client_public_key, client_private_key, server_public_key)
+
+    def init(self):
+        """ Init a stream client to connect to a worker.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the client fails to connect to the worker.
+        """
+        init_status = self._client.init()
+        if init_status.is_error():
+            raise RuntimeError(init_status.to_string())
+
+    def create_producer(self, stream_name, delay_flush_time_ms=5, page_size_byte=1024 * 1024,
+                        max_stream_size_byte=1024 * 1024 * 1024):
+        """ Create one Producer to send elements.
+
+        Args:
+            stream_name: The name of the stream.
+            delay_flush_time_ms: The delay used for the automatic flush after a send; the default is 5 ms.
+            page_size_byte: The page allocation size; the default is 1MB and it must be a multiple of 4KB.
+            max_stream_size_byte: The max stream size in the worker; the default is 1GB. It must be greater
+                than 64KB and less than the shared memory size.
+
+        Returns:
+            out_producer: The Producer that the user can use to send elements.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if creating a producer fails.
+        """
+        if not isinstance(stream_name, str):
+            raise TypeError("The input of stream_name should be string.")
+        if not isinstance(delay_flush_time_ms, int):
+            raise TypeError("The input of delay_flush_time_ms should be int.")
+        if not isinstance(page_size_byte, int):
+            raise TypeError("The input of page_size_byte should be int.")
+        status, out_producer = self._client.CreateProducer(
+            stream_name, delay_flush_time_ms, page_size_byte, max_stream_size_byte)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return Producer(out_producer)
+
+    def subscribe(self, stream_name, sub_name, subscription_type):
+        """ Subscribe a new consumer to a stream through a master request.
+
+        Args:
+            stream_name: The name of the stream.
+            sub_name: The name of the subscription.
+            subscription_type: The type of the subscription.
+
+        Returns:
+            consumer: The Consumer that the user can use to receive elements.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if subscribing a new consumer fails.
+        """
+        if not isinstance(stream_name, str):
+            raise TypeError("The input of stream_name should be string.")
+        if not isinstance(subscription_type, int):
+            raise TypeError("The input of type should be int.")
+        status, out_consumer = self._client.Subscribe(stream_name, sub_name, subscription_type)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return Consumer(out_consumer)
+
+    def delete_stream(self, stream_name):
+        """ Delete one stream.
+
+        Args:
+            stream_name: The name of the stream.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if deleting the stream fails.
+        """
+        if not isinstance(stream_name, str):
+            raise TypeError("The input of stream_name should be string.")
+        status = self._client.DeleteStream(stream_name)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+    def query_global_producer_num(self, stream_name):
+        """ Query the number of producers across all worker nodes.
+
+        Args:
+            stream_name: The name of the target stream.
+
+        Returns:
+            global_producer_num: The query result.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if querying the global producer number fails.
+        """
+        if not isinstance(stream_name, str):
+            raise TypeError("The input of stream_name should be string.")
+        status, global_producer_num = self._client.QueryGlobalProducersNum(stream_name)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return global_producer_num
+
+    def query_global_consumer_num(self, stream_name):
+        """ Query the number of consumers across all worker nodes.
+
+        Args:
+            stream_name: The name of the target stream.
+
+        Returns:
+            global_consumer_num: The query result.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if querying the global consumer number fails.
+        """
+        if not isinstance(stream_name, str):
+            raise TypeError("The input of stream_name should be string.")
+        status, global_consumer_num = self._client.QueryGlobalConsumersNum(stream_name)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return global_consumer_num
+
+
+class Producer:
+    """The producer side of a stream in the client."""
+
+    def __init__(self, producer):
+        if not isinstance(producer, ds.Producer):
+            raise TypeError("The input parameter should be a Producer.")
+        self._producer = producer
+
+    def send(self, element_bytes):
+        """ Send one element to the stream.
+
+        Args:
+            element_bytes: The element to be written.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if sending the element fails.
+        """
+        if not isinstance(element_bytes, memoryview) and not isinstance(element_bytes, bytes) and not isinstance(
+                element_bytes, bytearray):
+            raise TypeError("The input parameter should be memoryview, bytes or bytearray.")
+        status = self._producer.Send(element_bytes)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+    def flush(self):
+        """ Flush the elements buffered so far to the stream.
+
+        Raises:
+            RuntimeError: Raise a runtime error if the flush fails.
+        """
+        status = self._producer.Flush()
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+    def close(self):
+        """ Close the producer and unregister the publisher from the stream.
+
+        Raises:
+            RuntimeError: Raise a runtime error if closing the producer fails.
+        """
+        status = self._producer.Close()
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+
+class Consumer:
+    """The consumer side of a stream in the client."""
+
+    def __init__(self, consumer):
+        if not isinstance(consumer, ds.Consumer):
+            raise TypeError("The input parameter should be a Consumer.")
+        self._consumer = consumer
+
+    def receive(self, except_num, timeout_ms):
+        """ Receive element metadata; the worker handles big and small element lookup and parsing.
+
+        Args:
+            except_num: The number of elements to be read.
+            timeout_ms: The receive timeout in milliseconds.
+
+        Returns:
+            element: The element(s) that have been received.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if receiving elements fails.
+        """
+        if not isinstance(except_num, int):
+            raise TypeError("The input of except_num should be int.")
+        if not isinstance(timeout_ms, int):
+            raise TypeError("The input of timeout_ms should be int.")
+        status, element = self._consumer.Receive(except_num, timeout_ms)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+        return element
+
+    def ack(self, element_id):
+        """ Acknowledge elements that have been read by this consumer.
+
+        Args:
+            element_id: The element id to be acknowledged.
+
+        Raises:
+            TypeError: Raise a type error if the input parameter is invalid.
+            RuntimeError: Raise a runtime error if acknowledging elements fails.
+        """
+        if not isinstance(element_id, int):
+            raise TypeError("The input of element_id should be int.")
+        status = self._consumer.Ack(element_id)
+        if status.is_error():
+            raise RuntimeError(status.to_string())
+
+    def close(self):
+        """ Close the consumer; after closing, Receive and Ack are no longer allowed.
+
+        Raises:
+            RuntimeError: Raise a runtime error if closing the consumer fails.
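+
+        Examples:
+            An illustrative end-to-end flow through this module; the address,
+            stream name and subscription name below are placeholders.
+            >>> client = StreamClient('127.0.0.1', 18482)
+            >>> client.init()
+            >>> producer = client.create_producer('stream-a')
+            >>> producer.send(b'element')
+            >>> producer.flush()
+            >>> consumer = client.subscribe('stream-a', 'sub-a', SubconfigType.STREAM.value)
+            >>> data = consumer.receive(1, 1000)
+            >>> consumer.close()
+            >>> producer.close()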
+ """ + status = self._consumer.Close() + if status.is_error(): + raise RuntimeError(status.to_string()) diff --git a/dsoftbus/dist_executor/modules/runtime/python/datasystem/util.py b/dsoftbus/dist_executor/modules/runtime/python/datasystem/util.py new file mode 100644 index 0000000000000000000000000000000000000000..6e51eb62cdbbb8029fd970778a21fba13c92ea9f --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/datasystem/util.py @@ -0,0 +1,125 @@ +# Copyright (c) 2022 Huawei Technologies Co., Ltd. +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +The status of the return result. +""" +from datasystem import libds_common_py + + +class Validator: + """ + Features: Parse arguments + """ + + @staticmethod + def check_args_types(args): + """ Check the types of the input arguments + + Args: + args(list): The input arguments, which is a list of lists. Each list inside contains an argument name, + the argument value and its expected valid types. + Example: args = [["value", value, bytes, memoryview], ["timeout", timeout, int]]. Which means the argument + value should have the type of bytes or memoryview and the timeout argument should be an integer. + + Raises: + TypeError: Raise a type error if the input parameter is invalid. + """ + if not isinstance(args, list): + raise TypeError( + r"The input of args should be a list, error type: {err}".format(err=type(args))) + for arguments in args: + if not isinstance(arguments, list): + raise TypeError( + r"Each element of the input of args should be a list, error type: {err}".format( + err=type(arguments))) + if len(arguments) < 3: + raise TypeError( + r"Each element of the input of args should have the length at least 3, which " + r"contains an argument name, the argument value and its expected valid types.") + arg_name = arguments[0] + arg_value = arguments[1] + arg_types = arguments[2:] + valid = False + for t in arg_types: + if isinstance(arg_value, t): + valid = True + break + if valid is False: + raise TypeError( + r"The input of {name} has invalid type, valid type: {type}".format(name=arg_name, type=arg_types)) + + @staticmethod + def check_key_exists(args, keys): + """ Check the types of the input arguments + + Args: + args(dict): The input arguments. + keys(list): a list of strings + + Raises: + TypeError: Raise a type error if the input parameter is invalid. + + Returns: + res: A list of the values of the given keys + """ + if not isinstance(args, dict): + raise TypeError(r"The input of args should be dict, error type: {err}".format(err=type(args))) + if not isinstance(keys, list): + raise TypeError(r"The input of keys should be list, error type: {err}".format(err=type(keys))) + res = [] + for key in keys: + k = args.get(key) + if k is None: + raise TypeError( + r"The key '{k_val}' of the input param does not exist".format(k_val=key)) + res.append(k) + return res + + +class Status: + """ + Features: The status of the return result + """ + + def __init__(self, status): + """ Wrap the status of result. 
+ + Args: + status: libds_client_py.Status + """ + self._status = status + + def is_ok(self): + """ Whether the result is ok. + + Returns: + Return True if is ok. + """ + return self._status.is_ok() + + def is_error(self): + """ Whether the result is error. + + Returns: + Return True if is error. + """ + return self._status.is_error() + + def to_string(self): + """ Get the message of status. + + Returns: + Return the message of status. + """ + return self._status.to_string() diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..22d08a0525105525c2ee6aad2619303adeb6f1b3 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/common/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b42c0a516995cd9b22a68f1e141d473aaad7659b --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/common/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""common""" diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/common/utils.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/common/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..9bce43af0f1fa75d73bc493c3c545563da54eb24 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/common/utils.py @@ -0,0 +1,140 @@ +# !/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. You can use this software +# according to the terms and conditions of the Mulan PSL v2. You may obtain a +# copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
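+
+# NOTE: the helpers below rely on a fixed object-id layout: OBJECT_ID_PREFIX
+# ("yr-api-obj-") followed by a 36-character uuid4 string. get_param() uses
+# exactly this prefix-and-length test to tell object references apart from
+# ordinary string arguments before resolving them through the runtime.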
+
+"""util common methods"""
+import binascii
+import sys
+import time
+import uuid
+from typing import List
+
+import yr
+import yr.storage.reference_count
+from fnruntime import log
+from yr.rpc.common_pb2 import Arg
+from yr.object_ref import ObjectRef
+from yr.serialization import Serialization
+from yr.signature import recover_args
+from yr.runtime.runtime import Runtime
+
+OBJECT_ID_PREFIX = "yr-api-obj-"
+LEN_OBJECT_ID_PREFIX = len(OBJECT_ID_PREFIX)
+LEN_OBJECT_ID = LEN_OBJECT_ID_PREFIX + 36
+DEFAULT_TIME_OUT = 5 * 60
+
+METADATA_INDEX = 0
+ARGS_INDEX = 1
+
+
+def generate_random_id():
+    """
+    This is a wrapper generating a random id for user functions and objects.
+
+    Gets a random id string.
+
+    Example: yr-api-obj-433ec3c1-ba11-5a16-ad97-ee4e68db67d5
+
+    Returns:
+        Unique uuid string with the prefix for user functions and objects.
+    """
+    uuid_str = str(uuid.uuid4())
+    return OBJECT_ID_PREFIX + uuid_str
+
+
+def binary_to_hex(value):
+    """
+    bytes to hex
+    """
+    hex_id = binascii.hexlify(value)
+    if sys.version_info >= (3, 0):
+        hex_id = hex_id.decode()
+    return hex_id
+
+
+def hex_to_binary(hex_id):
+    """
+    hex to bytes
+    """
+    return binascii.unhexlify(hex_id)
+
+
+def get_param(param_args, use_msgpack=False):
+    """get param from list"""
+    params = Serialization().multi_deserialize([arg.value for arg in param_args], use_msgpack)
+    new_params = []
+    for param in params:
+        if isinstance(param, str) and len(param) == LEN_OBJECT_ID and param.startswith(OBJECT_ID_PREFIX, 0,
+                                                                                       LEN_OBJECT_ID_PREFIX):
+            new_params.append(param)
+
+    objects = Runtime().rt.get_id(new_params, DEFAULT_TIME_OUT) if len(new_params) > 0 else []
+    res = Serialization().multi_deserialize(objects, use_msgpack)
+
+    ref_index = 0
+    for i in range(len(params)):
+        if isinstance(params[i], str) and len(params[i]) == LEN_OBJECT_ID \
+                and params[i].startswith(OBJECT_ID_PREFIX, 0, LEN_OBJECT_ID_PREFIX):
+            params[i] = res[ref_index]
+            ref_index += 1
+
+    # which means an invocation without place_holder args from the cpp runtime
+    if use_msgpack:
+        return params, {}
+    args, kwargs = recover_args(params)
+    return args, kwargs
+
+
+def get_code(code_id):
+    """get code from the datasystem"""
+    log.get_logger().debug(f"code id: {code_id}")
+    code = yr.get(ObjectRef(object_id=code_id, need_incre=False, need_decre=False))
+    return code
+
+
+def get_time_offset():
+    """
+    get the local time offset from UTC in seconds
+    """
+    is_dst = time.daylight and time.localtime().tm_isdst > 0
+    return - (time.altzone if is_dst else time.timezone)
+
+
+def get_formated_time_offset():
+    """
+    get the formatted time offset, e.g. "+08:00"
+    """
+    result = "+"
+    offset = get_time_offset()
+    if offset < 0:
+        result = "-"
+        offset = -offset
+
+    # minutes are the remainder within the hour, not the remainder of a minute
+    result += "%02d:%02d" % (int(offset // 3600), (offset % 3600) // 60)
+    return result
+
+
+def dependency_objref_process(posix_args: List[Arg], is_increase):
+    """process dependency obj ref count"""
+    ref_ids = []
+    for arg in posix_args:
+        ref_ids += arg.nested_refs
+
+    log.get_logger().debug(
+        f"[Reference Counting] preprocess dependency ref count, object_ids: {ref_ids}, increase: {is_increase} ")
+
+    if is_increase:
+        yr.storage.reference_count.increase_reference_count(ref_ids)
+    else:
+        yr.storage.reference_count.decrease_reference_count(ref_ids)
diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/crypto_tool.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/crypto_tool.py
new file mode 100644
index 0000000000000000000000000000000000000000..61b3e9bab54d0dce3e46289cad3aae8b50b77479
--- /dev/null
+++
b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/crypto_tool.py @@ -0,0 +1,183 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and +# conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" crypto tool decrypts redis pwd""" + +import hashlib +import hmac +import json +import string +from dataclasses import dataclass + +from Cryptodome.Cipher import AES + +from fnruntime import log + +_NONCE_SIZE = 12 +_TAG_SIZE = 16 +_BYTE_SIZE = 32 +_DEFAULT_SLICE_LEN = 1024 +_CIPHER_TEXT_LENGTH = 2 +_ITER_KEY_FACTORY_ITER = 10000 +# resource key +_APPLE_TXT = "apple" +_BOY_TXT = "boy" +_CAT_TXT = "cat" +_DOG_TXT = "dog" +_WDO_JSON = "wdo" + + +@dataclass +class RootKeyFactor: + """ + RootKeyFactor + """ + iter_count: int + component3: str + component3byte: bytes + k1_data: bytes = None + k2_data: bytes = None + mac_data: bytes = None + salt_data: bytes = None + + +@dataclass +class RootKey: + """ + RootKey + """ + root_key: bytes + mac_secret_key: bytes + + +def _resource_checker(resource) -> bool: + if not isinstance(resource, dict): + return False + if _APPLE_TXT not in resource: + return False + if _BOY_TXT not in resource: + return False + if _CAT_TXT not in resource: + return False + if _WDO_JSON not in resource: + return False + resource = None + return True + + +def _load_root_key(resource: dict) -> RootKey: + # data3 used to create root key + data3 = "0B6AA66FADD74F59F019109582E1AAED1EEEEA14CEDFAFCA6DB384D8C3360" \ + "D5E34087FD513B16929A2567E5E184AE2B49A71B9E25E6371C91227D8CE11" \ + "4957D3D383EBC4899DBA7C43F6D80273E57F60B8FC918C2474CA687F1C5DB" \ + "D7A71B1DC0A1EA455C7F2304A4846FD05FFD9FDD96B606546C51241A190EF" \ + "8B70382ABE55" + + f = RootKeyFactor(iter_count=_ITER_KEY_FACTORY_ITER, + component3=data3, + component3byte=str.encode(data3)) + if _resource_checker(resource) is False: + log.get_logger().error("failed to check resource") + raise ValueError("failed to check resource") + + f.k1_data = bytes.fromhex(resource[_APPLE_TXT]) + f.k2_data = bytes.fromhex(resource[_BOY_TXT]) + f.mac_data = bytes.fromhex(resource[_CAT_TXT]) + f.salt_data = bytes.fromhex(resource[_DOG_TXT]) + root_key = _encrypt_pbkdf2_sha256(f) + resource = None + return root_key + + +def _hmac_hash(data: bytes, key: bytes) -> str: + return hmac.new(key, data, 'sha256').hexdigest() + + +def get_worker_key(resource) -> str: + """ + get worker key from resource + """ + root_key = _load_root_key(resource) + if root_key is None: + resource = None + return "" + work_keys = json.loads(resource[_WDO_JSON]) + if work_keys is None: + resource = None + return "" + if "key" in work_keys and "mac" in work_keys: + resource = None + key = work_keys["key"] + mac = work_keys["mac"] + worker_key_decrypt = decrypt(key, root_key.root_key.hex()) + work_key_mac = _hmac_hash(str.encode(worker_key_decrypt), + root_key.mac_secret_key) + if work_key_mac == mac: + return worker_key_decrypt + log.get_logger().error("mac has changed") + resource = None + return "" + + +def decrypt(ciphertext: str, key: str) -> str: + """decrypt cipher text""" + cipher_texts = 
ciphertext.split(':') + if len(cipher_texts) != _CIPHER_TEXT_LENGTH: + raise ValueError("failed to decrypt: wrong cipher length") + salt_str = cipher_texts[0] + encrypt_str = cipher_texts[1] + salt = bytes.fromhex(salt_str) + encrypt = bytes.fromhex(encrypt_str) + secret_bytes = str.encode(key) + if all(c in string.hexdigits for c in key): + secret_bytes = bytes.fromhex(key) + + cipher_bytes = _decrypt_gcm_data(encrypt, secret_bytes, salt) + return cipher_bytes.decode('utf-8') + + +def _decrypt_gcm_data(encrypt: bytes, secret: bytes, salt: bytes) -> bytes: + if len(encrypt) <= _TAG_SIZE: + log.get_logger().error('The length of cipherText is not long enough') + return b'' + tag = encrypt[-_TAG_SIZE:] + aesgcm = AES.new(secret, AES.MODE_GCM, nonce=salt) + try: + decrypt_byte = aesgcm.decrypt_and_verify(encrypt[:-_TAG_SIZE], tag) + except Exception as err: + log.get_logger().error('decryption failed: %s', err) + return b'' + return decrypt_byte + + +def _encrypt_pbkdf2_sha256(f: RootKeyFactor) -> RootKey: + min_length = min(float(len(f.k1_data)), min( + float(len(f.k2_data)), float(len(f.component3byte)))) + + byte_psd = bytes([f.k1_data[i] ^ f.k2_data[i] ^ f.component3byte[i] + for i in range(int(min_length))]) + + root_key_byte = hashlib.pbkdf2_hmac("sha256", byte_psd, f.salt_data, f.iter_count, _BYTE_SIZE) + + slice_length = len(root_key_byte) + if slice_length <= 0 or slice_length > _DEFAULT_SLICE_LEN: + slice_length = _DEFAULT_SLICE_LEN + + mac_secret_key_byte = hashlib.pbkdf2_hmac("sha256", b'\0' * slice_length, f.mac_data, + f.iter_count, + _BYTE_SIZE) + return RootKey(root_key_byte, mac_secret_key_byte) diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..4b2b5b07a41d47e9298e084cd2d041afdd07bb61 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/__init__.py @@ -0,0 +1,14 @@ +# !/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. You can use this software +# according to the terms and conditions of the Mulan PSL v2. You may obtain a +# copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/faas_executor.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/faas_executor.py new file mode 100644 index 0000000000000000000000000000000000000000..5ba4b9c77ab0e3c7feac452179ca9ba097906aa5 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/faas_executor.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""Faas executor, an adapter between posix and faas""" +import json +import traceback + +from fnruntime import log +from fnruntime.handlers.utils import CodeManager + +from functionsdk.sdk import Context +from functionsdk.sdk.logger import FaasLogger + + +_KEY_USER_INIT_ENTRY = "userInitEntry" +_KEY_USER_CALL_ENTRY = "userCallEntry" + +_INDEX_INIT_USER_ENTRIES = 0 +_INDEX_CALL_CONTEXT = 0 +_INDEX_CALL_USER_EVENT = 1 + + +def faasInitHandler(posix_args, code_dir: str, *args, **kwargs) -> None: + """ + raise error if fail + """ + log.get_logger().debug("Faas init handler called.") + user_handlers = json.loads(posix_args[_INDEX_INIT_USER_ENTRIES].value) + log.get_logger().debug("Faas init handler extract user_handler: %s.", user_handlers) + + user_init_hook = user_handlers.get(_KEY_USER_INIT_ENTRY, None) + user_call_hook = user_handlers.get(_KEY_USER_CALL_ENTRY, None) + + # Load and run user init code + if user_init_hook is not None and len(user_init_hook) != 0: + try: + user_init_code = _load_module_and_entry(user_init_hook, code_dir) + except (ValueError, ImportError, RuntimeError) as err: + log.get_logger().error("faas failed to import user code. err: %s, traceback: %s", + err, traceback.format_exc()) + raise RuntimeError(f"faas failed to import user code. err: {err}") from err + else: + CodeManager().register(_KEY_USER_INIT_ENTRY, user_init_code) + try: + user_init_code() + except Exception as err: + log.get_logger().exception("Fail to run user init handler. err: %s. " + "traceback: %s", err, traceback.format_exc()) + raise + + # Load user call code, no run + if user_call_hook is not None and len(user_call_hook) != 0: + user_call_code = _load_module_and_entry(user_call_hook, code_dir) + CodeManager().register(_KEY_USER_CALL_ENTRY, user_call_code) + + +def faasCallHandler(posix_args, *args, **kwargs) -> str: + """faas call handler""" + user_code = CodeManager().load(_KEY_USER_CALL_ENTRY) + if user_code is None: + err_msg = "faas executor find empty user call code" + log.get_logger().error(err_msg) + raise RuntimeError(err_msg) + + event = json.loads(posix_args[_INDEX_CALL_USER_EVENT].value) + context = Context(options={ + "logger": FaasLogger(), "requestId": "", "accessKey": "", "secretKey": "", + "authToken": "", "securityToken": "", "alias": "", "future_id": "", "invoke_id": "" + }) + + try: + result = user_code(event, context) + except Exception as err: + err_msg = f"Fail to run user call handler. err: {err}. traceback: {traceback.format_exc()}" + log.get_logger().exception(err_msg) + raise + + try: + result_str = transform_response_to_str(result) + except Exception as err: + # Can be RecursionError, RuntimeError, UnicodeError, MemoryError, etc... + err_msg = f"Fail to stringify user call result. " \ + f"err: {err}. 
traceback: {traceback.format_exc()}"
+        log.get_logger().exception(err_msg)
+        raise RuntimeError(err_msg) from err
+
+    return result_str
+
+
+# Under the packaged-together scheme, FaaS does not need the following
+# methods yet; they are left empty for now.
+
+
+def faasCheckPointHandler(check_point_id: str = "") -> bytes:
+    """faas checkpoint handler, leave empty"""
+    return bytes()
+
+
+def faasRecoverHandler(state: bytes = None):
+    """faas recover handler, leave empty"""
+
+
+def faasShutDownHandler(grace_period_second: int = -1):
+    """faas shutdown handler, leave empty"""
+
+
+def faasSignalHandler(signal_num: int = -1, payload: bytes = None):
+    """faas signal handler, leave empty"""
+
+
+# Helpers
+def transform_response_to_str(response):
+    """Method transform_response_to_str"""
+    if response is None:
+        result = ""
+    elif isinstance(response, dict) or is_instance_type(response):
+        result = to_json_string(response)
+    else:
+        result = str(response)
+    return result
+
+
+def convert_obj_to_json(obj):
+    """Method convert_obj_to_json"""
+    return obj.__dict__
+
+
+def is_instance_type(obj):
+    """Method is_instance_type"""
+    return hasattr(obj, '__dict__')
+
+
+def to_json_string(obj, indent=None, sort_keys=False):
+    """Method to_json_string"""
+    if isinstance(obj, dict):
+        return json.dumps(obj, indent=indent, sort_keys=sort_keys)
+    return json.dumps(obj, indent=indent, default=convert_obj_to_json, sort_keys=sort_keys)
+
+
+def _load_module_and_entry(user_hook, code_dir):
+    """load module and the entry code, throw RuntimeError if failed."""
+    log.get_logger().debug("Faas load module and entry [%s] from [%s]", user_hook, code_dir)
+    user_hook_splits = user_hook.rsplit(".", maxsplit=1) if isinstance(user_hook, str) else None
+    # user_hook may not be a str, in which case the split above yields None;
+    # treat that the same as a malformed hook instead of raising TypeError
+    if user_hook_splits is None or len(user_hook_splits) != 2:
+        raise RuntimeError("User hook not satisfy requirement, expect: xxx.xxx")
+
+    user_module, user_entry = user_hook_splits[0], user_hook_splits[1]
+    log.get_logger().debug("User module: %s, entry: %s", user_module, user_entry)
+
+    try:
+        user_code = CodeManager().get_code_from_local(code_dir, user_module, user_entry)
+    except ValueError as err:
+        log.get_logger().error(f"Missing user module. {user_hook}")
+        raise RuntimeError(f"Missing user module. {user_hook}") from err
+
+    if user_code is None:
+        log.get_logger().error(f"Missing user entry. {user_hook}")
+        raise RuntimeError(f"Missing user entry. {user_hook}")
+
+    return user_code
diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/utils.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6674d7c7c99b983b285901389fb1d7cab722b31b
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/utils.py
@@ -0,0 +1,139 @@
+#!/usr/bin/env python3
+# coding=UTF-8
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
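+# Illustrative sketch (not part of the original change): CodeManager below
+# caches modules by file path and functions by "module%%entry", so repeated
+# lookups do not re-execute user modules. Assuming a hypothetical file
+# /code/my_func.py defining handler(), a lookup would be:
+#
+#     code = CodeManager().get_code_from_local("/code", "my_func", "handler")
+#     # second lookup hits the cache and returns the same object
+#     assert code is CodeManager().get_code_from_local("/code", "my_func", "handler")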
+ +"""python runtime handlers common utils""" +import importlib.util +from os import path + +from fnruntime import log +from yr.utils import Singleton + +_DEFAULT_ADMIN_FUNC_PATH = "/adminfunc/" + + +@Singleton +class InstanceManager: + """ + InstanceManager stores class instance + """ + + def __init__(self): + self.__instance = None + self.__config = None + self.__class_code = None + + @property + def config(self): + """get config""" + return self.__config + + @config.setter + def config(self, value): + """save config""" + self.__config = value + + @property + def class_code(self): + """get class code""" + return self.__class_code + + @class_code.setter + def class_code(self, code): + """set class code""" + self.__class_code = code + + def init(self, instance=None): + """" + save instance + """ + self.__instance = instance + + def instance(self): + """" + get instance + """ + return self.__instance + + +@Singleton +class CodeManager: + """ + CodeManager provides code loading service + it will try to cache the code and load from the cache as possible + so it will only load + * one module at most once (identified by the module file path) + * one function at most once (identified by the module and function name) + """ + + def __init__(self): + self.code_map = {} + # module cache is a dict with { code_file_path : module } + self.module_cache = {} + + def register(self, function_key, function_obj): + """ + register function code to code manager + """ + self.code_map[function_key] = function_obj + + def load(self, function_key): + """ + load function code + """ + return self.code_map.get(function_key) + + def get_code_from_local(self, code_dir, module_name, entry_name): + """get code from local, + throw error if module not exists + return None if module exists but entry not exists + """ + log.get_logger().debug("get python code [%s] from local file [%s/%s.py]", entry_name, code_dir, module_name) + code_key = module_name + "%%" + entry_name + code = self.load(code_key) + if code is not None: + return code + + module = self.load_module(code_dir, module_name) + code = getattr(module, entry_name, None) + if code is not None: + self.register(code_key, code) + return code + + def load_module(self, code_dir, module_name): + """load module using cache""" + file_path = path.join(code_dir, module_name + ".py") + + if not path.exists(file_path): + admin_path = path.join(_DEFAULT_ADMIN_FUNC_PATH, module_name + ".py") + if not path.exists(admin_path): + raise ValueError("entry file does not exist: {}".format(file_path)) + file_path = admin_path + + module = self.module_cache.get(file_path, None) + if module is not None: + log.get_logger().debug("successfully load module [%s] from cache", file_path) + return module + + log.get_logger().debug("loading module [%s] from file system", file_path) + module_spec = importlib.util.spec_from_file_location(module_name, file_path) + try: + module = importlib.util.module_from_spec(module_spec) + except ImportError as exp: + log.get_logger().warning("failed to import user python module, %s", str(exp)) + raise + + module_spec.loader.exec_module(module) + self.module_cache[file_path] = module + return module diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/yrlib_handler.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/yrlib_handler.py new file mode 100644 index 0000000000000000000000000000000000000000..f7bf241b0c3eae4d93861bf92406745c3a7b759b --- /dev/null +++ 
b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/handlers/yrlib_handler.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""default handler for yr api""" +import traceback +from typing import List + +import yr.utils +import yr.runtime.runtime +import yr.storage.reference_count +from fnruntime import log +from fnruntime.common import utils +from fnruntime.common.utils import dependency_objref_process +from fnruntime.handlers.utils import InstanceManager, CodeManager +from yr import apis +from yr import config +from yr.config import ConfigManager +from yr.exception import YRInvokeError +from yr.runtime.task_spec import InvokeType +from yr.runtime.task_spec import TaskMetadata +from yr.serialization import Serialization +from yr.rpc.common_pb2 import Arg + +CUSTOM_SIGNAL_EXIT = 64 + + +def _get_serialized_code(code_id): + code = CodeManager().load(code_id) + if code is not None: + return code + code = utils.get_code(code_id) + if code is not None: + CodeManager().register(code_id, code) + return code + + +def _instance_create_function(posix_args: List[Arg], metadata: TaskMetadata): + if metadata.invoke_type == InvokeType.INVALID and metadata.code_id == yr.utils.NORMAL_FUNCTION: + return False + if metadata.invoke_type == InvokeType.CREATE_NORMAL_FUNCTION_INSTANCE: + return False + + log.get_logger().debug( + f"start to create instance: {metadata.object_descriptor.module_name}.{metadata.object_descriptor.class_name}") + class_code, use_msgpack = _parse_code(metadata, is_class=True) + if class_code is None: + raise RuntimeError("Not found code") + InstanceManager().class_code = class_code + + args, kwargs = utils.get_param(posix_args[utils.ARGS_INDEX:], use_msgpack) + dependency_objref_process(posix_args=posix_args, is_increase=False) + instance = class_code(*args, **kwargs) + InstanceManager().init(instance) + return use_msgpack + + +def _parse_code(metadata: TaskMetadata, is_class=False): + is_cross_language = False + if metadata.object_descriptor.src_language != yr.utils.LANGUAGE_PYTHON: + is_cross_language = True + + if is_cross_language: + if is_class: + code_name = metadata.object_descriptor.class_name + else: + code_name = metadata.object_descriptor.function_name + + src_language = metadata.object_descriptor.src_language + if src_language == yr.utils.LANGUAGE_CPP: + code = CodeManager().get_code_from_local(ConfigManager().code_dir, + metadata.object_descriptor.module_name, + code_name) + if is_class: + local_code = code.get_original_cls() + else: + local_code = code.get_original_func() + return local_code, True + raise RuntimeError(f'invalid srcLanguage value, expect srcLanguage= {yr.utils.LANGUAGE_CPP}, ' + f'actual {src_language}') + + code = _get_serialized_code(metadata.code_id) + return code, False + + +def _invoke_function(posix_args: List[Arg]): + if len(posix_args) == 0: + return None, False + + metadata = TaskMetadata.parse(posix_args[utils.METADATA_INDEX].value) + log.get_logger().debug( + f"start to create/invoke: 
{metadata.object_descriptor.module_name}.{metadata.object_descriptor.function_name}") + + if metadata.invoke_type == InvokeType.INVALID and metadata.code_id == yr.utils.MEMBER_FUNCTION: + return _instance_function(posix_args, metadata) + if metadata.invoke_type == InvokeType.INVOKE_MEMBER_FUNCTION: + return _instance_function(posix_args, metadata) + if metadata.invoke_type == InvokeType.GET_NAMED_INSTANCE_METADATA: + return _get_instance_class_code() + return _normal_function(posix_args, metadata) + + +def _normal_function(posix_args: List[Arg], metadata: TaskMetadata): + code, use_msgpack = _parse_code(metadata) + if code is None: + raise RuntimeError("Not found code") + + args, kwargs = utils.get_param(posix_args[utils.ARGS_INDEX:], use_msgpack) + dependency_objref_process(posix_args=posix_args, is_increase=False) + result = code(*args, **kwargs) + return result, use_msgpack + + +def _instance_function(posix_args: List[Arg], metadata: TaskMetadata): + instance_function_name = metadata.object_descriptor.function_name + use_msgpack = metadata.object_descriptor.src_language == yr.utils.LANGUAGE_CPP + args, kwargs = utils.get_param(posix_args[utils.ARGS_INDEX:], use_msgpack) + + instance = InstanceManager().instance() + if instance is None: + raise RuntimeError("No init class instance") + + dependency_objref_process(posix_args=posix_args, is_increase=False) + result = getattr(instance, instance_function_name)(*args, **kwargs) + return result, use_msgpack + + +def _get_instance_class_code(): + class_code = InstanceManager().class_code + return class_code, False + + +def init(posix_args: List[Arg], code_dir: str, cross_param=None): + """init call request""" + result = None + if len(posix_args) == 0: + return result + + metadata = TaskMetadata.parse(posix_args[utils.METADATA_INDEX].value) + if config.ConfigManager().is_init is not True: + cfg = config.Config(code_dir=code_dir, + on_cloud=True, + in_cluster=True, + job_id=metadata.job_id, + log_level=metadata.log_level, + recycle_time=metadata.recycle_time, + function_id=metadata.function_id_python, + cpp_function_id=metadata.function_id_cpp) + InstanceManager().config = cfg + apis.init(cfg) + + use_msgpack = False + try: + use_msgpack = _instance_create_function(posix_args, metadata) + except Exception as err: + if isinstance(err, YRInvokeError): + result = YRInvokeError(err.cause, traceback.format_exc()) + else: + result = YRInvokeError(err, traceback.format_exc()) + log.get_logger().error(f"failed to init, err: {repr(err)} {traceback.format_exc()}") + + if cross_param is not None: + cross_param.use_msgpack = use_msgpack + return result + + +def call(posix_args: List[Arg], cross_param=None): + """call request""" + if len(posix_args) == 0: + log.get_logger().debug("yrlib call, get 0 args from request.") + return None + use_msgpack = False + try: + result, use_msgpack = _invoke_function(posix_args) + except Exception as err: + if isinstance(err, YRInvokeError): + result = YRInvokeError(err.cause, traceback.format_exc()) + else: + result = YRInvokeError(err, traceback.format_exc()) + log.get_logger().error(f"failed to call, err: {repr(err)} {traceback.format_exc()}") + + if cross_param is not None: + cross_param.use_msgpack = use_msgpack + return result + + +def checkpoint(checkpoint_id: str) -> bytes: + """check point""" + log.get_logger().info("start to checkpoint") + instance = InstanceManager().instance() + try: + result = Serialization().serialize((instance, InstanceManager().config), "", False) + except TypeError as e: + 
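+        # serialize() may reject an instance holding non-picklable members
+        # (locks, sockets, open files); in that case the checkpoint is
+        # abandoned and an empty state is returned to the caller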
log.get_logger().exception(e) + return bytes() + return result.data + + +def recover(state: bytes): + """recover state""" + log.get_logger().info('start to recover state') + if len(state) == 0: + log.get_logger().error("no instance when try to recover") + raise RuntimeError("No recover state") + instance, cfg = Serialization().deserialize(state) + InstanceManager().init(instance) + apis.init(cfg) + + +def shutdown(grace_period_second: int): + """shutdown""" + apis.finalize() + + +def signal(signal_num: int): + """ + signal + yr-api custom signal handler + """ + if signal_num == CUSTOM_SIGNAL_EXIT: + apis.finalize() + apis.exit() diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/log.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/log.py new file mode 100644 index 0000000000000000000000000000000000000000..0bdd78065ba4f6a35b63f9a55aeeea77227a6f8f --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/log.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +CustomFilter class: custom filter +""" +import json +import logging +import logging.config +import os +import socket +import stat +import sys +import time + +HOST_NAME = socket.gethostname() +POD_NAME = {'podname': HOST_NAME} + + +class CustomFilter(logging.Filterer): + """ + CustomFilter custom filter + """ + + def filter(self, record): + if len(record.msg) > MAX_ROW_SIZE: + record.msg = record.msg[:MAX_ROW_SIZE] + return True + + +# MAX_ROW_SIZE max row size of a log +MAX_ROW_SIZE = 1024 +# python logging config +LOG_FORMAT = "[%(asctime)s %(levelname)s %(filename)s:%(lineno)d]" \ + " [%(podname)s %(thread)d] %(message)s" + +# python log handler +RUNTIME_LOG = None + + +def setup_logging(runtime_id): + """ + Set chmod of log file + """ + # logging will collect processes information by default. 
Set these values + # to False can improve performance + logging.logProcesses = False + logging.logMultiprocessing = False + path = "/home/snuser/config/python-runtime-log.json" + if os.getenv("YR_BARE_MENTAL") is not None: + path = sys.path[0] + '/../config/python-runtime-log.json' + with open(path, "r") as config_file: + config = json.load(config_file) + try: + log_file_name = config["handlers"]["file"]["filename"] + except BaseException as exp: + log_file_name = "/home/snuser/log" + print("log file name is invalid", exp) + if runtime_id != "": + os.path.join(log_file_name, runtime_id) + log_file_name = os.path.join(log_file_name, runtime_id) + os.makedirs(log_file_name, mode=0o755, exist_ok=True) + os.environ["DATASYSTEM_CLIENT_LOG_DIR"] = log_file_name + config["handlers"]["file"]["filename"] = os.path.join(log_file_name, "python-runtime.log") + logging.config.dictConfig(config) + os.chmod(config["handlers"]["file"]["filename"], stat.S_IWUSR | stat.S_IRUSR | stat.S_IRGRP) + logging.Formatter.default_msec_format = "%s.%03d" + logging.Formatter.converter = time.localtime + + +def init_log(runtime_id="", logger=None): + """init log handler""" + global RUNTIME_LOG + if logger is not None: + RUNTIME_LOG = logger + else: + setup_logging(runtime_id) + RUNTIME_LOG = logging.getLogger("FileLogger") + RUNTIME_LOG.addFilter(CustomFilter()) + RUNTIME_LOG = logging.LoggerAdapter(RUNTIME_LOG, POD_NAME) + + +def get_logger(): + """ + get_logger return runtime logger with basic config + return: runtime_log + """ + global RUNTIME_LOG + if RUNTIME_LOG is None: + init_log() + return RUNTIME_LOG diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/pythonRT.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/pythonRT.py new file mode 100644 index 0000000000000000000000000000000000000000..f46a94b7ddc108452368e87ad253e7bbc3f4d4d8 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/pythonRT.py @@ -0,0 +1,93 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
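+# Illustrative sketch (not part of the original change): a minimal concrete
+# logger built on the ABCs defined below might look like this:
+#
+#     class PrintLogger(Logger):
+#         def log(self, loglevel: LogLevel, message: str):
+#             print(f"[{loglevel.value}] {message}")
+#
+#     PrintLogger().log(LogLevel.INFO, "runtime started")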
+ +"""log and context""" + +from abc import ABCMeta +from abc import abstractmethod +from enum import Enum +from enum import unique + + +@unique +class LogLevel(Enum): + """ + LogLevel defines for log level + """ + INFO = 'INFO' + WARN = 'WARN' + DEBUG = 'DEBUG' + ERROR = 'ERROR' + + +class Logger(metaclass=ABCMeta): + """ + Class log to write log + """ + @abstractmethod + def log(self, loglevel: LogLevel, message: str): + """ + write log + """ + + +class Context(metaclass=ABCMeta): + """ + Runtime context + """ + + def __init__(self): + """ + init + """ + + def get_env(self, key: str): + """ + Get environment + """ + + def get_state(self): + """ + Get state + """ + + def get_invoke_id(self): + """ + Get invoke id + """ + + def http_response(self, + body=None, + headers=None, + content_type="text/plain", + status_code=200): + """ + Get http response body + """ + + def get_state_id(self): + """ + Get state id + """ + + def get_invoke_property(self): + """ + Get invoke priority + """ + + def get_trace_id(self): + """ + Get invoke trace id + """ diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..281a6697f1b5806cb2acf673fa8f5753527ca0de --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/__init__.py @@ -0,0 +1,22 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""rpc sdk""" + +from fnruntime.rpc.sdk import create +from fnruntime.rpc.sdk import exit +from fnruntime.rpc.sdk import invoke +from fnruntime.rpc.sdk import load_state +from fnruntime.rpc.sdk import save_state diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/constants.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/constants.py new file mode 100644 index 0000000000000000000000000000000000000000..baff4f08384a66a7ca9683d34d4cf19b2833fb8c --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/constants.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +"""constants""" + +DEFAULT_REQUEST_TIMEOUT = 900 diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/function_manager.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/function_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..3e8521774ae264c85c259529ffe121af03ed6d28 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/function_manager.py @@ -0,0 +1,335 @@ +# !/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. You can use this software +# according to the terms and conditions of the Mulan PSL v2. You may obtain a +# copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""Function manager""" +import sys +import traceback +from dataclasses import dataclass +from types import ModuleType + +import yr.objref_counting_manager +import yr.storage.reference_count +from fnruntime import log +from fnruntime.common.utils import binary_to_hex, dependency_objref_process +from fnruntime.handlers import utils +from fnruntime.rpc.sdk import post_shutdown +from yr.exception import YRError, YRInvokeError +from yr.rpc import common_pb2 as common +from yr.rpc import core_service_pb2 as core +from yr.rpc import runtime_service_pb2 as rt +from yr.runtime.runtime import Runtime +from yr.serialization import Serialization +from yr.object_ref import ObjectRef + + +def _set_layer_path(layer_paths): + """ + Set layer code path + """ + for layer_path in layer_paths: + log.get_logger().debug("add layer path %s to system", layer_path) + sys.path.insert(1, layer_path) + + +@dataclass +class CrossLanguageParam: + """ + CrossLanguageParam + """ + use_msgpack: bool + + +@dataclass +class HookFunction: + """ + Hook Function, including the module and the hook name + """ + module: ModuleType = None + entry: str = None + + +_INIT = "INIT_HANDLER" +_CALL = "CALL_HANDLER" +_CHECKPOINT = "CHECKPOINT_HANDLER" +_RECOVER = "RECOVER_HANDLER" +_SHUTDOWN = "SHUTDOWN_HANDLER" +_SIGNAL = "SIGNAL_HANDLER" + + +def get_hooks_key(): + """get all hooks key""" + return [_INIT, _CALL, _CHECKPOINT, _RECOVER, _SHUTDOWN, _SIGNAL] + + +def _process_result(request: rt.CallRequest, use_msgpack, result): + """process call result""" + object_id_list = request.returnObjectIDs + result_list = result + if len(object_id_list) == 0: + return common.ERR_INNER_SYSTEM_ERROR, f"object list is null, requestID: {request.requestID}" + + if len(object_id_list) == 1 or isinstance(result, YRInvokeError): + result_list = [result] * len(object_id_list) + + log.get_logger().debug(f"[Reference Counting] put ret_val, object_ids: {object_id_list}") + for (object_id, res) in zip(object_id_list, result_list): + ref = ObjectRef(object_id=object_id, need_incre=False, need_decre=False) + try: + Runtime().rt.put(ref, res, use_msgpack) + except (RuntimeError, TypeError, ValueError) as e: + return common.ERR_INNER_SYSTEM_ERROR, f"put object failed, " \ + f"id: {object_id}, requestID:{request.requestID}, err: {e}" + + return common.ERR_NONE, "" + + +class FunctionManager: + """Function manager""" + + def __init__(self, instance_id, init_hooks=True): + self._logic_instance_id = instance_id + self._custom_handler = None + 
self._module = None + self._code_dir = None + self._init_err = None + self.function_kind = None + self._init_hooks = init_hooks + + self._hooks = {} + + @staticmethod + def process_obj_ref(request: rt.CallRequest): + """preprocess for call request, mainly handle the obj ref counting""" + code = common.ERR_NONE + message = None + + log.get_logger().debug("[Reference Counting] Increase obj reference counting in call preprocess.") + try: + dependency_objref_process(request.args, is_increase=True) + except RuntimeError as err: + code = common.ERR_INNER_SYSTEM_ERROR + message = f"fail to process (increase) object reference counting. " \ + f"requestID:{request.requestID}, err: {err}, Traceback: {traceback.format_exc()}" + log.get_logger().exception(message) + + return rt.CallResponse(code=code, message=message) + + def init(self, python_path, code_dir="", layer_paths=None, hooks=None): + """ + init function manager + """ + sys.path.insert(1, python_path) + sys.path.insert(1, code_dir) + self._code_dir = code_dir + + if layer_paths is not None: + for i in layer_paths: + sys.path.insert(1, i) + + if self._init_hooks: + self._load_hooks(hooks) + + def call(self, request: rt.CallRequest): + """call request""" + if Runtime().rt is not None: + Runtime().rt.set_trace_id(request.traceID) + + cross_param = CrossLanguageParam(use_msgpack=False) + code, message, result = self._invoke_call_request(request, cross_param) + + if not request.isCreate and code == common.ERR_NONE: + if self.function_kind == 'yrlib': + # yrlib requires to put the result to datasystem + code, message = _process_result(request, cross_param.use_msgpack, result) + else: + # faas promise the result is a str, has been converted and checked + message = result + + # If result is a YRError instance, should also be treated as an error + if code == common.ERR_NONE and isinstance(result, YRError): + code, message = _package_yr_error(result=result, cross_param=cross_param) + + return core.CallResult(code=code, message=message, instanceID=request.senderID, + requestID=request.requestID) + + def checkpoint(self, request: rt.CheckpointRequest) -> rt.CheckpointResponse: + """checkpoint request""" + log.get_logger().info(f"begin checkpoint, id: {request.checkpointID}") + code, message, result = self._invoke_hook(_CHECKPOINT, + request.checkpointID) + return rt.CheckpointResponse(code=code, message=message, state=result) + + def recover(self, request: rt.RecoverRequest) -> rt.RecoverResponse: + """recover request""" + log.get_logger().info("begin to recover") + code, message, _ = self._invoke_hook(_RECOVER, + request.state) + return rt.RecoverResponse(code=code, message=message) + + def shutdown(self, request) -> rt.ShutdownResponse: + """shutdown request""" + log.get_logger().info(f"begin shutdown, exit period second: {request.gracePeriodSecond}") + yr.objref_counting_manager.ObjRefCountingManager().wait_invoke_ack() + code, message, _ = self._invoke_hook(_SHUTDOWN, + request.gracePeriodSecond) + post_shutdown() + return rt.ShutdownResponse(code=code, message=message) + + def signal(self, request: rt.SignalRequest) -> rt.SignalResponse: + """kill request""" + log.get_logger().info(f"receive the signal: {request.signal}") + code, message, _ = self._invoke_hook(_SIGNAL, + request.signal) + return rt.SignalResponse(code=code, message=message) + + def set_init_exception(self, err, trace): + """set init exception""" + self._init_err = YRInvokeError(err, trace) + + def _load_hooks(self, hooks): + """ + Will import and store user defined hooks to 
self._hooks + If some hooks are not defined in environment, + but custom_handler is defined and successfully imported, + will try to use the method in self._module. + Throw error if miss any hooks. + """ + + def _log_and_throw(_hook_name): + error_info = f'failed to import handler \"{_hook_name}\"' + log.get_logger().error(error_info) + raise RuntimeError(error_info) + + all_hooks = [_INIT, _CALL, _CHECKPOINT, _RECOVER, _SHUTDOWN, _SIGNAL] + for hook_name in all_hooks: + if hook_name in hooks: + user_hook_module, user_hook_name = self._import_hook(hooks[hook_name], self._code_dir) + if user_hook_module is None: + _log_and_throw(hook_name) + self._hooks[hook_name] = HookFunction(user_hook_module, user_hook_name) + elif hook_name in [_INIT, _CALL]: + _log_and_throw(hook_name) + + def _invoke_call_request(self, request, cross_param): + """invoke user's hook, init or call""" + if self._init_err is not None: + code = common.ERR_USER_CODE_LOAD # Not common.ERR_NONE + message = str(self._init_err) + result = None + return code, message, result + + if request.isCreate: + code, message, user_result = self._invoke_hook(_INIT, + request.args, + cross_param=cross_param, + code_dir=self._code_dir) + else: + code, message, user_result = self._invoke_hook(_CALL, + request.args, + cross_param=cross_param) + try: + result = _check_return_list(request, user_result) + except (TypeError, ValueError) as e: + result = YRInvokeError(e, traceback.format_exc()) + return code, message, result + + def _import_hook(self, hook_handler, code_dir): + hook_args = hook_handler.split(".") + log.get_logger().debug(f"import handler {hook_handler}") + if len(hook_args) != 2: + raise ValueError(f"{hook_handler} doesn't satisfy pattern 'module_name.entry_name'") + module_name = hook_args[0] + function_name = hook_args[1] + if module_name == "yrlib_handler": + self.function_kind = "yrlib" + + module = utils.CodeManager().load_module(code_dir, module_name) + if hasattr(module, function_name) is False: + error_info = f"failed to found {function_name} attribute in {module}" + log.get_logger().error(error_info) + raise RuntimeError(error_info) + return module, function_name + + def _invoke_hook(self, hook_name, *args, **kwargs) -> (int, str, object): + """ + Invoke hooks, hook signature expectation: + init(*args, **kwargs) -> object + call(*args, **kwargs) -> object + checkpoint(checkpointID: str) -> bytes + recover(state: bytes) -> None + shutdown(gracePeriodSecond: int) -> None + signal(signal: int, payload: bytes) -> None + either return single object/bytes or None, the return value will be named "result" + invoke hook will return (code, message, result) + """ + # if invoke failed, message should be encoded by pickle + code = common.ERR_NONE + message = None + result = None + + if hook_name not in self._hooks and \ + hook_name not in [_INIT, _CALL]: + # didn't specify some optional hook, do nothing, just return + return code, message, result + + hook_function = self._hooks.get(hook_name, HookFunction()) + module, entry = hook_function.module, hook_function.entry + if module is None or entry is None: + code = common.ERR_USER_CODE_LOAD + message = f"failed to invoke hook function '{hook_name}', module: {module}, entry: {entry}" + log.get_logger().exception(message) + return code, message, result + + try: + call = getattr(module, entry) + except AttributeError: + code = common.ERR_USER_CODE_LOAD + message = f"failed to found {entry} attribute in {module}" + log.get_logger().exception(message) + return code, message, result + + try: 
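+            # user hooks may raise anything; the handler below maps it to
+            # ERR_USER_FUNCTION_EXCEPTION instead of unwinding the runtime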
+ result = call(*args, **kwargs) + except Exception as err: + code = common.ERR_USER_FUNCTION_EXCEPTION + message = f"failed to invoke {entry} in {module},err: {err}, Traceback: {traceback.format_exc()}" + log.get_logger().exception(message) + + return code, message, result + + +def _check_return_list(request, result): + if isinstance(result, Exception): + return result + if len(request.returnObjectIDs) > 1: + if not hasattr(result, "__len__"): + raise TypeError(f"cannot unpack non-iterable {type(result)} object") + if hasattr(result, "__len__") and len(result) != len(request.returnObjectIDs): + raise ValueError(f"not enough values to unpack (expected {len(request.returnObjectIDs)}, " + f"got {len(result)})") + return result + + +def _package_yr_error(result, cross_param): + code = common.ERR_USER_FUNCTION_EXCEPTION + message = None + if not cross_param.use_msgpack: + code = common.ERR_USER_FUNCTION_EXCEPTION + message = binary_to_hex(Serialization().serialize(result, "", False).data) + + if cross_param.use_msgpack: + code = common.ERR_USER_FUNCTION_EXCEPTION + message = str(result) + return code, message diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/inorder_manager.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/inorder_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..f38542438815b95ce260cf547be3de926c268cae --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/inorder_manager.py @@ -0,0 +1,85 @@ +# !/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. You can use this software +# according to the terms and conditions of the Mulan PSL v2. You may obtain a +# copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
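+# Illustrative sketch (not part of the original change): how RequestQueue
+# below parks out-of-order arrivals until their turn, assuming serial
+# numbers start at 0.
+#
+#     q = RequestQueue()
+#     q.store_request(1, "second")       # arrives early, parked in the heap
+#     assert q.next_serial_num() == 0    # still waiting for request 0
+#     q.store_request(0, "first")
+#     assert q.head_serial_num() == 0
+#     assert q.pop_head_request() == "first"
+#     q.set_next_serial_num()            # now expecting serial number 1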
+ +"""Inorder manager""" +import heapq + + +class RequestQueue: + """ + RequestQueue is used to keep the sequence of call requests + """ + def __init__(self): + self._next_serial_num = 0 + self._queue = [] + + def next_serial_num(self): + """ + Get serial number indicating the next request to be processed + """ + return self._next_serial_num + + def set_next_serial_num(self): + """ + Set next serial number, auto increasing 1 + """ + self._next_serial_num += 1 + + def head_serial_num(self): + """ + Get the serial number of the head request in the queue + """ + return self._queue[0][0] + + def pop_head_request(self): + """ + Pop the head request in the queue + """ + return heapq.heappop(self._queue)[-1] + + def store_request(self, serial_num, request): + """ + Push + """ + heapq.heappush(self._queue, (serial_num, request)) + + def empty(self): + """ + Check if the queue is empty + """ + return not self._queue + + +class InOrderManager: + """ + InOrderManager is used to store RequestQueue + """ + def __init__(self): + self._inorder_manager = {} + + def get_instance_request_queue(self, key): + """ + Get RequestQueue by the key of runtime id + + :param key: runtime key + :return: RequestQueue instance + """ + return self._inorder_manager.setdefault(key, RequestQueue()) + + def clear(self): + """ + Clear the inorder manager + """ + self._inorder_manager.clear() diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/request_queue.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/request_queue.py new file mode 100644 index 0000000000000000000000000000000000000000..de7c3d499b35d8c0e0c1b1f61b0bea1576522e94 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/request_queue.py @@ -0,0 +1,369 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms +# and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT +# WARRANTIES OF ANY KIND, EITHER EXPRESS OR IMPLIED, +# INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +"""request queue""" +import ctypes +import queue +import threading +import traceback +from collections.abc import Callable +from concurrent.futures import ThreadPoolExecutor +from dataclasses import dataclass + +from fnruntime.common.utils import dependency_objref_process +from fnruntime import log +from fnruntime.rpc.sdk import RPClient, get_call_map +from fnruntime.rpc.inorder_manager import InOrderManager +from yr import utils +from yr.rpc import common_pb2 as common, runtime_rpc_pb2 as rt_rpc, runtime_service_pb2 as rt +from yr.rpc import core_service_pb2 as core +from yr.runtime.task_spec import Signal + +# The request id length, split by "-" +_LEN_IN_ORDER_REQUEST_ID = 10 + +_MAX_RETRY_TIME = 3 +_REQ_TABLE = { + "callReq": "call", + "checkpointReq": "checkpoint", + "recoverReq": "recover", + "shutdownReq": "shutdown", + "signalReq": "signal" +} + + +@dataclass +class ReqData: + """ReqData""" + request: rt_rpc.StreamingMessage + callback: Callable + + +def _send_call_ack(response: core.CallResult): + """send call result req """ + rsp = None + try: + rsp = RPClient().send( + rt_rpc.StreamingMessage(callResultReq=response, messageID=utils.generate_task_id())) + except TimeoutError as e: + log.get_logger().warning(f"failed to get callResultAck: {e}") + if rsp.HasField("callResultAck") and rsp.callResultAck.code == 0: + log.get_logger().debug(f"succeed to callResultAck: {rsp}") + else: + log.get_logger().warning(f"failed to get callResultAck: {rsp}") + + +def _get_concurrency(create_options): + """get concurrency """ + pool_size = 100 + if 'Concurrency' in create_options: + concurrency = int(create_options['Concurrency']) + if 1 <= concurrency <= 1000: + pool_size = concurrency + return pool_size + + +class RuntimeThreadPoolExecutor(ThreadPoolExecutor): + """ + Runtime thread pool + """ + + def __init__(self, max_workers=None, thread_name_prefix=""): + super(RuntimeThreadPoolExecutor, self).__init__(max_workers=max_workers, thread_name_prefix=thread_name_prefix) + self._task_thread = {} + # cancel may be faster than call, should record it + self._canceled = [] + + def cancel_task(self, request_id: str) -> str: + """ + Cancel task + """ + thread = self._task_thread.pop(request_id, None) + if thread is not None: + log.get_logger().debug("found thread of requestID: %s", request_id) + # discard threads from ThreadPoolExecutor + self._threads.discard(thread) + tid = ctypes.c_long(thread.ident) + res = ctypes.pythonapi.PyThreadState_SetAsyncExc( + tid, ctypes.py_object(KeyboardInterrupt)) + if res == 0: + msg = f"failed to cancel: invalid tid: {tid}, requestID: {request_id}" + log.get_logger().warning(msg) + elif res != 1: + ctypes.pythonapi.PyThreadState_SetAsyncExc(tid, None) + msg = f"failed to cancel: PyThreadState_SetAsyncExc failed, requestID: {request_id}" + log.get_logger().warning(msg) + else: + msg = f"success to cancel, thread: {thread}, requestID: {request_id}" + log.get_logger().info(msg) + + sender_id = get_call_map().pop(request_id, None) + if sender_id is not None: + _send_call_ack(core.CallResult( + code=common.ERR_NONE, + message=msg, + instanceID=sender_id, + requestID=request_id)) + else: + log.get_logger().error( + "failed to send call result: can't find sender_id, thread: {thread}, requestID: {request_id}", + thread, request_id) + else: + msg = f"failed to cancel,thread has been finished or cancel faster than invoke, requestID: {request_id}" + self._canceled.append(request_id) + log.get_logger().warning(msg) + return msg + + def wrapper(self, fn: Callable, request_id: 
+        """
+        Wrapper process a task
+        """
+        call_map = get_call_map()
+        thread = threading.current_thread()
+        call_map[request_id] = sender_id
+        self._task_thread[request_id] = thread
+        try:
+            fn()
+        except KeyboardInterrupt:
+            # When we cancel a thread, a KeyboardInterrupt is raised in it.
+            # This may leave a lock (such as a logging handler lock) held
+            # forever, so re-create the handler locks to avoid a deadlock.
+            for h in log.get_logger().logger.handlers:
+                h.createLock()
+            log.get_logger().info(f"task has been canceled: {request_id}")
+        finally:
+            call_map.pop(request_id, None)
+            self._task_thread.pop(request_id, None)
+
+    def submit(self, *args, **kwargs):
+        """
+        submit a task to thread pool
+        """
+        fn = kwargs.pop("fn")
+        request_id = kwargs.pop("request_id")
+        sender_id = kwargs.pop("sender_id")
+        # cancel may be faster than call, no need to execute, send ack directly
+        if request_id in self._canceled:
+            _send_call_ack(core.CallResult(
+                code=common.ERR_NONE,
+                message="success to cancel",
+                instanceID=sender_id,
+                requestID=request_id))
+            self._canceled.remove(request_id)
+            raise RuntimeError("cancel faster than call, call task cancel directly")
+
+        return super(RuntimeThreadPoolExecutor, self).submit(
+            self.wrapper, fn, request_id, sender_id)
+
+
+class RequestManager(threading.Thread):
+    """
+    Runtime Request Queue, receive request from rpc server
+    """
+
+    def __init__(self, mgr):
+        """init"""
+        super().__init__(name="RequestManager")
+        self._queue = queue.Queue()
+        self._mgr = mgr
+        self._inorder_manager = InOrderManager()
+        self._lock = threading.Lock()
+        self.invoke_pool = None
+        self._running = True
+        self.daemon = True
+        self.start()
+
+    @staticmethod
+    def check_in_order(req: rt.CallRequest):
+        """check if the request needs to be processed with in order queue"""
+        if req.isCreate:
+            return False
+        request_id_parts = req.requestID.split("-")
+        if len(request_id_parts) == _LEN_IN_ORDER_REQUEST_ID:
+            return True
+        return False
+
+    def cancel_thread(self, request: rt.SignalRequest) -> rt.SignalResponse:
+        """
+        cancel_thread
+        """
+        if self.invoke_pool is None:
+            return rt.SignalResponse(code=common.ERR_INSTANCE_NOT_FOUND,
+                                     message="instance should be created before canceling")
+
+        if len(request.payload) == 0:
+            log.get_logger().warning(f"no request_id from payload: {request.payload}")
+            return rt.SignalResponse(code=common.ERR_NONE,
+                                     message=f"no request_id from payload: {request.payload}")
+        request_id = request.payload.decode('utf-8')
+        log.get_logger().debug(f"start to cancel task, requestID: {request_id}")
+        message = self.invoke_pool.cancel_task(request_id)
+        log.get_logger().debug(f"end to cancel task, requestID: {request_id}, message: {message}")
+        return rt.SignalResponse(code=common.ERR_NONE, message=message)
+
+    def init_invoke_pool(self, concurrency=100):
+        """init invoke pool"""
+        if self.invoke_pool is None:
+            self.invoke_pool = RuntimeThreadPoolExecutor(max_workers=concurrency,
+                                                         thread_name_prefix="invoke_pool")
+
+    def run(self) -> None:
+        """main loop"""
+        while self._running:
+            req_data = self._queue.get()
+            if req_data is None:
+                break
+            try:
+                self._process_request(req_data)
+            except Exception as e:
+                log.get_logger().warning(f"{e} {traceback.format_exc()}")
+        log.get_logger().warning("RequestManager exit")
+
+    def init_call_env(self, req: rt.CallRequest, req_id: str,
+                      callback: Callable) -> bool:
+        """init call environment, return False if the call cannot proceed"""
+        if req.isCreate:
+            pool_size = _get_concurrency(req.createOptions)
+            log.get_logger().debug(f"pool_size {pool_size}")
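+            # the concurrency option is honored only within [1, 1000];
+            # otherwise _get_concurrency falls back to the default of 100,
+            # and the pool is created lazily on the first create request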
+            self.init_invoke_pool(concurrency=pool_size)
+        else:
+            if self.invoke_pool is None:
+                callback(rt_rpc.StreamingMessage(
+                    callRsp=rt.CallResponse(code=common.ERR_INSTANCE_NOT_FOUND,
                                            message="instance should be "
                                                    "created before invoking"),
+                    messageID=req_id))
+                # the caller must not continue: there is no pool to submit to
+                return False
+        return True
+
+    def call(self, req: rt.CallRequest, req_id: str, callback: Callable):
+        """call"""
+        if not self.init_call_env(req, req_id, callback):
+            return
+
+        need_in_order = self.check_in_order(req)
+        if need_in_order:
+            runtime_key = utils.extract_runtime_id(req.requestID)
+            request_queue = \
+                self._inorder_manager.get_instance_request_queue(runtime_key)
+
+        def call_task(request=req, need_order=False):
+            log.get_logger().debug(f"start to call "
+                                   f"{request.requestID} {request.traceID}")
+            response = self._mgr.call(request)
+            if need_order:
+                request_queue.set_next_serial_num()
+            log.get_logger().debug(f"end to call "
+                                   f"{request.requestID} {request.traceID}")
+            _send_call_ack(response)
+
+        def process_current():
+            temp_serial_num = utils.extract_serial_num(req.requestID)
+            if temp_serial_num != request_queue.next_serial_num():
+                request_queue.store_request(temp_serial_num, req)
+            else:
+                call_task(req, True)
+
+        def process_next(_):
+            while not request_queue.empty():
+                self._lock.acquire()
+                if request_queue.head_serial_num() == \
+                        request_queue.next_serial_num():
+                    next_req = request_queue.pop_head_request()
+                    self._lock.release()
+                    call_task(next_req, True)
+                else:
+                    self._lock.release()
+                    break
+
+        call_ack = increase_objref(req)
+        callback(rt_rpc.StreamingMessage(callRsp=call_ack, messageID=req_id))
+        if call_ack.code == common.ERR_NONE:
+            if req.isCreate:
+                call_task()
+            elif need_in_order:
+                self.invoke_pool.submit(fn=process_current,
+                                        request_id=req.requestID, sender_id=req.senderID). \
+                    add_done_callback(process_next)
+            else:
+                self.invoke_pool.submit(fn=call_task,
+                                        request_id=req.requestID, sender_id=req.senderID)
+
+    def checkpoint(self, req: rt.CheckpointRequest, req_id: str, callback: Callable):
+        """checkpoint"""
+        resp = self._mgr.checkpoint(req)
+        log.get_logger().debug(f"end to checkpoint {resp}")
+        callback(rt_rpc.StreamingMessage(checkpointRsp=resp, messageID=req_id))
+
+    def recover(self, req: rt.RecoverRequest, req_id: str, callback: Callable):
+        """recover"""
+        self.init_invoke_pool()
+        resp = self._mgr.recover(req)
+        callback(rt_rpc.StreamingMessage(recoverRsp=resp, messageID=req_id))
+
+    def shutdown(self, req: rt.ShutdownRequest, req_id: str, callback: Callable):
+        """shutdown"""
+        resp = self._mgr.shutdown(req)
+        callback(rt_rpc.StreamingMessage(shutdownRsp=resp, messageID=req_id))
+
+    def signal(self, req: rt.SignalRequest, req_id: str, callback: Callable):
+        """signal"""
+        if req.signal == Signal.CANCEL.value:
+            resp = self.cancel_thread(req)
+        else:
+            resp = self._mgr.signal(req)
+        callback(rt_rpc.StreamingMessage(signalRsp=resp, messageID=req_id))
+
+    def submit(self, request: rt_rpc.StreamingMessage, callback: Callable):
+        """submit a request"""
+        self._queue.put(ReqData(request, callback))
+
+    def clear(self, timeout=None):
+        """ clear """
+        self._running = False
+        self._queue.put(None)
+        self.join(timeout)
+        if self.invoke_pool is not None:
+            self.invoke_pool.shutdown(wait=False)
+        self._inorder_manager.clear()
+
+    def _process_request(self, req_data):
+        """process request"""
+        request = req_data.request
+        req_name = request.WhichOneof("body")
+        try:
+            req_attr = getattr(request, req_name)
+        except AttributeError:
+            log.get_logger().error(f"invalid request: {request}")
+        else:
+            func_name = _REQ_TABLE.get(req_name, None)
+            try:
+                func = 
getattr(self, func_name) + except AttributeError: + log.get_logger().error(f"can't find request from request table: {request}") + else: + func(req_attr, request.messageID, req_data.callback) + + +def increase_objref(request: rt.CallRequest): + """preprocess for call request """ + code = common.ERR_NONE + message = None + try: + dependency_objref_process(request.args, is_increase=True) + except TimeoutError as err: + code = common.ERR_INNER_SYSTEM_ERROR + message = f"failed to increase obj ref count of request: {request.requestID}, err: {err}" + log.get_logger().error(message) + return rt.CallResponse(code=code, message=message) diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/rpc_server.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/rpc_server.py new file mode 100644 index 0000000000000000000000000000000000000000..5475927f16598331b58cdf0bfa42e58bec1552b6 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/rpc_server.py @@ -0,0 +1,215 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""RPC Server""" +import queue +import traceback +import logging +import os +from concurrent import futures +from threading import Thread + +import grpc + +from fnruntime import log +from fnruntime.rpc.function_manager import FunctionManager, get_hooks_key +from fnruntime.rpc.request_queue import RequestManager +from fnruntime.rpc.sdk import RPClient +from fnruntime.util import get_tls_config +from yr.rpc import runtime_rpc_pb2 as rt_rpc +from yr.rpc import runtime_rpc_pb2_grpc as rt_rpc_grpc +from yr.rpc import runtime_service_pb2 as rt + +MAX_MESSAGE_SIZE = 1024 * 1024 * 500 +MAX_FRAME_SIZE = 1024 * 1024 * 2 +MAX_CHUNK_SIZE = MAX_FRAME_SIZE + +_STOPPING = False + + +class RuntimeService(rt_rpc_grpc.RuntimeRPCServicer): + """Runtime rpc server""" + __RESPONSE_FIELD = {"createRsp", "invokeRsp", "exitRsp", "saveRsp", "loadRsp", "killRsp", "callResultAck"} + __REQUEST_FIELD = {"callReq", "checkpointReq", "recoverReq", "shutdownReq", "signalReq"} + + def __init__(self, task_queue=None): + self._queue = queue.Queue() + RPClient().queue = self._queue + self._task_queue = task_queue + + def MessageStream(self, request_iterator, context): + """streaming""" + t = Thread(target=self._receive_request, args=(request_iterator,), name="streaming", daemon=True) + t.start() + + while t.is_alive(): + try: + request = self._queue.get(timeout=1) + except queue.Empty: + continue + body_type = request.WhichOneof("body") + if log.get_logger().isEnabledFor(logging.DEBUG) and body_type != "heartbeatRsp": + log.get_logger().debug(f"send request: {body_type} {request.messageID}") + yield request + log.get_logger().info('end to streaming') + + def send_response(self, resp: rt_rpc.StreamingMessage): + """send response by streaming""" + self._queue.put(resp) + + def _receive_request(self, request_iterator): + try: + for request in request_iterator: + self._process_request(request) + except StopIteration: + log.get_logger().warning('bus connection 
broke') + except grpc.RpcError as e: + global _STOPPING + if not _STOPPING: + log.get_logger().warning('rpc error') + log.get_logger().exception(e) + + def _process_request(self, request: rt_rpc.StreamingMessage): + body_type = request.WhichOneof("body") + if log.get_logger().isEnabledFor(logging.DEBUG) and body_type != "heartbeatReq": + log.get_logger().debug(f"receive request: {body_type} {request.messageID}") + if body_type in self.__RESPONSE_FIELD: + RPClient().receive(request) + elif body_type == "notifyReq": + RPClient().notify(request.notifyReq) + self.send_response(rt_rpc.StreamingMessage(notifyRsp=rt.NotifyResponse(), messageID=request.messageID)) + elif body_type == "heartbeatReq": + if self._task_queue.is_alive(): + self.send_response( + rt_rpc.StreamingMessage(heartbeatRsp=rt.HeartbeatResponse(), messageID=request.messageID)) + elif body_type in self.__REQUEST_FIELD: + self._task_queue.submit(request, self.send_response) + else: + log.get_logger().warning(f"request invalid: {request}") + + +def _init_ds_client(mgr): + ds_address = os.environ.get("DATASYSTEM_ADDR") + log.get_logger().info(f"data-system worker addr: {ds_address}") + try: + RPClient().init_ds_client(ds_address) + except Exception as e: + mgr.set_init_exception(e, traceback.format_exc()) + log.get_logger().error(f"failed to init runtime, {e}, {traceback.format_exc()}") + return + log.get_logger().debug(f"succeed to init runtime, instanceID: {RPClient().instance_id}") + return + + +def _init_function_manager(python_path, init_hooks=True): + + code_dir = os.environ.get("FUNCTION_LIB_PATH") + instance_id = os.environ.get("INSTANCE_ID") + layer_paths = os.environ.get("LAYER_LIB_PATH", "").split(",") + hooks_key = get_hooks_key() + hooks = {} + for hook_name in hooks_key: + if hook_name in os.environ: + hooks[hook_name] = os.environ[hook_name] + mgr = FunctionManager(instance_id, init_hooks) + + mgr.init(python_path, code_dir, layer_paths, hooks) + return mgr + + +def serve(address, python_path, future=None) -> None: + """ + start grpc server + + Args: + address: bind address + python_path: user code path + future: to tell main thread that grpc server startup complete + """ + log.get_logger().info("Starting server on %s", address) + options = [ + ('grpc.max_receive_message_length', MAX_MESSAGE_SIZE), + ('grpc.max_send_message_length', MAX_MESSAGE_SIZE), + ('grpc.http2.max_frame_size', MAX_FRAME_SIZE), + ('grpc.experimental.tcp_read_chunk_size', MAX_CHUNK_SIZE), + ] + mgr = _init_function_manager(python_path) + task_queue = RequestManager(mgr) + _init_ds_client(mgr) + try: + tls_enable, tls_config = get_tls_config() + except Exception: + tls_enable, tls_config = False, None + + server = grpc.server(futures.ThreadPoolExecutor(max_workers=100, thread_name_prefix="grpc_server"), options=options) + if tls_enable: + creds = grpc.ssl_server_credentials([(tls_config.module_key_data, tls_config.module_cert_data)], + root_certificates=tls_config.root_cert_data) + server.add_secure_port(address=address, server_credentials=creds) + log.get_logger().info("Succeed to load tls config on adder:%s", address) + else: + server.add_insecure_port(address) + rt_server = RuntimeService(task_queue) + rt_rpc_grpc.add_RuntimeRPCServicer_to_server(rt_server, server) + server.start() + log.get_logger().info("Starting server successful.") + if future is not None: + future.set_result("") + server.wait_for_termination() + + +def driver_serve(address) -> int: + """ + start driver mode grpc server + + Args: + address: bind address + """ + options = [ + 
('grpc.max_receive_message_length', MAX_MESSAGE_SIZE), + ('grpc.max_send_message_length', MAX_MESSAGE_SIZE), + ('grpc.http2.max_frame_size', MAX_FRAME_SIZE), + ('grpc.experimental.tcp_read_chunk_size', MAX_CHUNK_SIZE), + ] + server = grpc.server(futures.ThreadPoolExecutor(max_workers=10, thread_name_prefix="grpc_server"), options=options) + mgr = _init_function_manager("", init_hooks=False) + task_queue = RequestManager(mgr) + rt_server = RuntimeService(task_queue) + rt_rpc_grpc.add_RuntimeRPCServicer_to_server(rt_server, server) + port = server.add_insecure_port(address) + Thread(target=_internal_start, args=(server,), name="_internal_start", daemon=True).start() + Thread(target=_driver_stop, args=(server,), name="driver_stop", daemon=True).start() + return port + + +def _internal_start(server): + global _STOPPING + _STOPPING = False + + server.start() + server.wait_for_termination() + + +def _driver_stop(server): + """ + stop driver mode gracefully + + Args: + server: server to stop + """ + RPClient().init_thread.join() + global _STOPPING + _STOPPING = True + server.stop(None).wait() diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/sdk.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/sdk.py new file mode 100644 index 0000000000000000000000000000000000000000..fcb97c142a82cd63dff1a6dfc50c56d1fb70a60b --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/rpc/sdk.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
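The future parameter in serve() above exists so the caller can block until the gRPC server is actually listening before continuing. A minimal standalone sketch of that handshake; the start_server function and the address are illustrative, not part of fnruntime:

    from concurrent.futures import Future
    from threading import Thread

    def start_server(address, future):
        # ... create and start the gRPC server here ...
        future.set_result("")  # signal the caller: startup complete
        # ... then block, e.g. in server.wait_for_termination() ...

    startup = Future()
    Thread(target=start_server, args=("127.0.0.1:50051", startup), daemon=True).start()
    startup.result(timeout=10)  # raises TimeoutError if the server never comes up
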
+
+"""RPC client"""
+import os
+import threading
+import traceback
+from concurrent.futures import ThreadPoolExecutor, Future
+from concurrent.futures import TimeoutError as FuturesTimeoutError
+
+from datasystem.object_cache import ObjectClient
+from datasystem.stream_client import StreamClient
+from fnruntime import log
+from yr.exception import YRInvokeError
+from yr.objref_counting_manager import ObjRefCountingManager
+from yr.rpc import common_pb2
+from yr.rpc import core_service_pb2 as core
+from yr.rpc import runtime_rpc_pb2 as rt_rpc
+from yr.rpc import runtime_service_pb2 as rt
+from yr.storage.ds_client import DSClient
+from yr.utils import Singleton
+from yr.utils import generate_task_id
+
+_CALL_MAP = {}
+_DEFAULT_BUS_PORT = "22769"
+MAX_RETRY_TIME = 3
+REQUEST_TIMEOUT = 5
+
+
+def post_shutdown():
+    """process pending requests after shutdown"""
+    for request_id in list(_CALL_MAP.keys()):
+        sender_id = _CALL_MAP.get(request_id)
+        if sender_id is None:
+            continue
+        log.get_logger().warning(f"processing request will be cancelled, {request_id}")
+        _ = RPClient().send(rt_rpc.StreamingMessage(callResultReq=core.CallResult(
+            code=common_pb2.ERR_USER_FUNCTION_EXCEPTION,
+            message="This instance has been shutdown",
+            instanceID=sender_id,
+            requestID=request_id), messageID=generate_task_id()))
+
+
+def get_call_map():
+    """get call map"""
+    return _CALL_MAP
+
+
+@Singleton
+class RPClient:
+    """RPC client"""
+
+    def __init__(self):
+        self.ds_client = None
+        self.instance_id = os.environ.get("INSTANCE_ID")
+        self._invoke_callback = {}
+        self.ds_address = ''
+        self._response_futures = {}
+        self.queue = None
+        self._pool = ThreadPoolExecutor(max_workers=1, thread_name_prefix="rpc_client")
+        # Preserve a record of the first thread created by self._pool (a daemon
+        # thread that dies with the process), so the driver stop thread can block
+        # on its join() and only then stop the driver server.
+        thread_id = self._pool.submit(threading.get_ident).result()
+        self.init_thread = threading._active.get(thread_id)
+
+    def init_ds_client(self, ds_address: str):
+        """Init dataSystem client"""
+        self.ds_address = ds_address
+        self.ds_client = DSClient()
+        host, port = ds_address.split(":")
+        client = ObjectClient(host=host, port=int(port), connect_timeout=5000)
+        client.init()
+        stream_client = StreamClient(host=host, port=int(port))
+        stream_client.init()
+        self.ds_client.init(client, stream_client)
+
+    def send(self, request: rt_rpc.StreamingMessage, timeout=REQUEST_TIMEOUT) -> rt_rpc.StreamingMessage:
+        """send request to core"""
+        for _ in range(MAX_RETRY_TIME):
+            future = Future()
+            self._response_futures[request.messageID] = future
+            self.queue.put(request)
+            try:
+                result = future.result(timeout=timeout)
+            except FuturesTimeoutError:
+                # Future.result raises concurrent.futures.TimeoutError, which is
+                # not the builtin TimeoutError before Python 3.11.
+                continue
+            return result
+        self._response_futures.pop(request.messageID)
+        raise TimeoutError(f"failed to send request after {MAX_RETRY_TIME} retries")
+
+    def receive(self, response: rt_rpc.StreamingMessage):
+        """receive response from core"""
+        future = self._response_futures.get(response.messageID)
+        if future is not None:
+            future.set_result(response)
+            self._response_futures.pop(response.messageID)
+        else:
+            log.get_logger().warning(f"no pending future found for response, {response}")
+
+    def register_invoke_callback(self, request_id: str, callback: Future):
+        """
+        register invoke callback
+        callback must be a future because notify may arrive earlier than the response
+        """
+        self._invoke_callback[request_id] = callback
+
+    def unregister_invoke_callback(self, request_id: str):
+        """
+        unregister invoke callback
+        """
+        if request_id in self._invoke_callback:
+            self._invoke_callback.pop(request_id)
+
+    def notify(self, request: rt.NotifyRequest):
+        """callback for invoke request"""
+        # callback is a future, because NotifyRequest needs to wait for the corresponding response
+        callback = self._invoke_callback.get(request.requestID)
+        if callback is not None:
+            self._pool.submit(lambda: callback.result()(request))
+        else:
+            log.get_logger().warning(f"callback not found, {request}")
+
+
+def create(request: core.CreateRequest, callback, timeout=None):
+    """
+    Create instance request
+
+    Args:
+        request: create request
+        callback: process response
+        timeout: request timeout, default 900s
+
+    Returns:
+        None: the instance id is delivered through ``callback``
+    """
+    log.get_logger().debug(f"start to create: {request}")
+    future = Future()
+    RPClient().register_invoke_callback(request.requestID, future)
+    response = RPClient().send(rt_rpc.StreamingMessage(createReq=request, messageID=generate_task_id()), timeout)
+
+    if not response.HasField("createRsp"):
+        callback(None, RuntimeError(f"error response, {response}"))
+    else:
+        if response.createRsp.code != common_pb2.ERR_NONE:
+            RPClient().unregister_invoke_callback(request.requestID)
+            callback(response.createRsp)
+            return
+
+        def _notify(notify):
+            try:
+                callback(core.CreateResponse(code=notify.code, message=notify.message,
+                                             instanceID=response.createRsp.instanceID))
+            except (ValueError, AttributeError, RuntimeError) as e:
+                callback("", YRInvokeError(e, traceback.format_exc()))
+
+        future.set_result(_notify)
+
+
+def invoke(request: core.InvokeRequest, callback, timeout=None):
+    """
+    send an invoke request to an instance
+
+    Args:
+        request: invoke request
+        callback: process response
+        timeout: request timeout, default 900s
+
+    Returns:
+        None: the return object id is delivered through ``callback``
+    """
+    log.get_logger().debug(f"start to invoke {request}")
+    future = Future()
+    RPClient().register_invoke_callback(request.requestID, future)
+    response = RPClient().send(rt_rpc.StreamingMessage(invokeReq=request, messageID=generate_task_id()), timeout)
+
+    if not response.HasField("invokeRsp"):
+        callback(None, RuntimeError(f"error response, {response}"))
+    else:
+        if response.invokeRsp.code != common_pb2.ERR_NONE:
+            RPClient().unregister_invoke_callback(request.requestID)
+            callback(response.invokeRsp)
+            return
+
+        def _notify(notify):
+            callback(core.InvokeResponse(code=notify.code, message=notify.message,
+                                         returnObjectID=response.invokeRsp.returnObjectID))
+
+        future.set_result(_notify)
+
+
+def kill(instance_id: str, sig: int, request_id: str, callback, timeout=None):
+    """
+    kill instance with signal
+    """
+    kill_req = core.KillRequest(instanceID=instance_id, signal=sig, payload=request_id.encode(encoding='utf-8'))
+    response = RPClient().send(rt_rpc.StreamingMessage(killReq=kill_req,
+                                                       messageID=generate_task_id()), timeout)
+    if not response.HasField("killRsp"):
+        callback(None, RuntimeError(f"error response, {response}"))
+    else:
+        callback(response.killRsp)
+
+
+def exit(timeout=None):
+    """
+    notify the core to exit the current instance gracefully
+    """
+    ObjRefCountingManager().wait_invoke_ack()
+    try:
+        _ = RPClient().send(rt_rpc.StreamingMessage(exitReq=core.ExitRequest(), messageID=generate_task_id()), timeout)
+    except TimeoutError as e:
+        log.get_logger().exception(str(e))
+
+
+def save_state(state: bytes, timeout=None) -> str:
+    """
+    save the instance state to cluster
+
+    Args:
+        state: state info
+        timeout: request timeout, default 900s
+
+    Returns:
+        str: checkpoint id, used to load the state later
+    """
+    response = RPClient().send(
+        rt_rpc.StreamingMessage(saveReq=core.StateSaveRequest(state=state), messageID=generate_task_id()), timeout)
+    if not response.HasField("saveRsp"):
+        raise RuntimeError(f"error response, {response}")
+    if response.saveRsp.code != common_pb2.ErrorCode.ERR_NONE:
+        raise RuntimeError(
+            f"failed to save state, code: {response.saveRsp.code}, message: {response.saveRsp.message}")
+    return response.saveRsp.checkpointID
+
+
+def load_state(checkpoint_id: str, timeout=None) -> bytes:
+    """
+    load an instance state from cluster
+
+    Args:
+        checkpoint_id: instance checkpoint id
+        timeout: request timeout, default 900s
+
+    Returns:
+        bytes: state
+    """
+    response = RPClient().send(
+        rt_rpc.StreamingMessage(loadReq=core.StateLoadRequest(checkpointID=checkpoint_id),
+                                messageID=generate_task_id()),
+        timeout)
+    if not response.HasField("loadRsp"):
+        raise RuntimeError(f"error response, {response}")
+    if response.loadRsp.code != common_pb2.ErrorCode.ERR_NONE:
+        raise RuntimeError(
+            f"failed to load state, code: {response.loadRsp.code}, message: {response.loadRsp.message}")
+    return response.loadRsp.state
diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/server.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/server.py
new file mode 100644
index 0000000000000000000000000000000000000000..20ca69f555d247f4c4140d39d069ba4ddda20f1a
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/server.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+# coding=UTF-8
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""runtime server""" +import sys + +from fnruntime import log +from fnruntime.rpc import rpc_server + + +def start_up(): + """ + Start up + """ + runtime_id = "" + if len(sys.argv) == 5: + runtime_id = sys.argv[4] + log.init_log(runtime_id) + if len(sys.argv) < 4: + log.get_logger().error("invalid starting parameters of python runtime") + sys.exit(1) + log.get_logger().debug(f"{sys.argv}") + rpc_server.serve(sys.argv[2], sys.argv[3]) + + +if __name__ == '__main__': + start_up() diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/tls.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/tls.py new file mode 100644 index 0000000000000000000000000000000000000000..677c0bb198fc2ec7ecce6e910bef679eb55e10b9 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/tls.py @@ -0,0 +1,47 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and +# conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""tls for two-way authentication""" + +from dataclasses import dataclass + + +@dataclass +class ClusterTLSConfig: + """ + In cluster for ssl/tls + """ + root_cert_data: bytes = None + module_cert_data: bytes = None + module_key_data: bytes = None + + +def read_cert_from_cluster(config_json) -> ClusterTLSConfig: + """ + Read cert file from cluster config.json + """ + config = ClusterTLSConfig() + root_cert_file_path = config_json["rootCAFile"] + module_cert_file_path = config_json["moduleCertFile"] + module_key_file_path = config_json["moduleKeyFile"] + with open(root_cert_file_path, 'rb') as f: + config.root_cert_data = f.read() + with open(module_cert_file_path, 'rb') as f: + config.module_cert_data = f.read() + with open(module_key_file_path, 'rb') as f: + config.module_key_data = f.read() + return config diff --git a/dsoftbus/dist_executor/modules/runtime/python/fnruntime/util.py b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/util.py new file mode 100644 index 0000000000000000000000000000000000000000..7e9e44dbbef52be74f84751f502a45e7f9946786 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/fnruntime/util.py @@ -0,0 +1,41 @@ +# !/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. You can use this software +# according to the terms and conditions of the Mulan PSL v2. You may obtain a +# copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
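read_cert_from_cluster above, and get_tls_config below, assume the runtime config JSON carries a TLS switch plus three certificate file paths. A sketch of that shape as parsed in Python; the paths are placeholders:

    import json

    # Assumed shape of runtime.json consumed by get_tls_config; paths are placeholders.
    config_json = json.loads('''
    {
        "tlsEnable": true,
        "rootCAFile": "/path/to/ca.crt",
        "moduleCertFile": "/path/to/module.crt",
        "moduleKeyFile": "/path/to/module.key"
    }
    ''')
    assert config_json["tlsEnable"] is True
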
+ +"""util common methods""" +import json +import os +import sys + +from fnruntime.tls import read_cert_from_cluster + + +def get_tls_config(): + """ + get tls configuration + """ + config_path = "/home/snuser/config/runtime.json" + if os.getenv("YR_BARE_MENTAL") is not None: + config_path = sys.path[0] + '/../config/runtime.json' + with open(config_path, 'r') as config_file: + config_json = json.load(config_file) + + tls_config = None + if "tlsEnable" in config_json: + tls_enable = config_json["tlsEnable"] + if tls_enable: + tls_config = read_cert_from_cluster(config_json) + else: + tls_enable = False + return tls_enable, tls_config diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ecb7b7889859427a91a93f887e19b43c4a17def6 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/__init__.py @@ -0,0 +1,44 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +yr api +""" + +from yr.config import Config, DeploymentConfig, UserTLSConfig +from yr.apis import init +from yr.apis import is_initialized +from yr.apis import get +from yr.apis import put +from yr.apis import wait +from yr.apis import invoke +from yr.apis import instance +from yr.apis import method +from yr.apis import finalize +from yr.apis import exit +from yr.apis import cancel +from yr.apis import cpp_function +from yr.apis import cpp_instance_class +from yr.apis import is_on_cloud +from yr.apis import get_stream_client +from yr.apis import get_instance +from yr.runtime.task_spec import InvokeOptions +from yr.runtime.task_spec import AffinityType + +__all__ = ["Config", "DeploymentConfig", "UserTLSConfig", "init", "is_initialized", "get", "put", + "wait", "invoke", "instance", "method", "finalize", "exit", + "cancel", "cpp_function", "cpp_instance_class", "is_on_cloud", + "get_stream_client", "get_instance", + "InvokeOptions", "AffinityType"] diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/apis.py b/dsoftbus/dist_executor/modules/runtime/python/yr/apis.py new file mode 100644 index 0000000000000000000000000000000000000000..d54a311a5c4d2ecd45c0b3b187226f88ea019c5e --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/apis.py @@ -0,0 +1,472 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
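Before the implementation, a minimal end-to-end usage sketch of the API exported above. It assumes local_mode so no cluster is required; the exact local behavior depends on LocalModeRuntime:

    import yr

    yr.init(yr.Config(local_mode=True))

    @yr.invoke
    def add(a, b):
        return a + b

    ref = add.invoke(1, 2)   # returns an ObjectRef immediately
    print(yr.get(ref))       # -> 3
    yr.finalize()
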
+
+"""
+functions which need to be exported
+"""
+import atexit
+import logging
+import os
+import json
+from typing import Union, List
+
+from datasystem.agent_client import AgentClient
+from datasystem.object_cache import ObjectClient
+from datasystem.stream_client import StreamClient
+
+from yr import Config
+from yr import utils
+from yr.common.timer import Timer
+from yr.config import ClientInfo
+from yr.config import ConfigManager
+from yr.config import check_init
+from yr.decorator import function_proxy
+from yr.decorator import instance_proxy
+from yr.object_ref import ObjectRef
+from yr.objref_counting_manager import ObjRefCountingManager
+from yr.runtime import cluster_mode_runtime
+from yr.runtime.local_mode_runtime import LocalModeRuntime
+from yr.runtime.runtime import Runtime
+from yr.storage.reference_count import ReferenceCount
+from yr.utils import get_function_from_urn, generate_runtime_id
+from yr.custom import mpctl
+
+_DATASYSTEM_DEFAULT_TIMEOUT = 5 * 60
+_BASE_LOG_NAME = "yr"
+_MAX_INT = 0x7FFFFFFF
+_logger = logging.getLogger(__name__)
+_HTTP_PROXY = "http_proxy"
+_HTTPS_PROXY = "https_proxy"
+_DATASYSTEM_CLIENT_LOG_DIR = "DATASYSTEM_CLIENT_LOG_DIR"
+
+
+def _unset_http_proxy():
+    if _HTTP_PROXY in os.environ:
+        os.environ[_HTTP_PROXY] = ""
+
+    if _HTTPS_PROXY in os.environ:
+        os.environ[_HTTPS_PROXY] = ""
+
+
+def _set_datasystem_client_log_dir():
+    if _DATASYSTEM_CLIENT_LOG_DIR in os.environ:
+        return
+
+    config_path = os.path.realpath(os.path.join(
+        os.path.dirname(os.path.abspath(__file__)), "../config/python-runtime-log.json")
+    )
+    if not os.path.isfile(config_path):
+        return
+
+    with open(config_path, "r") as fr:
+        try:
+            data = json.load(fr)
+        except json.decoder.JSONDecodeError:
+            return
+
+    log_path = data.get("handlers", {}).get("file", {}).get("filename", "")
+    if log_path != "":
+        os.environ[_DATASYSTEM_CLIENT_LOG_DIR] = log_path
+
+
+def _gen_random_address(mgr):
+    host, port = mgr.server_address.split(":")
+    return host, port
+
+
+def _get_runtime(mgr: ConfigManager, logger):
+    if mgr.local_mode:
+        return LocalModeRuntime()
+
+    if mgr.on_cloud:
+        from yr.invoke_client.posix_client import PosixClient
+        invoke_client = PosixClient()
+        from fnruntime.rpc.sdk import RPClient
+        ds_client = RPClient().ds_client
+    else:
+        host, port = mgr.ds_address.split(":")
+        if mgr.in_cluster:
+            client = ObjectClient(host=host, port=int(port))
+            stream_client = StreamClient(host=host, port=int(port))
+            stream_client.init()
+            from yr.invoke_client.posix_client import PosixClient, start_server
+            invoke_client = PosixClient()
+            server_ip, _ = _gen_random_address(mgr)
+            real_port = start_server(server_ip + ":0", logger)
+            import grpc
+            from yr.rpc import bus_service_pb2, bus_service_pb2_grpc
+            channel = grpc.insecure_channel(mgr.server_address)
+            stub = bus_service_pb2_grpc.BusServiceStub(channel)
+            stub.DiscoverDriver(bus_service_pb2.DiscoverDriverRequest(
+                driverIP=server_ip,
+                driverPort=str(real_port),
+                jobID=mgr.job_id)
+            )
+        else:
+            client = AgentClient(host=host, port=int(port))
+            stream_client = None
+            from yr.invoke_client.outcluster_client import HttpInvokeClient
+            invoke_client = HttpInvokeClient()
+            invoke_client.init(mgr.server_address, mgr.invoke_timeout)
+
+        client.init()
+        from yr.storage.ds_client import DSClient
+        ds_client = DSClient()
+        ds_client.init(client, stream_client)
+        ReferenceCount().init(ds_client)
+
+    return cluster_mode_runtime.ClusterModeRuntime(ds_client=ds_client,
+                                                   invoke_client=invoke_client)
+
+
+def init(conf: Config = None) -> ClientInfo:
+    """
+    Init yr api
+
+    Args:
+        conf (Config) : This is the config which is set by the user, see the detail in class `yr.Config`.
+    """
+    if ConfigManager().is_init:
+        raise RuntimeError("yr.init cannot be called twice")
+    if conf is None:
+        conf = Config()
+
+    _unset_http_proxy()
+    ConfigManager().init(conf)
+
+    mpctl.start()
+    _set_datasystem_client_log_dir()
+    logger = logging.getLogger(_BASE_LOG_NAME)
+    handler = logging.StreamHandler()
+    fmt = logging.Formatter(
+        fmt='[%(asctime)s.%(msecs)03d %(levelname)s %(funcName)s %(filename)s:%(lineno)d %(thread)d] %(message)s',
+        datefmt='%Y-%m-%d %H:%M:%S')
+    handler.setFormatter(fmt)
+    logger.setLevel(ConfigManager().log_level)
+    logger.handlers.clear()
+    logger.addHandler(handler)
+    logger.propagate = False
+    runtime = _get_runtime(ConfigManager(), logger)
+    Runtime().init(runtime)
+    ConfigManager().runtime_id = generate_runtime_id()
+    atexit.register(finalize)
+    ConfigManager().is_init = True
+    Timer().init()
+    _logger.info(f"succeeded to init, {ConfigManager().job_id}")
+    return ClientInfo(ConfigManager().job_id)
+
+
+def is_initialized() -> bool:
+    """Check if yr.init has been called yet.
+
+    Returns:
+        True if yr.init has already been called and false otherwise.
+    """
+    return ConfigManager().is_init
+
+
+def finalize():
+    """
+    Finalize the yr context, delete objects and stateless instances
+    """
+    if ConfigManager().is_init:
+        ReferenceCount().stop()
+        Timer().stop()
+        ObjRefCountingManager().clear()
+        Runtime().rt.clear()
+        ConfigManager().is_init = False
+        mpctl.stop()
+
+
+def exit():
+    """
+    Exit instance
+    """
+    if ConfigManager().on_cloud:
+        Runtime().rt.exit_instance()
+    else:
+        _logger.warning("exit is not supported outside the cluster.")
+
+
+@check_init
+def put(obj: object):
+    """
+    Put object to datasystem
+
+    Args:
+        obj(object): This is a python object, which will be pickled and saved to datasystem
+    """
+    # Make sure that the value is not an object ref.
+    if isinstance(obj, ObjectRef):
+        raise TypeError(
+            "Calling 'put' on an ObjectRef is not allowed. If you really want to do this, "
+            "you can wrap the ObjectRef in a list and call 'put' on it.")
+    ref = ObjRefCountingManager().create_object_ref()
+    return Runtime().rt.put(ref[0], obj)
+
+
+def _check_object_ref(obj_refs):
+    if not isinstance(obj_refs, list):
+        raise TypeError(f"obj_refs type error, actual: [{type(obj_refs)}], expect: list of ObjectRef")
+    for ref in obj_refs:
+        if not isinstance(ref, ObjectRef):
+            raise TypeError(f"obj_refs element type error, actual: [{type(ref)}], expect: ObjectRef")
+
+
+def get(obj_refs: Union[ObjectRef, List], timeout=_DATASYSTEM_DEFAULT_TIMEOUT):
+    """
+    Get object from datasystem.
+
+    This function will block when the key is not in datasystem.
+
+    Args:
+        obj_refs (ObjectRef, List[ObjectRef]): The key of the object.
+        timeout (int): Timeout(sec) for get, will block when timeout is set to -1, default: 300, limit: -1, (0,∞)
+    """
+    if not ConfigManager().is_init:
+        raise RuntimeError("system not initialized")
+    lower_limit = 0
+    if timeout <= lower_limit and timeout != -1:
+        raise ValueError(f"timeout should be greater than {lower_limit}")
+    is_single_obj = isinstance(obj_refs, ObjectRef)
+    if is_single_obj:
+        obj_refs = [obj_refs]
+    _check_object_ref(obj_refs)
+    objects = Runtime().rt.get(obj_refs, timeout)
+    if is_single_obj:
+        return objects[0]
+    return objects
+
+
+def is_on_cloud() -> bool:
+    """
+    Check whether running on cloud
+    """
+    return ConfigManager().on_cloud
+
+
+@check_init
+def wait(obj_refs: Union[ObjectRef, List], wait_num: int = 1, timeout: int = None) -> (List, List):
+    """
+    Wait for objectRefs to complete
+
+    Args:
+        obj_refs(ObjectRef, List[ObjectRef]): List of objectRefs to wait for
+        wait_num(int): Number of minimum objectRef completions required, default: 1, limit: [0, len(List[ObjectRef])]
+        timeout(int): Timeout(sec), default: None, limit: None, -1, [0, MAX_INT];
+            None or -1 waits as long as needed until the futures complete
+    Returns:
+        ready(list[ObjectRef]): Completed objectRef list
+        unready(list[ObjectRef]): Uncompleted objectRef list
+    """
+    if isinstance(obj_refs, ObjectRef):
+        obj_refs = [obj_refs]
+    _check_object_ref(obj_refs)
+    if len(obj_refs) != len(set(obj_refs)):
+        raise ValueError("obj_refs value error: duplicate obj_ref exists in the list")
+    if wait_num == 0 or timeout == 0:
+        return [], obj_refs
+
+    if not isinstance(wait_num, int):
+        raise TypeError(f"invalid wait_num type, actual: {type(wait_num)}, expect: int")
+    if wait_num < 0 or wait_num > len(obj_refs):
+        raise ValueError(f"invalid wait_num value, actual: {wait_num}, expect: [0, {len(obj_refs)}]")
+
+    if timeout is not None:
+        if not isinstance(timeout, int):
+            raise TypeError(f"invalid timeout type, actual: {type(timeout)}, expect: int")
+        if (timeout != -1 and timeout < 0) or timeout > _MAX_INT:
+            raise ValueError(f"invalid timeout value, actual: {timeout}, expect: None, -1, [0, {_MAX_INT}]")
+        # -1 and None both mean blocking wait, adapting to the cpp side
+        timeout = None if timeout == -1 else timeout
+    return cluster_mode_runtime.wait(obj_refs, wait_num, timeout)
+
+
+@check_init
+def cancel(obj_refs: Union[List[ObjectRef], ObjectRef]):
+    """
+    Cancel tasks
+
+    Args:
+        obj_refs(ObjectRef, list[ObjectRef]): List of objectRefs to attempt to cancel
+    """
+    if isinstance(obj_refs, ObjectRef):
+        obj_refs = [obj_refs]
+    _check_object_ref(obj_refs)
+    Runtime().rt.cancel(obj_refs)
+
+
+def invoke(*args, **kwargs):
+    """
+    Function decorator
+
+    This decorator makes a function invocable in the cluster
+    Args:
+        func(Callable): function to be invoked in the cluster
+        invoke_options(InvokeOptions): invoke options for users to set resources
+        return_nums(int): number of return values
+    """
+    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
+        return function_proxy.make_decorator()(args[0])
+    invoke_options = kwargs.get("invoke_options", None)
+    return_nums = kwargs.get("return_nums", None)
+    return function_proxy.make_decorator(invoke_options, return_nums)
+
+
+def instance(*args, **kwargs):
+    """
+    Class decorator
+
+    This decorator makes a class creatable in the cluster
+    Args:
+        invoke_options(InvokeOptions): invoke options for users to set resources
+    """
+    if len(args) == 1 and len(kwargs) == 0 and callable(args[0]):
+        return instance_proxy.make_decorator()(args[0])
+    invoke_options = 
kwargs.get("invoke_options", None) + return instance_proxy.make_decorator(invoke_options) + + +def method(*args, **kwargs): + """ + Class method decorator + + This decorator will make a class method in cluster + Args: + return_nums(int): number of return values of method + """ + if len(args) != 0 or "return_nums" not in kwargs: + raise ValueError("invalid params") + + def annotate_method(class_method): + return_nums = kwargs.get("return_nums", None) + if not isinstance(return_nums, int): + raise TypeError(f"invalid return_nums type: {type(return_nums)}, should be an int") + class_method.__return_nums__ = return_nums + return class_method + + return annotate_method + + +def get_instance(name, namespace=None) -> instance_proxy.InstanceProxy: + """ + Get a proxy of the named instance. + + Gets a proxy of an instance with the given name and namespace. The instance + name and namespace must have been set by Instance.options() + + Args: + name(str): the name of the instance + namespace(str): the namespace of the instance + + Returns: + InstanceProxy of the instance + """ + if not isinstance(name, str): + raise TypeError(f"invalid name type: {type(name)}, should be a str") + if namespace and not isinstance(namespace, str): + raise TypeError(f"invalid namespace type: {type(namespace)}, " + f"should be a str") + try: + ins_proxy = instance_proxy.get_instance_by_name(name, namespace) + except Exception as e: + raise RuntimeError(f"failed to get instance by name, {str(e)}") from e + return ins_proxy + + +class cpp_instance_class: + """ + Use to create a cpp instance class + """ + + def __init__(self, class_name: str, factory_name: str): + """ + CPP Class Creator + + Args: + class_name: CPP class name + factory_name: CPP static constructor name + """ + function_urn = ConfigManager().get_function_id_by_language(utils.LANGUAGE_CPP) + self.__function_key__ = '' + if function_urn is not None: + self.__function_key__ = get_function_from_urn(function_urn) + self.__class_name__ = class_name + self.__factory_name__ = factory_name + + def invoke(self, *args, **kwargs): + """ + Create a CPP instance in cluster + """ + creator = instance_proxy.make_cpp_instance_creator(self) + return creator.invoke(*args, **kwargs) + + def options(self, invoke_options): + """ + Set user invoke options + Args: + invoke_options: invoke options for users to set resources + """ + creator = instance_proxy.make_cpp_instance_creator(self) + return creator.options(invoke_options) + + def get_function_key(self): + """ + Get function key in this CPP instance + """ + return self.__function_key__ + + def get_class_name(self): + """ + Get class name in this CPP instance + """ + return self.__class_name__ + + def get_factory_name(self): + """ + Get factory function name in this CPP instance + """ + return self.__factory_name__ + + +def cpp_function(function_name: str): + """ + Create proxy for CPP normal function + + Args: + function_name: CPP normal function name + + Returns: + FunctionProxy: the proxy for CPP function + + """ + function_urn = ConfigManager().get_function_id_by_language(utils.LANGUAGE_CPP) + function_key = '' + if function_urn is not None: + function_key = get_function_from_urn(function_urn) + return function_proxy.make_cpp_function_proxy(function_name, function_key) + + +@check_init +def get_stream_client() -> StreamClient: + """ Get data system stream client. + + Return: + StreamClient: The stream client for data system. + + Raise: + RuntimeError: Raise a runtime error if stream client is None. 
+ """ + return Runtime().rt.get_stream_client() diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/common/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/common/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..22d08a0525105525c2ee6aad2619303adeb6f1b3 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/common/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/common/response_helper.py b/dsoftbus/dist_executor/modules/runtime/python/yr/common/response_helper.py new file mode 100644 index 0000000000000000000000000000000000000000..f2cbbf064d4429d89d3c19fb869ecab84e718ee9 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/common/response_helper.py @@ -0,0 +1,69 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+"""response helper""" +import logging +from concurrent.futures import Future +from typing import Union + +from yr import exception +from yr.rpc import core_service_pb2 +from yr.runtime.task_spec import Signal + +_logger = logging.getLogger(__name__) + + +def make_callback_for_kill(future: Future, instance_id: str, signal: Signal = Signal.EXIT): + """deal with kill response""" + + def callback(data: Union[core_service_pb2.KillResponse, str, bytes], err: Exception = None): + if future.done(): + return + if err: + exception.deal_with_yr_error(future, err) + return + if isinstance(data, core_service_pb2.KillResponse): + rsp = data + else: + rsp = core_service_pb2.KillResponse() + rsp.ParseFromString(data) + if rsp.code != 0: + msg = f"signal failed: {instance_id} {signal} code:{rsp.code}, msg: {rsp.message}" + future.set_exception(RuntimeError(msg)) + return + future.set_result(rsp.code) + + return callback + + +def make_callback_for_create(future: Future): + """deal with create response""" + + def callback(response: Union[core_service_pb2.CreateResponse, str, bytes], err: Exception = None): + if future.done(): + return + if err: + exception.deal_with_yr_error(future, err) + return + if not isinstance(response, core_service_pb2.CreateResponse): + resp = core_service_pb2.CreateResponse() + resp.ParseFromString(response) + else: + resp = response + if resp.code == 0: + future.set_result(resp.instanceID) + else: + exception.deal_with_error(future, resp.code, resp.message) + + return callback diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/common/timer.py b/dsoftbus/dist_executor/modules/runtime/python/yr/common/timer.py new file mode 100644 index 0000000000000000000000000000000000000000..27e60f04e4ead5effcc095dd1875f1fbb14fd30a --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/common/timer.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+"""global timer""" +import logging +import sched +import time +from threading import Thread +from typing import Callable + +from yr.utils import Singleton + +_logger = logging.getLogger(__name__) + + +@Singleton +class Timer: + """timer for task schedule""" + __slots__ = ["__scheduler", "__is_running", "__thread", "__interval"] + + def __init__(self): + self.__scheduler = sched.scheduler(time.time, time.sleep) + self.__is_running = False + self.__thread = None + self.__interval = 0.1 + + def init(self, interval=0.1): + """start thread to scheduler timer""" + if self.__is_running and self.__thread.is_alive(): + return + self.__interval = interval + self.__thread = Thread(target=self.run, name="YRTimer", daemon=True) + self.__is_running = True + self.__thread.start() + + def clear(self): + """clear tasks""" + if self.__scheduler.empty(): + return + for event in self.__scheduler.queue: + try: + self.__scheduler.cancel(event) + except ValueError: + continue + + def stop(self): + """stop timer thread""" + if self.__is_running: + self.clear() + self.__is_running = False + self.__thread.join() + + def run(self) -> None: + """timer loop""" + while self.__is_running: + try: + self.__scheduler.run(blocking=False) + except Exception as e: + _logger.exception(e) + time.sleep(self.__interval) + + def after(self, delay: float, action: Callable, *args, **kwargs) -> None: + """run a task after some time""" + self.__scheduler.enter(delay, 0, action, argument=args, kwargs=kwargs) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/config.py b/dsoftbus/dist_executor/modules/runtime/python/yr/config.py new file mode 100644 index 0000000000000000000000000000000000000000..695724c2d7ce20e666bec8cad43f9ca97613d973 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/config.py @@ -0,0 +1,485 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+
+"""
+yr api config for user
+"""
+import logging
+import os
+import uuid
+from dataclasses import dataclass
+import functools
+
+from yr import utils
+from yr.utils import Singleton
+
+_DEFAULT_CLUSTER_PORT = "31220"
+_DEFAULT_IN_CLUSTER_CLUSTER_PORT = "21003"
+_DEFAULT_DS_PORT = "31501"
+_DEFAULT_DS_PORT_OUTER = "31502"
+_URN_LENGTH = 7
+_DEFAULT_CONNECTION_NUMS = 100
+DEFAULT_CONCURRENCY = 1
+DEFAULT_RECYCLE_TIME = 2
+_PREFIX_OF_FUNCTION_ID = "sn:cn:yrk:12345678901234561234567890123456:function:0-adminservice-"
+_SUFFIX_OF_FUNCTION_ID = ":$latest"
+_JOB_ID_LENGTH = 8
+
+
+@dataclass
+class UserTLSConfig:
+    """
+    Out-of-cluster user ssl/tls config
+    """
+    root_cert_path: str
+    module_cert_path: str
+    module_key_path: str
+    server_name: str = None
+
+
+@dataclass
+class DeploymentConfig:
+    """
+    Auto deployment config
+
+    Attributes:
+        cpu(int): cpu acquired, the unit is millicpu
+        mem(int): mem acquired (MB)
+        datamem(int): data system mem acquired (MB)
+        spill_path(str): spill path; when out of memory, data is flushed to disk
+        spill_limit(int): spill size limit (MB)
+    """
+    cpu: int = 0
+    mem: int = 0
+    datamem: int = 0
+    spill_path: str = ""
+    spill_limit: int = 0
+
+
+@dataclass
+class Config:
+    """
+    yr API config
+
+    Attributes:
+        function_id(str): function id which you deploy, defaults from env `YRFUNCID`,
+            e.g. sn:cn:yrk:12345678901234561234567890123456:function:0-test-test:$latest.
+        auto_function_name(str): use the default function defined in admin-service, see the document for details.
+        cpp_function_id(str): cpp function id which you deploy, defaults from env `YR_CPP_FUNCID`.
+        cpp_auto_function_name(str): use the default function for cpp.
+        function_name(str): function name needed in runtime.
+        server_address(str): System cluster address, defaults from env `YR_SERVER_ADDRESS`.
+        ds_address(str): DataSystem address, defaults from env `YR_DS_ADDRESS`.
+        app_id(str): System cluster app id, required when authentication
+            is enabled.
+        app_key(str): System cluster app key, required when authentication
+            is enabled.
+        on_cloud(bool): only True when initialized in runtime.
+        log_level: yr api log level: ERROR/WARNING/INFO/DEBUG, default: WARNING.
+        invoke_timeout(int): http client read timeout(sec), default 900.
+        local_mode(bool): run code locally.
+        code_dir(str): code directory, needs to be set when initializing in runtime.
+        connection_nums(int): http client connection nums, default: 100, limit: [1,∞).
+        recycle_time(int): instance recycle period(sec), default: 2 seconds, limit: (0,300].
+        in_cluster(bool): if True, use the in-cluster datasystem client.
+        job_id(str): auto generated by init.
+        tls_config(UserTLSConfig): for out-of-cluster https/ssl.
+        auto(bool): auto start distribute-executor on yr.init, and auto stop it on yr.finalize.
+        deployment_config(DeploymentConfig): needed when auto=True, used to define deployment details.
+    """
+    function_id: str = ""
+    auto_function_name: str = ""
+    cpp_function_id: str = ""
+    cpp_auto_function_name: str = ""
+    function_name: str = ""
+    server_address: str = ""
+    ds_address: str = ""
+    app_id: str = ""
+    app_key: str = ""
+    on_cloud: bool = False
+    log_level: (str, int) = logging.WARNING
+    invoke_timeout: int = 900
+    local_mode: bool = False
+    code_dir: str = ""
+    connection_nums: int = _DEFAULT_CONNECTION_NUMS
+    recycle_time: int = DEFAULT_RECYCLE_TIME
+    in_cluster: bool = False
+    job_id: str = ""
+    tls_config: UserTLSConfig = None
+    auto: bool = False
+    deployment_config: DeploymentConfig = None
+
+
+def _get_from_env(conf):
+    if conf.function_id == "":
+        conf.function_id = os.environ.get("YRFUNCID", "")
+    if conf.cpp_function_id == "":
+        conf.cpp_function_id = os.environ.get("YR_CPP_FUNCID", "")
+    if conf.server_address == "":
+        conf.server_address = os.environ.get("YR_SERVER_ADDRESS", "")
+    if conf.ds_address == "":
+        conf.ds_address = os.environ.get("YR_DS_ADDRESS", "")
+    return conf
+
+
+def _check_function_urn(on_cloud, value):
+    if value is None:
+        value = ''
+    if on_cloud and value == '':
+        return False
+    items = value.split(':')
+    if len(items) != _URN_LENGTH:
+        raise ValueError("invalid function id")
+    return True
+
+
+def _get_function_id_by_function_name(function_name, function_id):
+    """Get function id by auto function"""
+    if function_name is None or function_name == "":
+        return function_id
+    return _PREFIX_OF_FUNCTION_ID + function_name + _SUFFIX_OF_FUNCTION_ID
+
+
+@dataclass
+class ClientInfo:
+    """
+    Used to store yr client info
+    """
+    job_id: str
+
+
+@dataclass
+class FunctionInfo:
+    """
+    Used to store yr function info
+    """
+    function_id: str
+    function_name: str
+    app_id: str
+    app_key: str
+
+
+@Singleton
+class ConfigManager:
+    """
+    Config manager singleton
+
+    Attributes:
+        function_id: function id which you deploy
+        server_address: System cluster address.
+        ds_address: DataSystem address.
+        app_id: System cluster app id, required when authentication
+            is enabled.
+        app_key: System cluster app key, required when authentication
+            is enabled.
+        on_cloud: only True when initialized in runtime
+        log_level: yr api log level, default: WARNING
+    """
+
+    def __init__(self):
+        self.__function_id = None
+        self.__all_function_id = {}
+        self.__server_address = None
+        self.__ds_address = None
+        self.__recycle_time = None
+        self.__connection_nums = None
+        self.__runtime_id = None
+        self.app_id = None
+        self.app_key = None
+        self.is_init = False
+        self.on_cloud = False
+        self.__log_level = logging.WARNING
+        self.job_id = None
+        self.function_name = None
+        self.invoke_timeout = None
+        self.local_mode = False
+        self.code_dir = None
+        self.__in_cluster = False
+        self.__tls_config = None
+        self.__auto = False
+        self.__deployment_config = DeploymentConfig()
+
+    @property
+    def auto(self) -> bool:
+        """ is auto deployment mode """
+        return self.__auto
+
+    @property
+    def deployment_config(self) -> DeploymentConfig:
+        """ needed when auto=True, used to define deployment details. """
+        return self.__deployment_config
+
+    @property
+    def in_cluster(self) -> bool:
+        """if True, use the in-cluster datasystem client"""
+        return self.__in_cluster
+
+    @property
+    def log_level(self):
+        """
+        YR api log level
+        """
+        return self.__log_level
+
+    @log_level.setter
+    def log_level(self, value):
+        """
+        YR api log level
+        """
+        if isinstance(value, str):
+            value = value.upper()
+        self.__log_level = value
+
+    @property
+    def function_id(self):
+        """
+        Get function id
+        """
+        return self.__function_id
+
+    @function_id.setter
+    def function_id(self, value: str):
+        """
+        Set function id
+
+        Args:
+            value (str): The function id which the user deploys
+        """
+        if _check_function_urn(self.on_cloud, value):
+            self.__function_id = value
+            self.function_name = utils.get_function_from_urn(value)
+
+    @property
+    def all_function_id(self):
+        """
+        Get all function id
+        """
+        return self.__all_function_id
+
+    @all_function_id.setter
+    def all_function_id(self, value: dict):
+        """
+        Set function id dict
+        example: <"cpp", "sn:cn:yrk:12345678901234561234567890123456:function:0-test-cpp:$latest">
+
+        Args:
+            value (dict): The function id dictionary
+        """
+        for urn in value.values():
+            if urn != '':
+                _check_function_urn(self.on_cloud, urn)
+
+        self.__all_function_id = value
+
+    @property
+    def server_address(self):
+        """
+        Get server address
+        """
+        return self.__server_address
+
+    @server_address.setter
+    def server_address(self, value: str):
+        """
+        Set server address
+
+        Args:
+            value (str): System cluster ip
+        """
+        if self.on_cloud:
+            return
+        if utils.validate_ip(value):
+            if self.__in_cluster:
+                self.__server_address = value + ":" + _DEFAULT_IN_CLUSTER_CLUSTER_PORT
+            else:
+                self.__server_address = value + ":" + _DEFAULT_CLUSTER_PORT
+            return
+        _, _ = utils.validate_address(value)
+        self.__server_address = value
+
+    @property
+    def ds_address(self):
+        """
+        Get datasystem address
+        """
+        return self.__ds_address
+
+    @ds_address.setter
+    def ds_address(self, value: str):
+        """
+        Set datasystem address
+
+        Args:
+            value (str): Datasystem worker address <ip>:<port> or <ip>,
+                default port: 31501
+        """
+        if self.on_cloud:
+            return
+        if utils.validate_ip(value):
+            if self.on_cloud or self.__in_cluster:
+                self.__ds_address = value + ":" + _DEFAULT_DS_PORT
+            else:
+                self.__ds_address = value + ":" + _DEFAULT_DS_PORT_OUTER
+            return
+        _, _ = utils.validate_address(value)
+        self.__ds_address = value
+
+    @property
+    def connection_nums(self):
+        """
+        Get connection_nums
+        """
+        return self.__connection_nums
+
+    @connection_nums.setter
+    def connection_nums(self, value: int):
+        """
+        Set connection_nums
+
+        Args:
+            value (int): max connection number
+        """
+        if not isinstance(value, int):
+            raise TypeError(f"connection_nums {type(value)} type error, 'int' is expected.")
+        if value < 1:
+            raise ValueError(f"invalid connection_nums value, expect connection_nums >= 1, actual {value}")
+
+        self.__connection_nums = value
+
+    @property
+    def recycle_time(self):
+        """
+        Get recycle time
+        """
+        if self.__recycle_time:
+            return self.__recycle_time
+        return DEFAULT_RECYCLE_TIME
+
+    @recycle_time.setter
+    def recycle_time(self, value: int):
+        """
+        Set recycle time
+
+        Args:
+            value (int): instance recycle period
+        """
+        if not isinstance(value, int):
+            raise TypeError(f"recycle_time {type(value)} type error, 'int' is expected.")
+        if not 1 <= value <= 300:
+            raise ValueError(f"invalid recycle_time value, expect 1 <= time <= 300, actual {value}")
+
+        self.__recycle_time = value
+
+    @property
+    def 
runtime_id(self): + """ + Get runtime id + """ + return self.__runtime_id + + @runtime_id.setter + def runtime_id(self, value: str): + """ + Set runtime id + + Args: + value (str): runtime id + """ + self.__runtime_id = value + + @property + def tls_config(self): + """ + Get tls config + """ + return self.__tls_config + + def init(self, conf: Config): + """ + Init the ConfigManager + + Args: + conf (Config): The yr api config which set by user. + """ + conf = _get_from_env(conf) + self.__auto = conf.auto + self.__deployment_config = conf.deployment_config + self.connection_nums = conf.connection_nums + self.log_level = conf.log_level + self.invoke_timeout = conf.invoke_timeout + self.__tls_config = conf.tls_config + if conf.job_id != "": + self.job_id = conf.job_id + else: + # Use 8-bit uuid to reduce the length of requestID + self.job_id = "job-" + str(uuid.uuid4().hex)[:9] + utils.set_job_id(self.job_id) + self.recycle_time = conf.recycle_time + self.local_mode = conf.local_mode + if self.local_mode: + return + self.on_cloud = conf.on_cloud + self.__in_cluster = conf.in_cluster + conf.function_id = _get_function_id_by_function_name(conf.auto_function_name, conf.function_id) + conf.cpp_function_id = _get_function_id_by_function_name(conf.cpp_auto_function_name, conf.cpp_function_id) + self.function_id = conf.function_id + if conf.function_id is None or conf.function_id == "": + self.function_name = conf.function_name + + self.all_function_id = { + utils.LANGUAGE_CPP: conf.cpp_function_id, + utils.LANGUAGE_PYTHON: conf.function_id, + } + self.server_address = conf.server_address + self.ds_address = conf.ds_address + self.code_dir = conf.code_dir + if conf.app_id == "": + self.app_id = "accessservice" + else: + self.app_id = conf.app_id + if conf.app_key == "": + self.app_key = "a9abff86a849f0d40f5a399252c05def4d744a28d3ad27fd73c80db11b706ac8" + else: + self.app_key = conf.app_key + + def get_function_info(self): + """ + Get function info which user deploy + """ + return FunctionInfo(function_id=self.function_id, function_name=self.function_name, + app_id=self.app_id, app_key=self.app_key) + + def get_function_id_by_language(self, language): + """ + Get function id by language from function id dict + + Args: + language (str): The language of target function or class + """ + return self.__all_function_id.get(language) + + +def check_init(func): + """ + The decorator to check whether yr api init. + """ + + @functools.wraps(func) + def wrapper(*args, **kw): + if not ConfigManager().is_init: + raise RuntimeError("system not initialized") + return func(*args, **kw) + + return wrapper diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/custom/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/custom/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..9c9dd874f24d862a78307ba03a8c60fed6fd6d1a --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/custom/__init__.py @@ -0,0 +1,14 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. 
+# See the Mulan PSL v2 for more details. \ No newline at end of file diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/custom/mpctl.py b/dsoftbus/dist_executor/modules/runtime/python/yr/custom/mpctl.py new file mode 100644 index 0000000000000000000000000000000000000000..b70dc0f85407a01be6978c224fe33b9d039b7fb7 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/custom/mpctl.py @@ -0,0 +1,170 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" apply mpctl start/stop method """ +import os +import subprocess +import secrets +import string +from xml.etree import ElementTree + +from yr.config import ConfigManager +from yr.utils import Singleton + +RUNTIME_MGR_PORT = "runtime_mgr_port" +DEFAULT_RANDOM_LENGTH = 16 + + +@Singleton +class AutoDeploymentState: + """ + DeploymentState singleton + """ + + def __init__(self): + self.__state = False + + @property + def state(self) -> bool: + """ get state """ + return self.__state + + @state.setter + def state(self, val: bool) -> bool: + """ set state + + Args: + val (bool): auto deployment state + """ + self.__state = val + + def reset(self): + """ + reset state + """ + self.__state = False + + +def _gen_random_str(length=DEFAULT_RANDOM_LENGTH) -> str: + system_random = secrets.SystemRandom() + + if length < DEFAULT_RANDOM_LENGTH // 2: + return ''.join(system_random.sample(string.ascii_lowercase, length)) + + digits_count = system_random.randint(1, length // 2) + uppercase_count = system_random.randint(1, length - digits_count - 2) + lowercase_count = length - digits_count - uppercase_count + + contents = system_random.sample(string.digits, digits_count) + \ + system_random.sample(string.ascii_uppercase, uppercase_count) + \ + system_random.sample(string.ascii_lowercase, lowercase_count) + + system_random.shuffle(contents) + return ''.join(contents) + + +def _get_mpctl_path() -> str: + """ + get mpctl path + """ + current_path = os.path.abspath(__file__) + mpctl_path = os.path.join(os.path.dirname(current_path), "../../../../bin/mpctl") + mpctl_path = os.path.realpath(mpctl_path) + if not os.path.isfile(mpctl_path): + raise FileNotFoundError(f"{mpctl_path} not found") + return mpctl_path + + +def _get_config_path() -> str: + """ + get config.xml path + """ + current_path = os.path.abspath(__file__) + config_path = os.path.join(os.path.dirname(current_path), "../../../../config/config.xml") + config_path = os.path.realpath(config_path) + if not os.path.isfile(config_path): + raise FileNotFoundError(f"{config_path} not found") + return config_path + + +def _is_runtime_manager_running() -> bool: + def _get_runtime_mgr_port() -> str: + config_path = _get_config_path() + tree_root = ElementTree.parse(config_path) + element = tree_root.findall(RUNTIME_MGR_PORT) + if not element: + raise RuntimeError("parse runtime_mgr_port from config.xml failed") + return element[0].text + + runtime_mgr_port = _get_runtime_mgr_port() + cmd = f"lsof -i:{runtime_mgr_port}" + exit_code, _ = subprocess.getstatusoutput(cmd) + 
if exit_code != 0: + return False + return True + + +def _get_resource_option(): + resources = { + "--cpu": ConfigManager().deployment_config.cpu, + "--mem": ConfigManager().deployment_config.mem, + "--datamem": ConfigManager().deployment_config.datamem, + } + return " ".join([f"{name} {value}" for name, value in resources.items() if value > 0]) + + +def _get_spill_option(): + options = { + "--spillPath": ConfigManager().deployment_config.spill_path, + "--spillLimit": ConfigManager().deployment_config.spill_limit, + } + return " ".join([f"{name} {value}" for name, value in options.items() if value]) + + +def start(): + """ + start distribute-executor + """ + if not ConfigManager().auto: + return + + if _is_runtime_manager_running(): + return + + AutoDeploymentState().state = True + ip_addr, _ = ConfigManager().server_address.split(':') + cmd = f"{_get_mpctl_path()} start -m -a {ip_addr} -l {ip_addr} -p {_gen_random_str()} " \ + f"{_get_resource_option()} {_get_spill_option()}" + exit_code, _ = subprocess.getstatusoutput(cmd) + if exit_code != 0: + stop() + raise RuntimeError("failed to start distribute-executor") + + +def stop(): + """ + stop distribute-executor + """ + if not ConfigManager().auto: + return + + if not AutoDeploymentState().state: + return + + cmd = f"{_get_mpctl_path()} stop" + exit_code, _ = subprocess.getstatusoutput(cmd) + AutoDeploymentState().state = False + if exit_code != 0: + raise RuntimeError("failed to stop distribute-executor") diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..6ed131cc3b330010208e1e2e2959dbce2b9de985 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""decorator""" diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/executor.py b/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/executor.py new file mode 100644 index 0000000000000000000000000000000000000000..6577ad686b89945e2063378bb111093acd4830ae --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/executor.py @@ -0,0 +1,53 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
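The start() helper above shells out to mpctl with flags assembled from DeploymentConfig. How the option strings come together, with placeholder values:

    # Mirrors _get_resource_option/_get_spill_option above; values are placeholders.
    resources = {"--cpu": 2000, "--mem": 4096, "--datamem": 1024}
    opts = " ".join(f"{name} {value}" for name, value in resources.items() if value > 0)
    print(f"mpctl start -m -a 127.0.0.1 -l 127.0.0.1 {opts}")
    # -> mpctl start -m -a 127.0.0.1 -l 127.0.0.1 --cpu 2000 --mem 4096 --datamem 1024
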
+ +""" +Function executor +""" + +import logging +from dataclasses import dataclass + +from yr.runtime.task_spec import Task + +_MIN_CHECK_TIME = 0.1 +_CHECK_RECYCLE_RATIO = 0.1 +_MAX_RETRY_TIME = 3 +_MAX_QUEUE_SIZE = 200 +_TIME_CALIBRATE = 1E-6 + +_logger = logging.getLogger(__name__) + + +@dataclass +class Request: + """ + Request includes the request parameter task and callback. + """ + task: Task + callback: None + + +def get_func_request(task, callback): + """ + Assemble the request array + """ + return Request(task=task, callback=callback) + + +def _callback(data, err=None, future_map=None, future=None, instance_id=None, request=None): + future.set_result("") + request.callback(data, err) + future_map.pop(instance_id) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/function_proxy.py b/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/function_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..c9a534604336d8f10796738c23b3155f88e55cbc --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/function_proxy.py @@ -0,0 +1,167 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +Function decorator +""" +import logging +import types +from functools import wraps + +import yr +from yr import exception +from yr import signature, utils +from yr.objref_counting_manager import ObjRefCountingManager +from yr.rpc import core_service_pb2 +from yr.runtime.runtime import Runtime +from yr.runtime.task_spec import InvokeType, Task, InvokeOptions +from yr.storage.reference_count import ReferenceCount +from yr.utils import ObjectDescriptor, LANGUAGE_CPP, CrossLanguageInfo + +_logger = logging.getLogger(__name__) + + +class FunctionProxy: + """ + Use to decorate user function + """ + + def __init__(self, func, cross_language_info=None, invoke_options=None, return_nums=None): + """Initialize an Initiator object""" + self.cross_language_info = cross_language_info + self.invoke_options = invoke_options + self.__code_ref__ = None + self.__original_func__ = func + if return_nums is None: + self.return_nums = 1 + else: + if not isinstance(return_nums, int): + raise TypeError(f"invalid return_nums type: {type(return_nums)}, should be an int") + if return_nums < 0 or return_nums > 100: + raise RuntimeError(f"invalid return_nums: {return_nums}, should be an integer between 0 and 100") + self.return_nums = return_nums + if self.cross_language_info is not None: + def _cross_invoke_proxy(*args, **kwargs): + return self._invoke(func, args=args, kwargs=kwargs) + + self.invoke = _cross_invoke_proxy + else: + @wraps(func) + def _invoke_proxy(*args, **kwargs): + return self._invoke(func, args=args, kwargs=kwargs) + + self.invoke = _invoke_proxy + + def __call__(self, *args, **kwargs): + """ + invalid call + """ + raise RuntimeError("invoke function cannot be called directly") + + def __getstate__(self): + self.__code_ref__ = None + return self.__dict__ + + def options(self, invoke_options: InvokeOptions): + """ + Set 
user invoke options + Args: + invoke_options: invoke options for users to set resources + """ + self.invoke_options = invoke_options + return self + + def get_original_func(self): + """ + The original function + """ + return self.__original_func__ + + def _invoke(self, func, args=None, kwargs=None): + """ + The real realization of the invoke function + """ + if self.cross_language_info is None and not ReferenceCount().is_obj_in_ctx(self.__code_ref__): + self.__code_ref__ = yr.put(func) + _logger.debug(f"[Reference Counting] put code with id = {self.__code_ref__.id}, " + f"functionName={func.__qualname__}") + + sig = None + if func is not None: + sig = signature.get_signature(func) + + args_list = signature.package_args(sig, args, kwargs) + task_id = utils.generate_task_id() + future = Runtime().rt.task_mgr.add_task(task_id) + + def callback(response, err=None): + if future.done(): + return + if err is not None: + exception.deal_with_yr_error(future, err) + return + + if not isinstance(response, core_service_pb2.InvokeResponse): + resp = core_service_pb2.InvokeResponse() + resp.ParseFromString(response) + else: + resp = response + + if resp.code == 0: + future.set_result(resp.returnObjectID) + else: + exception.deal_with_error(future, resp.code, resp.message) + + if self.cross_language_info is None: + function_descriptor = ObjectDescriptor.get_from_function(func) + target_function_key = "" + else: + function_descriptor = ObjectDescriptor(function_name=self.cross_language_info.function_name, + target_language=self.cross_language_info.target_language) + target_function_key = self.cross_language_info.function_key + + obj_list = ObjRefCountingManager().create_object_ref(task_id, self.return_nums) + ObjRefCountingManager().join_record(task_id, self.__code_ref__) + task = Task(task_id=task_id, + object_descriptor=function_descriptor, + code_id=self.__code_ref__.id if self.__code_ref__ is not None else "", + invoke_options=self.invoke_options, + args_list=args_list, + target_function_key=target_function_key, + return_obj_list=obj_list, + invoke_type=InvokeType.INVOKE_NORMAL_FUNCTION, + trace_id=Runtime().rt.get_trace_id()) + Runtime().rt.submit_task(task, callback) + return obj_list[0] if self.return_nums == 1 else obj_list + + +def make_decorator(invoke_options=None, return_nums=None): + """ + Make decorator for invoke function + """ + + def decorator(func): + if isinstance(func, types.FunctionType): + return FunctionProxy(func, invoke_options=invoke_options, return_nums=return_nums) + raise RuntimeError("@yr.invoke decorator must be applied to a function") + + return decorator + + +def make_cpp_function_proxy(function_name, function_key): + """ + Make proxy for invoke cpp function + """ + return FunctionProxy(None, CrossLanguageInfo(function_name, function_key, LANGUAGE_CPP)) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/instance_proxy.py b/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/instance_proxy.py new file mode 100644 index 0000000000000000000000000000000000000000..2021fe5924195eef3a1b0fb7615f847e593fda54 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/decorator/instance_proxy.py @@ -0,0 +1,448 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. 
+# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +Instance decorator +""" + +import concurrent.futures +import inspect +import logging +import weakref +from functools import partial + +import yr +from yr import exception +from yr import signature +from yr import utils +from yr.common.response_helper import make_callback_for_create +from yr.config import ConfigManager +from yr.exception import deal_with_yr_error +from yr.instance_ref import InstanceRef +from yr.objref_counting_manager import ObjRefCountingManager +from yr.rpc.core_service_pb2 import InvokeResponse +from yr.runtime.runtime import Runtime +from yr.runtime.task_spec import InvokeType, Task, InvokeOptions +from yr.serialization import register_unpack_hook, register_pack_hook +from yr.storage.reference_count import ReferenceCount + +_logger = logging.getLogger(__name__) + + +class InstanceCreator: + """ + User instance creator + """ + + def __init__(self): + self.__user_class__ = None + self.__user_class_descriptor__ = None + self.__user_class_methods__ = {} + self.__code_ref__ = None + self.__invoke_options__ = None + self.__need_in_order__ = False + self.__target_function_key__ = None + + @classmethod + def create_from_user_class(cls, user_class, invoke_options): + """ + Create from user class + """ + + class DerivedInstanceCreator(cls, user_class): + pass + + name = f"YRInstance({user_class.__name__})" + DerivedInstanceCreator.__module__ = user_class.__module__ + DerivedInstanceCreator.__name__ = name + DerivedInstanceCreator.__qualname__ = name + self = DerivedInstanceCreator.__new__(DerivedInstanceCreator) + self.__user_class__ = user_class + self.__invoke_options__ = invoke_options + if invoke_options is not None: + self.__need_in_order__ = invoke_options.need_order() + else: + self.__need_in_order__ = True + self.__user_class_descriptor__ = utils.ObjectDescriptor.get_from_class(user_class) + self.__user_class_descriptor__.target_language = utils.LANGUAGE_PYTHON + class_methods = inspect.getmembers(user_class, + utils.is_function_or_method) + self.__user_class_methods__ = dict(class_methods) + self.__code_ref__ = None + + function_urn = ConfigManager().get_function_id_by_language(utils.LANGUAGE_PYTHON) + self.__target_function_key__ = None + if function_urn is not None: + self.__target_function_key__ = utils.get_function_from_urn(function_urn) + return self + + @classmethod + def create_cpp_user_class(cls, cpp_class): + """ + Create a cpp user class + """ + self = cls() + self.__user_class__ = None + self.__user_class_descriptor__ = utils.ObjectDescriptor(class_name=cpp_class.get_class_name(), + function_name=cpp_class.get_factory_name(), + target_language=utils.LANGUAGE_CPP) + self.__user_class_methods__ = None + self.__code_ref__ = None + self.__target_function_key__ = cpp_class.get_function_key() + return self + + def options(self, invoke_options: InvokeOptions): + """ + Set user invoke options + Args: + invoke_options: invoke options for users to set resources + """ + self.__invoke_options__ = invoke_options + self.__need_in_order__ = invoke_options.need_order() + return self + + def get_original_cls(self): + """ + The original class + """ + return self.__user_class__ + + def invoke(self, *args, **kwargs): + """ + Create 
a instance in cluster + """ + return self._invoke(args=args, kwargs=kwargs) + + def _get_designated_instance_id(self, task_id): + instance_id = None + if self.__invoke_options__: + designated_instance_id = self.__invoke_options__.build_designated_instance_id() + instance_id = InstanceRef(designated_instance_id, task_id) + return instance_id + + def _invoke(self, args=None, kwargs=None): + is_cpp_invoke = self.__user_class_descriptor__.target_language == utils.LANGUAGE_CPP + if not is_cpp_invoke and not ReferenceCount().is_obj_in_ctx(self.__code_ref__): + self.__code_ref__ = yr.put(self.__user_class__) + _logger.info(f"[Reference Counting] put code with id = {self.__code_ref__.id}, " + f"className={self.__user_class_descriptor__.class_name}") + # __init__ existed when user-defined + if self.__user_class_methods__ is not None and '__init__' in self.__user_class_methods__: + sig = signature.get_signature(self.__user_class_methods__.get('__init__'), + ignore_first=True) + else: + sig = None + args_list = signature.package_args(sig, args, kwargs) + + future = concurrent.futures.Future() + + task_id = utils.generate_task_id() + instance_id = self._get_designated_instance_id(task_id) + ObjRefCountingManager().join_record(task_id, self.__code_ref__) + Runtime().rt.submit_task( + Task(task_id=task_id, + object_descriptor=self.__user_class_descriptor__, + code_id=self.__code_ref__.id if self.__code_ref__ is not None else "", + invoke_options=self.__invoke_options__, + args_list=args_list, + target_function_key=self.__target_function_key__, + invoke_type=InvokeType.CREATE_INSTANCE, + trace_id=Runtime().rt.get_trace_id(), + instance_id=instance_id), + make_callback_for_create(future)) + + ref = InstanceRef(future, task_id, is_user_defined_id=instance_id is not None) + + need_order = True + if self.__invoke_options__: + need_order = self.__invoke_options__.need_order() + + return InstanceProxy(ref, + self.__user_class_descriptor__, + self.__user_class_methods__, + self.__target_function_key__, + need_order) + + +class InstanceProxy: + """ + Use to decorate a user class + """ + + def __init__(self, instance_id, class_descriptor, class_methods, target_function_key, need_order=True): + self._class_descriptor = class_descriptor + self.instance_id = instance_id + self._class_methods = class_methods + self._method_descriptor = {} + self.task_serial_num = 0 + self.__instance_activate__ = True + self.__target_function_key__ = target_function_key + self.need_order = need_order + + if self._class_methods is not None: + for method_name, value in self._class_methods.items(): + function_descriptor = utils.ObjectDescriptor(module_name=self._class_descriptor.module_name, + function_name=method_name, + class_name=self._class_descriptor.class_name) + self._method_descriptor[method_name] = function_descriptor + sig = signature.get_signature(value, ignore_first=True) + return_nums = value.__return_nums__ if hasattr(value, "__return_nums__") else 1 + method = MethodProxy(self, self.instance_id, + self._method_descriptor.get(method_name), + sig, return_nums) + setattr(self, method_name, method) + + def __getattr__(self, method_name): + if self._class_descriptor.target_language == utils.LANGUAGE_PYTHON: + raise AttributeError(f"'{self._class_descriptor.class_name}' object has " + f"no attribute '{method_name}'") + + function_name = method_name + if self._class_descriptor.target_language == utils.LANGUAGE_CPP: + function_name = "&" + self._class_descriptor.class_name + "::" + method_name + method_descriptor = 
utils.ObjectDescriptor(module_name=self._class_descriptor.module_name,
+                                               function_name=function_name,
+                                               class_name=self._class_descriptor.class_name,
+                                               target_language=self._class_descriptor.target_language)
+        return MethodProxy(
+            self,
+            self.instance_id,
+            method_descriptor,
+            None,
+            1)
+
+    def __reduce__(self):
+        state = self.serialization_(False)
+        return InstanceProxy.deserialization_, (state,)
+
+    @classmethod
+    def deserialization_(cls, state):
+        """
+        deserialization to rebuild instance proxy
+        """
+        class_method = None
+        if utils.CLASS_METHOD in state:
+            class_method = state[utils.CLASS_METHOD]
+        target_function_key = None
+        if utils.FUNCTION_KEY in state:
+            target_function_key = state[utils.FUNCTION_KEY]
+        function_name = state[utils.FUNC_NAME] if utils.FUNC_NAME in state else ""
+        need_order = state[utils.NEED_ORDER] if utils.NEED_ORDER in state else True
+        return cls(InstanceRef(state[utils.INSTANCE_ID]),
+                   utils.ObjectDescriptor(state[utils.MODULE_NAME], state[utils.CLASS_NAME],
+                                          function_name, state[utils.TARGET_LANGUAGE]),
+                   class_method,
+                   target_function_key,
+                   need_order)
+
+    def serialization_(self, is_cross_language: bool):
+        """
+        serialization of instance proxy
+        """
+        info_ = {utils.INSTANCE_ID: self.instance_id.id}
+        if is_cross_language is False:
+            info_[utils.CLASS_METHOD] = self._class_methods
+
+        info_[utils.FUNCTION_KEY] = \
+            self.__target_function_key__ if self.__target_function_key__ is not None else ""
+        info_[utils.NEED_ORDER] = self.need_order
+        # merge the descriptor fields into the serialized state
+        state = {**info_, **self._class_descriptor.to_dict()}
+        return state
+
+    def terminate(self):
+        """
+        Terminate the instance
+        """
+        if not Runtime().rt or not self.is_activate():
+            return
+
+        if self.need_order and self.instance_id.is_user_defined_id:
+            serial_manager = Runtime().rt.get_serial_manager()
+            serial_manager.del_instance_serial_num(self.instance_id.id)
+
+        Runtime().rt.kill_instance(self.instance_id)
+        self.__instance_activate__ = False
+        _logger.info(f"{self.instance_id} is terminated")
+
+    def is_activate(self):
+        """
+        Return the instance status
+        """
+        return self.__instance_activate__
+
+    def get_function_key(self):
+        """
+        Return the target function key
+        """
+        return self.__target_function_key__
+
+
+@register_pack_hook
+def msgpack_encode_hook(obj):
+    """
+    register msgpack encode hook
+    """
+    if isinstance(obj, InstanceProxy):
+        return obj.serialization_(True)
+    return obj
+
+
+@register_unpack_hook
+def msgpack_decode_hook(obj):
+    """
+    register msgpack decode hook
+    """
+    if utils.INSTANCE_ID in obj:
+        return InstanceProxy.deserialization_(obj)
+    return obj
+
+
+class MethodProxy:
+    """
+    Used to decorate a user class method
+    """
+
+    def __init__(self, instance, instance_id, method_descriptor, sig, return_nums=1):
+        self._instance_ref = weakref.ref(instance)
+        self._instance_id = instance_id
+        self._method_descriptor = method_descriptor
+        self._signature = sig
+        self.__target_function_key__ = instance.get_function_key()
+        self._return_nums = return_nums
+        if return_nums < 0 or return_nums > 100:
+            raise RuntimeError(f"invalid return_nums: {return_nums}, should be an integer between 0 and 100")
+
+    def invoke(self, *args, **kwargs):
+        """
+        invoke a class method in cluster
+        """
+        return self._invoke(args, kwargs)
+
+    def _invoke(self, args, kwargs):
+        if not self._instance_ref().is_activate():
+            raise RuntimeError("this instance is terminated")
+        args_list = signature.package_args(self._signature, args, kwargs)
+
+        if self._instance_ref().need_order:
if self._instance_id.is_user_defined_id: + serial_manager = Runtime().rt.get_serial_manager() + task_serial_num = serial_manager.get_serial_number(self._instance_id.id) + else: + task_serial_num = self._instance_ref().task_serial_num + self._instance_ref().task_serial_num += 1 + + runtime_id = ConfigManager().runtime_id + task_id = utils.generate_task_id_with_serial_num(runtime_id, task_serial_num) + else: + task_id = utils.generate_task_id() + + future = Runtime().rt.task_mgr.add_task(task_id) + callback = partial(deal_with_response, future) + + obj_list = ObjRefCountingManager().create_object_ref(task_id, self._return_nums) + task = Task(task_id=task_id, + object_descriptor=self._method_descriptor, + instance_id=self._instance_id, + args_list=args_list, + code_id=utils.MEMBER_FUNCTION, + target_function_key=self.__target_function_key__, + invoke_type=InvokeType.INVOKE_MEMBER_FUNCTION, + return_obj_list=obj_list) + Runtime().rt.submit_task(task, callback) + return obj_list[0] if self._return_nums == 1 else obj_list + + +def make_decorator(invoke_options=None): + """ + Make decorator for invoke function + """ + + def decorator(cls): + if inspect.isclass(cls): + return InstanceCreator.create_from_user_class(cls, invoke_options) + raise RuntimeError("@yr.instance decorator must be applied to a class") + + return decorator + + +def make_cpp_instance_creator(cpp_class): + """ + Make cpp_instance creator for invoke function + """ + + return InstanceCreator.create_cpp_user_class(cpp_class) + + +def get_instance_by_name(name, namespace): + """ + Get instance proxy by name and namespace + """ + + task_id = utils.generate_task_id() + future = Runtime().rt.task_mgr.add_task(task_id) + callback = partial(deal_with_response, future) + + named_instance_id = namespace + "-" + name if namespace else name + named_instance_ref = InstanceRef(named_instance_id, task_id, is_user_defined_id=True) + + target_function_key = None + function_urn = ConfigManager().get_function_id_by_language( + utils.LANGUAGE_PYTHON) + if function_urn is not None: + target_function_key = utils.get_function_from_urn(function_urn) + obj_list = ObjRefCountingManager().create_object_ref(task_id) + + task = Task(task_id=task_id, + object_descriptor=utils.ObjectDescriptor(), + instance_id=named_instance_ref, + args_list=[], + code_id="", + target_function_key=target_function_key, + invoke_type=InvokeType.GET_NAMED_INSTANCE_METADATA, + return_obj_list=obj_list) + Runtime().rt.submit_task(task, callback) + + user_class = yr.get(obj_list[0]) + if not inspect.isclass(user_class): + raise RuntimeError("get class metadata error") + + user_class_descriptor = utils.ObjectDescriptor.get_from_class(user_class) + user_class_descriptor.target_language = utils.LANGUAGE_PYTHON + class_methods = inspect.getmembers(user_class, + utils.is_function_or_method) + user_class_methods = dict(class_methods) + + return InstanceProxy(named_instance_ref, user_class_descriptor, + user_class_methods, target_function_key) + + +def deal_with_response(future, data, err=None): + """ + As a callback function to deal with the response + """ + + if err is not None: + deal_with_yr_error(future, err) + return + if isinstance(data, InvokeResponse): + resp = data + else: + resp = InvokeResponse() + resp.ParseFromString(data) + if resp.code == 0: + future.set_result(resp.returnObjectID) + else: + exception.deal_with_error(future, resp.code, resp.message) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/exception.py 
b/dsoftbus/dist_executor/modules/runtime/python/yr/exception.py new file mode 100644 index 0000000000000000000000000000000000000000..84fc1a91c0b8333bb8e531c4eb6435962c445109 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/exception.py @@ -0,0 +1,121 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +"""yr exception type""" +import cloudpickle + +from yr import utils + + +class YRError(Exception): + """base Error""" + + +class CancelError(YRError): + """task cancel error""" + __slots__ = ["__task_id"] + + def __init__(self, task_id: str = ""): + super().__init__() + self.__task_id = task_id + + def __str__(self): + return f"task request has been cancelled: {self.__task_id}" + + +class YRInvokeError(YRError): + """Invoke error""" + + def __init__(self, cause, traceback_str: str): + self.traceback_str = traceback_str + self.cause = cause + + def __str__(self): + return str(self.traceback_str) + + def origin_error(self): + """Return a origin error for invoke task""" + + cause_cls = self.cause.__class__ + if issubclass(YRInvokeError, cause_cls): + return self + + if issubclass(cause_cls, YRError): + return self + + error_msg = str(self) + + class Cls(YRInvokeError, cause_cls): + """New error inherit from origin cause""" + + def __init__(self, cause): + self.cause = cause + self.args = (cause,) + + def __getattr__(self, name): + return getattr(self.cause, name) + + def __str__(self): + return error_msg + + Cls.__name__ = f"YRInvokeError({cause_cls.__name__})" + Cls.__qualname__ = Cls.__name__ + + return Cls(self.cause) + + +class YRequestError(YRError, RuntimeError): + """request failed error""" + __slots__ = ["__code", "__message"] + + def __init__(self, code: int = 0, message: str = ""): + self.__code = code + self.__message = message + super().__init__() + + def __str__(self): + return str(f"failed to request, code: {self.__code}, message: {self.__message}") + + @property + def code(self): + """code""" + return self.__code + + @property + def message(self): + """message""" + return self.__message + + +def deal_with_yr_error(future, err): + """deal with yr invoke error""" + if isinstance(err, YRInvokeError): + future.set_exception(err.origin_error()) + else: + future.set_exception(err) + + +def deal_with_error(future, code, message): + """ + processing request exceptions + """ + try: + obj = cloudpickle.loads(utils.hex_to_binary(message)) + except ValueError: + future.set_exception(YRequestError(code, message)) + return + if isinstance(obj, YRInvokeError): + future.set_exception(obj.origin_error()) + return + future.set_exception(YRequestError(code, str(obj))) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/instance_manage.py b/dsoftbus/dist_executor/modules/runtime/python/yr/instance_manage.py new file mode 100644 index 0000000000000000000000000000000000000000..6942511b5a8d7d78e87d92515c28f1933c8ad670 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/instance_manage.py @@ -0,0 +1,40 @@ +#!/usr/bin/env python +# 
coding=utf-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +""" +class instance manager +""" +from yr.utils import Singleton + + +@Singleton +class InstanceManager: + """ + InstanceManager stores class instance + """ + + def __init__(self): + self.__instance_map = {} + + def load_instance(self, instance_id): + """ + load class instance + """ + return self.__instance_map.get(instance_id) + + def save_instance(self, instance_id, instance_object): + """ + save class instance + """ + self.__instance_map[instance_id] = instance_object diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/instance_ref.py b/dsoftbus/dist_executor/modules/runtime/python/yr/instance_ref.py new file mode 100644 index 0000000000000000000000000000000000000000..9467a4622385450e7ebfd20fd8b307ca6168a8b5 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/instance_ref.py @@ -0,0 +1,102 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
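A short sketch of the InstanceManager singleton above; the instance id and object are made up for illustration:

from yr.instance_manage import InstanceManager

InstanceManager().save_instance("inst-001", object())      # hypothetical id
obj = InstanceManager().load_instance("inst-001")          # same singleton map
assert InstanceManager().load_instance("missing") is None  # dict.get: no KeyError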
+"""InstanceRef""" +import concurrent.futures +from typing import Union + + +class InstanceRef: + """InstanceRef + + Notify: InstanceRef not thread safe + """ + _id = None + _id_future = None + _task_id = None + + def __init__(self, instance_id: Union[str, concurrent.futures.Future], task_id=None, is_user_defined_id=False): + if isinstance(instance_id, str): + self._id = instance_id + elif isinstance(instance_id, concurrent.futures.Future): + self._id_future = instance_id + else: + raise TypeError(f"Unexpected type for id {instance_id}") + self._task_id = task_id + self._is_user_defined_id = is_user_defined_id + + def __str__(self): + return self.id + + @property + def is_user_defined_id(self): + """task id""" + return self._is_user_defined_id + + @property + def task_id(self): + """task id""" + return self._task_id + + @property + def id(self): + """object id""" + self._wait_for_id() + return self._id + + @property + def is_failed(self) -> bool: + """return task result""" + f = self._id_future + if not f or not f.done(): + return False + if f.exception(): + return True + return False + + def get_id(self, timeout=None): + """get object id""" + self._wait_for_id(timeout) + return self.id + + def get_future(self): + """get future""" + return self._id_future + + def wait(self, timeout=None): + """wait task done""" + self._wait_for_id(timeout) + + def done(self): + """Return True if the obj future was cancelled or finished executing.""" + if self._id_future: + return self._id_future.done() + return True + + def cancel(self): + """cancel the obj future""" + if self._id_future: + self._id_future.cancel() + + def on_complete(self, callback): + """register callback""" + if self._id_future and not self._id_future.done(): + self._id_future.add_done_callback(callback) + return + callback(self._id_future) + + def _wait_for_id(self, timeout=None): + if self._id_future: + self._id = self._id_future.result(timeout=timeout) + self._id_future = None diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..762938b7de09e115abeedbe39cf6e3eaeaf4c21e --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +invoke client +""" diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/async_http_client.py b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/async_http_client.py new file mode 100644 index 0000000000000000000000000000000000000000..8b17f1dd1ee48981bd64f14ebece3d5ad815708a --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/async_http_client.py @@ -0,0 +1,138 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. 
+# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +async http client +""" +import atexit +import http +import threading +from concurrent.futures import ThreadPoolExecutor +from concurrent.futures import thread +import logging +from dataclasses import dataclass +import traceback + +import urllib3 +from urllib3 import HTTPConnectionPool, HTTPSConnectionPool + +from yr.exception import YRInvokeError +from yr.config import ConfigManager + +_CONNECT_TIMEOUT = 5 +_READ_TIMEOUT = 900 +_GETCON_TIMEOUT = 295 + +_logger = logging.getLogger(__name__) + + +@dataclass +class HttpTask: + """http task""" + task_id: str + url: str + headers: dict + payload: str + connect_timeout: int = _CONNECT_TIMEOUT + read_timeout: int = _READ_TIMEOUT + + +class AsyncHttpClient: + """ + async http client for frontend + """ + + def __init__(self): + self.client = None + self.pool = None + self.timeout = None + self._shutdown = False + self._shutdown_lock = threading.Lock() + atexit.register(self._python_exit) + + def init(self, address, timeout): + """ + init async http client + """ + ip, port = address.split(":") + connection_nums = ConfigManager().connection_nums + self.pool = ThreadPoolExecutor(max_workers=connection_nums, thread_name_prefix="AsyncHttpClient") + self.timeout = timeout + pool_class = HTTPConnectionPool + kw = {} + if ConfigManager().tls_config is not None: + pool_class = HTTPSConnectionPool + kw.update({ + 'cert_reqs': 'CERT_REQUIRED', + 'ca_certs': ConfigManager().tls_config.root_cert_path, + 'cert_file': ConfigManager().tls_config.module_cert_path, + 'key_file': ConfigManager().tls_config.module_key_path, + 'assert_hostname': ConfigManager().tls_config.server_name + }) + _logger.info(f"succeed to load tls config, on adder:{ip}:{port}") + self.client = pool_class(host=ip, port=port, block=False, retries=3, maxsize=connection_nums, **kw) + + def submit_invoke_request(self, task: HttpTask, callback): + """ + submit invoke request to frontend + """ + with self._shutdown_lock: + if self._shutdown: + callback(None, RuntimeError("client is shutdown")) + _logger.warning('cannot schedule new futures after shutdown') + return + + self.pool.submit(self._call_request, task, callback) + + def shutdown(self): + """ + clear pool + """ + with self._shutdown_lock: + self.client.close() + self.pool.shutdown(wait=False) + # clear threads for this ThreadPoolExecutor, avoid blocking process by join thread + for k in self.pool._threads: + thread._threads_queues.pop(k) + self.pool._threads.clear() + self._shutdown = True + + def _call_request(self, task: HttpTask, callback): + """" + Call Invoke Request. 
+ """ + _logger.debug(f"start to request: {task.task_id}") + try: + resp = self.client.request('POST', task.url, redirect=False, + body=task.payload, headers=task.headers, + timeout=urllib3.Timeout(connect=task.connect_timeout, + read=task.read_timeout), + pool_timeout=_GETCON_TIMEOUT) + except urllib3.exceptions.HTTPError as err: + callback("", YRInvokeError(err, traceback.format_exc())) + return + _logger.debug(f"end to request: {task.task_id}") + if resp.status != http.HTTPStatus.OK: + if len(resp.data) == 0: + callback(None, YRInvokeError(RuntimeError( + f"failed to send request, body is empty. code: {resp.status}"), traceback.format_exc())) + return + try: + callback(resp.data) + except Exception as err: + callback(None, YRInvokeError(err, traceback.format_exc())) + + def _python_exit(self): + self._shutdown = True diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/invoke_client.py b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/invoke_client.py new file mode 100644 index 0000000000000000000000000000000000000000..1dca57ebd800c9e9b48de920ba564a5597454582 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/invoke_client.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +"""Invoke client""" +import decimal +import json +import logging +import time +from abc import abstractmethod, ABCMeta + +from google.protobuf import json_format + +from yr import utils +from yr.invoke_client.yr_client import auth_tools +from yr.runtime.task_spec import TaskSpec + +_logger = logging.getLogger(__name__) + + +class InvokeClient(metaclass=ABCMeta): + """Invoke client for in cluster or out cluster""" + + @abstractmethod + def create(self, task: TaskSpec) -> None: + """create instance""" + + @abstractmethod + def invoke(self, task: TaskSpec) -> None: + """invoke""" + + @abstractmethod + def kill(self, task: TaskSpec) -> None: + """kill""" + + @abstractmethod + def exit(self) -> None: + """exit instance""" + + @abstractmethod + def save_state(self, state) -> str: + """save instance state""" + + @abstractmethod + def load_state(self, checkpoint_id) -> bytes: + """load instance state""" + + @abstractmethod + def clear(self) -> None: + """clear""" + + +def create_headers(path, func_info, payload, invoke_id): + """create headers""" + sign_struct = { + "method": "POST", + "path": path, + "body": payload, + "appid": func_info.app_id, + "timestamp": str(decimal.Decimal(time.time() * 1000).quantize(decimal.Decimal('0'), decimal.ROUND_HALF_UP)) + } + authorization = auth_tools.get_authorization(func_info.app_key.encode(), + sign_struct) + headers = {utils.CONTENT_TYPE: utils.CONTENT_TYPE_APPLICATION_JSON, + utils.HEADER_EVENT_SOURCE: func_info.app_id, + utils.HEADER_TRACE_ID: invoke_id, + utils.HEADER_INVOKE_URN: func_info.function_id, + utils.AUTHORIZATION: authorization} + return headers + + +def create_payload(task: TaskSpec, invoke_type): + """create payload in faas""" + args = [json_format.MessageToDict(arg) for arg in task.args] + payload = { + "functionUrn": task.function_info.function_id, + "args": args, + "type": invoke_type, + "returnObjectIDs": task.object_ids + } + return json.dumps(payload) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/local_client.py b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/local_client.py new file mode 100644 index 0000000000000000000000000000000000000000..ee31412c59e09cc3c208322e6297650f57bc1ba7 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/local_client.py @@ -0,0 +1,87 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+"""local handler""" +import logging +import threading +from abc import ABC + +from yr.invoke_client.invoke_client import InvokeClient +from yr.rpc.core_service_pb2 import KillResponse +from yr.runtime.local_object_store import LocalObjectStore +from yr.runtime.task_spec import TaskSpec +from yr.runtime.worker import Worker +from yr.utils import generate_random_id + +_logger = logging.getLogger(__name__) + + +class LocalClient(InvokeClient, ABC): + """local handler""" + + def __init__(self): + self.workers = {} + self._running = True + self._lock = threading.Lock() + + def create(self, task: TaskSpec): + """create worker""" + if not self._running: + return + instance_id = generate_random_id() + concurrency = 100 if task.invoke_options is None else task.invoke_options.concurrency + worker = Worker() + worker.init(instance_id, concurrency) + with self._lock: + self.workers[instance_id] = worker + worker.submit(task) + + def invoke(self, task: TaskSpec): + """instance function invoke""" + if not self._running: + return + worker = self.workers.get(task.instance_id.id) + if worker is None: + raise RuntimeError(f"No such instance: {task.instance_id.id}") + worker.submit(task) + + def kill(self, task: TaskSpec): + """kill worker""" + with self._lock: + if task.instance_id.id in self.workers: + worker = self.workers.pop(task.instance_id.id) + worker.stop() + task.callback(KillResponse(), None) + + def exit(self) -> None: + """exit instance""" + _logger.warning("local mode not support exit") + + def save_state(self, state) -> str: + """save instance state""" + key = generate_random_id() + LocalObjectStore().put(key, state) + return key + + def load_state(self, checkpoint_id) -> bytes: + """load instance state""" + return LocalObjectStore().get(checkpoint_id) + + def clear(self): + """stop all workers""" + self._running = False + with self._lock: + for worker in self.workers.values(): + worker.stop() + self.workers.clear() diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/outcluster_client.py b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/outcluster_client.py new file mode 100644 index 0000000000000000000000000000000000000000..dfe93476e611044ab87263c68d5cb3969e5103ac --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/outcluster_client.py @@ -0,0 +1,133 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +""" +http client +This is a client of yr cluster in outCluster +""" +import logging +from abc import ABC + +from yr.invoke_client.async_http_client import AsyncHttpClient, HttpTask +from yr.invoke_client.invoke_client import InvokeClient, create_headers +from yr.rpc import core_service_pb2 +from yr.runtime.task_spec import TaskSpec, InvokeOptions + +_CLUSTER_PORT = 31220 +_MAX_HTTP = 1024 +_CONNECT_TIMEOUT = 5 +_READ_TIMEOUT = 900 +_GETCON_TIMEOUT = 295 + +_INSTANCE_INVOKE_TYPE = "invoke" +_INSTANCE_TERMINATE_TYPE = "terminate" +_INSTANCE_INVOKE_SOURCE = "yrapi" + +_INVOKE_PATH = "/serverless/v1/functions/" +_OUTCLUSTER_CREATE_PATH = "/serverless/v1/posix/instance/create" +_OUTCLUSTER_INVOKE_PATH = "/serverless/v1/posix/instance/invoke" +_OUTCLUSTER_KILL_PATH = "/serverless/v1/posix/instance/kill" + +logger = logging.getLogger(__name__) + + +class HttpInvokeClient(InvokeClient, ABC): + """ + http invoke client for api invoke + """ + + def __init__(self): + self.http_client = None + + def init(self, server_address, timeout): + """ + init client + """ + self.http_client = AsyncHttpClient() + self.http_client.init(address=server_address, timeout=timeout) + + def create(self, task: TaskSpec): + """create instance""" + logger.debug(f"create task:{task.task_id} ,trace_id:{task.trace_id}") + if task.invoke_options is not None: + scheduling_ops = task.invoke_options.to_pb() + create_ops = {'Concurrency': f"{task.invoke_options.concurrency}"} + labels = task.invoke_options.labels + else: + scheduling_ops = core_service_pb2.SchedulingOptions() + create_ops = {'Concurrency': f"{InvokeOptions().concurrency}"} + labels = [] + instance_id = task.instance_id.id if task.instance_id else None + payload = core_service_pb2.CreateRequest(function=task.function_info.function_name, + schedulingOps=scheduling_ops, + createOptions=create_ops, + requestID=task.task_id, + traceID=task.trace_id, + labels=labels, + args=task.args, + designatedInstanceID=instance_id).SerializeToString() + + url = _OUTCLUSTER_CREATE_PATH + headers = create_headers(url, task.function_info, payload, task.task_id) + self.http_client.submit_invoke_request( + HttpTask(url=url, headers=headers, payload=payload, read_timeout=task.invoke_timeout, task_id=task.task_id), + task.callback) + + def invoke(self, task: TaskSpec): + """invoke""" + logger.debug(f"invoke task:{task.task_id} ,trace_id:{task.trace_id}") + req = core_service_pb2.InvokeRequest(function=task.function_info.function_name, + requestID=task.task_id, + instanceID=task.instance_id.id, + traceID=task.trace_id, + args=task.args, + returnObjectIDs=task.object_ids) + payload = req.SerializeToString() + url = _OUTCLUSTER_INVOKE_PATH + headers = create_headers(url, task.function_info, payload, task.task_id) + self.http_client.submit_invoke_request( + HttpTask(url=url, headers=headers, payload=payload, read_timeout=task.invoke_timeout, task_id=task.task_id), + task.callback) + + def kill(self, task: TaskSpec): + """kill""" + # instance_id.id is a future object and will wait for instance creation to complete + req = core_service_pb2.KillRequest(instanceID=task.instance_id.id, + signal=task.signal, payload=task.request_id.encode(encoding='utf-8')) + + logger.debug( + f"send kill, id: {task.instance_id}, signal: {task.signal}, task: {task.task_id}") + url = _OUTCLUSTER_KILL_PATH + payload = req.SerializeToString() + headers = create_headers(url, task.function_info, payload, task.task_id) + self.http_client.submit_invoke_request( + HttpTask(url=url, headers=headers, payload=payload, 
read_timeout=task.invoke_timeout, task_id=task.task_id), + task.callback) + + def exit(self): + """exit""" + raise RuntimeError("Not support exit out of cluster") + + def save_state(self, state): + """save state""" + raise RuntimeError("Not support save state out of cluster") + + def load_state(self, checkpoint_id) -> bytes: + """load state""" + raise RuntimeError("Not support load state out of cluster") + + def clear(self): + """clear pool""" + self.http_client.shutdown() diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/posix_client.py b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/posix_client.py new file mode 100644 index 0000000000000000000000000000000000000000..4f457f0f6554f4071c64e802f7dfa8752266d187 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/posix_client.py @@ -0,0 +1,92 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +"""Posix client""" +from abc import ABC + +from fnruntime.rpc import sdk, rpc_server +from yr.invoke_client.invoke_client import InvokeClient +from yr.rpc import core_service_pb2 +from yr.runtime.task_spec import TaskSpec, InvokeOptions + + +class PosixClient(InvokeClient, ABC): + """Posix invoke client""" + + def create(self, task: TaskSpec): + """create instance""" + if task.invoke_options is not None: + scheduling_ops = task.invoke_options.to_pb() + create_ops = {'Concurrency': f"{task.invoke_options.concurrency}"} + labels = task.invoke_options.labels + else: + scheduling_ops = core_service_pb2.SchedulingOptions() + create_ops = {'Concurrency': f"{InvokeOptions().concurrency}"} + labels = [] + instance_id = task.instance_id.id if task.instance_id else None + + request = core_service_pb2.CreateRequest(function=task.function_info.function_name, + schedulingOps=scheduling_ops, + createOptions=create_ops, + requestID=task.task_id, + traceID=task.trace_id, + labels=labels, + args=task.args, + designatedInstanceID=instance_id) + sdk.create(request, task.callback) + + def invoke(self, task: TaskSpec): + """invoke""" + request = core_service_pb2.InvokeRequest(function=task.function_info.function_name, + requestID=task.task_id, + instanceID=task.instance_id.id, + traceID=task.trace_id, + args=task.args, + returnObjectIDs=task.object_ids) + sdk.invoke(request, task.callback) + + def kill(self, task: TaskSpec): + """kill""" + sdk.kill(task.instance_id.id, task.signal, task.request_id, task.callback, task.invoke_timeout) + + def exit(self): + """exit""" + sdk.exit() + + def save_state(self, state) -> str: + """save state""" + + return sdk.save_state(state) + + def load_state(self, checkpoint_id) -> bytes: + """load state""" + + return sdk.load_state(checkpoint_id) + + def clear(self): + """clear""" + + +def start_server(address, logger) -> int: + """ + start server for communicating with the kernel + + Args: + address: bind address + logger: logger that has been initialized + """ + from fnruntime import log + log.init_log("", logger) + return 
rpc_server.driver_serve(address) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/yr_client/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/yr_client/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..61b64eef0bb88c4c88b44a584dae33dcf513565c --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/yr_client/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +yr client +""" diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/yr_client/auth_tools.py b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/yr_client/auth_tools.py new file mode 100644 index 0000000000000000000000000000000000000000..b66303d509984db4c2828ff0b3b7566dd84ff946 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/invoke_client/yr_client/auth_tools.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and +# conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, +# WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
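The two concrete InvokeClient implementations above split by deployment mode. A hedged selection sketch follows; the in_cluster flag is hypothetical (the real wiring lives in the runtime setup), and importing PosixClient only works where the fnruntime package is available:

def make_invoke_client(in_cluster: bool, server_address: str, timeout: int):
    if in_cluster:
        # in-cluster: talks to the kernel through the fnruntime posix sdk
        from yr.invoke_client.posix_client import PosixClient
        return PosixClient()
    # out-of-cluster: signed HTTP requests against the cluster frontend
    from yr.invoke_client.outcluster_client import HttpInvokeClient
    client = HttpInvokeClient()
    client.init(server_address, timeout)
    return client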
+ +""" +Authorization tool +""" + +import hmac +import base64 +import hashlib +from urllib import parse + +__APP_KEY = \ + "a9abff86a849f0d40f5a399252c05def4d744a28d3ad27fd73c80db11b706ac8" + + +def _sign(sign_key: bytes, sign_struct: dict): + if "query_str" in sign_struct: + canonical_query_str = _canonical_query(sign_struct["query_str"]) + else: + canonical_query_str = "" + if "body" in sign_struct: + body_str = sign_struct["body"] + else: + body_str = "" + if isinstance(body_str, str): + body_str = body_str.encode() + sign_bytes = sign_struct["method"].encode() + b'&' + \ + sign_struct["path"].encode() + b'&' + \ + canonical_query_str.encode() + b'&' + \ + body_str + b'&' + \ + (("appid=" + sign_struct["appid"]).encode()) + b'&' + \ + ("timestamp=" + sign_struct["timestamp"]).encode() + hash_hmac = hmac.new(sign_key, sign_bytes, hashlib.sha256) + signkey = "\"" + base64.b64encode(hash_hmac.digest()).decode() + "\"" + return signkey + + +def get_authorization(sign_key: bytes, sign_struct): + """ + get authorization for frontend + """ + appid = sign_struct["appid"] + cur_time = sign_struct["timestamp"] + signkey = _sign(sign_key, sign_struct) + authorization = "CLOUDSOA-HMAC-SHA256" + " appid=" + appid + \ + ", timestamp=" + cur_time + ", signature=" + signkey + return authorization + + +def _canonical_query(query): + pairs = query.split("&") + pairs.sort() + pairs = _escape_query_pairs(pairs) + return '#'.join(pairs) + + +def _escape_query_pairs(pairs): + escaped = [] + for pair in pairs: + kv = pair.split("=") + if len(kv) > 1: + k = _url_encode(kv[0]) + v = _url_encode(kv[1]) + escaped.append("".join([k, "=", v])) + return escaped + + +def _url_encode(url): + if url is None or url == "": + return "" + encode_url = parse.quote(url) + + encode_url.replace("+", "%20") + encode_url.replace("*", "%2A") + encode_url.replace("%7E", "~") + return encode_url + + +def _canonical_header(headers): + kvs = [] + for k, v in headers.items(): + k = _url_encode(k) + v = _url_encode(v) + kvs.append("".join([k, "=", v])) + kvs.sort() + return "&".join(kvs) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/object_ref.py b/dsoftbus/dist_executor/modules/runtime/python/yr/object_ref.py new file mode 100644 index 0000000000000000000000000000000000000000..da79def5a03791e86ac521811409a110f7b97e20 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/object_ref.py @@ -0,0 +1,104 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+"""ObjectRef""" + +import yr +import yr.storage.reference_count + + +class ObjectRef: + """ObjectRef""" + _id = None + _task_id = None + + def __init__(self, object_id: str, task_id=None, need_incre=True, need_decre=True): + self._id = object_id + self._task_id = task_id + self._need_decre = need_decre + + if need_incre: + yr.storage.reference_count.increase_reference_count(self._id) + + def __del__(self): + if self._need_decre: + yr.storage.reference_count.decrease_reference_count(self._id) + + def __copy__(self): + return self + + def __deepcopy__(self, memo): + return self + + def __str__(self): + return self.id + + def __eq__(self, other): + return self.id == other + + def __hash__(self): + return hash(self.id) + + @property + def task_id(self): + """task id""" + return self._task_id + + @task_id.setter + def task_id(self, value): + """task id""" + if value is not None: + self._task_id = value + + @property + def id(self): + """object id""" + return self._id + + def get_future(self): + """get future""" + return yr.runtime.runtime.Runtime().rt.task_mgr.get_future(self.task_id) + + def wait(self, timeout=None): + """wait task done""" + future = self.get_future() + if future is not None: + future.result(timeout=timeout) + + def is_exception(self) -> bool: + """is future exception""" + future = self.get_future() + if future is None: + return False + return future.exception() is not None + + def done(self): + """Return True if the obj future was cancelled or finished executing.""" + future = self.get_future() + if future: + return future.done() + return True + + def cancel(self): + """cancel the obj future""" + future = self.get_future() + if future: + future.cancel() + + def on_complete(self, callback): + """register callback""" + if self.task_id is None: + callback(None) + else: + yr.runtime.runtime.Runtime().rt.task_mgr.on_complete(self.task_id, callback) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/objref_counting_manager.py b/dsoftbus/dist_executor/modules/runtime/python/yr/objref_counting_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..c25273a31131d2cd96477ee9ba344267dffd0dca --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/objref_counting_manager.py @@ -0,0 +1,113 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +""" objref_counting_manager provide reference counting relative functions""" + +import threading +from typing import Union +from collections import deque + +from yr.utils import Singleton, generate_random_id +from yr.object_ref import ObjectRef +import yr.storage.reference_count + +INVOKE_ACK_TIMEOUT = 15 +OBJECT_REF_SCALE_STEP = 100 + + +class ObjectRefPool: + """ObjectRefPool""" + + def __init__(self, pool_size=1): + self.__ref_pool = deque() + self.__pool_size = pool_size + self._pool_lock = threading.Lock() + self.__scale(pool_size) + + def pop(self, task_id=None): + """pop and return an ObjectRef""" + with self._pool_lock: + if len(self.__ref_pool) == 0: + self.__scale(self.__pool_size) + + ref = self.__ref_pool.pop() + ref.task_id = task_id + return ref + + def clear(self): + """clear object ref pool""" + self.__ref_pool.clear() + + def __scale(self, size): + ids = [] + for _ in range(size): + object_id = generate_random_id() + ref = ObjectRef(object_id, None, need_incre=False, need_decre=True) + self.__ref_pool.append(ref) + ids.append(object_id) + yr.storage.reference_count.increase_reference_count(ids) + + +@Singleton +class ObjRefCountingManager: + """ObjRefCountingManager""" + + def __init__(self): + self._invoke_map = {} + self._empty_signal = threading.Event() + self._ref_pool = ObjectRefPool(OBJECT_REF_SCALE_STEP) + + def join_record(self, key: str, refs: Union[ObjectRef, list]): + """add and merge record to the map""" + if len(key) == 0 or refs is None: + return + if isinstance(refs, list) and len(refs) == 0: + return + if isinstance(refs, ObjectRef): + refs = [refs] + if key not in self._invoke_map: + self._invoke_map[key] = [] + + self._invoke_map[key] += refs + self._empty_signal.clear() + + def del_record(self, key: str): + """rmv record from the map""" + self._invoke_map.pop(key, None) + + if self.is_empty(): + self._empty_signal.set() + + def is_empty(self): + """check if the map is empty""" + return len(self._invoke_map) == 0 + + def wait_invoke_ack(self, timeout=INVOKE_ACK_TIMEOUT): + """wait until the map is empty""" + if not self.is_empty(): + self._empty_signal.wait(timeout) + + def create_object_ref(self, task_id=None, count=1): + """generate object reference""" + refs = [] + for _ in range(count): + ref = self._ref_pool.pop(task_id) + refs.append(ref) + + return refs + + def clear(self): + """clear object ref pool""" + self._ref_pool.clear() diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fdaa157f694738e5227925d8a06a90c2ac5db673 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/__init__.py @@ -0,0 +1,16 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+ +"""rpc protobuf""" diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/bus_service_pb2.py b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/bus_service_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..720750900b87a3cef082962614be387200f5edda --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/bus_service_pb2.py @@ -0,0 +1,341 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: rpc/bus_service.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from yr.rpc import common_pb2 as rpc_dot_common__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='rpc/bus_service.proto', + package='bus_service', + syntax='proto3', + serialized_options=b'Z\017grpc/pb/bus;bus', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x15rpc/bus_service.proto\x12\x0b\x62us_service\x1a\x10rpc/common.proto\"L\n\x15\x44iscoverDriverRequest\x12\x10\n\x08\x64riverIP\x18\x01 \x01(\t\x12\x12\n\ndriverPort\x18\x02 \x01(\t\x12\r\n\x05jobID\x18\x03 \x01(\t\"\x18\n\x16\x44iscoverDriverResponse\"C\n\x17\x44iscoverFrontendRequest\x12\x12\n\nfrontendIP\x18\x01 \x01(\t\x12\x14\n\x0c\x66rontendPort\x18\x02 \x01(\t\"\x1a\n\x18\x44iscoverFrontendResponse\"*\n\x14QueryInstanceRequest\x12\x12\n\ninstanceID\x18\x01 \x01(\t\"Y\n\x15QueryInstanceResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x0e\n\x06status\x18\x03 \x01(\t2\xa6\x02\n\nBusService\x12\x61\n\x10\x44iscoverFrontend\x12$.bus_service.DiscoverFrontendRequest\x1a%.bus_service.DiscoverFrontendResponse\"\x00\x12X\n\rQueryInstance\x12!.bus_service.QueryInstanceRequest\x1a\".bus_service.QueryInstanceResponse\"\x00\x12[\n\x0e\x44iscoverDriver\x12\".bus_service.DiscoverDriverRequest\x1a#.bus_service.DiscoverDriverResponse\"\x00\x42\x11Z\x0fgrpc/pb/bus;busb\x06proto3' + , + dependencies=[rpc_dot_common__pb2.DESCRIPTOR,]) + + + + +_DISCOVERDRIVERREQUEST = _descriptor.Descriptor( + name='DiscoverDriverRequest', + full_name='bus_service.DiscoverDriverRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='driverIP', full_name='bus_service.DiscoverDriverRequest.driverIP', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='driverPort', full_name='bus_service.DiscoverDriverRequest.driverPort', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='jobID', full_name='bus_service.DiscoverDriverRequest.jobID', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=56, + serialized_end=132, +) + + +_DISCOVERDRIVERRESPONSE = _descriptor.Descriptor( + name='DiscoverDriverResponse', + full_name='bus_service.DiscoverDriverResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=134, + serialized_end=158, +) + + +_DISCOVERFRONTENDREQUEST = _descriptor.Descriptor( + name='DiscoverFrontendRequest', + full_name='bus_service.DiscoverFrontendRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='frontendIP', full_name='bus_service.DiscoverFrontendRequest.frontendIP', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='frontendPort', full_name='bus_service.DiscoverFrontendRequest.frontendPort', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=160, + serialized_end=227, +) + + +_DISCOVERFRONTENDRESPONSE = _descriptor.Descriptor( + name='DiscoverFrontendResponse', + full_name='bus_service.DiscoverFrontendResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=229, + serialized_end=255, +) + + +_QUERYINSTANCEREQUEST = _descriptor.Descriptor( + name='QueryInstanceRequest', + full_name='bus_service.QueryInstanceRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='instanceID', full_name='bus_service.QueryInstanceRequest.instanceID', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=257, + 
serialized_end=299, +) + + +_QUERYINSTANCERESPONSE = _descriptor.Descriptor( + name='QueryInstanceResponse', + full_name='bus_service.QueryInstanceResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='bus_service.QueryInstanceResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='bus_service.QueryInstanceResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='status', full_name='bus_service.QueryInstanceResponse.status', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=301, + serialized_end=390, +) + +_QUERYINSTANCERESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +DESCRIPTOR.message_types_by_name['DiscoverDriverRequest'] = _DISCOVERDRIVERREQUEST +DESCRIPTOR.message_types_by_name['DiscoverDriverResponse'] = _DISCOVERDRIVERRESPONSE +DESCRIPTOR.message_types_by_name['DiscoverFrontendRequest'] = _DISCOVERFRONTENDREQUEST +DESCRIPTOR.message_types_by_name['DiscoverFrontendResponse'] = _DISCOVERFRONTENDRESPONSE +DESCRIPTOR.message_types_by_name['QueryInstanceRequest'] = _QUERYINSTANCEREQUEST +DESCRIPTOR.message_types_by_name['QueryInstanceResponse'] = _QUERYINSTANCERESPONSE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +DiscoverDriverRequest = _reflection.GeneratedProtocolMessageType('DiscoverDriverRequest', (_message.Message,), { + 'DESCRIPTOR' : _DISCOVERDRIVERREQUEST, + '__module__' : 'rpc.bus_service_pb2' + # @@protoc_insertion_point(class_scope:bus_service.DiscoverDriverRequest) + }) +_sym_db.RegisterMessage(DiscoverDriverRequest) + +DiscoverDriverResponse = _reflection.GeneratedProtocolMessageType('DiscoverDriverResponse', (_message.Message,), { + 'DESCRIPTOR' : _DISCOVERDRIVERRESPONSE, + '__module__' : 'rpc.bus_service_pb2' + # @@protoc_insertion_point(class_scope:bus_service.DiscoverDriverResponse) + }) +_sym_db.RegisterMessage(DiscoverDriverResponse) + +DiscoverFrontendRequest = _reflection.GeneratedProtocolMessageType('DiscoverFrontendRequest', (_message.Message,), { + 'DESCRIPTOR' : _DISCOVERFRONTENDREQUEST, + '__module__' : 'rpc.bus_service_pb2' + # @@protoc_insertion_point(class_scope:bus_service.DiscoverFrontendRequest) + }) +_sym_db.RegisterMessage(DiscoverFrontendRequest) + +DiscoverFrontendResponse = _reflection.GeneratedProtocolMessageType('DiscoverFrontendResponse', (_message.Message,), { + 'DESCRIPTOR' : _DISCOVERFRONTENDRESPONSE, + '__module__' : 'rpc.bus_service_pb2' + # 
@@protoc_insertion_point(class_scope:bus_service.DiscoverFrontendResponse) + }) +_sym_db.RegisterMessage(DiscoverFrontendResponse) + +QueryInstanceRequest = _reflection.GeneratedProtocolMessageType('QueryInstanceRequest', (_message.Message,), { + 'DESCRIPTOR' : _QUERYINSTANCEREQUEST, + '__module__' : 'rpc.bus_service_pb2' + # @@protoc_insertion_point(class_scope:bus_service.QueryInstanceRequest) + }) +_sym_db.RegisterMessage(QueryInstanceRequest) + +QueryInstanceResponse = _reflection.GeneratedProtocolMessageType('QueryInstanceResponse', (_message.Message,), { + 'DESCRIPTOR' : _QUERYINSTANCERESPONSE, + '__module__' : 'rpc.bus_service_pb2' + # @@protoc_insertion_point(class_scope:bus_service.QueryInstanceResponse) + }) +_sym_db.RegisterMessage(QueryInstanceResponse) + + +DESCRIPTOR._options = None + +_BUSSERVICE = _descriptor.ServiceDescriptor( + name='BusService', + full_name='bus_service.BusService', + file=DESCRIPTOR, + index=0, + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_start=393, + serialized_end=687, + methods=[ + _descriptor.MethodDescriptor( + name='DiscoverFrontend', + full_name='bus_service.BusService.DiscoverFrontend', + index=0, + containing_service=None, + input_type=_DISCOVERFRONTENDREQUEST, + output_type=_DISCOVERFRONTENDRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='QueryInstance', + full_name='bus_service.BusService.QueryInstance', + index=1, + containing_service=None, + input_type=_QUERYINSTANCEREQUEST, + output_type=_QUERYINSTANCERESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='DiscoverDriver', + full_name='bus_service.BusService.DiscoverDriver', + index=2, + containing_service=None, + input_type=_DISCOVERDRIVERREQUEST, + output_type=_DISCOVERDRIVERRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), +]) +_sym_db.RegisterServiceDescriptor(_BUSSERVICE) + +DESCRIPTOR.services_by_name['BusService'] = _BUSSERVICE + +# @@protoc_insertion_point(module_scope) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/bus_service_pb2_grpc.py b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/bus_service_pb2_grpc.py new file mode 100644 index 0000000000000000000000000000000000000000..ced112e443e77ac4432a5bce5fd2afa64abab4a5 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/bus_service_pb2_grpc.py @@ -0,0 +1,138 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from yr.rpc import bus_service_pb2 as rpc_dot_bus__service__pb2 + + +class BusServiceStub(object): + """bus service provides APIs to runtime, + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. 
+ """ + self.DiscoverFrontend = channel.unary_unary( + '/bus_service.BusService/DiscoverFrontend', + request_serializer=rpc_dot_bus__service__pb2.DiscoverFrontendRequest.SerializeToString, + response_deserializer=rpc_dot_bus__service__pb2.DiscoverFrontendResponse.FromString, + ) + self.QueryInstance = channel.unary_unary( + '/bus_service.BusService/QueryInstance', + request_serializer=rpc_dot_bus__service__pb2.QueryInstanceRequest.SerializeToString, + response_deserializer=rpc_dot_bus__service__pb2.QueryInstanceResponse.FromString, + ) + self.DiscoverDriver = channel.unary_unary( + '/bus_service.BusService/DiscoverDriver', + request_serializer=rpc_dot_bus__service__pb2.DiscoverDriverRequest.SerializeToString, + response_deserializer=rpc_dot_bus__service__pb2.DiscoverDriverResponse.FromString, + ) + + +class BusServiceServicer(object): + """bus service provides APIs to runtime, + """ + + def DiscoverFrontend(self, request, context): + """notify bus to connect frontend + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def QueryInstance(self, request, context): + """query instance info from frontend + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def DiscoverDriver(self, request, context): + """notify bus to connect driver + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_BusServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'DiscoverFrontend': grpc.unary_unary_rpc_method_handler( + servicer.DiscoverFrontend, + request_deserializer=rpc_dot_bus__service__pb2.DiscoverFrontendRequest.FromString, + response_serializer=rpc_dot_bus__service__pb2.DiscoverFrontendResponse.SerializeToString, + ), + 'QueryInstance': grpc.unary_unary_rpc_method_handler( + servicer.QueryInstance, + request_deserializer=rpc_dot_bus__service__pb2.QueryInstanceRequest.FromString, + response_serializer=rpc_dot_bus__service__pb2.QueryInstanceResponse.SerializeToString, + ), + 'DiscoverDriver': grpc.unary_unary_rpc_method_handler( + servicer.DiscoverDriver, + request_deserializer=rpc_dot_bus__service__pb2.DiscoverDriverRequest.FromString, + response_serializer=rpc_dot_bus__service__pb2.DiscoverDriverResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'bus_service.BusService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. 
+class BusService(object): + """bus service provides APIs to runtime, + """ + + @staticmethod + def DiscoverFrontend(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/bus_service.BusService/DiscoverFrontend', + rpc_dot_bus__service__pb2.DiscoverFrontendRequest.SerializeToString, + rpc_dot_bus__service__pb2.DiscoverFrontendResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def QueryInstance(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/bus_service.BusService/QueryInstance', + rpc_dot_bus__service__pb2.QueryInstanceRequest.SerializeToString, + rpc_dot_bus__service__pb2.QueryInstanceResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def DiscoverDriver(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/bus_service.BusService/DiscoverDriver', + rpc_dot_bus__service__pb2.DiscoverDriverRequest.SerializeToString, + rpc_dot_bus__service__pb2.DiscoverDriverResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/common_pb2.py b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/common_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..8a7214229e29dd0ef18a5c4433d4d0d924542ecc --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/common_pb2.py @@ -0,0 +1,197 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
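Before the next generated module, a sketch of how a client might call the BusService stub defined in bus_service_pb2_grpc.py above, over a plain insecure channel; the address and instance id are placeholders, not values taken from this patch:

    import grpc
    from yr.rpc import bus_service_pb2, bus_service_pb2_grpc

    # Open a channel to the bus and query the status of one instance.
    with grpc.insecure_channel("127.0.0.1:19001") as channel:
        stub = bus_service_pb2_grpc.BusServiceStub(channel)
        reply = stub.QueryInstance(
            bus_service_pb2.QueryInstanceRequest(instanceID="instance-0001"))
        print(reply.code, reply.status)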
+# source: rpc/common.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='rpc/common.proto', + package='common', + syntax='proto3', + serialized_options=b'Z\027bus/proto/common;common', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x10rpc/common.proto\x12\x06\x63ommon\"r\n\x03\x41rg\x12!\n\x04type\x18\x01 \x01(\x0e\x32\x13.common.Arg.ArgType\x12\r\n\x05value\x18\x02 \x01(\x0c\x12\x13\n\x0bnested_refs\x18\x03 \x03(\t\"$\n\x07\x41rgType\x12\t\n\x05VALUE\x10\x00\x12\x0e\n\nOBJECT_REF\x10\x01*\xc4\x02\n\tErrorCode\x12\x0c\n\x08\x45RR_NONE\x10\x00\x12\x16\n\x11\x45RR_PARAM_INVALID\x10\xe9\x07\x12\x1c\n\x17\x45RR_RESOURCE_NOT_ENOUGH\x10\xea\x07\x12\x1b\n\x16\x45RR_INSTANCE_NOT_FOUND\x10\xeb\x07\x12\x1c\n\x17\x45RR_INSTANCE_DUPLICATED\x10\xec\x07\x12\x1c\n\x17\x45RR_INVOKE_RATE_LIMITED\x10\xed\x07\x12\x17\n\x12\x45RR_USER_CODE_LOAD\x10\xd1\x0f\x12 \n\x1b\x45RR_USER_FUNCTION_EXCEPTION\x10\xd2\x0f\x12$\n\x1f\x45RR_REQUEST_BETWEEN_RUNTIME_BUS\x10\xb9\x17\x12\x1c\n\x17\x45RR_INNER_COMMUNICATION\x10\xba\x17\x12\x1b\n\x16\x45RR_INNER_SYSTEM_ERROR\x10\xbb\x17\x42\x19Z\x17\x62us/proto/common;commonb\x06proto3' +) + +_ERRORCODE = _descriptor.EnumDescriptor( + name='ErrorCode', + full_name='common.ErrorCode', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='ERR_NONE', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ERR_PARAM_INVALID', index=1, number=1001, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ERR_RESOURCE_NOT_ENOUGH', index=2, number=1002, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ERR_INSTANCE_NOT_FOUND', index=3, number=1003, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ERR_INSTANCE_DUPLICATED', index=4, number=1004, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ERR_INVOKE_RATE_LIMITED', index=5, number=1005, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ERR_USER_CODE_LOAD', index=6, number=2001, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ERR_USER_FUNCTION_EXCEPTION', index=7, number=2002, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ERR_REQUEST_BETWEEN_RUNTIME_BUS', index=8, number=3001, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='ERR_INNER_COMMUNICATION', index=9, number=3002, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + 
name='ERR_INNER_SYSTEM_ERROR', index=10, number=3003, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=145, + serialized_end=469, +) +_sym_db.RegisterEnumDescriptor(_ERRORCODE) + +ErrorCode = enum_type_wrapper.EnumTypeWrapper(_ERRORCODE) +ERR_NONE = 0 +ERR_PARAM_INVALID = 1001 +ERR_RESOURCE_NOT_ENOUGH = 1002 +ERR_INSTANCE_NOT_FOUND = 1003 +ERR_INSTANCE_DUPLICATED = 1004 +ERR_INVOKE_RATE_LIMITED = 1005 +ERR_USER_CODE_LOAD = 2001 +ERR_USER_FUNCTION_EXCEPTION = 2002 +ERR_REQUEST_BETWEEN_RUNTIME_BUS = 3001 +ERR_INNER_COMMUNICATION = 3002 +ERR_INNER_SYSTEM_ERROR = 3003 + + +_ARG_ARGTYPE = _descriptor.EnumDescriptor( + name='ArgType', + full_name='common.Arg.ArgType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='VALUE', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='OBJECT_REF', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=106, + serialized_end=142, +) +_sym_db.RegisterEnumDescriptor(_ARG_ARGTYPE) + + +_ARG = _descriptor.Descriptor( + name='Arg', + full_name='common.Arg', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='type', full_name='common.Arg.type', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='common.Arg.value', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='nested_refs', full_name='common.Arg.nested_refs', index=2, + number=3, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + _ARG_ARGTYPE, + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=28, + serialized_end=142, +) + +_ARG.fields_by_name['type'].enum_type = _ARG_ARGTYPE +_ARG_ARGTYPE.containing_type = _ARG +DESCRIPTOR.message_types_by_name['Arg'] = _ARG +DESCRIPTOR.enum_types_by_name['ErrorCode'] = _ERRORCODE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +Arg = _reflection.GeneratedProtocolMessageType('Arg', (_message.Message,), { + 'DESCRIPTOR' : _ARG, + '__module__' : 'rpc.common_pb2' + # @@protoc_insertion_point(class_scope:common.Arg) + }) +_sym_db.RegisterMessage(Arg) + + +DESCRIPTOR._options = None +# @@protoc_insertion_point(module_scope) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/core_service_pb2.py 
b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/core_service_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..18e7f170b2e350780562c5e7e30928597852530d --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/core_service_pb2.py @@ -0,0 +1,1252 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: rpc/core_service.proto +"""Generated protocol buffer code.""" +from google.protobuf.internal import enum_type_wrapper +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from yr.rpc import common_pb2 as rpc_dot_common__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='rpc/core_service.proto', + package='core_service', + syntax='proto3', + serialized_options=b'Z\021grpc/pb/core;core', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x16rpc/core_service.proto\x12\x0c\x63ore_service\x1a\x10rpc/common.proto\"\x9d\x03\n\x11SchedulingOptions\x12\x10\n\x08priority\x18\x01 \x01(\x05\x12\x41\n\tresources\x18\x02 \x03(\x0b\x32..core_service.SchedulingOptions.ResourcesEntry\x12\x41\n\textension\x18\x03 \x03(\x0b\x32..core_service.SchedulingOptions.ExtensionEntry\x12?\n\x08\x61\x66\x66inity\x18\x04 \x03(\x0b\x32-.core_service.SchedulingOptions.AffinityEntry\x1a\x30\n\x0eResourcesEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\x01:\x02\x38\x01\x1a\x30\n\x0e\x45xtensionEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x1aK\n\rAffinityEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12)\n\x05value\x18\x02 \x01(\x0e\x32\x1a.core_service.AffinityType:\x02\x38\x01\"\xc3\x02\n\rCreateRequest\x12\x10\n\x08\x66unction\x18\x01 \x01(\t\x12\x19\n\x04\x61rgs\x18\x02 \x03(\x0b\x32\x0b.common.Arg\x12\x36\n\rschedulingOps\x18\x03 \x01(\x0b\x32\x1f.core_service.SchedulingOptions\x12\x11\n\trequestID\x18\x04 \x01(\t\x12\x0f\n\x07traceID\x18\x05 \x01(\t\x12\x0e\n\x06labels\x18\x06 \x03(\t\x12\x1c\n\x14\x64\x65signatedInstanceID\x18\x07 \x01(\t\x12\x45\n\rcreateOptions\x18\x08 \x03(\x0b\x32..core_service.CreateRequest.CreateOptionsEntry\x1a\x34\n\x12\x43reateOptionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"V\n\x0e\x43reateResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x12\n\ninstanceID\x18\x03 \x01(\t\"\x8d\x01\n\rInvokeRequest\x12\x10\n\x08\x66unction\x18\x01 \x01(\t\x12\x19\n\x04\x61rgs\x18\x02 \x03(\x0b\x32\x0b.common.Arg\x12\x12\n\ninstanceID\x18\x03 \x01(\t\x12\x11\n\trequestID\x18\x04 \x01(\t\x12\x0f\n\x07traceID\x18\x05 \x01(\t\x12\x17\n\x0freturnObjectIDs\x18\x06 \x03(\t\"Z\n\x0eInvokeResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x16\n\x0ereturnObjectID\x18\x03 \x01(\t\"e\n\nCallResult\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x12\n\ninstanceID\x18\x03 \x01(\t\x12\x11\n\trequestID\x18\x04 \x01(\t\"A\n\rCallResultAck\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"&\n\x10TerminateRequest\x12\x12\n\ninstanceID\x18\x01 \x01(\t\"E\n\x11TerminateResponse\x12\x1f\n\x04\x63ode\x18\x01 
\x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"\r\n\x0b\x45xitRequest\"\x0e\n\x0c\x45xitResponse\"!\n\x10StateSaveRequest\x12\r\n\x05state\x18\x01 \x01(\x0c\"[\n\x11StateSaveResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\x14\n\x0c\x63heckpointID\x18\x03 \x01(\t\"(\n\x10StateLoadRequest\x12\x14\n\x0c\x63heckpointID\x18\x01 \x01(\t\"T\n\x11StateLoadResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\r\n\x05state\x18\x03 \x01(\x0c\"B\n\x0bKillRequest\x12\x12\n\ninstanceID\x18\x01 \x01(\t\x12\x0e\n\x06signal\x18\x02 \x01(\x05\x12\x0f\n\x07payload\x18\x03 \x01(\x0c\"@\n\x0cKillResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t*p\n\x0c\x41\x66\x66inityType\x12\x15\n\x11PreferredAffinity\x10\x00\x12\x19\n\x15PreferredAntiAffinity\x10\x01\x12\x14\n\x10RequiredAffinity\x10\x02\x12\x18\n\x14RequiredAntiAffinity\x10\x03\x32\x8d\x04\n\x0b\x43oreService\x12\x45\n\x06\x43reate\x12\x1b.core_service.CreateRequest\x1a\x1c.core_service.CreateResponse\"\x00\x12\x45\n\x06Invoke\x12\x1b.core_service.InvokeRequest\x1a\x1c.core_service.InvokeResponse\"\x00\x12N\n\tTerminate\x12\x1e.core_service.TerminateRequest\x1a\x1f.core_service.TerminateResponse\"\x00\x12?\n\x04\x45xit\x12\x19.core_service.ExitRequest\x1a\x1a.core_service.ExitResponse\"\x00\x12N\n\tSaveState\x12\x1e.core_service.StateSaveRequest\x1a\x1f.core_service.StateSaveResponse\"\x00\x12N\n\tLoadState\x12\x1e.core_service.StateLoadRequest\x1a\x1f.core_service.StateLoadResponse\"\x00\x12?\n\x04Kill\x12\x19.core_service.KillRequest\x1a\x1a.core_service.KillResponse\"\x00\x42\x13Z\x11grpc/pb/core;coreb\x06proto3' + , + dependencies=[rpc_dot_common__pb2.DESCRIPTOR,]) + +_AFFINITYTYPE = _descriptor.EnumDescriptor( + name='AffinityType', + full_name='core_service.AffinityType', + filename=None, + file=DESCRIPTOR, + create_key=_descriptor._internal_create_key, + values=[ + _descriptor.EnumValueDescriptor( + name='PreferredAffinity', index=0, number=0, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='PreferredAntiAffinity', index=1, number=1, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='RequiredAffinity', index=2, number=2, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + _descriptor.EnumValueDescriptor( + name='RequiredAntiAffinity', index=3, number=3, + serialized_options=None, + type=None, + create_key=_descriptor._internal_create_key), + ], + containing_type=None, + serialized_options=None, + serialized_start=1826, + serialized_end=1938, +) +_sym_db.RegisterEnumDescriptor(_AFFINITYTYPE) + +AffinityType = enum_type_wrapper.EnumTypeWrapper(_AFFINITYTYPE) +PreferredAffinity = 0 +PreferredAntiAffinity = 1 +RequiredAffinity = 2 +RequiredAntiAffinity = 3 + + + +_SCHEDULINGOPTIONS_RESOURCESENTRY = _descriptor.Descriptor( + name='ResourcesEntry', + full_name='core_service.SchedulingOptions.ResourcesEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='core_service.SchedulingOptions.ResourcesEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='core_service.SchedulingOptions.ResourcesEntry.value', index=1, + number=2, type=1, cpp_type=5, label=1, + has_default_value=False, default_value=float(0), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=297, + serialized_end=345, +) + +_SCHEDULINGOPTIONS_EXTENSIONENTRY = _descriptor.Descriptor( + name='ExtensionEntry', + full_name='core_service.SchedulingOptions.ExtensionEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='core_service.SchedulingOptions.ExtensionEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='core_service.SchedulingOptions.ExtensionEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=347, + serialized_end=395, +) + +_SCHEDULINGOPTIONS_AFFINITYENTRY = _descriptor.Descriptor( + name='AffinityEntry', + full_name='core_service.SchedulingOptions.AffinityEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='core_service.SchedulingOptions.AffinityEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='core_service.SchedulingOptions.AffinityEntry.value', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=397, + serialized_end=472, +) + +_SCHEDULINGOPTIONS = _descriptor.Descriptor( + name='SchedulingOptions', + 
full_name='core_service.SchedulingOptions', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='priority', full_name='core_service.SchedulingOptions.priority', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='resources', full_name='core_service.SchedulingOptions.resources', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='extension', full_name='core_service.SchedulingOptions.extension', index=2, + number=3, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='affinity', full_name='core_service.SchedulingOptions.affinity', index=3, + number=4, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_SCHEDULINGOPTIONS_RESOURCESENTRY, _SCHEDULINGOPTIONS_EXTENSIONENTRY, _SCHEDULINGOPTIONS_AFFINITYENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=59, + serialized_end=472, +) + + +_CREATEREQUEST_CREATEOPTIONSENTRY = _descriptor.Descriptor( + name='CreateOptionsEntry', + full_name='core_service.CreateRequest.CreateOptionsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='core_service.CreateRequest.CreateOptionsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='core_service.CreateRequest.CreateOptionsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=746, + serialized_end=798, +) + +_CREATEREQUEST = _descriptor.Descriptor( + name='CreateRequest', + full_name='core_service.CreateRequest', + filename=None, + file=DESCRIPTOR, + 
containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='function', full_name='core_service.CreateRequest.function', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='args', full_name='core_service.CreateRequest.args', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='schedulingOps', full_name='core_service.CreateRequest.schedulingOps', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='requestID', full_name='core_service.CreateRequest.requestID', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='traceID', full_name='core_service.CreateRequest.traceID', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='labels', full_name='core_service.CreateRequest.labels', index=5, + number=6, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='designatedInstanceID', full_name='core_service.CreateRequest.designatedInstanceID', index=6, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='createOptions', full_name='core_service.CreateRequest.createOptions', index=7, + number=8, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_CREATEREQUEST_CREATEOPTIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=475, + serialized_end=798, +) + + 
+_CREATERESPONSE = _descriptor.Descriptor( + name='CreateResponse', + full_name='core_service.CreateResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='core_service.CreateResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='core_service.CreateResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='instanceID', full_name='core_service.CreateResponse.instanceID', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=800, + serialized_end=886, +) + + +_INVOKEREQUEST = _descriptor.Descriptor( + name='InvokeRequest', + full_name='core_service.InvokeRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='function', full_name='core_service.InvokeRequest.function', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='args', full_name='core_service.InvokeRequest.args', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='instanceID', full_name='core_service.InvokeRequest.instanceID', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='requestID', full_name='core_service.InvokeRequest.requestID', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='traceID', 
full_name='core_service.InvokeRequest.traceID', index=4, + number=5, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='returnObjectIDs', full_name='core_service.InvokeRequest.returnObjectIDs', index=5, + number=6, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=889, + serialized_end=1030, +) + + +_INVOKERESPONSE = _descriptor.Descriptor( + name='InvokeResponse', + full_name='core_service.InvokeResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='core_service.InvokeResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='core_service.InvokeResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='returnObjectID', full_name='core_service.InvokeResponse.returnObjectID', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1032, + serialized_end=1122, +) + + +_CALLRESULT = _descriptor.Descriptor( + name='CallResult', + full_name='core_service.CallResult', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='core_service.CallResult.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='core_service.CallResult.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + 
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='instanceID', full_name='core_service.CallResult.instanceID', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='requestID', full_name='core_service.CallResult.requestID', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1124, + serialized_end=1225, +) + + +_CALLRESULTACK = _descriptor.Descriptor( + name='CallResultAck', + full_name='core_service.CallResultAck', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='core_service.CallResultAck.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='core_service.CallResultAck.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1227, + serialized_end=1292, +) + + +_TERMINATEREQUEST = _descriptor.Descriptor( + name='TerminateRequest', + full_name='core_service.TerminateRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='instanceID', full_name='core_service.TerminateRequest.instanceID', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1294, + serialized_end=1332, +) + + +_TERMINATERESPONSE = _descriptor.Descriptor( + name='TerminateResponse', + full_name='core_service.TerminateResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', 
full_name='core_service.TerminateResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='core_service.TerminateResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1334, + serialized_end=1403, +) + + +_EXITREQUEST = _descriptor.Descriptor( + name='ExitRequest', + full_name='core_service.ExitRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1405, + serialized_end=1418, +) + + +_EXITRESPONSE = _descriptor.Descriptor( + name='ExitResponse', + full_name='core_service.ExitResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1420, + serialized_end=1434, +) + + +_STATESAVEREQUEST = _descriptor.Descriptor( + name='StateSaveRequest', + full_name='core_service.StateSaveRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='state', full_name='core_service.StateSaveRequest.state', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1436, + serialized_end=1469, +) + + +_STATESAVERESPONSE = _descriptor.Descriptor( + name='StateSaveResponse', + full_name='core_service.StateSaveResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='core_service.StateSaveResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='core_service.StateSaveResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + 
message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='checkpointID', full_name='core_service.StateSaveResponse.checkpointID', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1471, + serialized_end=1562, +) + + +_STATELOADREQUEST = _descriptor.Descriptor( + name='StateLoadRequest', + full_name='core_service.StateLoadRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='checkpointID', full_name='core_service.StateLoadRequest.checkpointID', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1564, + serialized_end=1604, +) + + +_STATELOADRESPONSE = _descriptor.Descriptor( + name='StateLoadResponse', + full_name='core_service.StateLoadResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='core_service.StateLoadResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='core_service.StateLoadResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='state', full_name='core_service.StateLoadResponse.state', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1606, + serialized_end=1690, +) + + +_KILLREQUEST = _descriptor.Descriptor( + name='KillRequest', + full_name='core_service.KillRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + 
create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='instanceID', full_name='core_service.KillRequest.instanceID', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='signal', full_name='core_service.KillRequest.signal', index=1, + number=2, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='payload', full_name='core_service.KillRequest.payload', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1692, + serialized_end=1758, +) + + +_KILLRESPONSE = _descriptor.Descriptor( + name='KillResponse', + full_name='core_service.KillResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='core_service.KillResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='core_service.KillResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1760, + serialized_end=1824, +) + +_SCHEDULINGOPTIONS_RESOURCESENTRY.containing_type = _SCHEDULINGOPTIONS +_SCHEDULINGOPTIONS_EXTENSIONENTRY.containing_type = _SCHEDULINGOPTIONS +_SCHEDULINGOPTIONS_AFFINITYENTRY.fields_by_name['value'].enum_type = _AFFINITYTYPE +_SCHEDULINGOPTIONS_AFFINITYENTRY.containing_type = _SCHEDULINGOPTIONS +_SCHEDULINGOPTIONS.fields_by_name['resources'].message_type = _SCHEDULINGOPTIONS_RESOURCESENTRY +_SCHEDULINGOPTIONS.fields_by_name['extension'].message_type = _SCHEDULINGOPTIONS_EXTENSIONENTRY +_SCHEDULINGOPTIONS.fields_by_name['affinity'].message_type = _SCHEDULINGOPTIONS_AFFINITYENTRY +_CREATEREQUEST_CREATEOPTIONSENTRY.containing_type = _CREATEREQUEST +_CREATEREQUEST.fields_by_name['args'].message_type = rpc_dot_common__pb2._ARG +_CREATEREQUEST.fields_by_name['schedulingOps'].message_type = _SCHEDULINGOPTIONS 
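# --- editorial example (not part of the generated sources) -----------------
# The KillRequest descriptor defined just above declares three fields:
# instanceID (string), signal (int32) and payload (bytes). A minimal sketch of
# populating it through the generated class, assuming the package is
# importable as yr.rpc; the instance ID and signal values are illustrative.
from yr.rpc import core_service_pb2

req = core_service_pb2.KillRequest(
    instanceID="instance-0",          # hypothetical target instance
    signal=15,                        # plain int32, e.g. SIGTERM's conventional number
    payload=b"opaque handler data",   # optional bytes payload
)
# proto3 scalars always carry a value, so no presence check is needed:
assert req.signal == 15
data = req.SerializeToString()        # wire format, as sent over gRPC
assert core_service_pb2.KillRequest.FromString(data) == req
# --- end editorial example --------------------------------------------------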
+_CREATEREQUEST.fields_by_name['createOptions'].message_type = _CREATEREQUEST_CREATEOPTIONSENTRY +_CREATERESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_INVOKEREQUEST.fields_by_name['args'].message_type = rpc_dot_common__pb2._ARG +_INVOKERESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_CALLRESULT.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_CALLRESULTACK.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_TERMINATERESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_STATESAVERESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_STATELOADRESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_KILLRESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +DESCRIPTOR.message_types_by_name['SchedulingOptions'] = _SCHEDULINGOPTIONS +DESCRIPTOR.message_types_by_name['CreateRequest'] = _CREATEREQUEST +DESCRIPTOR.message_types_by_name['CreateResponse'] = _CREATERESPONSE +DESCRIPTOR.message_types_by_name['InvokeRequest'] = _INVOKEREQUEST +DESCRIPTOR.message_types_by_name['InvokeResponse'] = _INVOKERESPONSE +DESCRIPTOR.message_types_by_name['CallResult'] = _CALLRESULT +DESCRIPTOR.message_types_by_name['CallResultAck'] = _CALLRESULTACK +DESCRIPTOR.message_types_by_name['TerminateRequest'] = _TERMINATEREQUEST +DESCRIPTOR.message_types_by_name['TerminateResponse'] = _TERMINATERESPONSE +DESCRIPTOR.message_types_by_name['ExitRequest'] = _EXITREQUEST +DESCRIPTOR.message_types_by_name['ExitResponse'] = _EXITRESPONSE +DESCRIPTOR.message_types_by_name['StateSaveRequest'] = _STATESAVEREQUEST +DESCRIPTOR.message_types_by_name['StateSaveResponse'] = _STATESAVERESPONSE +DESCRIPTOR.message_types_by_name['StateLoadRequest'] = _STATELOADREQUEST +DESCRIPTOR.message_types_by_name['StateLoadResponse'] = _STATELOADRESPONSE +DESCRIPTOR.message_types_by_name['KillRequest'] = _KILLREQUEST +DESCRIPTOR.message_types_by_name['KillResponse'] = _KILLRESPONSE +DESCRIPTOR.enum_types_by_name['AffinityType'] = _AFFINITYTYPE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +SchedulingOptions = _reflection.GeneratedProtocolMessageType('SchedulingOptions', (_message.Message,), { + + 'ResourcesEntry' : _reflection.GeneratedProtocolMessageType('ResourcesEntry', (_message.Message,), { + 'DESCRIPTOR' : _SCHEDULINGOPTIONS_RESOURCESENTRY, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.SchedulingOptions.ResourcesEntry) + }) + , + + 'ExtensionEntry' : _reflection.GeneratedProtocolMessageType('ExtensionEntry', (_message.Message,), { + 'DESCRIPTOR' : _SCHEDULINGOPTIONS_EXTENSIONENTRY, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.SchedulingOptions.ExtensionEntry) + }) + , + + 'AffinityEntry' : _reflection.GeneratedProtocolMessageType('AffinityEntry', (_message.Message,), { + 'DESCRIPTOR' : _SCHEDULINGOPTIONS_AFFINITYENTRY, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.SchedulingOptions.AffinityEntry) + }) + , + 'DESCRIPTOR' : _SCHEDULINGOPTIONS, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.SchedulingOptions) + }) +_sym_db.RegisterMessage(SchedulingOptions) +_sym_db.RegisterMessage(SchedulingOptions.ResourcesEntry) +_sym_db.RegisterMessage(SchedulingOptions.ExtensionEntry) +_sym_db.RegisterMessage(SchedulingOptions.AffinityEntry) + +CreateRequest = 
_reflection.GeneratedProtocolMessageType('CreateRequest', (_message.Message,), { + + 'CreateOptionsEntry' : _reflection.GeneratedProtocolMessageType('CreateOptionsEntry', (_message.Message,), { + 'DESCRIPTOR' : _CREATEREQUEST_CREATEOPTIONSENTRY, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.CreateRequest.CreateOptionsEntry) + }) + , + 'DESCRIPTOR' : _CREATEREQUEST, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.CreateRequest) + }) +_sym_db.RegisterMessage(CreateRequest) +_sym_db.RegisterMessage(CreateRequest.CreateOptionsEntry) + +CreateResponse = _reflection.GeneratedProtocolMessageType('CreateResponse', (_message.Message,), { + 'DESCRIPTOR' : _CREATERESPONSE, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.CreateResponse) + }) +_sym_db.RegisterMessage(CreateResponse) + +InvokeRequest = _reflection.GeneratedProtocolMessageType('InvokeRequest', (_message.Message,), { + 'DESCRIPTOR' : _INVOKEREQUEST, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.InvokeRequest) + }) +_sym_db.RegisterMessage(InvokeRequest) + +InvokeResponse = _reflection.GeneratedProtocolMessageType('InvokeResponse', (_message.Message,), { + 'DESCRIPTOR' : _INVOKERESPONSE, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.InvokeResponse) + }) +_sym_db.RegisterMessage(InvokeResponse) + +CallResult = _reflection.GeneratedProtocolMessageType('CallResult', (_message.Message,), { + 'DESCRIPTOR' : _CALLRESULT, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.CallResult) + }) +_sym_db.RegisterMessage(CallResult) + +CallResultAck = _reflection.GeneratedProtocolMessageType('CallResultAck', (_message.Message,), { + 'DESCRIPTOR' : _CALLRESULTACK, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.CallResultAck) + }) +_sym_db.RegisterMessage(CallResultAck) + +TerminateRequest = _reflection.GeneratedProtocolMessageType('TerminateRequest', (_message.Message,), { + 'DESCRIPTOR' : _TERMINATEREQUEST, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.TerminateRequest) + }) +_sym_db.RegisterMessage(TerminateRequest) + +TerminateResponse = _reflection.GeneratedProtocolMessageType('TerminateResponse', (_message.Message,), { + 'DESCRIPTOR' : _TERMINATERESPONSE, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.TerminateResponse) + }) +_sym_db.RegisterMessage(TerminateResponse) + +ExitRequest = _reflection.GeneratedProtocolMessageType('ExitRequest', (_message.Message,), { + 'DESCRIPTOR' : _EXITREQUEST, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.ExitRequest) + }) +_sym_db.RegisterMessage(ExitRequest) + +ExitResponse = _reflection.GeneratedProtocolMessageType('ExitResponse', (_message.Message,), { + 'DESCRIPTOR' : _EXITRESPONSE, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.ExitResponse) + }) +_sym_db.RegisterMessage(ExitResponse) + +StateSaveRequest = _reflection.GeneratedProtocolMessageType('StateSaveRequest', (_message.Message,), { + 'DESCRIPTOR' : _STATESAVEREQUEST, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.StateSaveRequest) + }) +_sym_db.RegisterMessage(StateSaveRequest) + +StateSaveResponse = 
_reflection.GeneratedProtocolMessageType('StateSaveResponse', (_message.Message,), { + 'DESCRIPTOR' : _STATESAVERESPONSE, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.StateSaveResponse) + }) +_sym_db.RegisterMessage(StateSaveResponse) + +StateLoadRequest = _reflection.GeneratedProtocolMessageType('StateLoadRequest', (_message.Message,), { + 'DESCRIPTOR' : _STATELOADREQUEST, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.StateLoadRequest) + }) +_sym_db.RegisterMessage(StateLoadRequest) + +StateLoadResponse = _reflection.GeneratedProtocolMessageType('StateLoadResponse', (_message.Message,), { + 'DESCRIPTOR' : _STATELOADRESPONSE, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.StateLoadResponse) + }) +_sym_db.RegisterMessage(StateLoadResponse) + +KillRequest = _reflection.GeneratedProtocolMessageType('KillRequest', (_message.Message,), { + 'DESCRIPTOR' : _KILLREQUEST, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.KillRequest) + }) +_sym_db.RegisterMessage(KillRequest) + +KillResponse = _reflection.GeneratedProtocolMessageType('KillResponse', (_message.Message,), { + 'DESCRIPTOR' : _KILLRESPONSE, + '__module__' : 'rpc.core_service_pb2' + # @@protoc_insertion_point(class_scope:core_service.KillResponse) + }) +_sym_db.RegisterMessage(KillResponse) + + +DESCRIPTOR._options = None +_SCHEDULINGOPTIONS_RESOURCESENTRY._options = None +_SCHEDULINGOPTIONS_EXTENSIONENTRY._options = None +_SCHEDULINGOPTIONS_AFFINITYENTRY._options = None +_CREATEREQUEST_CREATEOPTIONSENTRY._options = None + +_CORESERVICE = _descriptor.ServiceDescriptor( + name='CoreService', + full_name='core_service.CoreService', + file=DESCRIPTOR, + index=0, + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_start=1941, + serialized_end=2466, + methods=[ + _descriptor.MethodDescriptor( + name='Create', + full_name='core_service.CoreService.Create', + index=0, + containing_service=None, + input_type=_CREATEREQUEST, + output_type=_CREATERESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Invoke', + full_name='core_service.CoreService.Invoke', + index=1, + containing_service=None, + input_type=_INVOKEREQUEST, + output_type=_INVOKERESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Terminate', + full_name='core_service.CoreService.Terminate', + index=2, + containing_service=None, + input_type=_TERMINATEREQUEST, + output_type=_TERMINATERESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Exit', + full_name='core_service.CoreService.Exit', + index=3, + containing_service=None, + input_type=_EXITREQUEST, + output_type=_EXITRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='SaveState', + full_name='core_service.CoreService.SaveState', + index=4, + containing_service=None, + input_type=_STATESAVEREQUEST, + output_type=_STATESAVERESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='LoadState', + full_name='core_service.CoreService.LoadState', + index=5, + containing_service=None, + input_type=_STATELOADREQUEST, + output_type=_STATELOADRESPONSE, + 
serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Kill', + full_name='core_service.CoreService.Kill', + index=6, + containing_service=None, + input_type=_KILLREQUEST, + output_type=_KILLRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), +]) +_sym_db.RegisterServiceDescriptor(_CORESERVICE) + +DESCRIPTOR.services_by_name['CoreService'] = _CORESERVICE + +# @@protoc_insertion_point(module_scope) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/core_service_pb2_grpc.py b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/core_service_pb2_grpc.py new file mode 100644 index 0000000000000000000000000000000000000000..c9c0f63764e02bd9122c143755e39aebd778056b --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/core_service_pb2_grpc.py @@ -0,0 +1,274 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from yr.rpc import core_service_pb2 as rpc_dot_core__service__pb2 + + +class CoreServiceStub(object): + """Core service provides APIs to the runtime. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.Create = channel.unary_unary( + '/core_service.CoreService/Create', + request_serializer=rpc_dot_core__service__pb2.CreateRequest.SerializeToString, + response_deserializer=rpc_dot_core__service__pb2.CreateResponse.FromString, + ) + self.Invoke = channel.unary_unary( + '/core_service.CoreService/Invoke', + request_serializer=rpc_dot_core__service__pb2.InvokeRequest.SerializeToString, + response_deserializer=rpc_dot_core__service__pb2.InvokeResponse.FromString, + ) + self.Terminate = channel.unary_unary( + '/core_service.CoreService/Terminate', + request_serializer=rpc_dot_core__service__pb2.TerminateRequest.SerializeToString, + response_deserializer=rpc_dot_core__service__pb2.TerminateResponse.FromString, + ) + self.Exit = channel.unary_unary( + '/core_service.CoreService/Exit', + request_serializer=rpc_dot_core__service__pb2.ExitRequest.SerializeToString, + response_deserializer=rpc_dot_core__service__pb2.ExitResponse.FromString, + ) + self.SaveState = channel.unary_unary( + '/core_service.CoreService/SaveState', + request_serializer=rpc_dot_core__service__pb2.StateSaveRequest.SerializeToString, + response_deserializer=rpc_dot_core__service__pb2.StateSaveResponse.FromString, + ) + self.LoadState = channel.unary_unary( + '/core_service.CoreService/LoadState', + request_serializer=rpc_dot_core__service__pb2.StateLoadRequest.SerializeToString, + response_deserializer=rpc_dot_core__service__pb2.StateLoadResponse.FromString, + ) + self.Kill = channel.unary_unary( + '/core_service.CoreService/Kill', + request_serializer=rpc_dot_core__service__pb2.KillRequest.SerializeToString, + response_deserializer=rpc_dot_core__service__pb2.KillResponse.FromString, + ) + + +class CoreServiceServicer(object): + """Core service provides APIs to the runtime. + """ + + def Create(self, request, context): + """Create an instance for the specified function. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Invoke(self, request, context): + """Invoke the created instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!')
+ + def Terminate(self, request, context): + """Terminate the created instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Exit(self, request, context): + """Exit the created instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def SaveState(self, request, context): + """Save the state of the created instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def LoadState(self, request, context): + """Load the state of the created instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Kill(self, request, context): + """Send a kill signal to the instance. + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_CoreServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Create': grpc.unary_unary_rpc_method_handler( + servicer.Create, + request_deserializer=rpc_dot_core__service__pb2.CreateRequest.FromString, + response_serializer=rpc_dot_core__service__pb2.CreateResponse.SerializeToString, + ), + 'Invoke': grpc.unary_unary_rpc_method_handler( + servicer.Invoke, + request_deserializer=rpc_dot_core__service__pb2.InvokeRequest.FromString, + response_serializer=rpc_dot_core__service__pb2.InvokeResponse.SerializeToString, + ), + 'Terminate': grpc.unary_unary_rpc_method_handler( + servicer.Terminate, + request_deserializer=rpc_dot_core__service__pb2.TerminateRequest.FromString, + response_serializer=rpc_dot_core__service__pb2.TerminateResponse.SerializeToString, + ), + 'Exit': grpc.unary_unary_rpc_method_handler( + servicer.Exit, + request_deserializer=rpc_dot_core__service__pb2.ExitRequest.FromString, + response_serializer=rpc_dot_core__service__pb2.ExitResponse.SerializeToString, + ), + 'SaveState': grpc.unary_unary_rpc_method_handler( + servicer.SaveState, + request_deserializer=rpc_dot_core__service__pb2.StateSaveRequest.FromString, + response_serializer=rpc_dot_core__service__pb2.StateSaveResponse.SerializeToString, + ), + 'LoadState': grpc.unary_unary_rpc_method_handler( + servicer.LoadState, + request_deserializer=rpc_dot_core__service__pb2.StateLoadRequest.FromString, + response_serializer=rpc_dot_core__service__pb2.StateLoadResponse.SerializeToString, + ), + 'Kill': grpc.unary_unary_rpc_method_handler( + servicer.Kill, + request_deserializer=rpc_dot_core__service__pb2.KillRequest.FromString, + response_serializer=rpc_dot_core__service__pb2.KillResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'core_service.CoreService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,))
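# --- editorial example (not part of the generated sources) -----------------
# A minimal sketch of serving and calling CoreService with the generated
# classes above, assuming the yr.rpc package is importable. The port, the
# DemoCoreService name and the Terminate behaviour are illustrative only;
# every method left unoverridden keeps the UNIMPLEMENTED default from
# CoreServiceServicer.
from concurrent import futures
import grpc
from yr.rpc import core_service_pb2, core_service_pb2_grpc

class DemoCoreService(core_service_pb2_grpc.CoreServiceServicer):
    def Terminate(self, request, context):
        # Acknowledge the request; a real servicer would tear the instance down.
        return core_service_pb2.TerminateResponse(message="terminated " + request.instanceID)

server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
core_service_pb2_grpc.add_CoreServiceServicer_to_server(DemoCoreService(), server)
server.add_insecure_port("[::]:50051")   # hypothetical port
server.start()

with grpc.insecure_channel("localhost:50051") as channel:
    stub = core_service_pb2_grpc.CoreServiceStub(channel)
    rsp = stub.Terminate(core_service_pb2.TerminateRequest(instanceID="instance-0"))
    print(rsp.message)                   # -> terminated instance-0

server.stop(None)
# --- end editorial example; the experimental convenience class generated by
# grpcio-tools continues below. ----------------------------------------------
+ + + # This class is part of an EXPERIMENTAL API.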
+class CoreService(object): + """Core service provides APIs to the runtime. + """ + + @staticmethod + def Create(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/core_service.CoreService/Create', + rpc_dot_core__service__pb2.CreateRequest.SerializeToString, + rpc_dot_core__service__pb2.CreateResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Invoke(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/core_service.CoreService/Invoke', + rpc_dot_core__service__pb2.InvokeRequest.SerializeToString, + rpc_dot_core__service__pb2.InvokeResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Terminate(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/core_service.CoreService/Terminate', + rpc_dot_core__service__pb2.TerminateRequest.SerializeToString, + rpc_dot_core__service__pb2.TerminateResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Exit(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/core_service.CoreService/Exit', + rpc_dot_core__service__pb2.ExitRequest.SerializeToString, + rpc_dot_core__service__pb2.ExitResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def SaveState(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/core_service.CoreService/SaveState', + rpc_dot_core__service__pb2.StateSaveRequest.SerializeToString, + rpc_dot_core__service__pb2.StateSaveResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def LoadState(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/core_service.CoreService/LoadState', + rpc_dot_core__service__pb2.StateLoadRequest.SerializeToString, + rpc_dot_core__service__pb2.StateLoadResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Kill(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return
grpc.experimental.unary_unary(request, target, '/core_service.CoreService/Kill', + rpc_dot_core__service__pb2.KillRequest.SerializeToString, + rpc_dot_core__service__pb2.KillResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_rpc_pb2.py b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_rpc_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..6d33f04f9229c0c4527e37042aeb5ec1d458ca11 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_rpc_pb2.py @@ -0,0 +1,413 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! +# source: rpc/runtime_rpc.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from yr.rpc import core_service_pb2 as rpc_dot_core__service__pb2 +from yr.rpc import runtime_service_pb2 as rpc_dot_runtime__service__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='rpc/runtime_rpc.proto', + package='runtime_rpc', + syntax='proto3', + serialized_options=b'Z\013grpc/pb;api', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x15rpc/runtime_rpc.proto\x12\x0bruntime_rpc\x1a\x16rpc/core_service.proto\x1a\x19rpc/runtime_service.proto\"\xf5\x0b\n\x10StreamingMessage\x12\x11\n\tmessageID\x18\x01 \x01(\t\x12\x30\n\tcreateReq\x18\x02 \x01(\x0b\x32\x1b.core_service.CreateRequestH\x00\x12\x31\n\tcreateRsp\x18\x03 \x01(\x0b\x32\x1c.core_service.CreateResponseH\x00\x12\x30\n\tinvokeReq\x18\x04 \x01(\x0b\x32\x1b.core_service.InvokeRequestH\x00\x12\x31\n\tinvokeRsp\x18\x05 \x01(\x0b\x32\x1c.core_service.InvokeResponseH\x00\x12,\n\x07\x65xitReq\x18\x06 \x01(\x0b\x32\x19.core_service.ExitRequestH\x00\x12-\n\x07\x65xitRsp\x18\x07 \x01(\x0b\x32\x1a.core_service.ExitResponseH\x00\x12\x31\n\x07saveReq\x18\x08 \x01(\x0b\x32\x1e.core_service.StateSaveRequestH\x00\x12\x32\n\x07saveRsp\x18\t \x01(\x0b\x32\x1f.core_service.StateSaveResponseH\x00\x12\x31\n\x07loadReq\x18\n \x01(\x0b\x32\x1e.core_service.StateLoadRequestH\x00\x12\x32\n\x07loadRsp\x18\x0b \x01(\x0b\x32\x1f.core_service.StateLoadResponseH\x00\x12,\n\x07killReq\x18\x0c \x01(\x0b\x32\x19.core_service.KillRequestH\x00\x12-\n\x07killRsp\x18\r \x01(\x0b\x32\x1a.core_service.KillResponseH\x00\x12\x31\n\rcallResultReq\x18\x0e \x01(\x0b\x32\x18.core_service.CallResultH\x00\x12\x34\n\rcallResultAck\x18\x0f \x01(\x0b\x32\x1b.core_service.CallResultAckH\x00\x12/\n\x07\x63\x61llReq\x18\x10 \x01(\x0b\x32\x1c.runtime_service.CallRequestH\x00\x12\x30\n\x07\x63\x61llRsp\x18\x11 \x01(\x0b\x32\x1d.runtime_service.CallResponseH\x00\x12\x33\n\tnotifyReq\x18\x12 \x01(\x0b\x32\x1e.runtime_service.NotifyRequestH\x00\x12\x34\n\tnotifyRsp\x18\x13 \x01(\x0b\x32\x1f.runtime_service.NotifyResponseH\x00\x12;\n\rcheckpointReq\x18\x14 \x01(\x0b\x32\".runtime_service.CheckpointRequestH\x00\x12<\n\rcheckpointRsp\x18\x15 \x01(\x0b\x32#.runtime_service.CheckpointResponseH\x00\x12\x35\n\nrecoverReq\x18\x16 \x01(\x0b\x32\x1f.runtime_service.RecoverRequestH\x00\x12\x36\n\nrecoverRsp\x18\x17 \x01(\x0b\x32 .runtime_service.RecoverResponseH\x00\x12\x37\n\x0bshutdownReq\x18\x18 \x01(\x0b\x32 
.runtime_service.ShutdownRequestH\x00\x12\x38\n\x0bshutdownRsp\x18\x19 \x01(\x0b\x32!.runtime_service.ShutdownResponseH\x00\x12\x33\n\tsignalReq\x18\x1a \x01(\x0b\x32\x1e.runtime_service.SignalRequestH\x00\x12\x34\n\tsignalRsp\x18\x1b \x01(\x0b\x32\x1f.runtime_service.SignalResponseH\x00\x12\x39\n\x0cheartbeatReq\x18\x1c \x01(\x0b\x32!.runtime_service.HeartbeatRequestH\x00\x12:\n\x0cheartbeatRsp\x18\x1d \x01(\x0b\x32\".runtime_service.HeartbeatResponseH\x00\x42\x06\n\x04\x62ody2a\n\nRuntimeRPC\x12S\n\rMessageStream\x12\x1d.runtime_rpc.StreamingMessage\x1a\x1d.runtime_rpc.StreamingMessage\"\x00(\x01\x30\x01\x42\rZ\x0bgrpc/pb;apib\x06proto3' + , + dependencies=[rpc_dot_core__service__pb2.DESCRIPTOR,rpc_dot_runtime__service__pb2.DESCRIPTOR,]) + + + + +_STREAMINGMESSAGE = _descriptor.Descriptor( + name='StreamingMessage', + full_name='runtime_rpc.StreamingMessage', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='messageID', full_name='runtime_rpc.StreamingMessage.messageID', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='createReq', full_name='runtime_rpc.StreamingMessage.createReq', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='createRsp', full_name='runtime_rpc.StreamingMessage.createRsp', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='invokeReq', full_name='runtime_rpc.StreamingMessage.invokeReq', index=3, + number=4, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='invokeRsp', full_name='runtime_rpc.StreamingMessage.invokeRsp', index=4, + number=5, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='exitReq', full_name='runtime_rpc.StreamingMessage.exitReq', index=5, + number=6, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='exitRsp', full_name='runtime_rpc.StreamingMessage.exitRsp', index=6, + number=7, type=11, cpp_type=10, label=1, + has_default_value=False, 
default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='saveReq', full_name='runtime_rpc.StreamingMessage.saveReq', index=7, + number=8, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='saveRsp', full_name='runtime_rpc.StreamingMessage.saveRsp', index=8, + number=9, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='loadReq', full_name='runtime_rpc.StreamingMessage.loadReq', index=9, + number=10, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='loadRsp', full_name='runtime_rpc.StreamingMessage.loadRsp', index=10, + number=11, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='killReq', full_name='runtime_rpc.StreamingMessage.killReq', index=11, + number=12, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='killRsp', full_name='runtime_rpc.StreamingMessage.killRsp', index=12, + number=13, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='callResultReq', full_name='runtime_rpc.StreamingMessage.callResultReq', index=13, + number=14, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='callResultAck', full_name='runtime_rpc.StreamingMessage.callResultAck', index=14, + number=15, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='callReq', full_name='runtime_rpc.StreamingMessage.callReq', index=15, + number=16, type=11, cpp_type=10, label=1, + 
has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='callRsp', full_name='runtime_rpc.StreamingMessage.callRsp', index=16, + number=17, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='notifyReq', full_name='runtime_rpc.StreamingMessage.notifyReq', index=17, + number=18, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='notifyRsp', full_name='runtime_rpc.StreamingMessage.notifyRsp', index=18, + number=19, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='checkpointReq', full_name='runtime_rpc.StreamingMessage.checkpointReq', index=19, + number=20, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='checkpointRsp', full_name='runtime_rpc.StreamingMessage.checkpointRsp', index=20, + number=21, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='recoverReq', full_name='runtime_rpc.StreamingMessage.recoverReq', index=21, + number=22, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='recoverRsp', full_name='runtime_rpc.StreamingMessage.recoverRsp', index=22, + number=23, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='shutdownReq', full_name='runtime_rpc.StreamingMessage.shutdownReq', index=23, + number=24, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='shutdownRsp', full_name='runtime_rpc.StreamingMessage.shutdownRsp', index=24, + 
number=25, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='signalReq', full_name='runtime_rpc.StreamingMessage.signalReq', index=25, + number=26, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='signalRsp', full_name='runtime_rpc.StreamingMessage.signalRsp', index=26, + number=27, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='heartbeatReq', full_name='runtime_rpc.StreamingMessage.heartbeatReq', index=27, + number=28, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='heartbeatRsp', full_name='runtime_rpc.StreamingMessage.heartbeatRsp', index=28, + number=29, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + _descriptor.OneofDescriptor( + name='body', full_name='runtime_rpc.StreamingMessage.body', + index=0, containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[]), + ], + serialized_start=90, + serialized_end=1615, +) + +_STREAMINGMESSAGE.fields_by_name['createReq'].message_type = rpc_dot_core__service__pb2._CREATEREQUEST +_STREAMINGMESSAGE.fields_by_name['createRsp'].message_type = rpc_dot_core__service__pb2._CREATERESPONSE +_STREAMINGMESSAGE.fields_by_name['invokeReq'].message_type = rpc_dot_core__service__pb2._INVOKEREQUEST +_STREAMINGMESSAGE.fields_by_name['invokeRsp'].message_type = rpc_dot_core__service__pb2._INVOKERESPONSE +_STREAMINGMESSAGE.fields_by_name['exitReq'].message_type = rpc_dot_core__service__pb2._EXITREQUEST +_STREAMINGMESSAGE.fields_by_name['exitRsp'].message_type = rpc_dot_core__service__pb2._EXITRESPONSE +_STREAMINGMESSAGE.fields_by_name['saveReq'].message_type = rpc_dot_core__service__pb2._STATESAVEREQUEST +_STREAMINGMESSAGE.fields_by_name['saveRsp'].message_type = rpc_dot_core__service__pb2._STATESAVERESPONSE +_STREAMINGMESSAGE.fields_by_name['loadReq'].message_type = rpc_dot_core__service__pb2._STATELOADREQUEST +_STREAMINGMESSAGE.fields_by_name['loadRsp'].message_type = rpc_dot_core__service__pb2._STATELOADRESPONSE +_STREAMINGMESSAGE.fields_by_name['killReq'].message_type = rpc_dot_core__service__pb2._KILLREQUEST +_STREAMINGMESSAGE.fields_by_name['killRsp'].message_type = rpc_dot_core__service__pb2._KILLRESPONSE 
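# --- editorial example (not part of the generated sources) -----------------
# The statements around this point attach concrete request/response message
# types to StreamingMessage, whose request and response fields all share the
# single 'body' oneof (only messageID sits outside it). A short sketch of the
# resulting oneof behaviour, assuming the yr.rpc package is importable; the
# IDs are illustrative.
from yr.rpc import core_service_pb2, runtime_rpc_pb2

msg = runtime_rpc_pb2.StreamingMessage(messageID="msg-1")
msg.killReq.CopyFrom(core_service_pb2.KillRequest(instanceID="instance-0"))
assert msg.WhichOneof("body") == "killReq"
# Setting another arm of the oneof silently clears killReq:
msg.heartbeatReq.SetInParent()
assert msg.WhichOneof("body") == "heartbeatReq"
# --- end editorial example --------------------------------------------------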
+_STREAMINGMESSAGE.fields_by_name['callResultReq'].message_type = rpc_dot_core__service__pb2._CALLRESULT +_STREAMINGMESSAGE.fields_by_name['callResultAck'].message_type = rpc_dot_core__service__pb2._CALLRESULTACK +_STREAMINGMESSAGE.fields_by_name['callReq'].message_type = rpc_dot_runtime__service__pb2._CALLREQUEST +_STREAMINGMESSAGE.fields_by_name['callRsp'].message_type = rpc_dot_runtime__service__pb2._CALLRESPONSE +_STREAMINGMESSAGE.fields_by_name['notifyReq'].message_type = rpc_dot_runtime__service__pb2._NOTIFYREQUEST +_STREAMINGMESSAGE.fields_by_name['notifyRsp'].message_type = rpc_dot_runtime__service__pb2._NOTIFYRESPONSE +_STREAMINGMESSAGE.fields_by_name['checkpointReq'].message_type = rpc_dot_runtime__service__pb2._CHECKPOINTREQUEST +_STREAMINGMESSAGE.fields_by_name['checkpointRsp'].message_type = rpc_dot_runtime__service__pb2._CHECKPOINTRESPONSE +_STREAMINGMESSAGE.fields_by_name['recoverReq'].message_type = rpc_dot_runtime__service__pb2._RECOVERREQUEST +_STREAMINGMESSAGE.fields_by_name['recoverRsp'].message_type = rpc_dot_runtime__service__pb2._RECOVERRESPONSE +_STREAMINGMESSAGE.fields_by_name['shutdownReq'].message_type = rpc_dot_runtime__service__pb2._SHUTDOWNREQUEST +_STREAMINGMESSAGE.fields_by_name['shutdownRsp'].message_type = rpc_dot_runtime__service__pb2._SHUTDOWNRESPONSE +_STREAMINGMESSAGE.fields_by_name['signalReq'].message_type = rpc_dot_runtime__service__pb2._SIGNALREQUEST +_STREAMINGMESSAGE.fields_by_name['signalRsp'].message_type = rpc_dot_runtime__service__pb2._SIGNALRESPONSE +_STREAMINGMESSAGE.fields_by_name['heartbeatReq'].message_type = rpc_dot_runtime__service__pb2._HEARTBEATREQUEST +_STREAMINGMESSAGE.fields_by_name['heartbeatRsp'].message_type = rpc_dot_runtime__service__pb2._HEARTBEATRESPONSE +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['createReq']) +_STREAMINGMESSAGE.fields_by_name['createReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['createRsp']) +_STREAMINGMESSAGE.fields_by_name['createRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['invokeReq']) +_STREAMINGMESSAGE.fields_by_name['invokeReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['invokeRsp']) +_STREAMINGMESSAGE.fields_by_name['invokeRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['exitReq']) +_STREAMINGMESSAGE.fields_by_name['exitReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['exitRsp']) +_STREAMINGMESSAGE.fields_by_name['exitRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['saveReq']) +_STREAMINGMESSAGE.fields_by_name['saveReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['saveRsp']) +_STREAMINGMESSAGE.fields_by_name['saveRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['loadReq']) 
+_STREAMINGMESSAGE.fields_by_name['loadReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['loadRsp']) +_STREAMINGMESSAGE.fields_by_name['loadRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['killReq']) +_STREAMINGMESSAGE.fields_by_name['killReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['killRsp']) +_STREAMINGMESSAGE.fields_by_name['killRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['callResultReq']) +_STREAMINGMESSAGE.fields_by_name['callResultReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['callResultAck']) +_STREAMINGMESSAGE.fields_by_name['callResultAck'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['callReq']) +_STREAMINGMESSAGE.fields_by_name['callReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['callRsp']) +_STREAMINGMESSAGE.fields_by_name['callRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['notifyReq']) +_STREAMINGMESSAGE.fields_by_name['notifyReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['notifyRsp']) +_STREAMINGMESSAGE.fields_by_name['notifyRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['checkpointReq']) +_STREAMINGMESSAGE.fields_by_name['checkpointReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['checkpointRsp']) +_STREAMINGMESSAGE.fields_by_name['checkpointRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['recoverReq']) +_STREAMINGMESSAGE.fields_by_name['recoverReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['recoverRsp']) +_STREAMINGMESSAGE.fields_by_name['recoverRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['shutdownReq']) +_STREAMINGMESSAGE.fields_by_name['shutdownReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['shutdownRsp']) +_STREAMINGMESSAGE.fields_by_name['shutdownRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['signalReq']) +_STREAMINGMESSAGE.fields_by_name['signalReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] 
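# --- editorial example (not part of the generated sources) -----------------
# A hedged sketch of the client side of the bidirectional stream, using the
# RuntimeRPCStub generated in runtime_rpc_pb2_grpc.py further below. The
# address is an assumption; the call follows the standard grpc-python
# stream_stream pattern: pass a request iterator, then iterate the replies.
import grpc
from yr.rpc import runtime_rpc_pb2, runtime_rpc_pb2_grpc

def outgoing():
    # Yield a single heartbeat message; a real runtime keeps the stream open.
    msg = runtime_rpc_pb2.StreamingMessage(messageID="hb-1")
    msg.heartbeatReq.SetInParent()
    yield msg

with grpc.insecure_channel("localhost:50051") as channel:  # hypothetical address
    stub = runtime_rpc_pb2_grpc.RuntimeRPCStub(channel)
    for reply in stub.MessageStream(outgoing()):
        print(reply.messageID, reply.WhichOneof("body"))
# --- end editorial example --------------------------------------------------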
+_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['signalRsp']) +_STREAMINGMESSAGE.fields_by_name['signalRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['heartbeatReq']) +_STREAMINGMESSAGE.fields_by_name['heartbeatReq'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +_STREAMINGMESSAGE.oneofs_by_name['body'].fields.append( + _STREAMINGMESSAGE.fields_by_name['heartbeatRsp']) +_STREAMINGMESSAGE.fields_by_name['heartbeatRsp'].containing_oneof = _STREAMINGMESSAGE.oneofs_by_name['body'] +DESCRIPTOR.message_types_by_name['StreamingMessage'] = _STREAMINGMESSAGE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +StreamingMessage = _reflection.GeneratedProtocolMessageType('StreamingMessage', (_message.Message,), { + 'DESCRIPTOR' : _STREAMINGMESSAGE, + '__module__' : 'rpc.runtime_rpc_pb2' + # @@protoc_insertion_point(class_scope:runtime_rpc.StreamingMessage) + }) +_sym_db.RegisterMessage(StreamingMessage) + + +DESCRIPTOR._options = None + +_RUNTIMERPC = _descriptor.ServiceDescriptor( + name='RuntimeRPC', + full_name='runtime_rpc.RuntimeRPC', + file=DESCRIPTOR, + index=0, + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_start=1617, + serialized_end=1714, + methods=[ + _descriptor.MethodDescriptor( + name='MessageStream', + full_name='runtime_rpc.RuntimeRPC.MessageStream', + index=0, + containing_service=None, + input_type=_STREAMINGMESSAGE, + output_type=_STREAMINGMESSAGE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), +]) +_sym_db.RegisterServiceDescriptor(_RUNTIMERPC) + +DESCRIPTOR.services_by_name['RuntimeRPC'] = _RUNTIMERPC + +# @@protoc_insertion_point(module_scope) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_rpc_pb2_grpc.py b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_rpc_pb2_grpc.py new file mode 100644 index 0000000000000000000000000000000000000000..674f0f4084e7d08ad6194e4660c981d197abffe5 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_rpc_pb2_grpc.py @@ -0,0 +1,70 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from yr.rpc import runtime_rpc_pb2 as rpc_dot_runtime__rpc__pb2 + + +class RuntimeRPCStub(object): + """RuntimeRPC provides a bidirectional streaming RPC interface. + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel.
+ """ + self.MessageStream = channel.stream_stream( + '/runtime_rpc.RuntimeRPC/MessageStream', + request_serializer=rpc_dot_runtime__rpc__pb2.StreamingMessage.SerializeToString, + response_deserializer=rpc_dot_runtime__rpc__pb2.StreamingMessage.FromString, + ) + + +class RuntimeRPCServicer(object): + """RuntimeRPC provide bidirectional streaming RPC interface + """ + + def MessageStream(self, request_iterator, context): + """build bidirection grpc communication channel, different message body type specify different api handler + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RuntimeRPCServicer_to_server(servicer, server): + rpc_method_handlers = { + 'MessageStream': grpc.stream_stream_rpc_method_handler( + servicer.MessageStream, + request_deserializer=rpc_dot_runtime__rpc__pb2.StreamingMessage.FromString, + response_serializer=rpc_dot_runtime__rpc__pb2.StreamingMessage.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'runtime_rpc.RuntimeRPC', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL API. +class RuntimeRPC(object): + """RuntimeRPC provide bidirectional streaming RPC interface + """ + + @staticmethod + def MessageStream(request_iterator, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.stream_stream(request_iterator, target, '/runtime_rpc.RuntimeRPC/MessageStream', + rpc_dot_runtime__rpc__pb2.StreamingMessage.SerializeToString, + rpc_dot_runtime__rpc__pb2.StreamingMessage.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_service_pb2.py b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_service_pb2.py new file mode 100644 index 0000000000000000000000000000000000000000..1c4ef7a1d57f7fc9b3a813cd1ca4da58d56c4720 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_service_pb2.py @@ -0,0 +1,931 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: rpc/runtime_service.proto +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import message as _message +from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + +from yr.rpc import common_pb2 as rpc_dot_common__pb2 + + +DESCRIPTOR = _descriptor.FileDescriptor( + name='rpc/runtime_service.proto', + package='runtime_service', + syntax='proto3', + serialized_options=b'Z\027grpc/pb/runtime;runtime', + create_key=_descriptor._internal_create_key, + serialized_pb=b'\n\x19rpc/runtime_service.proto\x12\x0fruntime_service\x1a\x10rpc/common.proto\"\xb1\x02\n\x0b\x43\x61llRequest\x12\x10\n\x08\x66unction\x18\x01 \x01(\t\x12\x19\n\x04\x61rgs\x18\x02 \x03(\x0b\x32\x0b.common.Arg\x12\x0f\n\x07traceID\x18\x03 \x01(\t\x12\x16\n\x0ereturnObjectID\x18\x04 \x01(\t\x12\x10\n\x08isCreate\x18\x05 \x01(\x08\x12\x10\n\x08senderID\x18\x06 \x01(\t\x12\x11\n\trequestID\x18\x07 \x01(\t\x12\x17\n\x0freturnObjectIDs\x18\x08 \x03(\t\x12\x46\n\rcreateOptions\x18\t \x03(\x0b\x32/.runtime_service.CallRequest.CreateOptionsEntry\x1a\x34\n\x12\x43reateOptionsEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"@\n\x0c\x43\x61llResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\")\n\x11\x43heckpointRequest\x12\x14\n\x0c\x63heckpointID\x18\x01 \x01(\t\"U\n\x12\x43heckpointResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\x12\r\n\x05state\x18\x03 \x01(\x0c\"\x1f\n\x0eRecoverRequest\x12\r\n\x05state\x18\x01 \x01(\x0c\"C\n\x0fRecoverResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"0\n\x13GracefulExitRequest\x12\x19\n\x11gracePeriodSecond\x18\x01 \x01(\x04\"H\n\x14GracefulExitResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\",\n\x0fShutdownRequest\x12\x19\n\x11gracePeriodSecond\x18\x01 \x01(\x04\"D\n\x10ShutdownResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t\"T\n\rNotifyRequest\x12\x11\n\trequestID\x18\x01 \x01(\t\x12\x1f\n\x04\x63ode\x18\x02 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x03 \x01(\t\"\x10\n\x0eNotifyResponse\"\x12\n\x10HeartbeatRequest\"\x13\n\x11HeartbeatResponse\"0\n\rSignalRequest\x12\x0e\n\x06signal\x18\x01 \x01(\x05\x12\x0f\n\x07payload\x18\x02 \x01(\x0c\"B\n\x0eSignalResponse\x12\x1f\n\x04\x63ode\x18\x01 \x01(\x0e\x32\x11.common.ErrorCode\x12\x0f\n\x07message\x18\x02 \x01(\t2\xa8\x05\n\x0eRuntimeService\x12\x45\n\x04\x43\x61ll\x12\x1c.runtime_service.CallRequest\x1a\x1d.runtime_service.CallResponse\"\x00\x12Q\n\x0cNotifyResult\x12\x1e.runtime_service.NotifyRequest\x1a\x1f.runtime_service.NotifyResponse\"\x00\x12W\n\nCheckpoint\x12\".runtime_service.CheckpointRequest\x1a#.runtime_service.CheckpointResponse\"\x00\x12N\n\x07Recover\x12\x1f.runtime_service.RecoverRequest\x1a .runtime_service.RecoverResponse\"\x00\x12]\n\x0cGracefulExit\x12$.runtime_service.GracefulExitRequest\x1a%.runtime_service.GracefulExitResponse\"\x00\x12Q\n\x08Shutdown\x12 
.runtime_service.ShutdownRequest\x1a!.runtime_service.ShutdownResponse\"\x00\x12T\n\tHeartbeat\x12!.runtime_service.HeartbeatRequest\x1a\".runtime_service.HeartbeatResponse\"\x00\x12K\n\x06Signal\x12\x1e.runtime_service.SignalRequest\x1a\x1f.runtime_service.SignalResponse\"\x00\x42\x19Z\x17grpc/pb/runtime;runtimeb\x06proto3' + , + dependencies=[rpc_dot_common__pb2.DESCRIPTOR,]) + + + + +_CALLREQUEST_CREATEOPTIONSENTRY = _descriptor.Descriptor( + name='CreateOptionsEntry', + full_name='runtime_service.CallRequest.CreateOptionsEntry', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='key', full_name='runtime_service.CallRequest.CreateOptionsEntry.key', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='value', full_name='runtime_service.CallRequest.CreateOptionsEntry.value', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=b'8\001', + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=318, + serialized_end=370, +) + +_CALLREQUEST = _descriptor.Descriptor( + name='CallRequest', + full_name='runtime_service.CallRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='function', full_name='runtime_service.CallRequest.function', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='args', full_name='runtime_service.CallRequest.args', index=1, + number=2, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='traceID', full_name='runtime_service.CallRequest.traceID', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='returnObjectID', full_name='runtime_service.CallRequest.returnObjectID', index=3, + number=4, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + 
_descriptor.FieldDescriptor( + name='isCreate', full_name='runtime_service.CallRequest.isCreate', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=False, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='senderID', full_name='runtime_service.CallRequest.senderID', index=5, + number=6, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='requestID', full_name='runtime_service.CallRequest.requestID', index=6, + number=7, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='returnObjectIDs', full_name='runtime_service.CallRequest.returnObjectIDs', index=7, + number=8, type=9, cpp_type=9, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='createOptions', full_name='runtime_service.CallRequest.createOptions', index=8, + number=9, type=11, cpp_type=10, label=3, + has_default_value=False, default_value=[], + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[_CALLREQUEST_CREATEOPTIONSENTRY, ], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=65, + serialized_end=370, +) + + +_CALLRESPONSE = _descriptor.Descriptor( + name='CallResponse', + full_name='runtime_service.CallResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='runtime_service.CallResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='runtime_service.CallResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=372, + serialized_end=436, +) + + +_CHECKPOINTREQUEST = _descriptor.Descriptor( + 
name='CheckpointRequest', + full_name='runtime_service.CheckpointRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='checkpointID', full_name='runtime_service.CheckpointRequest.checkpointID', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=438, + serialized_end=479, +) + + +_CHECKPOINTRESPONSE = _descriptor.Descriptor( + name='CheckpointResponse', + full_name='runtime_service.CheckpointResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='runtime_service.CheckpointResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='runtime_service.CheckpointResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='state', full_name='runtime_service.CheckpointResponse.state', index=2, + number=3, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=481, + serialized_end=566, +) + + +_RECOVERREQUEST = _descriptor.Descriptor( + name='RecoverRequest', + full_name='runtime_service.RecoverRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='state', full_name='runtime_service.RecoverRequest.state', index=0, + number=1, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=568, + serialized_end=599, +) + + +_RECOVERRESPONSE = _descriptor.Descriptor( + name='RecoverResponse', + full_name='runtime_service.RecoverResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + 
create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='runtime_service.RecoverResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='runtime_service.RecoverResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=601, + serialized_end=668, +) + + +_GRACEFULEXITREQUEST = _descriptor.Descriptor( + name='GracefulExitRequest', + full_name='runtime_service.GracefulExitRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='gracePeriodSecond', full_name='runtime_service.GracefulExitRequest.gracePeriodSecond', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=670, + serialized_end=718, +) + + +_GRACEFULEXITRESPONSE = _descriptor.Descriptor( + name='GracefulExitResponse', + full_name='runtime_service.GracefulExitResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='runtime_service.GracefulExitResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='runtime_service.GracefulExitResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=720, + serialized_end=792, +) + + +_SHUTDOWNREQUEST = _descriptor.Descriptor( + name='ShutdownRequest', + full_name='runtime_service.ShutdownRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='gracePeriodSecond', 
full_name='runtime_service.ShutdownRequest.gracePeriodSecond', index=0, + number=1, type=4, cpp_type=4, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=794, + serialized_end=838, +) + + +_SHUTDOWNRESPONSE = _descriptor.Descriptor( + name='ShutdownResponse', + full_name='runtime_service.ShutdownResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='runtime_service.ShutdownResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='runtime_service.ShutdownResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=840, + serialized_end=908, +) + + +_NOTIFYREQUEST = _descriptor.Descriptor( + name='NotifyRequest', + full_name='runtime_service.NotifyRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='requestID', full_name='runtime_service.NotifyRequest.requestID', index=0, + number=1, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='code', full_name='runtime_service.NotifyRequest.code', index=1, + number=2, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='runtime_service.NotifyRequest.message', index=2, + number=3, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=910, + serialized_end=994, +) + + +_NOTIFYRESPONSE = _descriptor.Descriptor( + name='NotifyResponse', + 
full_name='runtime_service.NotifyResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=996, + serialized_end=1012, +) + + +_HEARTBEATREQUEST = _descriptor.Descriptor( + name='HeartbeatRequest', + full_name='runtime_service.HeartbeatRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1014, + serialized_end=1032, +) + + +_HEARTBEATRESPONSE = _descriptor.Descriptor( + name='HeartbeatResponse', + full_name='runtime_service.HeartbeatResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1034, + serialized_end=1053, +) + + +_SIGNALREQUEST = _descriptor.Descriptor( + name='SignalRequest', + full_name='runtime_service.SignalRequest', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='signal', full_name='runtime_service.SignalRequest.signal', index=0, + number=1, type=5, cpp_type=1, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='payload', full_name='runtime_service.SignalRequest.payload', index=1, + number=2, type=12, cpp_type=9, label=1, + has_default_value=False, default_value=b"", + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1055, + serialized_end=1103, +) + + +_SIGNALRESPONSE = _descriptor.Descriptor( + name='SignalResponse', + full_name='runtime_service.SignalResponse', + filename=None, + file=DESCRIPTOR, + containing_type=None, + create_key=_descriptor._internal_create_key, + fields=[ + _descriptor.FieldDescriptor( + name='code', full_name='runtime_service.SignalResponse.code', index=0, + number=1, type=14, cpp_type=8, label=1, + has_default_value=False, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key), + _descriptor.FieldDescriptor( + name='message', full_name='runtime_service.SignalResponse.message', index=1, + number=2, type=9, cpp_type=9, label=1, + has_default_value=False, default_value=b"".decode('utf-8'), + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + serialized_options=None, file=DESCRIPTOR, 
create_key=_descriptor._internal_create_key), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + serialized_options=None, + is_extendable=False, + syntax='proto3', + extension_ranges=[], + oneofs=[ + ], + serialized_start=1105, + serialized_end=1171, +) + +_CALLREQUEST_CREATEOPTIONSENTRY.containing_type = _CALLREQUEST +_CALLREQUEST.fields_by_name['args'].message_type = rpc_dot_common__pb2._ARG +_CALLREQUEST.fields_by_name['createOptions'].message_type = _CALLREQUEST_CREATEOPTIONSENTRY +_CALLRESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_CHECKPOINTRESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_RECOVERRESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_GRACEFULEXITRESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_SHUTDOWNRESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_NOTIFYREQUEST.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +_SIGNALRESPONSE.fields_by_name['code'].enum_type = rpc_dot_common__pb2._ERRORCODE +DESCRIPTOR.message_types_by_name['CallRequest'] = _CALLREQUEST +DESCRIPTOR.message_types_by_name['CallResponse'] = _CALLRESPONSE +DESCRIPTOR.message_types_by_name['CheckpointRequest'] = _CHECKPOINTREQUEST +DESCRIPTOR.message_types_by_name['CheckpointResponse'] = _CHECKPOINTRESPONSE +DESCRIPTOR.message_types_by_name['RecoverRequest'] = _RECOVERREQUEST +DESCRIPTOR.message_types_by_name['RecoverResponse'] = _RECOVERRESPONSE +DESCRIPTOR.message_types_by_name['GracefulExitRequest'] = _GRACEFULEXITREQUEST +DESCRIPTOR.message_types_by_name['GracefulExitResponse'] = _GRACEFULEXITRESPONSE +DESCRIPTOR.message_types_by_name['ShutdownRequest'] = _SHUTDOWNREQUEST +DESCRIPTOR.message_types_by_name['ShutdownResponse'] = _SHUTDOWNRESPONSE +DESCRIPTOR.message_types_by_name['NotifyRequest'] = _NOTIFYREQUEST +DESCRIPTOR.message_types_by_name['NotifyResponse'] = _NOTIFYRESPONSE +DESCRIPTOR.message_types_by_name['HeartbeatRequest'] = _HEARTBEATREQUEST +DESCRIPTOR.message_types_by_name['HeartbeatResponse'] = _HEARTBEATRESPONSE +DESCRIPTOR.message_types_by_name['SignalRequest'] = _SIGNALREQUEST +DESCRIPTOR.message_types_by_name['SignalResponse'] = _SIGNALRESPONSE +_sym_db.RegisterFileDescriptor(DESCRIPTOR) + +CallRequest = _reflection.GeneratedProtocolMessageType('CallRequest', (_message.Message,), { + + 'CreateOptionsEntry' : _reflection.GeneratedProtocolMessageType('CreateOptionsEntry', (_message.Message,), { + 'DESCRIPTOR' : _CALLREQUEST_CREATEOPTIONSENTRY, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.CallRequest.CreateOptionsEntry) + }) + , + 'DESCRIPTOR' : _CALLREQUEST, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.CallRequest) + }) +_sym_db.RegisterMessage(CallRequest) +_sym_db.RegisterMessage(CallRequest.CreateOptionsEntry) + +CallResponse = _reflection.GeneratedProtocolMessageType('CallResponse', (_message.Message,), { + 'DESCRIPTOR' : _CALLRESPONSE, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.CallResponse) + }) +_sym_db.RegisterMessage(CallResponse) + +CheckpointRequest = _reflection.GeneratedProtocolMessageType('CheckpointRequest', (_message.Message,), { + 'DESCRIPTOR' : _CHECKPOINTREQUEST, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.CheckpointRequest) + }) +_sym_db.RegisterMessage(CheckpointRequest) + 
+CheckpointResponse = _reflection.GeneratedProtocolMessageType('CheckpointResponse', (_message.Message,), { + 'DESCRIPTOR' : _CHECKPOINTRESPONSE, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.CheckpointResponse) + }) +_sym_db.RegisterMessage(CheckpointResponse) + +RecoverRequest = _reflection.GeneratedProtocolMessageType('RecoverRequest', (_message.Message,), { + 'DESCRIPTOR' : _RECOVERREQUEST, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.RecoverRequest) + }) +_sym_db.RegisterMessage(RecoverRequest) + +RecoverResponse = _reflection.GeneratedProtocolMessageType('RecoverResponse', (_message.Message,), { + 'DESCRIPTOR' : _RECOVERRESPONSE, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.RecoverResponse) + }) +_sym_db.RegisterMessage(RecoverResponse) + +GracefulExitRequest = _reflection.GeneratedProtocolMessageType('GracefulExitRequest', (_message.Message,), { + 'DESCRIPTOR' : _GRACEFULEXITREQUEST, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.GracefulExitRequest) + }) +_sym_db.RegisterMessage(GracefulExitRequest) + +GracefulExitResponse = _reflection.GeneratedProtocolMessageType('GracefulExitResponse', (_message.Message,), { + 'DESCRIPTOR' : _GRACEFULEXITRESPONSE, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.GracefulExitResponse) + }) +_sym_db.RegisterMessage(GracefulExitResponse) + +ShutdownRequest = _reflection.GeneratedProtocolMessageType('ShutdownRequest', (_message.Message,), { + 'DESCRIPTOR' : _SHUTDOWNREQUEST, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.ShutdownRequest) + }) +_sym_db.RegisterMessage(ShutdownRequest) + +ShutdownResponse = _reflection.GeneratedProtocolMessageType('ShutdownResponse', (_message.Message,), { + 'DESCRIPTOR' : _SHUTDOWNRESPONSE, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.ShutdownResponse) + }) +_sym_db.RegisterMessage(ShutdownResponse) + +NotifyRequest = _reflection.GeneratedProtocolMessageType('NotifyRequest', (_message.Message,), { + 'DESCRIPTOR' : _NOTIFYREQUEST, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.NotifyRequest) + }) +_sym_db.RegisterMessage(NotifyRequest) + +NotifyResponse = _reflection.GeneratedProtocolMessageType('NotifyResponse', (_message.Message,), { + 'DESCRIPTOR' : _NOTIFYRESPONSE, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.NotifyResponse) + }) +_sym_db.RegisterMessage(NotifyResponse) + +HeartbeatRequest = _reflection.GeneratedProtocolMessageType('HeartbeatRequest', (_message.Message,), { + 'DESCRIPTOR' : _HEARTBEATREQUEST, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.HeartbeatRequest) + }) +_sym_db.RegisterMessage(HeartbeatRequest) + +HeartbeatResponse = _reflection.GeneratedProtocolMessageType('HeartbeatResponse', (_message.Message,), { + 'DESCRIPTOR' : _HEARTBEATRESPONSE, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.HeartbeatResponse) + }) +_sym_db.RegisterMessage(HeartbeatResponse) + +SignalRequest = _reflection.GeneratedProtocolMessageType('SignalRequest', (_message.Message,), { + 'DESCRIPTOR' : _SIGNALREQUEST, + '__module__' : 
'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.SignalRequest) + }) +_sym_db.RegisterMessage(SignalRequest) + +SignalResponse = _reflection.GeneratedProtocolMessageType('SignalResponse', (_message.Message,), { + 'DESCRIPTOR' : _SIGNALRESPONSE, + '__module__' : 'rpc.runtime_service_pb2' + # @@protoc_insertion_point(class_scope:runtime_service.SignalResponse) + }) +_sym_db.RegisterMessage(SignalResponse) + + +DESCRIPTOR._options = None +_CALLREQUEST_CREATEOPTIONSENTRY._options = None + +_RUNTIMESERVICE = _descriptor.ServiceDescriptor( + name='RuntimeService', + full_name='runtime_service.RuntimeService', + file=DESCRIPTOR, + index=0, + serialized_options=None, + create_key=_descriptor._internal_create_key, + serialized_start=1174, + serialized_end=1854, + methods=[ + _descriptor.MethodDescriptor( + name='Call', + full_name='runtime_service.RuntimeService.Call', + index=0, + containing_service=None, + input_type=_CALLREQUEST, + output_type=_CALLRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='NotifyResult', + full_name='runtime_service.RuntimeService.NotifyResult', + index=1, + containing_service=None, + input_type=_NOTIFYREQUEST, + output_type=_NOTIFYRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Checkpoint', + full_name='runtime_service.RuntimeService.Checkpoint', + index=2, + containing_service=None, + input_type=_CHECKPOINTREQUEST, + output_type=_CHECKPOINTRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Recover', + full_name='runtime_service.RuntimeService.Recover', + index=3, + containing_service=None, + input_type=_RECOVERREQUEST, + output_type=_RECOVERRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='GracefulExit', + full_name='runtime_service.RuntimeService.GracefulExit', + index=4, + containing_service=None, + input_type=_GRACEFULEXITREQUEST, + output_type=_GRACEFULEXITRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Shutdown', + full_name='runtime_service.RuntimeService.Shutdown', + index=5, + containing_service=None, + input_type=_SHUTDOWNREQUEST, + output_type=_SHUTDOWNRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Heartbeat', + full_name='runtime_service.RuntimeService.Heartbeat', + index=6, + containing_service=None, + input_type=_HEARTBEATREQUEST, + output_type=_HEARTBEATRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), + _descriptor.MethodDescriptor( + name='Signal', + full_name='runtime_service.RuntimeService.Signal', + index=7, + containing_service=None, + input_type=_SIGNALREQUEST, + output_type=_SIGNALRESPONSE, + serialized_options=None, + create_key=_descriptor._internal_create_key, + ), +]) +_sym_db.RegisterServiceDescriptor(_RUNTIMESERVICE) + +DESCRIPTOR.services_by_name['RuntimeService'] = _RUNTIMESERVICE + +# @@protoc_insertion_point(module_scope) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_service_pb2_grpc.py b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_service_pb2_grpc.py new file mode 100644 index 
0000000000000000000000000000000000000000..0c6e0f683e3dc01be0569a010ca863585e18609c --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/rpc/runtime_service_pb2_grpc.py @@ -0,0 +1,308 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! +"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + +from yr.rpc import runtime_service_pb2 as rpc_dot_runtime__service__pb2 + + +class RuntimeServiceStub(object): + """Runtime service provides APIs to core, + """ + + def __init__(self, channel): + """Constructor. + + Args: + channel: A grpc.Channel. + """ + self.Call = channel.unary_unary( + '/runtime_service.RuntimeService/Call', + request_serializer=rpc_dot_runtime__service__pb2.CallRequest.SerializeToString, + response_deserializer=rpc_dot_runtime__service__pb2.CallResponse.FromString, + ) + self.NotifyResult = channel.unary_unary( + '/runtime_service.RuntimeService/NotifyResult', + request_serializer=rpc_dot_runtime__service__pb2.NotifyRequest.SerializeToString, + response_deserializer=rpc_dot_runtime__service__pb2.NotifyResponse.FromString, + ) + self.Checkpoint = channel.unary_unary( + '/runtime_service.RuntimeService/Checkpoint', + request_serializer=rpc_dot_runtime__service__pb2.CheckpointRequest.SerializeToString, + response_deserializer=rpc_dot_runtime__service__pb2.CheckpointResponse.FromString, + ) + self.Recover = channel.unary_unary( + '/runtime_service.RuntimeService/Recover', + request_serializer=rpc_dot_runtime__service__pb2.RecoverRequest.SerializeToString, + response_deserializer=rpc_dot_runtime__service__pb2.RecoverResponse.FromString, + ) + self.GracefulExit = channel.unary_unary( + '/runtime_service.RuntimeService/GracefulExit', + request_serializer=rpc_dot_runtime__service__pb2.GracefulExitRequest.SerializeToString, + response_deserializer=rpc_dot_runtime__service__pb2.GracefulExitResponse.FromString, + ) + self.Shutdown = channel.unary_unary( + '/runtime_service.RuntimeService/Shutdown', + request_serializer=rpc_dot_runtime__service__pb2.ShutdownRequest.SerializeToString, + response_deserializer=rpc_dot_runtime__service__pb2.ShutdownResponse.FromString, + ) + self.Heartbeat = channel.unary_unary( + '/runtime_service.RuntimeService/Heartbeat', + request_serializer=rpc_dot_runtime__service__pb2.HeartbeatRequest.SerializeToString, + response_deserializer=rpc_dot_runtime__service__pb2.HeartbeatResponse.FromString, + ) + self.Signal = channel.unary_unary( + '/runtime_service.RuntimeService/Signal', + request_serializer=rpc_dot_runtime__service__pb2.SignalRequest.SerializeToString, + response_deserializer=rpc_dot_runtime__service__pb2.SignalResponse.FromString, + ) + + +class RuntimeServiceServicer(object): + """Runtime service provides APIs to core, + """ + + def Call(self, request, context): + """Call a method or init state of instance + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def NotifyResult(self, request, context): + """NotifyResult is applied to async notify result of create or invoke request invoked by runtime + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Checkpoint(self, request, context): + """Checkpoint request a state to save for failure recovery and state migration + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not 
implemented!') + raise NotImplementedError('Method not implemented!') + + def Recover(self, request, context): + """Recover state + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def GracefulExit(self, request, context): + """GracefulExit request an instance graceful exit + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Shutdown(self, request, context): + """Shutdown request an instance shutdown + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Heartbeat(self, request, context): + """check whether the runtime is alive + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + def Signal(self, request, context): + """Signal the signal to instance + """ + context.set_code(grpc.StatusCode.UNIMPLEMENTED) + context.set_details('Method not implemented!') + raise NotImplementedError('Method not implemented!') + + +def add_RuntimeServiceServicer_to_server(servicer, server): + rpc_method_handlers = { + 'Call': grpc.unary_unary_rpc_method_handler( + servicer.Call, + request_deserializer=rpc_dot_runtime__service__pb2.CallRequest.FromString, + response_serializer=rpc_dot_runtime__service__pb2.CallResponse.SerializeToString, + ), + 'NotifyResult': grpc.unary_unary_rpc_method_handler( + servicer.NotifyResult, + request_deserializer=rpc_dot_runtime__service__pb2.NotifyRequest.FromString, + response_serializer=rpc_dot_runtime__service__pb2.NotifyResponse.SerializeToString, + ), + 'Checkpoint': grpc.unary_unary_rpc_method_handler( + servicer.Checkpoint, + request_deserializer=rpc_dot_runtime__service__pb2.CheckpointRequest.FromString, + response_serializer=rpc_dot_runtime__service__pb2.CheckpointResponse.SerializeToString, + ), + 'Recover': grpc.unary_unary_rpc_method_handler( + servicer.Recover, + request_deserializer=rpc_dot_runtime__service__pb2.RecoverRequest.FromString, + response_serializer=rpc_dot_runtime__service__pb2.RecoverResponse.SerializeToString, + ), + 'GracefulExit': grpc.unary_unary_rpc_method_handler( + servicer.GracefulExit, + request_deserializer=rpc_dot_runtime__service__pb2.GracefulExitRequest.FromString, + response_serializer=rpc_dot_runtime__service__pb2.GracefulExitResponse.SerializeToString, + ), + 'Shutdown': grpc.unary_unary_rpc_method_handler( + servicer.Shutdown, + request_deserializer=rpc_dot_runtime__service__pb2.ShutdownRequest.FromString, + response_serializer=rpc_dot_runtime__service__pb2.ShutdownResponse.SerializeToString, + ), + 'Heartbeat': grpc.unary_unary_rpc_method_handler( + servicer.Heartbeat, + request_deserializer=rpc_dot_runtime__service__pb2.HeartbeatRequest.FromString, + response_serializer=rpc_dot_runtime__service__pb2.HeartbeatResponse.SerializeToString, + ), + 'Signal': grpc.unary_unary_rpc_method_handler( + servicer.Signal, + request_deserializer=rpc_dot_runtime__service__pb2.SignalRequest.FromString, + response_serializer=rpc_dot_runtime__service__pb2.SignalResponse.SerializeToString, + ), + } + generic_handler = grpc.method_handlers_generic_handler( + 'runtime_service.RuntimeService', rpc_method_handlers) + server.add_generic_rpc_handlers((generic_handler,)) + + + # This class is part of an EXPERIMENTAL 
API. +class RuntimeService(object): + """Runtime service provides APIs to core, + """ + + @staticmethod + def Call(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/runtime_service.RuntimeService/Call', + rpc_dot_runtime__service__pb2.CallRequest.SerializeToString, + rpc_dot_runtime__service__pb2.CallResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def NotifyResult(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/runtime_service.RuntimeService/NotifyResult', + rpc_dot_runtime__service__pb2.NotifyRequest.SerializeToString, + rpc_dot_runtime__service__pb2.NotifyResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Checkpoint(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/runtime_service.RuntimeService/Checkpoint', + rpc_dot_runtime__service__pb2.CheckpointRequest.SerializeToString, + rpc_dot_runtime__service__pb2.CheckpointResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Recover(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/runtime_service.RuntimeService/Recover', + rpc_dot_runtime__service__pb2.RecoverRequest.SerializeToString, + rpc_dot_runtime__service__pb2.RecoverResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def GracefulExit(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/runtime_service.RuntimeService/GracefulExit', + rpc_dot_runtime__service__pb2.GracefulExitRequest.SerializeToString, + rpc_dot_runtime__service__pb2.GracefulExitResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Shutdown(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/runtime_service.RuntimeService/Shutdown', + rpc_dot_runtime__service__pb2.ShutdownRequest.SerializeToString, + rpc_dot_runtime__service__pb2.ShutdownResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Heartbeat(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + 
compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/runtime_service.RuntimeService/Heartbeat', + rpc_dot_runtime__service__pb2.HeartbeatRequest.SerializeToString, + rpc_dot_runtime__service__pb2.HeartbeatResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) + + @staticmethod + def Signal(request, + target, + options=(), + channel_credentials=None, + call_credentials=None, + insecure=False, + compression=None, + wait_for_ready=None, + timeout=None, + metadata=None): + return grpc.experimental.unary_unary(request, target, '/runtime_service.RuntimeService/Signal', + rpc_dot_runtime__service__pb2.SignalRequest.SerializeToString, + rpc_dot_runtime__service__pb2.SignalResponse.FromString, + options, channel_credentials, + insecure, call_credentials, compression, wait_for_ready, timeout, metadata) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ab9d850e7cd985591c08711d3569f057ef795200 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +Runtime +""" diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/cluster_mode_runtime.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/cluster_mode_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..456668848f5405f383ca2ec5ed54b825a0ed023d --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/cluster_mode_runtime.py @@ -0,0 +1,263 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
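On the server side, the generated add_RuntimeServiceServicer_to_server hook above wires a servicer subclass into a gRPC server. A minimal sketch that answers only Heartbeat, leaving every other method on the generated UNIMPLEMENTED default; the port is chosen arbitrarily for illustration:

from concurrent import futures

import grpc

from yr.rpc import runtime_service_pb2, runtime_service_pb2_grpc


class HeartbeatOnlyServicer(runtime_service_pb2_grpc.RuntimeServiceServicer):
    def Heartbeat(self, request, context):
        # An empty HeartbeatResponse is enough to signal that the runtime is alive.
        return runtime_service_pb2.HeartbeatResponse()


def serve(port=50051):  # illustrative port, not from this patch
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    runtime_service_pb2_grpc.add_RuntimeServiceServicer_to_server(
        HeartbeatOnlyServicer(), server)
    server.add_insecure_port(f"[::]:{port}")
    server.start()
    server.wait_for_termination()
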
+ +""" +Cluster mode runtime +""" +import logging +import threading +import time +from abc import ABC +from concurrent import futures +from typing import Union, List +from inspect import isfunction + +from datasystem.stream_client import StreamClient + +import yr +from yr import serialization +from yr import utils +from yr.config import ConfigManager +from yr.invoke_client.invoke_client import InvokeClient +from yr.object_ref import ObjectRef +from yr.runtime.local_object_store import LocalObjectStore +from yr.runtime.runtime import BaseRuntime + +_PUT_RECORD_TAG = "put.record.list" +_logger = logging.getLogger(__name__) + + +class ClusterModeRuntime(BaseRuntime, ABC): + """ + Cluster mode runtime + + Attributes: + ds_client: datasystem client + """ + + def __init__(self, ds_client, invoke_client: InvokeClient): + """ + initialize cluster mode runtime + + Args: + ds_client (DSClient): datasystem client + """ + super().__init__(invoke_client) + self.__ds_client = ds_client + self.__invoke_client = invoke_client + self.__future_map = {} + self.__local_storage = {} + self.__return_id_map = {} + self.__thread_local = threading.local() + self.__user_serial_manager = UserSerialManager() + + def get_id(self, ref_id, timeout: int): + return self.__ds_client.get(ref_id, timeout) + + def put(self, ref: Union[str, ObjectRef], value, use_msgpack=False): + """ + put object to datasystem with objectref + """ + optimize = False + if not use_msgpack and ConfigManager().in_cluster and not isinstance(value, serialization.SerializeObject) \ + and not isfunction(value): + optimize = True + ref_id = ref.id if isinstance(ref, ObjectRef) else ref + + if not isinstance(value, serialization.SerializeObject): + value = serialization.Serialization().serialize(value, ref_id, optimize, use_msgpack) + nested_refs = [] + for dependency_ref in value.refs: + if isinstance(dependency_ref, ObjectRef): + dependency_ref.wait() + nested_refs.append(dependency_ref.id) + + if isinstance(ref, str): + ref = ObjectRef(ref) + + if optimize: + self.__ds_client.create(ref.id, nested_refs, value.writer, value.inband) + else: + self.__ds_client.put(ref.id, value.data, nested_refs) + return ref + + def clear(self): + """ + clean object in storage + """ + self.task_mgr.clear() + # the serial number of a stateful order queue needs to be reset. + self.__user_serial_manager.clear() + self.kill_all_instances() + self.__invoke_client.clear() + self.__ds_client.clear_ctx_reference() + + def exit_instance(self): + """ + Exit instance + """ + _logger.info("start to exit a function") + self.__invoke_client.exit() + + def get_trace_id(self): + """ + Get traceID + """ + if yr.is_on_cloud(): + if hasattr(self.__thread_local, "trace_id"): + return self.__thread_local.trace_id + trace_id = utils.generate_trace_id(ConfigManager().job_id) + _logger.warning(f"generate a new trace_id:{trace_id}") + return trace_id + return utils.generate_trace_id(ConfigManager().job_id) + + def get_serial_manager(self): + """ + Get serial manager + """ + return self.__user_serial_manager + + def set_trace_id(self, trace_id): + """ + Set traceID + """ + if yr.is_on_cloud(): + self.__thread_local.trace_id = trace_id + + def get_stream_client(self) -> StreamClient: + """ Create one Producer to send element. 
+ """ + return self.__ds_client.get_stream_client() + + def _get_object(self, object_refs: List, timeout: int): + iteration_timeout = timeout if timeout != -1 else None + start = time.time() + object_refs = object_refs if isinstance(object_refs, list) else [object_refs] + wait_object_refs = list(set(object_refs)) + length = len(wait_object_refs) + _, unready = yr.wait(list(set(wait_object_refs)), length, iteration_timeout) + if len(unready) > 0: + raise TimeoutError(f"get object timeout: {[ref.id for ref in unready]}") + if iteration_timeout: + iteration_timeout -= int(time.time() - start) + else: + # datasystem default get timeout is 60s + iteration_timeout = 60 + ds_keys = [ref.id for ref in object_refs] + objs = self._get_object_from_local(ds_keys, iteration_timeout) + objs = objs if isinstance(objs, list) else [objs] + return serialization.Serialization().multi_deserialize(objs) + + def _get_object_from_local(self, ds_keys: List, timeout): + objs = LocalObjectStore().get(ds_keys) + keys_not_in_local = [] + for key, obj in zip(ds_keys, objs): + if obj is None: + keys_not_in_local.append(key) + if len(keys_not_in_local) != 0: + objs_in_ds = self.__ds_client.get(keys_not_in_local, timeout) + index = 0 + objs_in_ds = objs_in_ds if isinstance(objs_in_ds, list) else [objs_in_ds] + for obj in objs_in_ds: + while objs[index] is not None: + index += 1 + objs[index] = obj + return objs + + +class UserSerialManager: + """ + User Serial Manager + """ + + def __init__(self): + # use it to store need_order instance send serial number + self._task_serial_dict = {} + self._lock = threading.Lock() + + def clear(self): + """ + Clear serial dict + """ + self._task_serial_dict.clear() + + def get_serial_number(self, instance_id: str): + """ + Get serial number from instanceID + """ + with self._lock: + task_serial_num = self._task_serial_dict.get(instance_id, 0) + self._task_serial_dict[instance_id] = task_serial_num + 1 + return task_serial_num + + def del_instance_serial_num(self, instance_id: str): + """ + Delete instance serial number + """ + with self._lock: + self._task_serial_dict.pop(instance_id, None) + + +def _check_objs(obj_refs): + exist_exception = False + ready_objs = [] + unready_map = {} + for obj_ref in obj_refs: + if obj_ref.done(): + obj_ref.wait() + ready_objs.append(obj_ref) + if obj_ref.is_exception(): + exist_exception = True + continue + id_future = obj_ref.get_future() + # multiple return value:one future contain multiple obj + if id_future in unready_map: + unready_map[id_future].append(obj_ref) + else: + unready_map[id_future] = [obj_ref] + return ready_objs, unready_map, exist_exception + + +def wait(obj_refs: list, wait_num: int, timeout: int) -> (list, list): + """ + wait obj_refs + """ + start_time = time.time() + ready_objs, unready_map, exist_exception = _check_objs(obj_refs) + unready_futures = set(unready_map.keys()) + if len(unready_futures) == 0: + return obj_refs[:wait_num], obj_refs[wait_num:] + + while len(ready_objs) < wait_num and not exist_exception: + if timeout: + iteration_timeout = timeout - time.time() + start_time + if iteration_timeout <= 0: + break + else: + iteration_timeout = None + ready, _ = futures.wait(unready_futures, iteration_timeout, futures.FIRST_COMPLETED) + if not ready: + break + unready_futures -= ready + for r in ready: + objs = unready_map.pop(r) + # try to raise obj exception + _ = [obj.wait() for obj in objs] + ready_objs.extend(objs) + + ready_objs.sort(key=obj_refs.index) + ready_objs = ready_objs[:wait_num] + unready_objs = [obj for 
obj in obj_refs if obj not in ready_objs] + return ready_objs, unready_objs diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/dependency_manager.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/dependency_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..5e56036176ab944f7dc6b57e0d805666b0a36b6f --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/dependency_manager.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +Dependency Manager +""" +import concurrent.futures +import functools +import logging +from dataclasses import dataclass +from typing import Union, List, Callable, Optional + +from yr.object_ref import ObjectRef +from yr.instance_ref import InstanceRef +from yr.objref_counting_manager import ObjRefCountingManager +from yr.runtime.task_spec import TaskSpec +from yr.serialization import SerializeObject + +_logger = logging.getLogger(__name__) + + +@dataclass +class TaskState: + """TaskState""" + task: TaskSpec + object_dependencies: int + instance_dependencies: int + instance_error: Optional[BaseException] + error: List[BaseException] + + +def resolve_dependency(task: TaskSpec, on_complete: Callable[[Optional[BaseException], List], None]): + """resolve dependency""" + object_dependencies = [] + instance_dependencies = [] + + if task.instance_id is not None: + instance_dependencies.append(task.instance_id) + + for arg in task.args: + if not isinstance(arg, SerializeObject): + continue + for ref in arg.refs: + if isinstance(ref, ObjectRef): + object_dependencies.append(ref) + + ObjRefCountingManager().join_record(task.task_id, object_dependencies) + + if len(object_dependencies) == 0 and len(instance_dependencies) == 0: + on_complete(None, []) + return + state = TaskState(task, len(object_dependencies), len(instance_dependencies), None, []) + + def callback(future: concurrent.futures.Future, dependency: Union[ObjectRef, InstanceRef]): + err = future.exception() if future else None + + if isinstance(dependency, ObjectRef): + state.object_dependencies -= 1 + if err: + state.error.append(future.exception()) + else: + state.instance_dependencies -= 1 + if not err: + dependency.wait() + else: + state.instance_error = err + + if state.object_dependencies == 0 and state.instance_dependencies == 0: + on_complete(state.instance_error, state.error) + + for ref in object_dependencies: + ref.on_complete(functools.partial(callback, dependency=ref)) + for ref in instance_dependencies: + ref.on_complete(functools.partial(callback, dependency=ref)) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/instance.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/instance.py new file mode 100644 index 0000000000000000000000000000000000000000..64a94baa1ca0f0d8b0fec928b960f3adb74e12f5 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/instance.py @@ -0,0 +1,110 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 
Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +"""Instance class for scheduler""" +import time +from dataclasses import dataclass, field +from typing import Optional, Dict + +from yr.instance_ref import InstanceRef +from yr.utils import LANGUAGE_PYTHON + + +@dataclass +class Resource: + """instance resource""" + comparable_res: tuple = ('cpu', 'memory', 'concurrency', 'language', 'resources') + cpu: int = 500 + memory: int = 500 + concurrency: int = 1 + language: str = LANGUAGE_PYTHON + resources: Dict[str, float] = field(default_factory=dict) + + def __hash__(self): + return hash(str(self)) + + def __eq__(self, other: Optional['Resource']): + if not other: + return False + for res in self.comparable_res: + if getattr(self, res) != getattr(other, res): + return False + return True + + def __str__(self): + res = "".join(map(lambda x: f"({x[0]},{x[1]:.4f})", sorted(self.resources.items(), key=lambda x: x[0]))) + return f"{self.cpu}-{self.memory}-{self.concurrency}-{self.language}{res}" + + +class Instance: + """Instance used by Scheduler""" + __slots__ = ["__resource", "__instance_id", "__tasks", "__last_activate_time", "__is_recycled"] + + def __init__(self, instance_id: InstanceRef, resource: Resource): + self.__instance_id = instance_id + self.__resource = resource + self.__tasks = set() + self.__last_activate_time = time.time() + self.__is_recycled = False + + def __str__(self): + return f"instance id: {self.__instance_id.id if self.is_activate else 'not activate'} " \ + f"task id: {self.__instance_id.task_id} " \ + f"task count: {self.task_count} " \ + f"resource: {self.__resource} " \ + f"last activate time: {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(self.__last_activate_time))}" + + @property + def last_activate_time(self) -> float: + """last activate time. 
used to recycle""" + return self.__last_activate_time + + @property + def is_activate(self) -> bool: + """is instance activate""" + if self.__is_recycled: + return False + return self.__instance_id.done() and not self.__instance_id.is_failed + + @property + def resource(self) -> Resource: + """return instance resource""" + return self.__resource + + @property + def task_count(self) -> int: + """return task count""" + return len(self.__tasks) + + @property + def instance_id(self): + """return instance id""" + return self.__instance_id + + def set_recycled(self): + """set recycled status""" + self.__is_recycled = True + + def add_task(self, task_id: str): + """add a task to instance""" + self.__tasks.add(task_id) + + def delete_task(self, task_id: str): + """delete task""" + self.__tasks.remove(task_id) + self.refresh() + + def refresh(self): + """refresh last activate time""" + self.__last_activate_time = time.time() diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/instance_manager.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/instance_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..11df300410b96ccdccf8a1046ba0e8b6e80b5ec2 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/instance_manager.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
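Resource doubles as a dictionary key: __hash__ and __eq__ are defined over the comparable fields only, so instances of the same shape land in the same bucket. A minimal standalone sketch of that grouping (the pool dict and string values are illustrative; InstanceManager below uses an OrderedDict of real Instance objects):

    from yr.runtime.instance import Resource

    # Two Resource objects with identical comparable fields hash the same,
    # so they select the same bucket in a dict keyed by Resource.
    pool = {}
    r1 = Resource(cpu=500, memory=500, concurrency=4)
    r2 = Resource(cpu=500, memory=500, concurrency=4)
    pool.setdefault(r1, []).append("instance-a")
    pool.setdefault(r2, []).append("instance-b")  # r1 == r2: same bucket
    assert len(pool) == 1 and len(pool[r1]) == 2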
+"""instance manager""" +import concurrent.futures +import logging +import threading +import time +from collections import OrderedDict, defaultdict +from concurrent.futures import Future, wait +from typing import Optional, List + +import yr +from yr import utils +from yr.common.response_helper import make_callback_for_kill, make_callback_for_create +from yr.common.timer import Timer +from yr.config import ConfigManager, DEFAULT_CONCURRENCY +from yr.exception import YRInvokeError, YRequestError +from yr.instance_ref import InstanceRef +from yr.rpc.common_pb2 import Arg, ERR_RESOURCE_NOT_ENOUGH +from yr.runtime.instance import Instance, Resource +from yr.runtime.scheduler import Scheduler +from yr.runtime.task_spec import InvokeType, Signal, callback_hook +from yr.runtime.task_spec import TaskMetadata +from yr.runtime.task_spec import TaskSpec +from yr.utils import generate_task_id + +_logger = logging.getLogger(__name__) + +_PRINT_ERROR_PERIOD = 60 + + +def print_instance_create_error(future: Future): + """print instance create error""" + if future.exception(): + _logger.exception(future.exception()) + + +def warning_if_failed(future: concurrent.futures.Future, describe: str): + """warning for catching future exception""" + try: + future.result() + except RuntimeError as e: + _logger.warning(f"{describe}, err:{e}") + + +class InstanceManager: + """manager stateless instance""" + __slots__ = ["__instances", "__scheduler", "__invoke_client", "__recycle_scheduler", "__recycle_time", + "__recycle_period", "__lock", "__failed_count", "__last_failed_reason", + "__last_activate_instance_cnt", "__default_timeout"] + + def __init__(self, scheduler: Scheduler, invoke_client, recycle_time=2): + self.__instances = dict() + self.__scheduler = scheduler + self.__invoke_client = invoke_client + self.__recycle_time = recycle_time + self.__recycle_period = float(recycle_time) / 10 + self.__recycle_scheduler = Timer() + self.__recycle_scheduler.after(self.__recycle_period, self.__recycle) + self.__recycle_scheduler.after(_PRINT_ERROR_PERIOD, self.__print_create_error) + self.__lock = threading.RLock() + self.__failed_count = defaultdict(int) + self.__last_failed_reason = {} + self.__default_timeout = 30 + self.__last_activate_instance_cnt = 0 + + def get_instances(self, resource: Resource) -> List[Instance]: + """get instance by resource""" + with self.__lock: + instances = self.__instances.get(resource) + if instances: + return list(instances.values()) + return list() + + def get_instance_count(self, resource: Resource) -> int: + """get instance count by resource""" + with self.__lock: + instances = self.__instances.get(resource) + if instances: + return len(instances) + return 0 + + def get_failed_count(self, resource: Resource): + """get instance create failed count by resource""" + return self.__failed_count[resource] + + def get_last_failed_reason(self, resource: Resource): + """get instance create failed count by resource""" + return self.__failed_count.get(resource, None) + + def check_last_failed_reason(self, resource: Resource) -> (bool, Optional[Exception]): + """ + check whether an exception occurs during instance creation. + will not return true when instance can be create before but resource not enough now. 
+ """ + err = self.__last_failed_reason.get(resource) + if not err: + return True, err + if isinstance(err, YRInvokeError): + return False, err.origin_error() + if isinstance(err, YRequestError): + if err.code != ERR_RESOURCE_NOT_ENOUGH: + return False, err + return True, err + + def schedule(self, task: TaskSpec, resource: Resource) -> Optional[Instance]: + """select a instance for task""" + + instance = None + if resource not in self.__instances: + return instance + instances = self.get_instances(resource) + if len(instances) != 0: + instance = self.__scheduler.schedule(task, instances[::-1]) + with self.__lock: + if instance and instance.is_activate: + instance.add_task(task_id=task.task_id) + return instance + return instance + + def scale_out(self, task: TaskSpec, resource: Resource) -> InstanceRef: + """scale out a instance""" + future = concurrent.futures.Future() + + descriptor = utils.ObjectDescriptor.parse(task.object_descriptor.to_dict()) + if task.object_descriptor.target_language == utils.LANGUAGE_CPP: + descriptor.function_name = "" + if task.invoke_options: + concurrency = task.invoke_options.concurrency + else: + concurrency = DEFAULT_CONCURRENCY + metadata = TaskMetadata.create(object_descriptor=descriptor, code_id=utils.NORMAL_FUNCTION, + invoke_type=InvokeType.CREATE_NORMAL_FUNCTION_INSTANCE, concurrency=concurrency) + trace_id = task.trace_id if yr.is_on_cloud() else utils.generate_trace_id(task.job_id) + create_task = TaskSpec(task_id=generate_task_id(), + job_id=ConfigManager().job_id, + invoke_type=InvokeType.CREATE_NORMAL_FUNCTION_INSTANCE, + invoke_timeout=self.__default_timeout, + callback=make_callback_for_create(future), + code_id=utils.NORMAL_FUNCTION, + function_info=task.function_info, + object_descriptor=descriptor, + args=[Arg(type=0, value=metadata.to_json().encode())], + trace_id=trace_id, + invoke_options=task.invoke_options + ) + create_task.callback = callback_hook(create_task.callback, create_task.task_id, create_task.invoke_type) + instance_id = InstanceRef(instance_id=future, task_id=create_task.task_id) + instance = Instance(instance_id=instance_id, resource=resource) + self.__add_instance(resource, instance) + future.add_done_callback(self.__scale_out_result_process(resource, instance)) + self.__invoke_client.create(create_task) + return instance_id + + def clear(self): + """clear instances""" + with self.__lock: + if _logger.isEnabledFor(logging.DEBUG): + delete_ids = [] + for instances in self.__instances.values(): + for instance_id in instances.keys(): + delete_ids.append(instance_id.id) + _logger.debug(f"instance killed: {delete_ids}") + # send kill(2) to clear stateless and stateful instances from job-id when finalize + self.__instances.clear() + + def kill_instance(self, instance_id: InstanceRef): + """kill stateless instance""" + need_kill = False + with self.__lock: + for instances in self.__instances.values(): + if instance_id in instances: + ins = instances.pop(instance_id) + _logger.debug(f"instance need kill: {ins}") + need_kill = True + if need_kill: + future = self.__delete_instance(instance_id) + warning_if_failed(future, "delete instance") + + def __add_instance(self, resource: Resource, instance: Instance): + with self.__lock: + self.__instances.setdefault(resource, OrderedDict())[instance.instance_id] = instance + + def __recycle(self): + self.__recycle_scheduler.after(self.__recycle_period, self.__recycle) + with self.__lock: + need_recycle = self.__clear_instance() + futures = [self.__delete_instance(ins.instance_id) for ins in 
need_recycle] + for future in concurrent.futures.as_completed(futures): + warning_if_failed(future, "delete instance") + + def __clear_instance(self) -> List[Instance]: + now = time.time() + need_recycle = [] + + for res, instances in self.__instances.items(): + activate_cnt = 0 + for instance_id in list(instances.keys()): + ins = instances.get(instance_id) + if not ins or not ins.instance_id.done(): + continue + if now - ins.last_activate_time > self.__recycle_time and ins.task_count == 0: + ins = instances.pop(instance_id) + ins.set_recycled() + _logger.debug(f"instance need recycle: {ins}") + need_recycle.append(ins) + activate_cnt += 1 + self.__last_activate_instance_cnt = activate_cnt + _logger.debug(f"activate instance count: {activate_cnt} res: {res}") + return need_recycle + + def __delete_instance(self, instance_id: InstanceRef): + func_info = ConfigManager().get_function_info() + future = concurrent.futures.Future() + if not instance_id.done(): + wait([instance_id.get_future()]) + if instance_id.is_failed: + return instance_id.get_future() + _logger.info(f"instance killed: {instance_id.id}") + self.__invoke_client.kill(TaskSpec(task_id=utils.generate_task_id(), + job_id=ConfigManager().job_id, + invoke_type=InvokeType.KILL_INSTANCE, + invoke_timeout=self.__default_timeout, + function_info=func_info, + callback=make_callback_for_kill(future, instance_id.id, + signal=Signal.KILL_INSTANCE), + instance_id=instance_id, + signal=Signal.KILL_INSTANCE.value)) + return future + + def __scale_out_result_process(self, resource: Resource, instance: Instance): + + def callback(future: Future): + """print instance create error""" + if future.exception(): + self.__failed_count[resource] += 1 + self.__last_failed_reason[resource] = future.exception() + with self.__lock: + instances = self.__instances.get(resource) + if instances: + instances.pop(instance.instance_id, None) + else: + self.__failed_count[resource] = 0 + self.__last_failed_reason.pop(resource, None) + instance.refresh() + + return callback + + def __print_create_error(self): + self.__recycle_scheduler.after(_PRINT_ERROR_PERIOD, self.__print_create_error) + for res in list(self.__last_failed_reason.keys()): + err = self.__last_failed_reason.get(res, None) + if err and isinstance(err, YRequestError): + if err.code == ERR_RESOURCE_NOT_ENOUGH: + _logger.info(f"{res} resource not enough") diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/local_mode_runtime.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/local_mode_runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..b95822cc3ca5381eb5cec013c174c6f25d3c053f --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/local_mode_runtime.py @@ -0,0 +1,85 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
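The recycle loop above reclaims an instance once it has sat idle longer than recycle_time with no in-flight tasks, and the sweep runs ten times per recycle window. A standalone restatement of the predicate in __clear_instance (is_recyclable is an illustrative name, not part of the module):

    import time

    def is_recyclable(last_activate_time: float, task_count: int, recycle_time: float) -> bool:
        # Mirrors __clear_instance: idle longer than recycle_time and no
        # in-flight tasks means the instance can be reclaimed.
        return time.time() - last_activate_time > recycle_time and task_count == 0

    # An instance that finished its last task 3s ago, with recycle_time=2:
    assert is_recyclable(time.time() - 3, 0, 2.0) is True
    # Busy instances are never reclaimed, however long they have existed:
    assert is_recyclable(time.time() - 3, 1, 2.0) is False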
+"""local mode runtime""" +import logging +from abc import ABC +from typing import Union + +from datasystem.stream_client import StreamClient + +import yr +from yr import utils +from yr.config import ConfigManager +from yr.invoke_client.local_client import LocalClient +from yr.object_ref import ObjectRef +from yr.runtime.local_object_store import LocalObjectStore +from yr.runtime.runtime import BaseRuntime, package_python_args + +_logger = logging.getLogger(__name__) + + +class LocalModeRuntime(BaseRuntime, ABC): + """local mode runtime""" + + def __init__(self): + self.invoke_client = LocalClient() + super().__init__(self.invoke_client) + self.local_store = LocalObjectStore() + + def put(self, ref: Union[str, ObjectRef], value, use_msgpack=False): + """ + put object to local store + """ + if isinstance(ref, str): + ref = ObjectRef(ref) + self.local_store.put(ref.id, value) + return ref + + def get_trace_id(self) -> str: + """get_trace_id""" + return utils.generate_trace_id(ConfigManager().job_id) + + def clear(self): + """clear local object and stop workers""" + self.invoke_client.clear() + LocalObjectStore().release_all() + + def get_stream_client(self) -> StreamClient: + """ + get stream client for data system. + """ + raise RuntimeError("local mode not support stream") + + def get_id(self, ref_id, timeout: int): + pass + + def _package_python_args(self, args_list): + """package python args""" + return package_python_args(args_list) + + def _package_args_post(self, args_list): + return args_list + + def _get_object(self, object_refs: list, timeout: int): + wait_object_refs = list(set(object_refs)) + length = len(wait_object_refs) + timeout = timeout if timeout != -1 else None + _, unready = yr.wait(wait_object_refs, length, timeout) + if len(unready) > 0: + raise TimeoutError(f"get object timeout: {[ref.id for ref in unready]}") + objs = self.local_store.get([ref.id for ref in object_refs]) + if not isinstance(objs, list): + objs = [objs] + return objs diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/local_object_store.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/local_object_store.py new file mode 100644 index 0000000000000000000000000000000000000000..11c78e9c125c1c5e54d83be02fae80a8ece8836e --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/local_object_store.py @@ -0,0 +1,45 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+"""Local object store""" +from yr.serialization import Serialization +from yr.utils import Singleton + + +@Singleton +class LocalObjectStore: + """Local object store""" + _object_map = {} + + def put(self, key, value): + """put object""" + value = Serialization().serialize(value).data + self._object_map[key] = value + + def get(self, key): + """get object""" + if isinstance(key, list): + value = [self._object_map.get(k) for k in key] + return Serialization().multi_deserialize(value) + return Serialization().deserialize(self._object_map.get(key)) + + def release(self, key): + """release keys""" + keys = key if isinstance(key, list) else [key] + for i in keys: + self._object_map.pop(i) + + def release_all(self): + """release all keys""" + self._object_map.clear() diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/runtime.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/runtime.py new file mode 100644 index 0000000000000000000000000000000000000000..7dd3187af5a1eeb29b25d18154685f42623435bf --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/runtime.py @@ -0,0 +1,280 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +Runtime singleton +""" +import logging +from abc import ABCMeta, abstractmethod +from concurrent.futures import Future +from typing import Union, Callable, List +from urllib3.exceptions import HTTPError + +from datasystem.stream_client import StreamClient +from yr import utils, serialization +from yr.common.response_helper import make_callback_for_kill +from yr.config import ConfigManager, DEFAULT_CONCURRENCY +from yr.exception import YRInvokeError +from yr.instance_ref import InstanceRef +from yr.invoke_client.invoke_client import InvokeClient +from yr.object_ref import ObjectRef +from yr.objref_counting_manager import ObjRefCountingManager +from yr.rpc.common_pb2 import Arg +from yr.runtime.dependency_manager import resolve_dependency +from yr.runtime.local_object_store import LocalObjectStore +from yr.runtime.task_manager import TaskManager +from yr.runtime.task_spec import Task, TaskSpec, InvokeType, Signal, TaskMetadata, callback_hook +from yr.serialization import Serialization +from yr.signature import recover_args +from yr.utils import Singleton + +_logger = logging.getLogger(__name__) + +_MAX_SERIAL_LENGTH = 10 ** 6 + + +@Singleton +class Runtime: + """ + Runtime for user actions + """ + + def __init__(self): + self.__runtime = None + + @property + def rt(self): + """ + Get current runtime + """ + return self.__runtime + + def init(self, runtime): + """ + initialize runtime + """ + self.__runtime = runtime + + +class BaseRuntime(metaclass=ABCMeta): + """runtime interface""" + + def __init__(self, invoke_client: InvokeClient): + self.task_mgr = TaskManager(invoke_client) + self.__invoke_client = invoke_client + + @abstractmethod + def put(self, ref: Union[str, ObjectRef], value, use_msgpack=False): + """ + put object to datasystem with objectref + """ + + @abstractmethod + def clear(self): + 
""" + clean object in storage + """ + + @abstractmethod + def get_trace_id(self) -> str: + """get_trace_id""" + + @abstractmethod + def get_stream_client(self) -> StreamClient: + """ + get stream client for data system. + """ + + @abstractmethod + def get_id(self, ref_id, timeout: int): + """ + get object from datasystem with objectid + """ + + def get(self, object_ids: List[ObjectRef], timeout): + """ + get object from datasystem + """ + objects = self._get_object(object_ids, timeout) + for obj in objects: + if isinstance(obj, YRInvokeError): + raise obj.origin_error() + return objects + + def cancel(self, object_refs: List[ObjectRef]): + """cancel the stateless invoke""" + for object_ref in object_refs: + self.task_mgr.cancel(task_id=object_ref.task_id) + + def submit_task(self, task: Task, callback: Callable): + """submit normal function""" + func_info = ConfigManager().get_function_info() + if task.object_descriptor.target_language == utils.LANGUAGE_CPP: + func_info.function_name = task.target_function_key + args = self.__create_args(task) + + task_spec = TaskSpec(task_id=task.task_id, + job_id=ConfigManager().job_id, + object_descriptor=task.object_descriptor, + function_info=func_info, + code_id=task.code_id, + callback=callback_hook(callback, task.task_id, task.invoke_type, + object_counting_mgr=ObjRefCountingManager()), + invoke_type=task.invoke_type, + args=args, + trace_id=task.trace_id, + invoke_options=task.invoke_options, + instance_id=task.instance_id, + invoke_timeout=ConfigManager().invoke_timeout) + for return_obj in task.return_obj_list: + task_spec.object_ids.append(return_obj.id) + + def on_complete(instance_err, dependency_errors): + if instance_err: + callback(None, instance_err) + return + if len(dependency_errors) != 0: + callback(None, RuntimeError(f"dependent task failed: {[str(e) for e in dependency_errors]}")) + return + task_spec.args = self._package_args_post(task_spec.args) + self.task_mgr.submit_task(task_spec) + + resolve_dependency(task_spec, on_complete) + + def kill_instance(self, instance_id): + """ + kill instance + """ + if isinstance(instance_id, str): + instance_id = InstanceRef(instance_id) + func_info = ConfigManager().get_function_info() + _logger.info(f"instance killed, instance_id: {instance_id}") + future = Future() + + self.__invoke_client.kill(TaskSpec(task_id=utils.generate_task_id(), + job_id=ConfigManager().job_id, + invoke_type=InvokeType.KILL_INSTANCE, + invoke_timeout=ConfigManager().invoke_timeout, + function_info=func_info, + callback=make_callback_for_kill(future, instance_id, Signal.KILL_INSTANCE), + instance_id=instance_id, + signal=Signal.KILL_INSTANCE.value)) + future.result() + + def kill_all_instances(self): + """ + Kill all instances from job + """ + # Not support on cloud. + # That in_cluster is true means driver mode is being used, + # instances will be killed when driver disconnection happens. 
+ if ConfigManager().on_cloud or ConfigManager().in_cluster: + return + job_id = ConfigManager().job_id + future = Future() + self.__invoke_client.kill(TaskSpec(task_id=utils.generate_task_id(), + job_id=job_id, + invoke_type=InvokeType.KILL_INSTANCE, + invoke_timeout=ConfigManager().invoke_timeout, + function_info=ConfigManager().get_function_info(), + callback=make_callback_for_kill(future, job_id, + Signal.KILL_ALL_INSTANCES), + instance_id=InstanceRef(job_id), + signal=Signal.KILL_ALL_INSTANCES.value)) + try: + future.result() + except (HTTPError, RuntimeError) as e: + _logger.exception(e) + + @abstractmethod + def _get_object(self, object_refs: list, timeout: int): + """ + get object from request + """ + + def _package_python_args(self, args_list): + """package python args""" + args_list_new = [] + for arg in args_list: + if isinstance(arg, ObjectRef): + serialized_arg = serialization.Serialization().serialize(arg, arg.id, True) + else: + serialized_arg = serialization.Serialization().serialize(arg, "", True) + if len(serialized_arg.data) >= _MAX_SERIAL_LENGTH: + key = utils.generate_random_id() + ref = self.put(key, serialized_arg) + serialized_arg = serialization.Serialization().serialize(ref, ref.id, True) + + args_list_new.append(serialized_arg) + return args_list_new + + def _package_args_post(self, args_list): + args_list_new = [] + for arg in args_list: + if isinstance(arg, Arg): + args_list_new.append(arg) + continue + if not isinstance(arg, serialization.SerializeObject): + continue + for ref in arg.refs: + self._put_local_object(ref) + args_list_new.append(Arg(type=0, value=arg.data, nested_refs=[ref.id for ref in arg.refs])) + return args_list_new + + def _put_local_object(self, ref): + obj = LocalObjectStore().get(ref.id) + if obj is not None: + # if obj is in local, put it to data system + self.put(ref.id, serialization.SerializeObject(object_type=serialization.OBJECT_TYPE_PYTHON, data=obj)) + LocalObjectStore().release(ref.id) + + def __create_args(self, task): + if task.invoke_options: + concurrency = task.invoke_options.concurrency + else: + concurrency = DEFAULT_CONCURRENCY + metadata = \ + TaskMetadata.create(object_descriptor=task.object_descriptor, + code_id=task.code_id, + invoke_type=task.invoke_type, + concurrency=concurrency) + meta_refs = [task.code_id] if task.code_id.startswith(utils.OBJECT_ID_PREFIX) else [] + args = [Arg(type=0, value=metadata.to_json().encode(), nested_refs=meta_refs)] + if task.object_descriptor.target_language == utils.LANGUAGE_CPP: + args += _package_cpp_args(task.args_list) + else: + args += self._package_python_args(task.args_list) + return args + + +def _package_cpp_args(args_list): + args_list_new = [] + args, _ = recover_args(args_list) + for arg in args: + serialized_arg = serialization.Serialization().serialize(arg, "", True, True) + args_list_new.append(Arg(type=0, value=serialized_arg.data, nested_refs=[])) + return args_list_new + + +def package_python_args(args_list): + """package python args""" + args_list_new = [] + for arg in args_list: + if isinstance(arg, ObjectRef): + args_list_new.append(arg) + continue + serialized_arg = Serialization().serialize(arg, "", True) + args_list_new.append(serialized_arg) + return args_list_new diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/scheduler.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/scheduler.py new file mode 100644 index 0000000000000000000000000000000000000000..217ef6b527f50641577c8a9f92cec1d9244340b6 --- /dev/null +++ 
b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/scheduler.py @@ -0,0 +1,75 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +"""Scheduler""" + +from abc import abstractmethod, ABC +from typing import Optional, Iterable, List + +from yr.runtime.instance import Instance +from yr.runtime.task_spec import TaskSpec + + +class Scorer(ABC): + """abstract Scorer for scoring instances""" + + @staticmethod + @abstractmethod + def score(task: TaskSpec, instance: Instance) -> int: + """scoring instance""" + + +class ConcurrencyScorer(Scorer): + """Scorer for concurrency""" + + @staticmethod + def score(task: TaskSpec, instance: Instance) -> int: + """scoring instance""" + if instance.task_count < instance.resource.concurrency: + return 1 + return 0 + + +class Scheduler(ABC): + """abstract Scheduler""" + + @abstractmethod + def schedule(self, task: TaskSpec, instances: Iterable[Instance]) -> Optional[Instance]: + """select a instance""" + + +class NormalScheduler(Scheduler): + """base scheduler implementation""" + __slots__ = ["__scorers"] + + def __init__(self, scorers: List[Scorer] = None): + if scorers is None: + self.__scorers = [] + else: + self.__scorers = scorers + + def schedule(self, task: TaskSpec, instances: Iterable[Instance]) -> Optional[Instance]: + """select a instance""" + max_score = 0 + max_instance = None + for instance in instances: + if not instance.is_activate: + continue + score = 0 + for scorer in self.__scorers: + score += scorer.score(task, instance) + if score > max_score: + max_score = score + max_instance = instance + return max_instance diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/task_manager.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/task_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..1af8ae3c130fb33c1db3dd527965c25b792b3712 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/task_manager.py @@ -0,0 +1,281 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
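Scorers are the scheduler's extension point: NormalScheduler above sums each scorer's score per instance and picks the highest strictly positive total, so an instance at its concurrency limit (scored 0 by ConcurrencyScorer) is never selected. A sketch of plugging in an extra scorer; LeastLoadedScorer is a hypothetical example, not part of the diff:

    from yr.runtime.instance import Instance
    from yr.runtime.scheduler import ConcurrencyScorer, NormalScheduler, Scorer
    from yr.runtime.task_spec import TaskSpec

    class LeastLoadedScorer(Scorer):
        """Hypothetical scorer preferring instances with more free slots."""

        @staticmethod
        def score(task: TaskSpec, instance: Instance) -> int:
            return max(0, instance.resource.concurrency - instance.task_count)

    # ConcurrencyScorer gates out full instances; LeastLoadedScorer breaks ties.
    scheduler = NormalScheduler(scorers=[ConcurrencyScorer(), LeastLoadedScorer()])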
+ +"""task manager""" +import logging +from collections import deque +from concurrent.futures import Future +from dataclasses import dataclass +from enum import Enum +from threading import RLock, Lock +from typing import List +from typing import Optional + +from yr import utils +from yr.common.response_helper import make_callback_for_kill +from yr.common.timer import Timer +from yr.config import ConfigManager +from yr.exception import CancelError +from yr.instance_ref import InstanceRef +from yr.runtime.instance import Resource, Instance +from yr.runtime.instance_manager import InstanceManager +from yr.runtime.scheduler import NormalScheduler, ConcurrencyScorer +from yr.runtime.task_spec import InvokeType, InvokeOptions, Signal +from yr.runtime.task_spec import TaskSpec + + +class TaskState(Enum): + """task state""" + PENDING = 1 + RUNNING = 2 + CANCELED = 3 + + +@dataclass +class TaskRecord: + """record for task""" + state: TaskState + resource: Resource = None + task: TaskSpec = None + + +_logger = logging.getLogger(__name__) + + +class TaskManager: + """Task manager""" + __slots__ = ["__futures", "__invoke_client", "__queue", "__ins_mgr", "__pending_tasks", + "__timer", "__sleep_time_sequence", "__queue_lock", "__task_lock"] + + def __init__(self, invoke_client): + # task_id/future + self.__futures = {} + self.__invoke_client = invoke_client + self.__queue = deque() + self.__pending_tasks = dict() + scheduler = NormalScheduler(scorers=[ConcurrencyScorer()]) + self.__ins_mgr = InstanceManager(scheduler, invoke_client, ConfigManager().recycle_time) + self.__timer = Timer() + # delay to schedule 0,1,2,5,10,30,60,60 + self.__sleep_time_sequence = [0, 1, 2, 5, 10, 30, 60] + self.__queue_lock = RLock() + self.__task_lock = Lock() + + def add_task(self, task_id: str) -> Future: + """add task""" + future = Future() + self.__futures[task_id] = future + future.add_done_callback(lambda f: self.__pop_task(task_id)) + return future + + def get_future(self, task_id: str) -> Future: + """get future by task id""" + return self.__futures.get(task_id) + + def done(self, task_id: str) -> bool: + """get task status by task id""" + if task_id not in self.__futures: + return True + return self.__futures.get(task_id).done() + + def on_complete(self, task_id, callback) -> None: + """register callback when task finished""" + future = self.__futures.get(task_id) + if future is None: + callback(None) + else: + future.add_done_callback(callback) + + def submit_task(self, task: TaskSpec): + """submit task""" + if not ConfigManager().is_init: + _logger.warning(f"Can not submit task {task.task_id} before yr.init") + return + if task.invoke_options: + invoke_options = task.invoke_options + else: + invoke_options = InvokeOptions() + resource = Resource(cpu=invoke_options.cpu, + memory=invoke_options.memory, + concurrency=invoke_options.concurrency, + language=task.object_descriptor.target_language, + resources=invoke_options.custom_resources) + if task.invoke_type in (InvokeType.CREATE_INSTANCE, InvokeType.CREATE_NORMAL_FUNCTION_INSTANCE): + self.__invoke_client.create(task) + elif task.invoke_type in (InvokeType.INVOKE_MEMBER_FUNCTION, + InvokeType.GET_NAMED_INSTANCE_METADATA): + self.__invoke_client.invoke(task) + elif task.invoke_type == InvokeType.INVOKE_NORMAL_FUNCTION: + task_record = TaskRecord(state=TaskState.PENDING, resource=resource, task=task) + if self.__add_task(task_record): + self.__add_task_id(task.task_id) + self.__schedule() + elif self.__is_canceled(task.task_id): + task.callback(None, 
CancelError(task.task_id)) + else: + _logger.warning(f"task already in schedule queue, {task.task_id}") + else: + task.callback(None, TypeError(f"unexpected invoke type {task.invoke_type}")) + + def cancel(self, task_id: str) -> None: + """cancel a task""" + _logger.info(f"task canceled: {task_id}") + future = self.__futures.get(task_id) + if not future or future.done(): + return + state = self.__set_canceled(task_id) + if state == TaskState.CANCELED: + return + task_record = self.__get_task(task_id) + task_record.task.callback(None, CancelError(task_id)) + if state == TaskState.RUNNING: + self.__cancel_task(task_record.task.instance_id, task_id) + + def clear(self): + """clear tasks and instances""" + self.__futures.clear() + self.__queue.clear() + self.__pending_tasks.clear() + self.__ins_mgr.clear() + + def __schedule(self) -> None: + task_id = self.__pop_task_id() + if task_id == "": + return + task_record = self.__get_task(task_id) + if not task_record: + return + ret, err = self.__ins_mgr.check_last_failed_reason(resource=task_record.resource) + if not ret: + task_record.task.callback(None, err) + return + _logger.debug(f"schedule task {task_record.task.task_id}") + ins = self.__ins_mgr.schedule(task_record.task, task_record.resource) + if ins and ins.is_activate: + _logger.debug(f"schedule successfully {task_record.task.task_id} {ins.instance_id.id}") + task_record.state = TaskState.RUNNING + task_record.task.instance_id = ins.instance_id + + def callback(_): + ins.delete_task(task_id) + self.__schedule() + + self.on_complete(task_id, callback) + self.__invoke_client.invoke(task_record.task) + self.__schedule() + else: + self.__add_task_id(task_id, True) + if scale_out( + list(filter(lambda record: record.resource == task_record.resource, self.__get_all_tasks())), + list(self.__ins_mgr.get_instances(task_record.resource))): + self.__scale_out_after(self.__get_scale_out_delay(task_record.resource), task_record) + + def __scale_out_after(self, sleep_time: int, task_record: TaskRecord) -> None: + def scale_out_inner(): + instance_id = self.__ins_mgr.scale_out(task_record.task, task_record.resource) + instance_id.on_complete(lambda x: self.__schedule()) + + if sleep_time == 0: + scale_out_inner() + else: + self.__timer.after(sleep_time, scale_out_inner) + + def __get_scale_out_delay(self, resource: Resource) -> int: + count = self.__ins_mgr.get_failed_count(resource) + if count >= len(self.__sleep_time_sequence): + return self.__sleep_time_sequence[-1] + return self.__sleep_time_sequence[count] + + def __cancel_task(self, instance_id: InstanceRef, task_id: str): + if isinstance(instance_id, str): + instance_id = InstanceRef(instance_id) + func_info = ConfigManager().get_function_info() + future = Future() + + task_spec = TaskSpec(task_id=utils.generate_task_id(), + job_id=ConfigManager().job_id, + invoke_type=InvokeType.KILL_INSTANCE, + invoke_timeout=ConfigManager().invoke_timeout, + function_info=func_info, + callback=make_callback_for_kill(future, instance_id.id, Signal.CANCEL), + instance_id=instance_id, + signal=Signal.CANCEL.value, + request_id=task_id + ) + + self.__invoke_client.kill(task_spec) + try: + future.result() + except RuntimeError as e: + _logger.exception(e) + + def __add_task(self, task_record: TaskRecord) -> bool: + with self.__task_lock: + if task_record.task.task_id in self.__pending_tasks: + return False + self.__pending_tasks[task_record.task.task_id] = task_record + return True + + def __get_task(self, task_id: str) -> Optional[TaskRecord]: + with self.__task_lock: +
return self.__pending_tasks.get(task_id, None) + + def __pop_task(self, task_id: str): + with self.__task_lock: + return self.__pending_tasks.pop(task_id, None) + + def __get_all_tasks(self) -> List[TaskRecord]: + with self.__task_lock: + return list(self.__pending_tasks.values()) + + def __add_task_id(self, task_id: str, left: bool = False): + with self.__queue_lock: + if left: + self.__queue.appendleft(task_id) + else: + self.__queue.append(task_id) + + def __pop_task_id(self) -> str: + with self.__queue_lock: + if len(self.__queue) != 0: + return self.__queue.pop() + return "" + + def __is_canceled(self, task_id: str): + with self.__task_lock: + task_record = self.__pending_tasks.get(task_id, None) + if not task_record: + return False + if task_record.state == TaskState.CANCELED: + return True + return False + + def __set_canceled(self, task_id: str) -> TaskState: + with self.__task_lock: + task_record = self.__pending_tasks.get(task_id, None) + if not task_record: + self.__pending_tasks[task_id] = TaskRecord(TaskState.CANCELED) + return TaskState.CANCELED + state = task_record.state + task_record.state = TaskState.CANCELED + return state + + +def scale_out(tasks: List[TaskRecord], instances: List[Instance]): + """judge whether scaling out""" + task_sum = len(list(tasks)) + concurrency_sum = sum([ins.resource.concurrency for ins in instances]) + _logger.debug(f"start to judge scale out {task_sum} {concurrency_sum}") + return task_sum > concurrency_sum diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/task_spec.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/task_spec.py new file mode 100644 index 0000000000000000000000000000000000000000..63d535f6259a96db8a3e7e391e8d42f4f8143b6e --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/task_spec.py @@ -0,0 +1,431 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
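Two small decisions drive the scheduling loop above: scale_out is a pure capacity check (pending tasks versus total concurrency slots), and repeated creation failures back off along __sleep_time_sequence, clamped at its last entry. A standalone restatement of both (should_scale_out and scale_out_delay are illustrative names):

    def should_scale_out(pending_tasks: int, slot_counts: list) -> bool:
        # Mirrors scale_out: only add an instance when demand exceeds capacity.
        return pending_tasks > sum(slot_counts)

    def scale_out_delay(failed_count: int, sequence=(0, 1, 2, 5, 10, 30, 60)) -> int:
        # Mirrors __get_scale_out_delay: clamp at the last (largest) delay.
        return sequence[min(failed_count, len(sequence) - 1)]

    assert should_scale_out(5, [2, 2]) is True      # 5 tasks, 4 slots
    assert should_scale_out(4, [2, 2]) is False     # queue fits capacity
    assert [scale_out_delay(n) for n in (0, 3, 99)] == [0, 5, 60]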
+"""TaskSpec""" + +import json +import logging +import time +import re +from dataclasses import dataclass, field +from enum import Enum +from typing import Callable, Optional, List, Union, Dict + +from google.protobuf.message import Message + +from yr import utils +from yr.config import DEFAULT_RECYCLE_TIME, DEFAULT_CONCURRENCY, \ + ConfigManager, FunctionInfo +from yr.instance_ref import InstanceRef +from yr.rpc import core_service_pb2 +from yr.rpc.common_pb2 import Arg +from yr.serialization import SerializeObject +from yr.utils import ObjectDescriptor, CrossLanguageInfo + +_logger = logging.getLogger(__name__) + + +class Signal(Enum): + """signal for instance""" + KILL_INSTANCE = 1 + KILL_ALL_INSTANCES = 2 + EXIT = 64 + CANCEL = 65 + + +class AffinityType(Enum): + """AffinityType for task""" + PreferredAffinity = 0 + PreferredAntiAffinity = 1 + RequiredAffinity = 2 + RequiredAntiAffinity = 3 + + +class Strdesc: + """ + Descriptor for string fields in InvokeOptions + """ + + def __init__(self, default): + self.__default = default + self.__name = "" + self.name = "" + + def __set_name__(self, _, name): + self.__name = "__" + name + self.name = name + + def __get__(self, obj, _): + if obj is None: + return self.__default + return getattr(obj, self.__name, self.__default) + + def __set__(self, obj, value): + if not isinstance(value, str): + raise TypeError(f"{self.name} has wrong type {type(value)}, " + "'str' is expected.") + if re.search("[/-]", value): + raise ValueError(f"{self.name} contains illegal characters \"/\" or \"-\"") + setattr(obj, self.__name, value) + + +class InvokeType(Enum): + """InvokeType for task""" + INVALID = -1 + CREATE_INSTANCE = 0 + INVOKE_MEMBER_FUNCTION = 1 + CREATE_NORMAL_FUNCTION_INSTANCE = 2 + INVOKE_NORMAL_FUNCTION = 3 + KILL_INSTANCE = 4 + GET_NAMED_INSTANCE_METADATA = 5 + + +@dataclass +class InvokeOptions: + """ + Invoke options for users to set resources + + Attributes: + cpu: in 1/1000 cpu core, 300 to 16000 supported + memory: in 1MB, 128 to 65536 supported + + custom_resources: GPU and NPU resources, legal key including + "nvidia.com/gpu", "amd.com/gpu" and "huawei.com/Ascend910" + + concurrency: a int in the range of [1, 100] + + labels: a list contains affinity labels. The label as item of + this list should be a string of less than 16 characters without + special characters. + + affinity: a dict whose key-value mappings specify affinity labels + and affinity types. The legal key (affinity label) should be + a string of less than 16 characters without special characters. + The legal value (affinity type) should be one of the valid values + of yr.AffinityType, including "PreferredAffinity", + "PreferredAntiAffinity", "RequiredAffinity" and "RequiredAntiAffinity". 
+ """ + cpu: int = 0 + memory: int = 0 + name: Strdesc = Strdesc("") + namespace: Strdesc = Strdesc("") + __concurrency: int = 1 + custom_resources: Dict = field(default_factory=dict) + custom_extensions: Dict = field(default_factory=dict) + __labels = [] + __affinity = {} + + @property + def concurrency(self): + """ + Get concurrency + """ + return self.__concurrency + + @concurrency.setter + def concurrency(self, value: int): + """ + Set concurrency + + Args: + value (int): concurrency + """ + + if isinstance(value, int) is False: + raise TypeError(f"recycle_time {type(value)} type error, 'int' is expected.") + if (1 <= value <= 1000) is False: + raise ValueError(f"invalid concurrency value, expect 1 <= concurrency <= 1000, actual {value}") + self.__concurrency = value + + @property + def labels(self): + """ + Get labels + """ + return self.__labels + + @labels.setter + def labels(self, labels: List): + """ + Set labels + + Args: + labels (List): labels + """ + if not isinstance(labels, List): + raise TypeError(f"labels has wrong type {type(labels)}, " + f"'list' is expected.") + for label in labels: + if not self._check_label_valid(label): + raise ValueError("label should be a string of " + "less than 16 characters without " + "special characters.") + self.__labels = labels + + @property + def affinity(self): + """ + Get affinity + """ + return self.__affinity + + @affinity.setter + def affinity(self, affinity: Dict): + """ + Set affinity + + Args: + affinity (Dict): affinity + """ + if not isinstance(affinity, Dict): + raise TypeError(f"affinity has wrong type {type(affinity)}, " + f"'dict' is expected.") + for label, affinity_type in affinity.items(): + if not self._check_label_valid(label): + raise ValueError("label should be a string of " + "less than 16 characters without " + "special characters.") + if not isinstance(affinity_type, AffinityType): + raise TypeError(f"affinity_type has wrong type " + f"{type(affinity_type)}, " + f"'AffinityType' is expected.") + self.__affinity = affinity + + @staticmethod + def _check_label_valid(label): + """ + check if label is valid + """ + if isinstance(label, str) is False: + return False + if len(label) > 16: + return False + if re.search(r"\W", label) is not None: + return False + return True + + def to_pb(self): + """ + Transfer invoke options to protobuf + """ + scheduling_ops = core_service_pb2.SchedulingOptions() + if self.cpu > 0: + scheduling_ops.resources["CPU"] = self.cpu + if self.memory > 0: + scheduling_ops.resources["Memory"] = self.memory + for resource_key, resource_value in self.custom_resources.items(): + scheduling_ops.resources[resource_key] = resource_value + for extension_key, extension_value in self.custom_extensions.items(): + scheduling_ops.extension[extension_key] = f"{extension_value}" + for affinity_key, affinity_value in self.__affinity.items(): + scheduling_ops.affinity[affinity_key] = affinity_value.value + return scheduling_ops + + def need_order(self): + """ + Judge if need send request in order + """ + return self.__concurrency == 1 + + def build_designated_instance_id(self) -> str: + """Builds an instanceID for the creation of an instance, depending on + the value of name and namespace. + + Raises: + ValueError: An error occurred when name is an empty string but namespace is not. + + Returns: + str: Returns a string builded from name and namespace. 
+ """ + if not self.namespace: + return self.name + + if not self.name: + raise ValueError( + f"name is not defined in InvokeOptions while namespace=\"{self.namespace}\"") + return self.namespace + "-" + self.name + + +@dataclass +class Task: + """invoke task""" + invoke_type: InvokeType + task_id: str + object_descriptor: ObjectDescriptor + args_list: list + code_id: str + trace_id: str = "" + instance_id: InstanceRef = None + invoke_options: InvokeOptions = None + target_function_key: str = None + return_obj_list: list = field(default_factory=list) + + +@dataclass +class TaskMetadata: + """ + TaskMetadata is used to convey control information + { + "codeID": "yr-obj-xxx", + "invokeType": 0, + "concurrency": 1, + "objectDescriptor": { + "moduleName": "xxx", + "className": "xxx", + "functionName": "xxx", + "srcLanguage": "python", + "targetLanguage": "cpp" + }, + "config": { + "recycleTime": 10, + "functionID": { + "cpp": "xxx/xxx/$latest", + "python": "xxx/xxx/$latest" + }, + "jobID": "job-xxx", + "logLevel": "WARNING" + } + } + """ + function_id_python: str + job_id: str + object_descriptor: ObjectDescriptor + invoke_type: InvokeType + concurrency: int = DEFAULT_CONCURRENCY + recycle_time: int = DEFAULT_RECYCLE_TIME + code_id: str = "" + function_id_cpp: str = "" + function_id_java: str = "" + log_level: str = "WARNING" + __key_invoke_type = "invokeType" + __key_code_id = "codeID" + __key_object_descriptor = "objectDescriptor" + __key_config = "config" + __key_recycle_time = "recycleTime" + __key_function_id = "functionID" + __key_function_id_cpp = "cpp" + __key_function_id_python = "python" + __key_function_id_java = "java" + __key_job_id = "jobID" + __key_log_level = "logLevel" + __key_concurrency = "concurrency" + + @classmethod + def create(cls, object_descriptor, code_id, invoke_type, + concurrency=DEFAULT_CONCURRENCY): + """create metadata""" + level = ConfigManager().log_level + level = level if isinstance(level, str) else logging.getLevelName(level) + return cls(function_id_python=ConfigManager().get_function_id_by_language(utils.LANGUAGE_PYTHON), + function_id_cpp=ConfigManager().get_function_id_by_language(utils.LANGUAGE_CPP), + job_id=ConfigManager().job_id, + object_descriptor=object_descriptor, + recycle_time=ConfigManager().recycle_time, + code_id=code_id, + invoke_type=invoke_type, + log_level=level, + concurrency=concurrency) + + @classmethod + def parse(cls, data): + """ + parse TaskMetadata from json or dict + + Returns: TaskMetadata object + """ + tmp = data + if isinstance(tmp, bytes): + tmp = tmp.decode() + if isinstance(tmp, str): + tmp = json.loads(tmp) + if isinstance(tmp, dict): + if cls.__key_object_descriptor not in tmp or cls.__key_config not in tmp: + raise TypeError(f"can not parse object: {tmp}") + object_descriptor = ObjectDescriptor.parse(tmp.get(cls.__key_object_descriptor)) + config = tmp.get(cls.__key_config) + function_id = config.get(cls.__key_function_id, {}) + log_level = config.get(cls.__key_log_level, "WARNING") + concurrency = tmp.get(cls.__key_concurrency, DEFAULT_CONCURRENCY) + self = cls(function_id_python=function_id.get(cls.__key_function_id_python, ""), + job_id=config.get(cls.__key_job_id, ""), + object_descriptor=object_descriptor, + recycle_time=config.get(cls.__key_recycle_time, cls.recycle_time), + code_id=tmp.get(cls.__key_code_id, ""), + function_id_cpp=function_id.get(cls.__key_function_id_cpp, ""), + function_id_java=function_id.get(cls.__key_function_id_java, ""), + invoke_type=InvokeType(tmp.get(cls.__key_invoke_type, 
InvokeType.INVALID.value)), + log_level=log_level, + concurrency=concurrency) + else: + raise TypeError(f"can not parse from type {type(data)}") + return self + + def to_json(self): + """ + convert to json + Returns: json + """ + tmp = { + self.__key_code_id: self.code_id, + self.__key_invoke_type: self.invoke_type.value, + self.__key_concurrency: self.concurrency, + self.__key_object_descriptor: self.object_descriptor.to_dict(), + self.__key_config: { + self.__key_recycle_time: self.recycle_time, + self.__key_function_id: { + self.__key_function_id_cpp: self.function_id_cpp, + self.__key_function_id_python: self.function_id_python, + self.__key_function_id_java: self.function_id_java + }, + self.__key_job_id: self.job_id, + self.__key_log_level: self.log_level + } + } + return json.dumps(tmp) + + +@dataclass +class TaskSpec: + """TaskSpec""" + task_id: str + job_id: str + invoke_type: InvokeType + invoke_timeout: int + callback: Callable[[Optional[Message], Optional[Exception]], None] + object_ids: list = field(default_factory=list) + trace_id: str = "" + code_id: str = "" + request_id: str = "" + function_info: FunctionInfo = None + object_descriptor: ObjectDescriptor = None + instance_id: InstanceRef = None + invoke_options: InvokeOptions = None + cross_language_info: CrossLanguageInfo = None + args: List[Union[SerializeObject, Arg]] = None + signal: int = 0 + + +def callback_hook(callback: Callable, task_id: str = '', invoke_type: InvokeType = InvokeType.INVALID, + object_counting_mgr=None): + """process task exception""" + start = time.time() + _logger.info(f"task submitted: {task_id} type: {invoke_type}") + + def hook(data, err=None): + _logger.info(f"task finished: {task_id} cost: {time.time() - start:.3f}") + if object_counting_mgr: + object_counting_mgr.del_record(task_id) + callback(data, err) + + return hook diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/worker.py b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/worker.py new file mode 100644 index 0000000000000000000000000000000000000000..4713ea8d25eece39c4b4c9a6ca7af95d880483aa --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/runtime/worker.py @@ -0,0 +1,147 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
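InvokeOptions above validates at assignment time rather than at submit time; a quick tour of the setters with illustrative values:

    from yr.runtime.task_spec import InvokeOptions

    opts = InvokeOptions(cpu=500, memory=1024)
    opts.concurrency = 10                     # accepted: 1 <= value <= 1000
    opts.labels = ["gpu_node"]                # accepted: short, no special chars
    opts.name, opts.namespace = "worker", "demo"
    assert opts.build_designated_instance_id() == "demo-worker"

    try:
        opts.labels = ["bad/label"]           # "/" fails _check_label_valid
    except ValueError as err:
        print(err)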
+"""local mode worker""" +import concurrent.futures +import logging +import queue +import threading +import traceback + +import yr +from yr import signature +from yr.exception import YRInvokeError, YRError +from yr.object_ref import ObjectRef +from yr.rpc import common_pb2 +from yr.config import ConfigManager +from yr.rpc.core_service_pb2 import CreateResponse, InvokeResponse +from yr.runtime.local_object_store import LocalObjectStore +from yr.runtime.task_spec import InvokeType +from yr.runtime.task_spec import TaskSpec +from yr.serialization import SerializeObject, Serialization +from yr.utils import binary_to_hex + +_logger = logging.getLogger(__name__) + + +class Worker(threading.Thread): + """Local mode worker""" + _instance = None + _queue = None + _running = False + _instance_id = '' + pool = None + + def init(self, instance_id: str, concurrency): + """init worker and create instance""" + self.pool = concurrent.futures.ThreadPoolExecutor(max_workers=concurrency, thread_name_prefix=instance_id) + self._instance_id = instance_id + self._queue = queue.Queue() + self._running = True + self.setDaemon(True) + self.start() + _logger.debug(f"init {self._instance_id} finished.") + + def run(self) -> None: + """main loop""" + while self._running: + task = self._queue.get() + if task is None: + break + future = self.pool.submit(self._execute, task) + future.add_done_callback(_error_logging) + + def submit(self, task: TaskSpec): + """submit a invoke task""" + self._queue.put(task) + + def stop(self, timeout=None): + """stop worker""" + self._running = False + self._queue.put(None) + self.join(timeout) + while not self._queue.empty(): + item = self._queue.get_nowait() + if item is None: + break + item.callback(None, RuntimeError("worked is stopped.")) + self.pool.shutdown(wait=False) + + def _execute(self, task: TaskSpec): + try: + result = self._invoke_function(task) + except Exception as err: + if isinstance(err, YRInvokeError): + result = YRInvokeError(err.cause, traceback.format_exc()) + else: + result = YRInvokeError(err, traceback.format_exc()) + + try: + if task.invoke_type in ( + InvokeType.CREATE_INSTANCE, + InvokeType.CREATE_NORMAL_FUNCTION_INSTANCE): + if isinstance(result, YRError): + code = common_pb2.ERR_USER_FUNCTION_EXCEPTION + message = binary_to_hex(Serialization().serialize(result, "", ConfigManager.in_cluster).data) + else: + code = common_pb2.ERR_NONE + message = "" + task.callback(CreateResponse(code=code, message=message, instanceID=self._instance_id), None) + else: + object_id = task.object_ids[0] + LocalObjectStore().put(object_id, result) + task.callback(InvokeResponse(returnObjectID=object_id), None) + except Exception as e: + task.callback(None, YRInvokeError(e, traceback.format_exc())) + _logger.debug(f"succeed to call, traceID: {task.task_id}") + + def _invoke_function(self, task: TaskSpec): + task.args = _process_args(task.args[1:]) + args, kwargs = signature.recover_args(task.args) + if task.invoke_type == InvokeType.CREATE_INSTANCE: + return self._create_instance(task, *args, **kwargs) + if task.invoke_type == InvokeType.INVOKE_MEMBER_FUNCTION: + return self._instance_function(task, *args, **kwargs) + if task.invoke_type == InvokeType.CREATE_NORMAL_FUNCTION_INSTANCE: + return None + return _normal_function(task, *args, **kwargs) + + def _instance_function(self, task: TaskSpec, *args, **kwargs): + instance_function_name = task.object_descriptor.function_name + return getattr(self._instance, instance_function_name)(*args, **kwargs) + + def _create_instance(self, 
task: TaskSpec, *args, **kwargs): + code = LocalObjectStore().get(task.code_id) + self._instance = code(*args, **kwargs) + + +def _error_logging(future: concurrent.futures.Future): + if future.exception() is not None: + _logger.exception(future.exception()) + + +def _normal_function(task: TaskSpec, *args, **kwargs): + code = LocalObjectStore().get(task.code_id) + return code(*args, **kwargs) + + +def _process_args(args_list): + def func(arg): + if isinstance(arg, ObjectRef): + return yr.get(arg) + if isinstance(arg, SerializeObject): + return arg.origin_object + return arg + + return list(map(func, args_list)) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/serialization.py b/dsoftbus/dist_executor/modules/runtime/python/yr/serialization.py new file mode 100644 index 0000000000000000000000000000000000000000..0c28572b5f0d68e95f2edcd13816117b7a823739 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/serialization.py @@ -0,0 +1,188 @@ +#!/usr/bin/env python +# coding=utf-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. +"""Serialization""" +import threading +from dataclasses import dataclass +from dataclasses import field +from typing import Set, List, Union +import pickle + +import msgpack +import cloudpickle +from cloudpickle import CloudPickler +from datasystem import object_cache + +from yr.utils import Singleton +from yr.object_ref import ObjectRef +from yr.storage import reference_count +from yr.serialize.serialization import Pickle5Writer, unpack_pickle5_buffers + +OBJECT_TYPE_NORMAL = 0 +OBJECT_TYPE_PYTHON = 1 +YR_ZEROCOPY_PREFIX = b'yr-zerocopy!' 
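+# The prefix above tags zero-copy payloads so that multi_deserialize (below)
+# can route them to the pickle5 out-of-band path; plain cloudpickle payloads
+# and msgpack payloads take the other branches. Illustrative round trip of
+# the msgpack path, assuming the default (empty) hook lists:
+#
+#     s = Serialization()
+#     blob = s.serialize({"k": 1}, use_msgpack=True).data
+#     assert s.deserialize(blob, use_msgpack=True) == {"k": 1}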
diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/serialization.py b/dsoftbus/dist_executor/modules/runtime/python/yr/serialization.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c28572b5f0d68e95f2edcd13816117b7a823739
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/yr/serialization.py
@@ -0,0 +1,188 @@
+#!/usr/bin/env python
+# coding=utf-8
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+"""Serialization"""
+import threading
+from dataclasses import dataclass
+from dataclasses import field
+from typing import Set, List, Union
+import pickle
+
+import msgpack
+import cloudpickle
+from cloudpickle import CloudPickler
+from datasystem import object_cache
+
+from yr.utils import Singleton
+from yr.object_ref import ObjectRef
+from yr.storage import reference_count
+from yr.serialize.serialization import Pickle5Writer, unpack_pickle5_buffers
+
+OBJECT_TYPE_NORMAL = 0
+OBJECT_TYPE_PYTHON = 1
+YR_ZEROCOPY_PREFIX = b'yr-zerocopy!'
+global_thread_local = threading.local()
+
+
+@dataclass
+class SerializeObject:
+    """
+    SerializeObject
+    """
+    object_type: int
+    data: bytes
+    refs: Set[ObjectRef] = field(default_factory=set)
+    origin_object: object = None
+    writer: object = None
+    inband: object = None
+
+
+def _object_ref_deserializer(object_id):
+    ref = ObjectRef(object_id=object_id, need_incre=False)
+    global_thread_local.object_refs.append(ref)
+    return ref
+
+
+@Singleton
+class Serialization:
+    """serialization context"""
+    _lock = threading.Lock()
+    _protocol = None
+    _msgpack_serialize_hooks = []
+    _msgpack_deserialize_hooks = []
+
+    def __init__(self):
+        self._thread_local = threading.local()
+
+        def object_ref_reducer(obj: ObjectRef):
+            global_thread_local.object_refs.append(obj)
+            return _object_ref_deserializer, (obj.id,)
+
+        CloudPickler.dispatch[ObjectRef] = object_ref_reducer
+
+    @staticmethod
+    def _serialize_to_pickle5(value):
+        writer = Pickle5Writer()
+        inband = cloudpickle.dumps(
+            value, protocol=5, buffer_callback=writer.buffer_callback)
+        return writer, inband
+
+    @staticmethod
+    def _deserialize_pickle5_data(data):
+        in_band, buffers = unpack_pickle5_buffers(data)
+        try:
+            if len(buffers) > 0:
+                obj = cloudpickle.loads(in_band, buffers=buffers)
+            else:
+                obj = cloudpickle.loads(in_band)
+        except EOFError as e:
+            raise ValueError("EOFError") from e
+        return obj
+
+    def serialize(self, value, ref="", zero_copy=False, use_msgpack=False) -> SerializeObject:
+        """serialize"""
+        if use_msgpack:
+            return SerializeObject(object_type=OBJECT_TYPE_NORMAL, data=msgpack.packb(value, default=self._pack))
+
+        global_thread_local.object_refs = []
+        if zero_copy and ref != "":
+            writer, inband = self._serialize_to_pickle5(value)
+            data = cloudpickle.dumps(ref, self._protocol)
+        else:
+            data = cloudpickle.dumps(value, self._protocol)
+        refs = set(global_thread_local.object_refs)
+        global_thread_local.object_refs = []
+        if zero_copy and ref != "":
+            return SerializeObject(object_type=OBJECT_TYPE_PYTHON, data=data, refs=refs, origin_object=value,
+                                   writer=writer,
+                                   inband=inband)
+        return SerializeObject(object_type=OBJECT_TYPE_PYTHON, data=data, refs=refs, origin_object=value, writer=None,
+                               inband=None)
+
+    def deserialize(self, value: bytes, use_msgpack=False):
+        """deserialize"""
+        result = self.multi_deserialize([value], use_msgpack)
+        return result[0]
+
+    def multi_deserialize(self, values: List[Union[bytes, object_cache.Buffer]], use_msgpack=False):
+        """deserialize a list of values"""
+        if use_msgpack:
+            return [msgpack.unpackb(value, object_hook=self._unpack, use_list=False) for value in values]
+
+        global_thread_local.object_refs = []
+        result = []
+        with self._lock:
+            self._handle_deserialize_value(values, result)
+
+        refs = [ref.id for ref in global_thread_local.object_refs]
+        global_thread_local.object_refs = []
+        reference_count.increase_reference_count(refs)
+        return result
+
+    def register_pack_hook(self, hook):
+        """register pack hook to Serialization"""
+        self._msgpack_serialize_hooks.append(hook)
+
+    def register_unpack_hook(self, hook):
+        """register unpack hook to Serialization"""
+        self._msgpack_deserialize_hooks.append(hook)
+    def _handle_deserialize_value(self, values, result):
+        for value in values:
+            v = value
+            if v is None:
+                result.append(v)
+                continue
+            try:
+                if isinstance(value, object_cache.Buffer):
+                    v = value.immutable_data()
+                    result.append(self._deserialize_pickle5_data(v))
+                elif isinstance(value, bytes) and value[:len(YR_ZEROCOPY_PREFIX)] == YR_ZEROCOPY_PREFIX:
+                    result.append(self._deserialize_pickle5_data(value))
+                else:
+                    result.append(cloudpickle.loads(v))
+            except ValueError:
+                result.append(cloudpickle.loads(v))
+            except pickle.UnpicklingError:
+                result.append(msgpack.unpackb(v, object_hook=self._unpack, use_list=False))
+
+    def _pack(self, obj):
+        for hook in self._msgpack_serialize_hooks:
+            if callable(hook):
+                obj = hook(obj)
+        return obj
+
+    def _unpack(self, obj):
+        for hook in self._msgpack_deserialize_hooks:
+            if callable(hook):
+                obj = hook(obj)
+        return obj
+
+
+def register_pack_hook(hook):
+    """register pack hook helper"""
+    Serialization().register_pack_hook(hook)
+
+
+def register_unpack_hook(hook):
+    """register unpack hook helper"""
+    Serialization().register_unpack_hook(hook)
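Note: the msgpack hook mechanism above is easiest to see with a roundtrip. Only register_pack_hook/register_unpack_hook and the use_msgpack flag come from this file; the complex-number mapping below is an illustrative assumption.

# Teach the msgpack path to carry complex numbers (sketch).
from yr import serialization

def pack_complex(obj):
    # Called by msgpack for objects it cannot encode natively.
    if isinstance(obj, complex):
        return {"__complex__": True, "re": obj.real, "im": obj.imag}
    return obj

def unpack_complex(obj):
    # Called for every decoded dict; reverses pack_complex.
    if isinstance(obj, dict) and obj.get("__complex__"):
        return complex(obj["re"], obj["im"])
    return obj

serialization.register_pack_hook(pack_complex)
serialization.register_unpack_hook(unpack_complex)

ctx = serialization.Serialization()
blob = ctx.serialize(1 + 2j, use_msgpack=True).data
assert ctx.deserialize(blob, use_msgpack=True) == 1 + 2j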
diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c9dd874f24d862a78307ba03a8c60fed6fd6d1a
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/__init__.py
@@ -0,0 +1,14 @@
+#!/usr/bin/env python3
+# coding=UTF-8
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
\ No newline at end of file
diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/libprotobuf.so.24 b/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/libprotobuf.so.24
new file mode 100644
index 0000000000000000000000000000000000000000..724d8c119371486580274689fc0ea3eda06df459
Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/libprotobuf.so.24 differ
diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/libprotobuf.so.24.0.0 b/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/libprotobuf.so.24.0.0
new file mode 100644
index 0000000000000000000000000000000000000000..724d8c119371486580274689fc0ea3eda06df459
Binary files /dev/null and b/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/libprotobuf.so.24.0.0 differ
diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/setup.py b/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/setup.py
new file mode 100644
index 0000000000000000000000000000000000000000..c084afa7af9734443ce80d85d4476b320076c0e0
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/yr/serialize/setup.py
@@ -0,0 +1,45 @@
+#!/usr/bin/env python3
+# coding=UTF-8
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+import os
+from pathlib import Path
+
+from setuptools import Extension, setup
+from Cython.Build import cythonize
+
+setup_abs_path = os.getcwd()
+
+protoc = os.popen("which protoc").read()
+protoc_bin = protoc.split("\n")[0]
+protoc_bin_path = protoc_bin.split("bin")[0]
+PROTOBUF_INCLUDE_PATH = str(Path(protoc_bin_path, "include"))
+PROTOBUF_LIBRARY_PATH = str(Path(protoc_bin_path, "lib"))
+PROTOBUF_SO_PATH = str(Path(PROTOBUF_LIBRARY_PATH, "libprotobuf.so.24*"))
+cp_cmd = "cp " + PROTOBUF_SO_PATH + " " + setup_abs_path
+# read() blocks until cp finishes, so the shared object is in place before compilation
+os.popen(cp_cmd).read()
+
+extensions = [
+    Extension("serialization", ["serialization.pyx", "serialization.pb.cc"],
+              include_dirs=[setup_abs_path, PROTOBUF_INCLUDE_PATH],
+              library_dirs=[setup_abs_path, PROTOBUF_LIBRARY_PATH],
+              libraries=["protobuf"],
+              runtime_library_dirs=['$ORIGIN'],
+              language="c++"),
+]
+
+setup(
+    name="serialization",
+    ext_modules=cythonize(extensions)
+)
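Note: the copy step above shells out to `cp`; a pure-stdlib alternative for the same step, under the same glob assumption, avoids depending on a shell at all:

# Portable replacement for the shell `cp` in setup.py (sketch).
import glob
import shutil
from pathlib import Path

def copy_protobuf_libs(library_path: str, dest: str) -> None:
    # Expand libprotobuf.so.24* and copy each match next to the sources.
    for so_file in glob.glob(str(Path(library_path, "libprotobuf.so.24*"))):
        shutil.copy2(so_file, dest)  # copy2 preserves metadata and blocks until done

# copy_protobuf_libs(PROTOBUF_LIBRARY_PATH, setup_abs_path)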
+"""signature""" +import inspect + +PLACEHOLDER = b"__YR_PLACEHOLDER__" + + +def get_signature(func, ignore_first=False): + """get func signature""" + if not ignore_first: + return inspect.signature(func) + return inspect.Signature(parameters=list(inspect.signature(func).parameters.values())[1:]) + + +def package_args(signature, args, kwargs): + """check and package args to a list""" + if signature is not None: + try: + signature.bind(*args, **kwargs) + except TypeError as e: + raise TypeError(str(e)) from None + + args_list = [] + for arg in args: + args_list += [PLACEHOLDER, arg] + + for key, value in kwargs.items(): + args_list += [key, value] + return args_list + + +def recover_args(args_list): + """recover args from list""" + args = [] + kwargs = {} + for i in range(0, len(args_list), 2): + key, value = args_list[i], args_list[i + 1] + if key == PLACEHOLDER: + args.append(value) + else: + kwargs[key] = value + + return args, kwargs diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/storage/__init__.py b/dsoftbus/dist_executor/modules/runtime/python/yr/storage/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f773b9a79f633ef780b397df07d92359163c1124 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/storage/__init__.py @@ -0,0 +1,18 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. + +""" +Storage +""" diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/storage/ds_client.py b/dsoftbus/dist_executor/modules/runtime/python/yr/storage/ds_client.py new file mode 100644 index 0000000000000000000000000000000000000000..75a5e0983de51287325d6db5bbab24ccfd497332 --- /dev/null +++ b/dsoftbus/dist_executor/modules/runtime/python/yr/storage/ds_client.py @@ -0,0 +1,174 @@ +#!/usr/bin/env python3 +# coding=UTF-8 +# Copyright (c) 2022 Huawei Technologies Co., Ltd +# +# This software is licensed under Mulan PSL v2. +# You can use this software according to the terms and conditions of the Mulan PSL v2. +# You may obtain a copy of Mulan PSL v2 at: +# +# http://license.coscl.org.cn/MulanPSL2 +# +# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, +# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, +# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE. +# See the Mulan PSL v2 for more details. 
+""" +Datasystem client +""" +import logging +import threading +from typing import List, Union + +from datasystem import object_cache +from datasystem.agent_client import AgentClient +from datasystem.stream_client import StreamClient + +_logger = logging.getLogger(__name__) +FLAG_LENGTH = 12 + + +class DSClient: + """ + Datasystem client + """ + __slots__ = ["__ds_client", "__ctx_ref_map", "__ctx_lock", "__stream_client"] + + def __init__(self): + self.__ds_client = None + self.__stream_client = None + self.__ctx_ref_map = {} + self.__ctx_lock = threading.Lock() + + def init(self, client: Union[AgentClient, object_cache.ObjectClient], stream_client: StreamClient): + """ + Initialize datasystem client + """ + if not isinstance(client, (AgentClient, object_cache.ObjectClient)): + raise RuntimeError(f"ds client not support: {type(client)}, only AgentClient or ObjectClient") + self.__ds_client = client + self.__stream_client = stream_client + + def create(self, object_id: str, dependency_refs: List[str], writer, inband): + """ + create buffer + """ + total_size = writer.get_total_bytes(inband) + FLAG_LENGTH + buf = self.__ds_client.create(object_id, total_size, + param={"write_mode": object_cache.WriteMode.NONE_L2_CACHE, + "consistency_type": object_cache.ConsistencyType.PRAM}) + + buf.wlatch() + try: + writer.write_to(inband, buf.mutable_data(), 1) + except ValueError as e: + buf.unwlatch() + raise RuntimeError(f"failed to copy object {object_id} in create") from e + try: + buf.seal(dependency_refs) + except (ValueError, RuntimeError) as e: + raise RuntimeError(f"failed to put object {object_id} in create") from e + finally: + buf.unwlatch() + + def put(self, object_id: str, value: bytes, dependency_refs: List[str]): + """ + Put object to datasystem + """ + if isinstance(self.__ds_client, object_cache.ObjectClient): + buf = self.__ds_client.create(object_id, len(value), + param={"write_mode": object_cache.WriteMode.NONE_L2_CACHE, + "consistency_type": object_cache.ConsistencyType.PRAM}) + buf.wlatch() + buf.memory_copy(value) + try: + buf.seal(dependency_refs) + except (ValueError, RuntimeError) as e: + raise RuntimeError(f"failed to put object {object_id}") from e + finally: + buf.unwlatch() + else: + try: + self.__ds_client.put(object_id, value, dependency_refs) + except (ValueError, RuntimeError) as e: + raise RuntimeError(f"failed to put object {object_id}") from e + + def get(self, object_id: Union[str, List], timeout: int) -> List[Union[bytes, object_cache.Buffer]]: + """ + Get object from datasystem + """ + object_ids = object_id + if isinstance(object_id, str): + object_ids = [object_id] + try: + return self.__ds_client.get(object_ids, int(timeout * 1000)) + except RuntimeError as e: + raise RuntimeError(f"get {object_id} failed") from e + + def increase_global_reference(self, object_ids: List[str]): + """ + Increase global reference + """ + self.__ds_client.g_increase_ref(object_ids) + self.increase_ctx_reference(object_ids) + + def increase_ctx_reference(self, object_ids: List[str]): + """ + Increase context reference + """ + with self.__ctx_lock: + for obj_id in object_ids: + self.__ctx_ref_map[obj_id] = self.__ctx_ref_map.get(obj_id, 0) + 1 + + def decrease_global_reference(self, object_ids: List[str]): + """ + Decrease global reference + """ + need_decrease_objs = self.decrease_ctx_reference(object_ids) + if len(need_decrease_objs) == 0: + return + self.__ds_client.g_decrease_ref(need_decrease_objs) + + def decrease_ctx_reference(self, object_ids: List[str]) -> List[str]: + """ + 
+    def decrease_ctx_reference(self, object_ids: List[str]) -> List[str]:
+        """
+        Decrease context reference
+        """
+        with self.__ctx_lock:
+            need_decrease_objs = []
+            for obj_id in object_ids:
+                cnt = self.__ctx_ref_map.get(obj_id, None)
+                if cnt is None:
+                    continue
+                if cnt > 0:
+                    cnt -= 1
+                    self.__ctx_ref_map[obj_id] -= 1
+                    need_decrease_objs.append(obj_id)
+                if cnt <= 0:
+                    self.__ctx_ref_map.pop(obj_id)
+            return need_decrease_objs
+
+    def clear_ctx_reference(self):
+        """
+        Clear context reference
+        """
+        need_decrease_objs = []
+        for object_id, cnt in self.__ctx_ref_map.items():
+            need_decrease_objs += [object_id] * cnt
+        _logger.debug(f"clear context reference objs:{need_decrease_objs}")
+        if len(need_decrease_objs) == 0:
+            return
+        self.__ctx_ref_map.clear()
+        self.__ds_client.g_decrease_ref(need_decrease_objs)
+
+    def is_obj_id_in_ctx(self, object_id: str) -> bool:
+        """
+        Is object_id in context
+        """
+        return object_id in self.__ctx_ref_map
+
+    def get_stream_client(self) -> StreamClient:
+        """
+        get stream client for data system.
+        """
+        if not self.__stream_client:
+            raise RuntimeError("not support stream out of cluster")
+        return self.__stream_client
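Note: the context-reference map above guarantees a process never sends more global decrements than it previously added. A hedged sketch of the intended call pattern; constructing the underlying AgentClient/ObjectClient and StreamClient depends on the datasystem deployment and is elided:

# Call-pattern sketch only; client construction is deployment-specific.
from yr.storage.ds_client import DSClient

ds = DSClient()
# ds.init(client, stream_client)            # AgentClient or ObjectClient
# ds.increase_global_reference(["obj-1"])   # +1 globally, +1 in this context
# ds.decrease_global_reference(["obj-1"])   # forwarded to the datasystem only
#                                           # while this context holds a count
# ds.clear_ctx_reference()                  # releases everything still held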
+"""ReferenceCount""" +import logging +import threading +from collections import deque +from typing import List, Union + +from yr.storage.ds_client import DSClient +from yr.utils import Singleton + +_logger = logging.getLogger(__name__) + + +class BlockingQueue: + """BlockingQueue""" + __slots__ = ["__queue", "__not_empty"] + + def __init__(self): + self.__queue = deque() + self.__not_empty = threading.Event() + + def set_no_wait(self): + """set queue no wait""" + self.__not_empty.set() + + def len(self): + """get length of queue""" + return len(self.__queue) + + def append(self, elem: list): + """append an element to queue""" + if len(elem) == 0: + return + self.__queue.append(elem) + self.__not_empty.set() + + def pop_all(self) -> list: + """wait and pop all elements""" + if len(self.__queue) == 0: + self.__not_empty.wait() + self.__not_empty.clear() + + elems = [] + for _ in range(len(self.__queue)): + elems += self.__queue.popleft() + return elems + + +@Singleton +class ReferenceCount: + """global reference count""" + __slots__ = ["__ds_client", "__queue", "__is_running", "__send_thread"] + + def __init__(self): + self.__ds_client = None + self.__queue = None + self.__is_running = False + self.__send_thread = None + + @property + def is_init(self): + """return reference count state""" + return self.__ds_client is not None + + def init(self, ds_client: DSClient): + """init reference count""" + self.__ds_client = ds_client + self.__queue = BlockingQueue() + self.__is_running = True + self.__send_thread = threading.Thread(target=self.__process, name="YRReferenceCount", daemon=True) + self.__send_thread.start() + + def stop(self): + """stop reference count""" + if not self.is_init: + return + + _logger.debug(f"[Reference Counting] ReferenceCount stop") + self.__is_running = False + self.__queue.set_no_wait() + self.__send_thread.join() + self.__ds_client = None + + def increase(self, object_ids: list): + """increase global reference""" + _logger.debug(f"[Reference Counting] datasystem incre ref count, object_ids: {object_ids}") + self.__ds_client.increase_global_reference(object_ids) + + def decrease(self, object_ids: list): + """decrease global reference""" + self.__queue.append(object_ids) + + def is_obj_in_ctx(self, object_ref) -> bool: + """is objectRef in context""" + if self.__ds_client is None or object_ref is None: + return False + return self.__ds_client.is_obj_id_in_ctx(object_ref.id) + + def __process(self) -> None: + """decrease global reference""" + while self.__is_running or self.__queue.len() != 0: + object_ids = self.__queue.pop_all() + if len(object_ids) == 0: + continue + _logger.debug(f"[Reference Counting] datasystem decre ref count, object_ids: {object_ids}") + self.__ds_client.decrease_global_reference(object_ids) + + +def increase_reference_count(object_ids: Union[str, List]): + """increase global reference""" + if not ReferenceCount().is_init or len(object_ids) == 0: + return + if isinstance(object_ids, str): + object_ids = [object_ids] + + ReferenceCount().increase(object_ids) + + +def decrease_reference_count(object_ids: Union[str, List]): + """decrease global reference""" + if not ReferenceCount().is_init or len(object_ids) == 0: + return + if isinstance(object_ids, str): + object_ids = [object_ids] + + ReferenceCount().decrease(object_ids) diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/utils.py b/dsoftbus/dist_executor/modules/runtime/python/yr/utils.py new file mode 100644 index 
diff --git a/dsoftbus/dist_executor/modules/runtime/python/yr/utils.py b/dsoftbus/dist_executor/modules/runtime/python/yr/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9b40b396e5036403855001955c88a7a2c8f7cec3
--- /dev/null
+++ b/dsoftbus/dist_executor/modules/runtime/python/yr/utils.py
@@ -0,0 +1,397 @@
+#!/usr/bin/env python3
+# coding=UTF-8
+# Copyright (c) 2022 Huawei Technologies Co., Ltd
+#
+# This software is licensed under Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+#
+# http://license.coscl.org.cn/MulanPSL2
+#
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
+# EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
+# MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
+# See the Mulan PSL v2 for more details.
+
+"""
+common tools
+"""
+import enum
+import json
+import threading
+import re
+import logging
+import uuid
+import inspect
+import binascii
+import sys
+from dataclasses import dataclass
+
+import cloudpickle
+from google.protobuf.json_format import MessageToDict
+
+# http headers
+HEADER_TRACE_ID = 'X-Trace-ID'
+HEADER_EVENT_SOURCE = "X-Event-Source-Id"
+HEADER_INVOKE_URN = "X-Tag-VersionUrn"
+CONTENT_TYPE = "Content-Type"
+CONTENT_TYPE_APPLICATION_JSON = "application/json"
+AUTHORIZATION = "authorization"
+HEADER_INVOKE_TYPE = "X-Invoke-Type"
+HEADER_STATE_KEY = "X-State-Key"
+
+NORMAL_FUNCTION = "normal function"
+MEMBER_FUNCTION = "member function"
+DEFAULT_TIMEOUT = 900
+OBJECT_ID_PREFIX = "yr-api-obj-"
+_URN_SEPARATOR = ":"
+_TRACE_ID = "-trace-"
+_NAME_SEPARATOR = "-"
+
+LANGUAGE_PYTHON = "python"
+LANGUAGE_CPP = "cpp"
+FUNC_NAME = "functionName"
+MODULE_NAME = "moduleName"
+CLASS_NAME = "className"
+TARGET_LANGUAGE = "targetLanguage"
+SRC_LANGUAGE = "srcLanguage"
+INSTANCE_ID = "instanceID"
+FUNCTION_KEY = "functionKey"
+CLASS_METHOD = "classMethod"
+NEED_ORDER = "needOrder"
+_logger = logging.getLogger(__name__)
+
+_JOB_ID = ""
+_RUNTIME_ID_INDEX = -2
+_SERIAL_NUM_INDEX = -1
+ """ + + def __init__(self, cls): + self._cls = cls + self._instance = {} + self.lock = threading.Lock() + + def __call__(self, *args, **kw): + if self._cls not in self._instance: + with self.lock: + if self._cls not in self._instance: + self._instance[self._cls] = self._cls(*args, **kw) + return self._instance.get(self._cls) + + +def validate_ip(input_ip: str): + """ + This is a checker for input ip string + + Checks validity of input ip string + + Returns: + True, the input ip string is valid + False, the input ip string is invalid + """ + ip_regex = \ + r"^((25[0-5]|2[0-4]\d|[01]?\d\d?)\.){3}(25[0-5]|2[0-4]\d|[01]?\d\d?)$" + compile_ip = re.compile(ip_regex) + return compile_ip.match(input_ip) + + +def validate_address(address): + """ + Validates address parameter + Args: + address: string address of the full address + + Returns: + ip: string of ip + port: integer of port + + """ + address_parts = address.split(":") + if len(address_parts) != 2: + raise ValueError("address format is wrong, ':' is expected.") + ip = address_parts[0] + try: + port = int(address_parts[1]) + except ValueError as e: + raise ValueError("port format is wrong, must be an integer.") from e + else: + if not 1 <= port <= 65535: + raise ValueError(f"port value {port} is out of range.") + if not validate_ip(ip): + raise ValueError(f"invalid ip {ip}") + return ip, port + + +def set_job_id(job_id): + """set global job id""" + global _JOB_ID + _JOB_ID = job_id + + +def generate_random_id(): + """ + This is a wrapper generating random id for user functions and objects + + Gets a random id string + + Example: yrobj-433ec3c1-ba11-5a16-ad97-ee4e68db67d5 + + Returns: + Unique uuid string with prefix for user functions and objects + """ + uuid_str = str(uuid.uuid4()) + return OBJECT_ID_PREFIX + uuid_str + + +def generate_task_id(): + """ + This is a wrapper generating random task id for user stateless invoke functions + + Gets a random id string + + Example: job-xxx-task-433ec3c1-ba11-5a16-ad97-ee4e68db67d5 + + Returns: + Unique uuid string with prefix for stateless invoke functions + """ + uuid_str = str(uuid.uuid4()) + return _JOB_ID + "-task-" + uuid_str + + +def generate_runtime_id(): + """ + Generating random runtime id + + :return: Unique 8-bit uuid string + """ + return str(uuid.uuid4().hex)[:8] + + +def generate_task_id_with_serial_num(runtime_id, serial_num): + """ + Generating random task id with last 8 characters recording serial number + for user class methods + + Example: job-xxx-task-433ec3c1-ba11-5a16-ad97-ee4e68db67d5-00000001 + + :param runtime_id: runtime id + :param serial_num: serial number of invoke task + :return: Unique uuid string with prefix and serial number + for user class methods + """ + uuid_str = str(uuid.uuid4()) + return f"{_JOB_ID}-task-{uuid_str}-{runtime_id}-{str(serial_num)}" + + +def extract_serial_num(task_id): + """ + Extract serial number from task id + + :param task_id: string of task id + :return: serial number of the task (int) + """ + return int(task_id.split("-")[_SERIAL_NUM_INDEX]) + + +def extract_runtime_id(task_id): + """ + Extract runtime id from task id + + :param task_id: string of task id + :return: runtime id of the task (str) + """ + return task_id.split("-")[_RUNTIME_ID_INDEX] + + +def generate_trace_id(job_id: str) -> str: + """ + TraceID is used to analyze the call chain between functions + + Example: job-fa60ccbb-trace-adc3f0b94c89457e8fedce36c0d0dc20 + + Returns: + Unique uuid string with prefix for stateless invoke functions + """ + trace_id = 
+def generate_trace_id(job_id: str) -> str:
+    """
+    TraceID is used to analyze the call chain between functions
+
+    Example: job-fa60ccbb-trace-adc3f0b94c89457e8fedce36c0d0dc20
+
+    Returns:
+        Unique uuid string with prefix for stateless invoke functions
+    """
+    trace_id = str(uuid.uuid4().hex)
+    return job_id + _TRACE_ID + trace_id
+
+
+def get_module_name(obj):
+    """
+    Get the module name from object. If the module is __main__,
+    get the module name from file.
+
+    Returns:
+        Module name of object.
+    """
+    module_name = obj.__module__
+    n = None
+    if module_name == "__main__":
+        try:
+            file_path = inspect.getfile(obj)
+            n = inspect.getmodulename(file_path)
+        except TypeError:
+            pass
+    if n:
+        module_name = n
+    return module_name
+
+
+def binary_to_hex(value):
+    """
+    bytes to hex
+    """
+    hex_id = binascii.hexlify(value)
+    if sys.version_info >= (3, 0):
+        hex_id = hex_id.decode()
+    return hex_id
+
+
+def hex_to_binary(hex_id):
+    """
+    hex to bytes
+    """
+    return binascii.unhexlify(hex_id)
+
+
+def package_args(args, kwargs):
+    """
+    Package user invoke args.
+    """
+    send_param = {
+        "args": binary_to_hex(cloudpickle.dumps(args)),
+        "kwargs": binary_to_hex(cloudpickle.dumps(kwargs)),
+    }
+
+    param_str = json.dumps(send_param)
+
+    return param_str
+
+
+class ObjectDescriptor:
+    """
+    object descriptor
+    """
+    src_language = LANGUAGE_PYTHON
+    target_language = LANGUAGE_PYTHON
+
+    def __init__(self, module_name="",
+                 class_name="",
+                 function_name="",
+                 target_language=LANGUAGE_PYTHON,
+                 src_language=LANGUAGE_PYTHON):
+        self.module_name = module_name
+        self.class_name = class_name
+        self.function_name = function_name
+        self.target_language = target_language
+        self.src_language = src_language
+
+    @classmethod
+    def get_from_function(cls, func):
+        """
+        get the function descriptor
+        """
+        self = ObjectDescriptor.__new__(cls)
+        self.module_name = get_module_name(func)
+        self.function_name = func.__qualname__
+        self.class_name = ""
+        return self
+
+    @classmethod
+    def get_from_class(cls, obj):
+        """
+        get the class descriptor
+        """
+        self = ObjectDescriptor.__new__(cls)
+        self.module_name = get_module_name(obj)
+        self.class_name = obj.__qualname__
+        self.function_name = ""
+        return self
+
+    @classmethod
+    def parse(cls, data):
+        """parse from json or dict"""
+        tmp = data
+        if isinstance(tmp, bytes):
+            tmp = tmp.decode()
+        if isinstance(tmp, str):
+            tmp = json.loads(tmp)
+        if isinstance(tmp, dict):
+            self = cls(module_name=tmp.get(MODULE_NAME, ""),
+                       class_name=tmp.get(CLASS_NAME, ""),
+                       function_name=tmp.get(FUNC_NAME, ""),
+                       target_language=tmp.get(TARGET_LANGUAGE, ""),
+                       src_language=tmp.get(SRC_LANGUAGE, ""))
+        else:
+            raise TypeError(f"can not parse from type {type(data)}")
+        return self
+
+    def to_dict(self):
+        """
+        export the object descriptor to dict
+        """
+        return {MODULE_NAME: self.module_name,
+                CLASS_NAME: self.class_name,
+                FUNC_NAME: self.function_name,
+                TARGET_LANGUAGE: self.target_language,
+                SRC_LANGUAGE: self.src_language}
+
+
+def is_function_or_method(obj):
+    """
+    judge the obj type
+    """
+    return inspect.isfunction(obj) or inspect.ismethod(obj)
+
+
+class _URNIndex(enum.IntEnum):
+    prefix = 0
+    zone = 1
+    business_id = 2
+    tenant_id = 3
+    name = 5
+    version = 6
+
+
+def get_function_from_urn(urn: str):
+    """
+    get the function name used by posix
+    example: 7e1ad6a6cc5c44fabd5425873f72a86a/0-test-helloclass/$latest
+    """
+    parts = urn.split(_URN_SEPARATOR)
+    name = [parts[_URNIndex.tenant_id], parts[_URNIndex.name], parts[_URNIndex.version]]
+    return "/".join(name)
+
+
+def serialize_code(code):
+    """
+    Serialize the original user invoke function
+    """
+    res = cloudpickle.dumps(code)
+    serial_func_bytes = binascii.hexlify(res)
+    serial_func_str = str(serial_func_bytes, encoding="utf-8")
+    return serial_func_str
+def create_payload(args_list):
+    """args list to payload"""
+    args_list = [MessageToDict(i) for i in args_list]
+    for i in args_list:
+        if 'type' in i:
+            i['type'] = 1
+    return json.dumps(args_list)
+
+
+@dataclass
+class CrossLanguageInfo:
+    """
+    CrossLanguageFunctionInfo
+    """
+    function_name: str
+    function_key: str
+    target_language: str