!57 [sync] PR-56: pkgship upgraded to 2.2.0-1

From: @openeuler-sync-bot
Reviewed-by: @solarhu
Signed-off-by: @solarhu
openeuler-ci-bot 2021-08-16 12:32:33 +00:00 committed by Gitee
commit ab564e5359
15 changed files with 9 additions and 5220 deletions


@@ -1,158 +0,0 @@
--- a/packageship/application/cli/commands/initialize.py
+++ b/packageship/application/cli/commands/initialize.py
@@ -15,7 +15,6 @@ Description: Entry method for custom commands
Class: InitDatabaseCommand
"""
import os
-import random
import time
import pwd
import threading
@@ -23,27 +22,23 @@ from packageship.application.cli.base import BaseCommand
from packageship.application.common.exc import InitializeError, ResourceCompetitionError
-class PrintThread(threading.Thread):
+class InitServiceThread(threading.Thread):
"""
- Description: Print Thread
- Attributes:
-
+ Description: Execute the initialization thread
"""
- def __init__(self, *args, **kwargs):
- super(PrintThread, self).__init__(*args, **kwargs)
- self.__clear = False
+ def __init__(self, func, param, *args, **kwargs):
+ super(InitServiceThread, self).__init__(*args, **kwargs)
+ self._func = func
+ self._args = param
+ self.error = False
def run(self):
- while True:
- print("\r", "initializing{}".format(
- "." * random.randint(1, 4)), end='', flush=True)
- time.sleep(0.5)
- if self.__clear:
- break
-
- def stop(self):
- self.__clear = True
+ try:
+ self._func(*self._args)
+ except (InitializeError, ResourceCompetitionError) as error:
+ self.error = True
+ print('\r', error)
class InitDatabaseCommand(BaseCommand):
@@ -63,6 +58,7 @@ class InitDatabaseCommand(BaseCommand):
'init', help='initialization of the database')
self.params = [
('-filepath', 'str', 'specify the path of conf.yaml', '', 'store')]
+ self._char = ["/", "-", "\\"]
def register(self):
"""
@@ -77,6 +73,13 @@ class InitDatabaseCommand(BaseCommand):
super(InitDatabaseCommand, self).register()
self.parse.set_defaults(func=self.do_command)
+ @property
+ def login_user(self):
+ """
+ Description: The user logged in to the system
+ """
+ return pwd.getpwuid(os.getuid())[0]
+
def do_command(self, params):
"""
Description: Action to execute command
@@ -88,10 +91,7 @@ class InitDatabaseCommand(BaseCommand):
"""
- def get_username():
- return pwd.getpwuid(os.getuid())[0]
-
- if get_username() not in ["root", "pkgshipuser"]:
+ if self.login_user not in ["root", "pkgshipuser"]:
print("The current user does not have initial execution permission")
return
@@ -100,14 +100,19 @@ class InitDatabaseCommand(BaseCommand):
file_path = params.filepath
if file_path:
file_path = os.path.abspath(file_path)
- try:
- print_t = PrintThread()
- print_t.start()
- init.import_depend(path=file_path)
- print_t.stop()
- except (InitializeError, ResourceCompetitionError) as error:
- print('\r', error)
- else:
+
+ _init_service_thread = InitServiceThread(
+ func=init.import_depend, param=(file_path,))
+ _init_service_thread.setDaemon(True)
+ _init_service_thread.start()
+
+ while _init_service_thread.isAlive():
+ for number in range(3):
+ print("\r", "initializing{}".format("." * 10),
+ self._char[number], end='', flush=True)
+ time.sleep(0.5)
+ print("\n")
+ if not _init_service_thread.error:
if init.success:
print('\r', 'Database initialize success')
else:
--- a/packageship/application/initialize/integration.py
+++ b/packageship/application/initialize/integration.py
@@ -653,7 +653,7 @@ class RepoConfig:
if not os.path.exists(path):
raise FileNotFoundError(
- "system initialization configuration file"
+ "system initialization configuration file "
"does not exist: %s" % path)
# load yaml configuration file
with open(path, 'r', encoding='utf-8') as file_context:
@@ -663,7 +663,7 @@ class RepoConfig:
except yaml.YAMLError as yaml_error:
LOGGER.error(yaml_error)
raise ValueError(
- "The format of the yaml configuration"
+ "The format of the yaml configuration "
"file is wrong please check and try again:{0}".format(yaml_error)) \
from yaml_error
@@ -748,9 +748,9 @@ class RepoConfig:
raise ValueError(
"content of the database initialization configuration file cannot be empty .")
if not isinstance(self._repo, list):
- raise ValueError("""format of the initial database configuration file isincorrect.
- When multiple databases need to be initialized,
- it needs to be configured in the form of multiple .""")
+ raise ValueError("format of the initial database configuration file is incorrect."
+ " When multiple databases need to be initialized,"
+ " it needs to be configured in the form of multiple .")
self._validate_database()
for repo in self._repo:
try:
--- a/packageship/pkgship
+++ b/packageship/pkgship
@@ -15,7 +15,7 @@ import signal
from signal import SIG_DFL
try:
def sig_handler(signum, frame):
- print('Exit command mode')
+ print("\n", 'Exit command mode')
sys.exit(0)
signal.signal(signal.SIGINT, sig_handler)
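
The initialize.py hunks above replace the old PrintThread progress printer with a worker thread that runs the import and records failures while the main thread draws a spinner. Below is a minimal, self-contained sketch of that pattern for illustration only; `init_job` and the "conf.yaml" argument are stand-ins for `init.import_depend` and the real file path, and the broad `Exception` catch stands in for `InitializeError`/`ResourceCompetitionError`.

```python
import threading
import time


class InitServiceThread(threading.Thread):
    """Run a long initialization task and remember whether it failed."""

    def __init__(self, func, param, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self._func = func
        self._args = param
        self.error = False

    def run(self):
        try:
            self._func(*self._args)
        except Exception as error:  # the real code catches InitializeError / ResourceCompetitionError
            self.error = True
            print("\r", error)


def init_job(path):
    """Stand-in for init.import_depend(path=...); just sleeps to simulate work."""
    time.sleep(3)


if __name__ == "__main__":
    worker = InitServiceThread(func=init_job, param=("conf.yaml",), daemon=True)
    worker.start()
    spinner = ["/", "-", "\\"]
    # The patch uses the older isAlive()/setDaemon() spellings; is_alive()/daemon= are the modern equivalents.
    while worker.is_alive():
        for char in spinner:
            print("\r", "initializing" + "." * 10, char, end="", flush=True)
            time.sleep(0.5)
    print("\n")
    if not worker.error:
        print("\r", "Database initialize success")
```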


@@ -1,338 +0,0 @@
--- a/README.md
+++ b/README.md
@@ -34,10 +34,22 @@ pkgship is a tool that manages OS software package dependencies and provides dependency and reverse-dependency
## Runtime Environment
-* At least 700 MB of available memory
-* Python 3.8 or later
-* Elasticsearch 7.10.1
-* Redis
+- Hardware configuration:
+
+| Item | Recommended spec |
+| -------- | ----------- |
+| CPU | 8 cores |
+| Memory | 32 GB (minimum 4 GB) |
+| Network bandwidth | 300M |
+| I/O | 375 MB/sec |
+
+- Software configuration:
+
+| Software | Version and notes |
+| ------------- | ------------------------------------------ |
+| Elasticsearch | Version 7.10.1; a single-node deployment works; a cluster can be deployed if resources allow |
+| Redis | 5.0.4 or later recommended; the suggested size is 3/4 of system memory |
+| Python | 3.8 or later |
## Installing the Tool
**1. Install pkgship**
@@ -85,6 +97,10 @@ pkgship is a tool that manages OS software package dependencies and provides dependency and reverse-dependency
/bin/bash auto_install_pkgship_requires.sh redis
```
+**3. User created after installation**
+
+After pkgship is installed, a user named pkgshipuser and a group named pkgshipuser are created automatically; there is no need to create them manually. The service is subsequently started and run as this user.
+
## Configuration Parameters
1. Configure the relevant parameters in the configuration file. The system default configuration file is /etc/pkgship/package.ini; modify it according to your environment.
@@ -158,12 +174,12 @@ database_port=9200
The conf.yaml file is stored under /etc/pkgship/ by default. pkgship reads this configuration to obtain the names of the databases to create and the sqlite files to import; a repo address containing the sqlite files can also be configured. An example conf.yaml is shown below.
```yaml
-dbname: openEuler-20.03 # database name
+dbname: oe20.03 # database name
src_db_file: /etc/pkgship/repo/openEuler-20.09/src # local path of the source packages
bin_db_file: /etc/pkgship/repo/openEuler-20.09/bin # local path of the binary packages
priority: 1 # database priority
-dbname: openEuler-20.09
+dbname: oe20.09
src_db_file: https://repo.openeuler.org/openEuler-20.09/source # repo address of the source packages
bin_db_file: https://repo.openeuler.org/openEuler-20.09/everything/aarch64 # repo address of the binary packages
priority: 2
@@ -174,6 +190,8 @@ priority: 2
> To change the storage path, modify the init_conf_path option in package.ini.
>
> Configuring a sqlite file path directly is not supported.
+>
+> dbname must use lowercase letters or digits; uppercase letters are not supported.
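
As a hedged illustration of how a conf.yaml like the one above can be read and checked, here is a minimal loading sketch modeled on the RepoConfig validation shown earlier in this commit. The function name `load_repo_config` and its default path argument are illustrative assumptions, not pkgship's actual API, and real conf.yaml entries are expected to form a YAML list of database entries.

```python
import yaml  # PyYAML


def load_repo_config(path="/etc/pkgship/conf.yaml"):
    """Read conf.yaml and apply the same basic checks as the RepoConfig code above."""
    with open(path, "r", encoding="utf-8") as file_context:
        try:
            repo = yaml.safe_load(file_context)
        except yaml.YAMLError as yaml_error:
            raise ValueError(
                "The format of the yaml configuration "
                "file is wrong please check and try again:{0}".format(yaml_error)) from yaml_error
    if not repo:
        raise ValueError(
            "content of the database initialization configuration file cannot be empty.")
    if not isinstance(repo, list):
        raise ValueError(
            "format of the initial database configuration file is incorrect;"
            " multiple databases must be configured as a list of entries.")
    return repo


# Example: each list entry carries dbname/src_db_file/bin_db_file/priority keys.
# repos = load_repo_config()
```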
## Starting and Stopping the Service
pkgship can be started and stopped in two ways: via systemctl and via pkgshipd. The systemctl method can restart the service automatically after an abnormal stop. The commands for the two methods are:
@@ -194,50 +212,50 @@ pkgshipd stop (stop the service)
> Only one method may be used within a single start/stop cycle; the two operations must not be mixed.
>
-> The pkgshipd startup method may only be run as the pkgshipduser user.
+> The pkgshipd startup method may only be run as the pkgshipuser user.
## Using the Tool
1. Database initialization.
- > Scenario: after the service starts, in order to query package information and package dependencies in the corresponding databases (for example openEuler-20.09 or openEuler-21.03), the sqlite files generated by createrepo for those databases (split into a source library and a binary library) must be imported into the service, converted into package-information JSON bodies, and inserted into the corresponding Elasticsearch databases. The database name is dbname-source/binary, derived from the dbname configured in config.yaml; [-filepath] is an optional parameter.
+ > Scenario: after the service starts, in order to query package information and package dependencies in the corresponding databases (for example oe20.03 or oe20.09), the sqlite files generated by createrepo for those databases (split into a source library and a binary library) must be imported into the service, converted into package-information JSON bodies, and inserted into the corresponding Elasticsearch databases. The database name is dbname-source/binary, derived from the dbname configured in config.yaml.
```bash
pkgship init [-filepath path]
```
> Parameter description:
- > -filepath: path of the initialization configuration file config.yaml; a relative or absolute path may be used; without this parameter, the default configuration is used for initialization.
+ > -filepath: path of the initialization configuration file config.yaml; a relative or absolute path may be used; without this parameter, the default configuration is used for initialization; optional parameter.
2. Single package query.
Query the details of a source or binary package (packagename) in a specified database (database).
- > Scenario: query the details of a source or binary package in a specified database; packagename and database are required parameters, -s is optional.
+ > Scenario: query the details of a source or binary package in a specified database.
```bash
pkgship pkginfo $packageName $database [-s]
```
> Parameter description:
- > packagename: name of the package to query.
- > database: name of the database.
+ > packagename: name of the package to query; required parameter.
+ > database: name of the database; required parameter.
>
- > -s: if `-s` is specified, `src` source package information is queried; if not specified, `bin` binary package information is queried by default
+ > -s: if `-s` is specified, `src` source package information is queried; if not specified, `bin` binary package information is queried by default; optional parameter.
3. All packages query.
Query the information of all packages contained in a database.
- > Scenario: query the information of all packages in a specified database; tablename is a required parameter, -s is optional.
+ > Scenario: query the information of all packages in a specified database.
```bash
pkgship list $database [-s]
```
> Parameter description:
- > database: name of the database.
- > -s: if `-s` is specified, `src` source package information is queried; if not specified, `bin` binary package information is queried by default
+ > database: name of the database; required parameter.
+ > -s: if `-s` is specified, `src` source package information is queried; if not specified, `bin` binary package information is queried by default; optional parameter.
4. Install dependency query.
@@ -285,7 +303,7 @@ pkgshipd stop (stop the service)
> Parameter description:
>
- > pkgName: name of the binary package whose install dependencies are to be queried; multiple names may be passed; required parameter.
+ > pkgName: name of the package whose install dependencies are to be queried; multiple names may be passed; required parameter.
>
> -dbs: priority of the databases to query; if omitted, the search follows the system default priority; optional parameter.
@@ -296,9 +314,9 @@ pkgshipd stop (stop the service)
>
> -w: when -w is specified, if a binary package is pulled in, the query result shows the source package of that binary package as well as all binary packages generated from that source package; without -w, only the corresponding source package is shown when a binary package is pulled in; optional parameter.
7. Reverse dependency query.
- Query which packages depend on a source package (sourceName) in a given database (dbName).
+ Query which packages depend on a package (pkgName) in a given database (dbName).
- > Scenario: use this command to find out which packages would be affected if source package A were upgraded or deleted. The command shows which source packages (for example B) build-depend on, and which binary packages (for example C1) install-depend on, the binary packages generated from source package A; it then shows which source packages (for example D) build-depend on, and which binary packages (for example E1) install-depend on, the binary packages generated from B as well as C1, and so on, traversing the reverse dependencies of these binary packages.
+ > Scenario: use this command to find out which packages would be affected if package A were upgraded or deleted. The command shows which source packages (for example B) build-depend on, and which binary packages (for example C1) install-depend on, all binary packages generated from source package A (if A is a source package; if the input is a binary package, this means the input binary package itself); it then shows which source packages (for example D) build-depend on, and which binary packages (for example E1) install-depend on, the binary packages generated from B as well as C1, and so on, traversing the reverse dependencies of these binary packages.
```bash
pkgship bedepend dbName [$pkgName1 $pkgName2 $pkgName3] [-w] [-b] [-install/build]
@@ -306,9 +324,11 @@ pkgshipd stop (stop the service)
> Parameter description:
>
- > dbName: repository in which to query the dependency relationships; multiple values are not supported; required parameter.
+ > dbName: repository in which to query the dependency relationships; multiple values are not supported; required parameter.
+ >
+ > pkgName: name of the package to query; multiple names are supported; required parameter.
>
- > -w: without -w, the query result by default does not include the sub-packages of the corresponding binary package; with [-w], in addition to the reverse dependencies of binary package C1, the reverse dependencies of the other binary packages (for example C2, C3) generated from C1's source package C are also queried; optional parameter.
+ > -w: without -w, the query result by default does not include the sub-packages of the corresponding source package; with [-w], in addition to the reverse dependencies of binary package C1, the reverse dependencies of the other binary packages (for example C2, C3) generated from C1's source package C are also queried; optional parameter.
>
> -b: specify `-b` to indicate that the queried package is a binary package; source packages are queried by default; optional parameter.
>
@@ -318,7 +338,7 @@ pkgshipd stop (stop the service)
> Scenario: check which databases have been initialized in Elasticsearch; the initialized databases are returned as a list in priority order.
- `pkgship db`
+ `pkgship dbs`
9. Get the version number.
@@ -326,3 +346,64 @@ pkgshipd stop (stop the service)
`pkgship -v`
+## Viewing and Rotating Logs
+
+ **Viewing logs**
+
+ The pkgship service produces two kinds of logs at run time: business logs and operation logs.
+
+ 1. Business log:
+
+ Path: /var/log/pkgship/log_info.log; a custom path can be set via the log_path field in conf.yaml
+
+ Purpose: records the internal run-time log of the code, to help locate problems.
+
+ Permissions: the directory permission is 755 and the log file permission is 644; ordinary users can read it.
+
+2. Operation log:
+
+Path: /var/log/pkgship-operation/uwsgi.log; a custom path can be set via the daemonize field in conf.yaml
+
+Purpose: records user operations, including IP address, access time, accessed URL and access result, for later review and for recording attacker information.
+
+Permissions: the directory permission is 700 and the log file permission is 644; only root and pkgshipuser can read it.
+
+**Log rotation**
+
+1. Business log rotation
+
+- Rotation mechanism
+
+  Rotation uses the built-in mechanism of Python's logging module and backs up logs by size (see the sketch after this section).
+
+> Configuration: the size of each log file and the number of backups are configured in package.ini
+>
+> ```ini
+> ; Maximum capacity of each file, the unit is byte, default is 30M
+> max_bytes=31457280
+>
+> ; Number of old logs to keep;default is 30
+> backup_count=30
+> ```
+
+- Rotation process
+
+  When a write causes the log file to exceed the configured size, the log is automatically compressed and rotated; the compressed file is named log_info.log.x.gz (x is a number; the smaller the number, the newer the backup).
+
+  When the number of backups reaches the configured limit, the oldest backup is deleted and a new compressed log file is created.
+
+
+
+2. Operation log rotation
+
+- Rotation mechanism
+
+  Rotation is performed by a script, based on time: once per day, keeping 30 days of logs in total; custom configuration is not supported.
+
+  > Script location: /etc/pkgship/uwsgi_logrotate.sh
+
+- Rotation process
+
+  When pkgship starts, the rotation script runs in the background and performs a compression and rotation once per day from startup, keeping 30 compressed files; the compressed files are named uwsgi.log-20201010x.zip (x is the hour at compression time).
+
+  When pkgship stops, the rotation script stops and no further rotation occurs; when pkgship is started again, the rotation script runs again.
\ No newline at end of file
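
The business-log rotation described above relies on the size-based rollover of Python's logging module, with max_bytes/backup_count in package.ini mapping naturally onto maxBytes/backupCount. Below is a hedged sketch of how gzip-compressed, size-based rotation can be wired up with RotatingFileHandler; the demo path, logger name, and the rotator/namer hooks are illustrative assumptions, not pkgship's actual implementation.

```python
import gzip
import logging
import os
import shutil
from logging.handlers import RotatingFileHandler

MAX_BYTES = 31457280   # mirrors max_bytes in package.ini (30 MB)
BACKUP_COUNT = 30      # mirrors backup_count in package.ini


def gzip_rotator(source, dest):
    """Compress the rolled-over file so backups end up as log_info.log.N.gz."""
    with open(source, "rb") as src, gzip.open(dest, "wb") as dst:
        shutil.copyfileobj(src, dst)
    os.remove(source)


handler = RotatingFileHandler("/tmp/log_info.log", maxBytes=MAX_BYTES,
                              backupCount=BACKUP_COUNT)
handler.namer = lambda name: name + ".gz"   # rotated backups gain a .gz suffix
handler.rotator = gzip_rotator              # and are gzip-compressed on rollover

logger = logging.getLogger("pkgship-demo")
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info("business log line")
```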
--- a/doc/design/pkgship-dev-2.0.md
+++ b/doc/design/pkgship-dev-2.0.md
@@ -1364,7 +1364,7 @@ query_ip_addr=127.0.0.1
; The address of the remote service, the command line can directly
; call the remote service to complete the data request
-remote_host=https://api.openeuler.org/pkgmanage
+remote_host=https://pkgmanage.openeuler.org
; A temporary directory for files downloaded from the network that are cleaned periodically
; The recommended free space in this dir is 1G
@@ -1379,6 +1379,12 @@ log_path=/var/log/pkgship/
; INFO DEBUG WARNING ERROR CRITICAL
log_level=INFO
+; Maximum capacity of each file, the unit is byte, default is 30M
+max_bytes=31457280
+
+; Number of old logs to keep;default is 30
+backup_count=30
+
[UWSGI]
; Operation log storage path
daemonize=/var/log/pkgship-operation/uwsgi.log
@@ -1402,9 +1408,6 @@ redis_port=6379
redis_max_connections=10
[DATABASE]
-;The database engines supported in the system is sqlite database by default
-database_engine_type=elastic
-
;Default ip address of database
database_host=127.0.0.1
@@ -3528,6 +3531,68 @@ binary_data = {
| DatabaseConfigException | Custom | Database configuration exception, e.g. the database address is empty or the database type is not supported |
| ElasticSearchQueryException | Custom | Elasticsearch query exception, e.g. connection failure, connection timeout, or the index does not exist |
+### 3.9 Log Viewing and Rotation
+
+#### 3.9.1 Viewing logs
+
+ The pkgship service produces two kinds of logs at run time: business logs and operation logs.
+
+ 1. Business log:
+
+ Path: /var/log/pkgship/log_info.log; configurable in conf.yaml
+
+ Purpose: records the internal run-time log of the code, to help locate problems.
+
+ Permissions: the directory permission is 755 and the log file permission is 644; ordinary users can read it.
+
+2. Operation log:
+
+Path: /var/log/pkgship-operation/uwsgi.log; configurable in conf.yaml
+
+Purpose: records user operations, including IP address, access time, accessed URL and access result, for later review and for recording attacker information.
+
+Permissions: the directory permission is 700 and the log file permission is 644; only root and pkgshipuser can read it.
+
+#### 3.9.2 Log rotation
+
+1. Business log rotation
+
+- Rotation mechanism
+
+  Rotation uses the built-in mechanism of Python's logging module and backs up logs by size.
+
+> Configuration: the size of each log file and the number of backups are configured in package.ini
+>
+> ```ini
+> ; Maximum capacity of each file, the unit is byte, default is 30M
+> max_bytes=31457280
+>
+> ; Number of old logs to keep;default is 30
+> backup_count=30
+> ```
+
+- Rotation process
+
+  When a write causes the log file to exceed the configured size, the log is automatically compressed and rotated; the compressed file is named log_info.log.x.gz (x is a number; the smaller the number, the newer the backup).
+
+  When the number of backups reaches the configured limit, the oldest backup is deleted and a new compressed log file is created.
+
+
+
+2. Operation log rotation
+
+- Rotation mechanism
+
+  Rotation is performed by a script, based on time: once per day, keeping 30 days of logs in total; custom configuration is not supported.
+
+  > Script location: /etc/pkgship/uwsgi_logrotate.sh
+
+- Rotation process
+
+  When pkgship starts, the rotation script runs in the background and performs a compression and rotation once per day from startup, keeping 30 compressed files; the compressed files are named uwsgi.log-20201010x.zip (x is the hour at compression time).
+
+  When pkgship stops, the rotation script stops and no further rotation occurs; when pkgship is started again, the rotation script runs again.
+
## 4. Change Log
|Version|Release notes|


@@ -1,24 +0,0 @@
--- a/packageship/application/apps/package/view.py
+++ b/packageship/application/apps/package/view.py
@@ -235,7 +235,9 @@ class SourcePackageInfo(Resource):
"""
# Get verification parameters
rspmsg = RspMsg()
- data = request.args
+ data = dict()
+ data["database_name"] = request.args.get("database_name")
+ data["pkg_name"] = pkg_name
result, error = validate(SingleSchema, data, load=True)
if error:
response = rspmsg.body('param_error')
@@ -289,7 +291,9 @@ class BinaryPackageInfo(Resource):
"""
# Get verification parameters
rspmsg = RspMsg()
- data = request.args
+ data = dict()
+ data["database_name"] = request.args.get("database_name")
+ data["pkg_name"] = pkg_name
result, error = validate(SingleSchema, data, load=True)
if error:
response = rspmsg.body('param_error')


@@ -1,66 +0,0 @@
--- a/packageship/pkgshipd
+++ b/packageship/pkgshipd
@@ -3,7 +3,7 @@ SYS_PATH=/etc/pkgship
OUT_PATH=/opt/pkgship/uwsgi
OPERATION=$1
PKGSHIP_CONSTANT="pkgship"
-MEM_THRESHOLD='700'
+MEM_THRESHOLD='2048'
MEM_FREE=$(free -m | grep "Mem" | awk '{print $7}')
function check_user() {
@@ -262,22 +262,34 @@ function uwsgi_log_logrotate() {
echo "[INFO] Start the logrotate task success"
}
+function is_started() {
+ pkgship_version=$(pkgship -v)
+ if [[ -n ${pkgship_version} ]] && [[ ${pkgship_version} =~ "Version" ]]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
function start_service() {
- uwsgi_pid=$(ps -ef | grep -v grep | grep "uwsgi" | grep "${PKGSHIP_CONSTANT}.ini" | awk '{print $2}')
- if [ -n "${uwsgi_pid}" ]; then
+ if is_started; then
echo "[ERROR] ${PKGSHIP_CONSTANT} service is running, please stop it first."
exit 1
else
uwsgi -d --ini $OUT_PATH/${PKGSHIP_CONSTANT}.ini
echo "[INFO] START uwsgi service: ${PKGSHIP_CONSTANT}.ini"
- uwsgi_pid=$(ps -ef | grep -v grep | grep "uwsgi" | grep "${PKGSHIP_CONSTANT}.ini" | awk '{print $2}')
- if [ -z "${uwsgi_pid}" ]; then
- echo "[ERROR] Service failed to start, please check log $(get_config "daemonize")"
- exit 1
- fi
- uwsgi_log_logrotate
- echo "[INFO] Start pkgship service success!!!"
- exit 0
+
+ for i in {1..5}; do
+ if is_started; then
+ uwsgi_log_logrotate
+ echo "[INFO] Start pkgship service success!!!"
+ exit 0
+ fi
+ sleep 2s
+ done
+
+ echo "[ERROR] Service failed to start, please check log $(get_config "daemonize")"
+ exit 1
fi
}
@@ -288,8 +300,7 @@ function stop_service() {
exit 1
fi
- uwsgi_pid=$(ps -ef | grep -v grep | grep "uwsgi" | grep "${PKGSHIP_CONSTANT}.ini" | awk '{print $2}')
- if [ -n "${uwsgi_pid}" ]; then
+ if is_started; then
uwsgi --stop $OUT_PATH/${PKGSHIP_CONSTANT}.pid
echo "[INFO] STOP uwsgi service: $OUT_PATH/${PKGSHIP_CONSTANT}.ini"
echo "[INFO] The run log is saved into: $(get_config "daemonize")"

File diff suppressed because one or more lines are too long


@@ -1,38 +0,0 @@
--- a/packageship/application/cli/commands/initialize.py
+++ b/packageship/application/cli/commands/initialize.py
@@ -15,9 +15,12 @@ Description: Entry method for custom commands
Class: InitDatabaseCommand
"""
import os
+import threading
import time
+
import pwd
-import threading
+from requests import RequestException
+
from packageship.application.cli.base import BaseCommand
from packageship.application.common.exc import InitializeError, ResourceCompetitionError
@@ -59,6 +62,7 @@ class InitDatabaseCommand(BaseCommand):
self.params = [
('-filepath', 'str', 'specify the path of conf.yaml', '', 'store')]
self._char = ["/", "-", "\\"]
+ self._success_code = 200
def register(self):
"""
@@ -94,6 +98,13 @@ class InitDatabaseCommand(BaseCommand):
if self.login_user not in ["root", "pkgshipuser"]:
print("The current user does not have initial execution permission")
return
+ try:
+ _query_version_response = self.request.get("{}/version".format(self.read_host))
+ except RequestException:
+ _query_version_response = None
+ if not _query_version_response or _query_version_response.status_code != self._success_code:
+ print("The pkgship service is not started,please start the service first")
+ return
from packageship.application.initialize.integration import InitializeService
init = InitializeService()


@@ -1,45 +0,0 @@
--- a/README.md
+++ b/README.md
@@ -11,6 +11,7 @@
- [Configuration Parameters](#configuration-parameters)
- [Starting and Stopping the Service](#starting-and-stopping-the-service)
- [Using the Tool](#using-the-tool)
+  - [Viewing and Rotating Logs](#viewing-and-rotating-logs)
<!-- /TOC -->
@@ -91,7 +92,9 @@ pkgship is a tool that manages OS software package dependencies and provides dependency and reverse-dependency
/bin/bash auto_install_pkgship_requires.sh elasticsearch
```
- Or
+> Because Elasticsearch installed from the rpm package currently runs in password-free mode by default, and pkgship requires a password-free Elasticsearch, it is recommended to install Elasticsearch and pkgship on the same server and rely on network isolation to improve security. Support for setting an Elasticsearch username and password will be added later.
+
+Or
```
/bin/bash auto_install_pkgship_requires.sh redis
@@ -114,10 +117,12 @@ vim /etc/pkgship/package.ini
; Location of the yaml file imported during database initialization; this yaml records the locations of the sqlite files to import.
init_conf_path=/etc/pkgship/conf.yaml
-; Database port.
+; In a client-server deployment, the server must make sure query_ip_addr is the local IP or 0.0.0.0,
+; and the client can reach the server via query_ip_addr plus query_port, or via a mapped remote_host.
+; Service query port.
query_port=8090
-; Database IP address.
+; Service query IP.
query_ip_addr=127.0.0.1
; Address of the remote service; the command line can call the remote service directly to complete data requests.
@@ -162,7 +167,7 @@ redis_port=6379
redis_max_connections=10
[DATABASE]
-; Database address, defaults to the local address
+; Database address; the local address is recommended
database_host=127.0.0.1
; Database port, defaults to 9200


@@ -1,84 +0,0 @@
--- a/test/test_module/test_build/test_get_build_depend.py
+++ b/test/test_module/test_build/test_get_build_depend.py
@@ -16,7 +16,7 @@ test get binary package info
import unittest
import os
from unittest import mock
-from mock import patch
+from unittest.mock import patch
from redis import Redis
from packageship.application.core.depend.build_depend import BuildDepend
--- a/test/test_module/test_install/test_get_install_depend.py
+++ b/test/test_module/test_install/test_get_install_depend.py
@@ -16,8 +16,7 @@ test get install depend info
import unittest
import os
from unittest import mock
-
-from mock import patch
+from unittest.mock import patch
from redis import Redis
from packageship.application.core.depend.install_depend import InstallDepend
--- a/test/test_module/test_packages/test_all_bin_package/test_get_all_bin_package.py
+++ b/test/test_module/test_packages/test_all_bin_package/test_get_all_bin_package.py
@@ -16,7 +16,7 @@ test get all bin package
import os
import unittest
from unittest import mock
-from mock import patch
+from unittest.mock import patch
from packageship.application.common.exc import PackageInfoGettingError, DatabaseConfigException, \
ElasticSearchQueryException
--- a/test/test_module/test_packages/test_all_src_package/test_get_all_src_package.py
+++ b/test/test_module/test_packages/test_all_src_package/test_get_all_src_package.py
@@ -16,7 +16,7 @@ test get all src package
import unittest
import os
from unittest import mock
-from mock import patch
+from unittest.mock import patch
from packageship.application.common.exc import PackageInfoGettingError, DatabaseConfigException, \
ElasticSearchQueryException
--- a/test/test_module/test_packages/test_single_package_info/test_get_bin_package_info.py
+++ b/test/test_module/test_packages/test_single_package_info/test_get_bin_package_info.py
@@ -16,7 +16,7 @@ test get binary package info
import unittest
import os
from unittest import mock
-from mock import patch
+from unittest.mock import patch
from packageship.application.common.exc import DatabaseConfigException, ElasticSearchQueryException
from packageship.application.core.pkginfo.pkg import BinaryPackage
--- a/test/test_module/test_packages/test_single_package_info/test_get_src_package_info.py
+++ b/test/test_module/test_packages/test_single_package_info/test_get_src_package_info.py
@@ -16,7 +16,7 @@ test get src package info
import unittest
import os
from unittest import mock
-from mock import patch
+from unittest.mock import patch
from packageship.application.common.exc import DatabaseConfigException, ElasticSearchQueryException
from packageship.application.core.pkginfo.pkg import SourcePackage
--- a/test/test_module/test_selfbuild/test_get_self_depend_info.py
+++ b/test/test_module/test_selfbuild/test_get_self_depend_info.py
@@ -16,7 +16,7 @@ test get self_build depend info
import unittest
import os
from unittest import mock
-from mock import patch
+from unittest.mock import patch
from redis import Redis
from packageship.application.core.depend.self_depend import SelfDepend


@@ -1,168 +0,0 @@
--- a/test/coverage_count.py
+++ b/test/coverage_count.py
@@ -56,22 +56,24 @@ if __name__ == "__main__":
os.path.join(TEST_CASE_PATH, "test_module/test_pkgship_version/"),
os.path.join(TEST_CASE_PATH, "test_module/test_selfbuild/"),
os.path.join(TEST_CASE_PATH, "test_module/test_install/"),
- os.path.join(TEST_CASE_PATH, "test_module/test_build/"),]
- # os.path.join(TEST_CASE_PATH, "test_module/test_bedepend/test_database_query/")]
+ os.path.join(TEST_CASE_PATH, "test_module/test_build/"),
+ os.path.join(TEST_CASE_PATH, "test_module/test_graph/")
+ ]
+
errors = []
failures = []
for file in test_case_files:
runner_result = runner.run(specify_case(file))
errors.extend(runner_result.errors)
failures.extend(runner_result.failures)
-
+
if any([errors, failures]):
sys.exit(1)
-
+
cov.stop()
try:
cov.report(show_missing=True)
# cov.html_report()
except CoverageException:
print("No data to report")
- sys.exit(1)
\ No newline at end of file
+ sys.exit(1)
--- /dev/null
+++ b/test/test_module/test_database/data/__init__.py
@@ -0,0 +1,12 @@
+#!/usr/bin/python3
+# ******************************************************************************
+# Copyright (c) Huawei Technologies Co., Ltd. 2020-2020. All rights reserved.
+# licensed under the Mulan PSL v2.
+# You can use this software according to the terms and conditions of the Mulan PSL v2.
+# You may obtain a copy of Mulan PSL v2 at:
+# http://license.coscl.org.cn/MulanPSL2
+# THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND, EITHER EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT, MERCHANTABILITY OR FIT FOR A PARTICULAR
+# PURPOSE.
+# See the Mulan PSL v2 for more details.
+# ******************************************************************************/
--- /dev/null
+++ b/test/test_module/test_database/data/mapping.json
@@ -0,0 +1,5 @@
+{
+"name": "test_name",
+"version": "1.0.1",
+"release": 2
+}
\ No newline at end of file
--- a/test/test_module/test_database/test_es_query.py
+++ b/test/test_module/test_database/test_es_query.py
@@ -10,13 +10,19 @@
# PURPOSE.
# See the Mulan PSL v2 for more details.
# ******************************************************************************/
+import os
from unittest import TestCase, mock
+from unittest.mock import MagicMock
from elasticsearch import Elasticsearch, helpers
+from elasticsearch.client.indices import IndicesClient
+from elasticsearch.exceptions import ElasticsearchException, TransportError
from packageship.application.common.exc import ElasticSearchQueryException, DatabaseConfigException
from packageship.application.database.engines.elastic import ElasticSearch
+MOCK_DATA_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "data/mapping.json")
+
class TestEsQuery(TestCase):
@@ -112,6 +118,85 @@ class TestEsQuery(TestCase):
es2 = ElasticSearch(host="127.0.0.1")
self.assertIs(es1, es2)
+ def test_create_index_success(self):
+ """
+ Test create indices success
+ Returns:
+ """
+ IndicesClient.exists = MagicMock(side_effect=[False, False])
+ IndicesClient.create = MagicMock(side_effect=[True, True])
+
+ es_instance = self._es_init()
+ indices = [dict(file=MOCK_DATA_FILE, name="test1"), dict(file=MOCK_DATA_FILE, name="test2")]
+ result = es_instance.create_index(indices)
+ self.assertEqual(result, [])
+
+ def test_create_index_fail(self):
+ """
+ Test create indices failed
+ Returns:
+ """
+ IndicesClient.exists = MagicMock(side_effect=[False])
+ IndicesClient.create = MagicMock(side_effect=[ElasticsearchException])
+
+ es_instance = self._es_init()
+ indices = [dict(file=MOCK_DATA_FILE, name="test1")]
+ result = es_instance.create_index(indices)
+ self.assertEqual(result, ["test1"])
+
+ def test_delete_index_fail(self):
+ """
+ Test delete indices success
+ Returns:
+ """
+ IndicesClient.exists = MagicMock(side_effect=[True])
+ IndicesClient.delete = MagicMock(side_effect=[TransportError])
+
+ es_instance = self._es_init()
+ indices = [dict(file=MOCK_DATA_FILE, name="test1")]
+ result = es_instance.create_index(indices)
+ self.assertEqual(result, ["test1"])
+
+ def test_load_mapping_fail(self):
+ """
+ Test load mapping success
+ Returns:
+ """
+ es_instance = self._es_init()
+ indices = dict(file=MOCK_DATA_FILE + "1", name="test1")
+ result = es_instance.create_index(indices)
+ self.assertEqual(result, ["test1"])
+
+ def test_insert_fail(self):
+ """
+ Test insert indices success
+ Returns:
+ """
+ es_instance = self._es_init()
+ with self.assertRaises(ElasticSearchQueryException):
+ es_instance.insert(index="test", body={})
+
+ def test_delete_index_none(self):
+ """
+ Test delete indices is none
+ Returns:
+ """
+ es_instance = self._es_init()
+ result = es_instance.delete_index(index="")
+ self.assertIsNone(result)
+
+ def test_delete_many_indices_fail(self):
+ """
+ Test delete indices failed
+ Returns:
+ """
+ IndicesClient.delete = MagicMock(side_effect=[TransportError])
+
+ es_instance = self._es_init()
+ indices = ['test1', 'test2']
+ result = es_instance.delete_index(indices)
+ self.assertEqual(result, "test1,test2")
+
@staticmethod
def _es_init():
return ElasticSearch(host="127.0.0.1", port="9200")


@@ -1,25 +0,0 @@
--- a/packageship/pkgshipd
+++ b/packageship/pkgshipd
@@ -247,6 +247,22 @@ daemonize=$daemonize" >$OUT_PATH/${PKGSHIP_CONSTANT}.ini
chown pkgshipuser: $OUT_PATH/${PKGSHIP_CONSTANT}.ini
chmod 750 $OUT_PATH/${PKGSHIP_CONSTANT}.ini
echo "[INFO] create uwsgi file ok"
+
+ # create log_info file
+ echo "[INFO] start to create log_info file"
+ log_file_path=$(get_config "log_path")
+ log_file=${log_file_path}/"log_info.log"
+ if [ ! -e "${log_file}" ]; then
+ touch "${log_file}"
+ chmod 644 "${log_file}"
+ else
+ log_info=$(ls -al "${log_file}")
+ if [[ ! "${log_info}" =~ "pkgshipuser" ]]; then
+ echo "[ERROR] The owner of the ${log_file} is incorrect,please make sure the owner is pkgshipuser"
+ exit 1
+ fi
+ fi
+ echo "[INFO] create log_info file success"
}
function uwsgi_log_logrotate() {


@@ -1,89 +0,0 @@
--- a/packageship/application/query/depend.py
+++ b/packageship/application/query/depend.py
@@ -18,6 +18,7 @@ from gevent import monkey
monkey.patch_all()
+from collections import Counter
from packageship.application.common.constant import PROVIDES_NAME, FILES_NAME
from packageship.application.query import Query
from packageship.application.query.query_body import QueryBody
@@ -133,28 +134,28 @@ class RequireBase(Query):
for rpm_info in query_rpm_infos:
if rpm_info.get('requires'):
new_requires_list = []
- component_bin_count_dict = dict()
+ component_bin_list = []
multi_binary_component_list = []
for component_name in rpm_info.get('requires'):
- self._convert_multi_binary_components(all_component_info_dict, component_bin_count_dict,
+ self._convert_multi_binary_components(all_component_info_dict, component_bin_list,
component_name,
multi_binary_component_list, new_requires_list)
- self._filter_multi_binary_components(component_bin_count_dict, multi_binary_component_list,
+ self._filter_multi_binary_components(component_bin_list, multi_binary_component_list,
new_requires_list)
rpm_info['requires'] = new_requires_list
else:
rpm_info['requires'] = []
@staticmethod
- def _convert_multi_binary_components(all_component_info_dict, component_bin_count_dict, component_name,
+ def _convert_multi_binary_components(all_component_info_dict, component_bin_list, component_name,
multi_binary_component_list, new_requires_list):
"""
Add the uniquely determined binary package info to the result list
Construct a dictionary of occurrences of binary packages and list of repeated binary packages
Args:
all_component_info_dict: all components dict
- component_bin_count_dict: The number of times the binary package of the component is provided
+ component_bin_list: The list of the binary package of the component is provided
component_name: component name
multi_binary_component_list: list of repeated binary packages
new_requires_list: result list
@@ -171,37 +172,33 @@ class RequireBase(Query):
# If the component is provided by multiple binary packages, record first and then filter
multi_binary_component_list.append(component_info)
# Construct a dictionary of occurrences of binary packages
- for component in component_info:
- try:
- com_bin_name_count = component_bin_count_dict[component.get('com_bin_name')]
- component_bin_count_dict[component.get('com_bin_name')] = com_bin_name_count + 1
- except KeyError:
- component_bin_count_dict[component.get('com_bin_name')] = 1
+ component_bin_list.extend([component.get('com_bin_name') for component in component_info])
else:
new_requires_list.append(dict(component=component_name))
return
@staticmethod
- def _filter_multi_binary_components(component_bin_count_dict, multi_binary_component_list, new_requires_list):
+ def _filter_multi_binary_components(component_bin_list, multi_binary_component_list, new_requires_list):
"""
Filter results based on component name and number of occurrences of binary packages
Args:
- component_bin_count_dict: The number of times the binary package of the component is provided
+ component_bin_list: The list of the binary package of the component is provided
multi_binary_component_list: list of repeated binary packages
new_requires_list: result list
Returns: None
"""
+ component_bin_counter = Counter(component_bin_list)
for component_list in multi_binary_component_list:
max_count = 0
final_component_info = dict()
# Sort by name first, then filter the results according to the number of occurrences of binary packages
- component_list.sort(key=lambda x: x.get('component'))
+ component_list.sort(key=lambda x: x.get('com_bin_name'))
for component in component_list:
- if component_bin_count_dict.get(component.get('com_bin_name')) > max_count:
+ if component_bin_counter.get(component.get('com_bin_name')) > max_count:
final_component_info = component
- max_count = component_bin_count_dict.get(component.get('com_bin_name'))
+ max_count = component_bin_counter.get(component.get('com_bin_name'))
new_requires_list.append(final_component_info)
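
The hunks above replace a hand-rolled occurrence dictionary with collections.Counter when choosing, among several binaries that provide the same required component, the provider that supplies the most components overall (ties broken by sorting on the provider name). A small self-contained sketch of that selection logic follows; the component and binary names are made-up sample data, not real pkgship output.

```python
from collections import Counter

# Hypothetical data: one entry per candidate binary providing a required component
# (mirrors the com_bin_name fields in the patch above).
component_bin_list = ["glibc", "glibc", "busybox", "glibc", "busybox"]
multi_binary_component_list = [
    [{"component": "libc.so.6", "com_bin_name": "busybox"},
     {"component": "libc.so.6", "com_bin_name": "glibc"}],
]

new_requires_list = []
component_bin_counter = Counter(component_bin_list)
for component_list in multi_binary_component_list:
    max_count = 0
    final_component_info = {}
    # Sort by provider name first so ties resolve deterministically,
    # then keep the provider seen most often across all requirements.
    component_list.sort(key=lambda x: x.get("com_bin_name"))
    for component in component_list:
        if component_bin_counter.get(component.get("com_bin_name"), 0) > max_count:
            final_component_info = component
            max_count = component_bin_counter.get(component.get("com_bin_name"), 0)
    new_requires_list.append(final_component_info)

print(new_requires_list)  # keeps the glibc entry, since glibc provides more components overall
```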


@@ -1,29 +0,0 @@
diff --git a/packageship/application/core/depend/be_depend.py b/packageship/application/core/depend/be_depend.py
index 04f5b02..06dbd44 100644
--- a/packageship/application/core/depend/be_depend.py
+++ b/packageship/application/core/depend/be_depend.py
@@ -45,6 +45,7 @@ class BeDepend(BaseDepend):
searched_pkg.add(pkg_info.get("src_name"))
binary_pkgs.update(set(pkg_info.get("subpacks", [])))
+ self._search_set.update(searched_pkg)
if is_init:
not_found_pkg = str(set(pkg_name_lst) - searched_pkg)
self.log_msg = f"source packages {not_found_pkg} not found in {self.database}"
@@ -158,6 +159,8 @@ class BeDepend(BaseDepend):
"""
next_search_pkgs = set()
src_key = req_src_info["req_src_name"]
+ if src_key not in self._search_set:
+ next_search_pkgs.update(self.__get_subpacks([src_key]))
if src_key not in self.source_dict:
self.source_dict[src_key] = {
"name": src_key,
@@ -165,7 +168,6 @@ class BeDepend(BaseDepend):
"database": self.database,
"build": [dep_name],
}
- next_search_pkgs.update(self.__get_subpacks([src_key]))
else:
if dep_name not in self.source_dict[src_key]["build"]:
self.source_dict[src_key]["build"].append(dep_name)

Binary file not shown.

pkgship-2.2.0.tar.gz (new file)

Binary file not shown.


@@ -1,24 +1,11 @@
Name: pkgship
-Version: 2.1.0
-Release: 10
+Version: 2.2.0
+Release: 1
Summary: Pkgship implements rpm package dependence ,maintainer, patch query and so on.
License: Mulan 2.0
URL: https://gitee.com/openeuler/pkgship
Source0: https://gitee.com/openeuler/pkgship-%{version}.tar.gz
-patch0001: 0001-optimization-printing-progress-bar.patch
-patch0002: 0002-update-doc.patch
-patch0003: 0003-fix-pkginfo-queries.patch
-patch0004: 0004-wrong-judgment-of-startup-success.patch
-patch0005: 0005-canonical-naming.patch
-patch0006: 0006-add-check-service-status-when-init.patch
-patch0007: 0007-update-readme.patch
-patch0008: 0008-update-patch-import.patch
-patch0009: 0009-add-es-insert-test-cases.patch
-patch0010: 0010-create-log-when-start.patch
-patch0011: 0011-fix-binary-rpm-sort.patch
-patch0012: 0012-fix-bedepend-data-error.patch
BuildArch: noarch
BuildRequires: shadow python3-mock
@@ -101,6 +88,7 @@ chown -R $user:$group $1
}
create_dir_file /opt/pkgship/ 750 d
+create_dir_file /opt/pkgship/compare 755 d
create_dir_file /var/log/pkgship 755 d
create_dir_file /var/log/pkgship-operation 700 d
@@ -122,6 +110,11 @@ create_dir_file /var/log/pkgship-operation 700 d
%attr(0640,pkgshipuser,pkgshipuser) /lib/systemd/system/pkgship.service
%changelog
+* Sun Aug 15 2021 Haiwei Li <lihaiwei8@huawei.com> - 2.2.0-1
+- pkgship Upgrade to 2.2.0
+- Merge the patch before 2021/8/16 into the source code
+- Added the function of comparing the dependency information of different database packages
* Thu Apr 08 2021 zhang tao <zhangtao307@huawei.com> - 2.1.0-10
- fix bedepend data error
@@ -366,4 +359,4 @@ create_dir_file /var/log/pkgship-operation 700 d
- add macro to build cli bin when rpm install
* Sat Jun 6 2020 Feng Hu <solar.hu@foxmail.com> - 1.0-0
- init package
- init package