Initial YakPanel commit
This commit is contained in:
0
mod/project/backup_restore/__init__.py
Normal file
0
mod/project/backup_restore/__init__.py
Normal file
616
mod/project/backup_restore/backup_manager.py
Normal file
616
mod/project/backup_restore/backup_manager.py
Normal file
@@ -0,0 +1,616 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <wzz@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from mod.project.backup_restore.modules.site_module import SiteModule
|
||||
from mod.project.backup_restore.modules.database_module import DatabaseModule
|
||||
from mod.project.backup_restore.modules.ftp_module import FtpModule
|
||||
from mod.project.backup_restore.modules.crontab_module import CrontabModule
|
||||
from mod.project.backup_restore.modules.ssh_module import SshModule
|
||||
from mod.project.backup_restore.modules.firewall_module import FirewallModule
|
||||
from mod.project.backup_restore.modules.plugin_module import PluginModule
|
||||
from mod.project.backup_restore.modules.mail_module import MailModule
|
||||
from mod.project.backup_restore.modules.ssl_model import SSLModel
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class BackupManager(SiteModule, DatabaseModule, FtpModule, SSLModel):
    """Drives full-server backups: sites, databases, FTP, SSL and friends."""

    def __init__(self):
        super().__init__()
        # Root directory that holds every backup artifact.
        self.base_path = '/www/backup/backup_restore'
        base = self.base_path
        # Task list, live log, lock file, completion marker and saved config.
        self.bakcup_task_json = base + '/backup_task.json'
        self.backup_log_file = base + '/backup.log'
        self.backup_pl_file = base + '/backup.pl'
        self.backup_success_file = base + '/success.pl'
        self.backup_save_config = base + '/backup_save_config.json'
        # Per-run history (logs + metadata) and the one-shot migration record.
        self.history_log_path = '/www/backup/backup_restore/history/log'
        self.history_info_path = '/www/backup/backup_restore/history/info'
        self.migrate_backup_info_path = '/www/backup/backup_restore/migrate_backup_info.json'
|
||||
def get_local_backup(self, get=None):
|
||||
backup_list = []
|
||||
if os.path.exists(self.bakcup_task_json):
|
||||
backup_list = json.loads(public.ReadFile(self.bakcup_task_json))
|
||||
|
||||
file_names = os.listdir(self.base_path)
|
||||
pattern = re.compile(r"\d{8}-\d{4}_\d+_backup\.tar\.gz")
|
||||
matched_files = [f for f in file_names if pattern.match(f)]
|
||||
for file in matched_files:
|
||||
if "upload.tmp" in file:
|
||||
continue
|
||||
file_timestamp = file.split('_')[1]
|
||||
matched = any(item["timestamp"] == int(file_timestamp) for item in backup_list)
|
||||
if not matched:
|
||||
done_time = datetime.datetime.fromtimestamp(int(file_timestamp)).strftime('%Y-%m-%d %H:%M:%S')
|
||||
# file_size = public.ExecShell("du -sb /www/backup/backup_restore/{}".format(file))[0].split("\t")[0]
|
||||
file_conf = {
|
||||
'backup_name': str(file),
|
||||
'timestamp': int(file_timestamp),
|
||||
'create_time': done_time,
|
||||
'backup_time': done_time,
|
||||
'backup_file': self.base_path + "/" + file,
|
||||
'storage_type': "local",
|
||||
'auto_exit': 0,
|
||||
'restore_status': 0,
|
||||
'backup_status': 2,
|
||||
'backup_path': self.base_path + "/" + file,
|
||||
'done_time': done_time,
|
||||
'backup_file_size': str(self.get_file_size(self.base_path + "/" + file)),
|
||||
'backup_file_sha256': self.get_file_sha256(self.base_path + "/" + file),
|
||||
'backup_count': {
|
||||
"success": None,
|
||||
"failed": None,
|
||||
"total_time": None
|
||||
},
|
||||
}
|
||||
backup_list.append(file_conf)
|
||||
|
||||
if os.path.exists(self.migrate_backup_info_path):
|
||||
migrate_backup_info = json.loads(public.ReadFile(self.migrate_backup_info_path))
|
||||
backup_list.append(migrate_backup_info)
|
||||
public.ExecShell("rm -f {}".format(self.migrate_backup_info_path))
|
||||
|
||||
public.WriteFile(self.bakcup_task_json, json.dumps(backup_list))
|
||||
return backup_list
|
||||
|
||||
def get_backup_file_msg(self, timestamp):
|
||||
import tarfile
|
||||
backup_file = str(timestamp) + "_backup.tar.gz"
|
||||
print(backup_file)
|
||||
file_names = os.listdir(self.base_path)
|
||||
for file in file_names:
|
||||
if backup_file in file:
|
||||
backup_file = file
|
||||
path = self.base_path + "/" + backup_file
|
||||
path_data = {}
|
||||
if not os.path.exists(path):
|
||||
return path_data
|
||||
try:
|
||||
with tarfile.open(path, 'r:gz') as tar:
|
||||
# 提前获取文件列表
|
||||
members = tar.getnames()
|
||||
# 提取备份 JSON 配置
|
||||
if '{}_backup/backup.json'.format(timestamp) in members:
|
||||
json_file_name = '{}_backup/backup.json'.format(timestamp)
|
||||
json_file = tar.extractfile(json_file_name)
|
||||
json_content = json_file.read().decode('utf-8')
|
||||
path_data['config'] = json.loads(json_content)
|
||||
|
||||
# 提取备份日志文件
|
||||
if '{}_backup/backup.log'.format(timestamp) in members:
|
||||
log_file_name = '{}_backup/backup.log'.format(timestamp)
|
||||
log_file = tar.extractfile(log_file_name)
|
||||
log_content = log_file.read().decode('utf-8')
|
||||
path_data['log'] = log_content + path + "\n" + public.lang("Packaging completed")
|
||||
except:
|
||||
return False
|
||||
|
||||
# path_data['server_config']=self.get_server_config()
|
||||
# path_data['backup_path_size']=25256044
|
||||
# path_data['free_size'] = self.get_free_space()['free_space']
|
||||
|
||||
self.history_log_path = '/www/backup/backup_restore/history/log'
|
||||
self.history_info_path = '/www/backup/backup_restore/history/info'
|
||||
if not os.path.exists(self.history_log_path):
|
||||
public.ExecShell("mkdir -p {}".format(self.history_log_path))
|
||||
if not os.path.exists(self.history_info_path):
|
||||
public.ExecShell("mkdir -p {}".format(self.history_info_path))
|
||||
|
||||
try:
|
||||
public.WriteFile(self.history_log_path + "{}_backup.log".format(timestamp), path_data['log'])
|
||||
except:
|
||||
pass
|
||||
try:
|
||||
public.WriteFile(self.history_info_path + "/{}_backup.info".format(timestamp),
|
||||
json.dumps(path_data['config']))
|
||||
except:
|
||||
return False
|
||||
|
||||
try:
|
||||
backup_task_info = self.get_backup_conf(timestamp)
|
||||
hitory_info = json.loads(public.ReadFile(self.history_info_path + "/{}_backup.info".format(timestamp)))
|
||||
hitory_info['create_time'] = backup_task_info['create_time']
|
||||
hitory_info['backup_time'] = backup_task_info['backup_time']
|
||||
hitory_info['backup_file'] = backup_task_info['backup_file']
|
||||
hitory_info['backup_path'] = backup_task_info['backup_path']
|
||||
hitory_info['done_time'] = backup_task_info['done_time']
|
||||
hitory_info['total_time'] = backup_task_info['total_time']
|
||||
hitory_info['backup_file_size'] = backup_task_info['backup_file_size']
|
||||
hitory_info['backup_file_sha256'] = backup_task_info['backup_file_sha256']
|
||||
public.WriteFile(self.history_info_path + "/{}_backup.info".format(timestamp), json.dumps(hitory_info))
|
||||
except:
|
||||
pass
|
||||
|
||||
return True
|
||||
|
||||
def add_backup_task(self, timestamp: int):
|
||||
"""
|
||||
构造备份初始配置
|
||||
"""
|
||||
backup_path = self.base_path + '/{timestamp}_backup/'.format(timestamp=timestamp)
|
||||
if not os.path.exists(backup_path):
|
||||
os.makedirs(backup_path)
|
||||
|
||||
backup_conf = self.get_backup_conf(timestamp)
|
||||
if not backup_conf:
|
||||
print(public.lang("Backup configuration file does not exist"))
|
||||
return public.returnMsg(False, public.lang("Backup configuration file does not exist"))
|
||||
|
||||
backup_conf['data_list'] = {}
|
||||
backup_conf['data_list']['soft'] = self.get_soft_data(timestamp, packet=True)
|
||||
backup_conf['data_list']['site'] = self.get_site_backup_conf(timestamp)
|
||||
backup_conf['data_list']['ssl'] = self.get_ssl_backup_conf(timestamp)
|
||||
backup_conf['data_list']['database'] = self.get_database_backup_conf(timestamp)
|
||||
backup_conf['data_list']['ftp'] = self.backup_ftp_data(timestamp)
|
||||
backup_conf['backup_status'] = 1
|
||||
public.WriteFile(backup_path + 'backup.json', json.dumps(backup_conf))
|
||||
|
||||
def backup_data(self, timestamp: int):
|
||||
if os.path.exists(self.backup_log_file):
|
||||
public.ExecShell("rm -f {}".format(self.backup_log_file))
|
||||
|
||||
if os.path.exists(self.backup_pl_file):
|
||||
print(public.lang("A backup process is already running!"))
|
||||
return public.returnMsg(False, public.lang("A backup process is already running!"))
|
||||
|
||||
try:
|
||||
public.WriteFile(self.backup_pl_file, timestamp)
|
||||
|
||||
backup_conf = self.get_backup_conf(timestamp)
|
||||
backup_conf['backup_status'] = 1
|
||||
self.save_backup_conf(timestamp, backup_conf)
|
||||
start_time = int(time.time())
|
||||
# 构造备份初始配置
|
||||
self.add_backup_task(timestamp)
|
||||
try:
|
||||
self.backup_site_data(timestamp)
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
self.backup_database_data(timestamp)
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
self.backup_ssl_data(timestamp)
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
CrontabModule().backup_crontab_data(timestamp)
|
||||
except:
|
||||
pass
|
||||
|
||||
# TODO: 存在问题,下个版本修复
|
||||
# try:
|
||||
# SshModule().backup_ssh_data(timestamp)
|
||||
# except:
|
||||
# pass
|
||||
|
||||
try:
|
||||
FirewallModule().backup_firewall_data(timestamp)
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
MailModule().backup_vmail_data(timestamp)
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
PluginModule().backup_plugin_data(timestamp)
|
||||
except:
|
||||
pass
|
||||
|
||||
try:
|
||||
self.write_backup_data(timestamp)
|
||||
except:
|
||||
pass
|
||||
|
||||
end_time = int(time.time())
|
||||
done_time = datetime.datetime.fromtimestamp(int(end_time)).strftime('%Y-%m-%d %H:%M:%S')
|
||||
total_time = end_time - start_time
|
||||
|
||||
backup_conf = self.get_backup_conf(timestamp)
|
||||
backup_conf['backup_status'] = 2
|
||||
backup_conf['done_time'] = done_time
|
||||
backup_conf['total_time'] = total_time
|
||||
|
||||
self.save_backup_conf(timestamp, backup_conf)
|
||||
self.sync_backup_info(timestamp)
|
||||
|
||||
public.WriteFile(self.backup_success_file, timestamp)
|
||||
public.ExecShell("rm -f {}".format(self.backup_pl_file))
|
||||
self.create_history_file(timestamp)
|
||||
except Exception as e:
|
||||
return public.returnMsg(False, public.lang(f"something went wrong! Error: {str(e)}"))
|
||||
finally:
|
||||
if os.path.exists(self.backup_pl_file):
|
||||
public.ExecShell("rm -f {}".format(self.backup_pl_file))
|
||||
|
||||
def create_history_file(self, timestamp):
|
||||
if not os.path.exists(self.history_log_path):
|
||||
public.ExecShell("mkdir -p {}".format(self.history_log_path))
|
||||
if not os.path.exists(self.history_info_path):
|
||||
public.ExecShell("mkdir -p {}".format(self.history_info_path))
|
||||
|
||||
hitory_log_file = self.history_log_path + '/' + str(timestamp) + '_backup.log'
|
||||
history_info_file = self.history_info_path + '/' + str(timestamp) + '_backup.info'
|
||||
public.WriteFile(hitory_log_file, public.ReadFile("/www/backup/backup_restore/backup.log".format(timestamp)))
|
||||
public.WriteFile(history_info_file,
|
||||
public.ReadFile("/www/backup/backup_restore/{}_backup/backup.json".format(timestamp)))
|
||||
|
||||
def sync_backup_info(self, timestamp):
|
||||
backup_conf = self.get_backup_conf(timestamp)
|
||||
data_list = self.get_backup_data_list(timestamp)
|
||||
data_list['backup_status'] = backup_conf['backup_status']
|
||||
data_list['backup_file'] = backup_conf['backup_file']
|
||||
data_list['backup_file_sha256'] = backup_conf['backup_file_sha256']
|
||||
data_list['backup_file_size'] = backup_conf['backup_file_size']
|
||||
data_list['done_time'] = backup_conf['done_time']
|
||||
data_list['total_time'] = backup_conf['total_time']
|
||||
data_list['backup_count'] = backup_conf['backup_count']
|
||||
self.update_backup_data_list(timestamp, data_list)
|
||||
|
||||
def count_backup_status(self, data, status_code):
|
||||
return sum(
|
||||
1 for category in data.values()
|
||||
for item in category
|
||||
if isinstance(item, dict) and item.get('status') == status_code
|
||||
)
|
||||
|
||||
def write_backup_data(self, timestamp):
|
||||
self.print_log("====================================================", "backup")
|
||||
self.print_log(public.lang("Start compressing and packaging all data"), "backup")
|
||||
from datetime import datetime
|
||||
backup_conf = self.get_backup_conf(timestamp)
|
||||
|
||||
backup_log_path = self.base_path + str(timestamp) + "_backup/"
|
||||
public.ExecShell('\cp -rpa {} {}'.format(self.backup_log_file, backup_log_path))
|
||||
|
||||
conf_data = json.loads((public.ReadFile("/www/backup/backup_restore/{}_backup/backup.json".format(timestamp))))
|
||||
status_2_count = self.count_backup_status(conf_data['data_list'], 2)
|
||||
status_3_count = self.count_backup_status(conf_data['data_list'], 3)
|
||||
|
||||
dt_object = datetime.fromtimestamp(int(timestamp))
|
||||
file_time = dt_object.strftime('%Y%m%d-%H%M')
|
||||
tar_file_name = file_time + "_" + str(timestamp) + "_backup.tar.gz"
|
||||
conf_data['backup_status'] = 1
|
||||
public.WriteFile("/www/backup/backup_restore/{}_backup/backup.json".format(timestamp), json.dumps(conf_data))
|
||||
|
||||
public.ExecShell("cd /www/backup/backup_restore && tar -czvf {} {}_backup".format(tar_file_name, timestamp))
|
||||
file_size = public.ExecShell("du -sb /www/backup/backup_restore/{}".format(tar_file_name))[0].split("\t")[0]
|
||||
|
||||
backup_conf["backup_status"] = 2
|
||||
backup_conf["backup_file"] = "/www/backup/backup_restore/" + tar_file_name
|
||||
backup_conf["backup_file_sha256"] = self.get_file_sha256("/www/backup/backup_restore/" + tar_file_name)
|
||||
backup_conf["backup_file_size"] = file_size
|
||||
backup_conf["backup_count"] = {}
|
||||
backup_conf["backup_count"]['success'] = status_2_count
|
||||
backup_conf["backup_count"]['failed'] = status_3_count
|
||||
storage_type = backup_conf['storage_type']
|
||||
|
||||
backup_size = self.format_size(int(file_size))
|
||||
self.print_log(
|
||||
public.lang("Compression and packaging of all data completed. Data size: {}").format(backup_size),
|
||||
'backup'
|
||||
)
|
||||
self.print_log(public.lang("Backup completed. Backup file: {}").format(tar_file_name), "backup")
|
||||
self.print_log("====================================================", "backup")
|
||||
|
||||
tar_file_name = "/www/backup/backup_restore/" + tar_file_name
|
||||
if storage_type != "local" and os.path.exists(tar_file_name):
|
||||
cloud_name_cn = "cloud storage"
|
||||
self.print_log(public.lang("Uploading backup file to cloud storage..."), "backup")
|
||||
try:
|
||||
from cloud_stora_upload_v2 import CloudStoraUpload
|
||||
_cloud = CloudStoraUpload()
|
||||
_cloud.run(storage_type)
|
||||
cloud_name_cn = _cloud.obj._title
|
||||
if int(file_size) > 100 * 1024 * 1024:
|
||||
self.print_log(
|
||||
public.lang("{} Uploading in chunks...").format(cloud_name_cn), "backup"
|
||||
)
|
||||
else:
|
||||
self.print_log(
|
||||
public.lang("{} Uploading...").format(cloud_name_cn), "backup"
|
||||
)
|
||||
|
||||
backup_path = _cloud.obj.backup_path
|
||||
if not backup_path.endswith('/'):
|
||||
backup_path += '/'
|
||||
upload_path = os.path.join(backup_path, "backup_restore", os.path.basename(tar_file_name))
|
||||
if _cloud.cloud_upload_file(tar_file_name, upload_path):
|
||||
self.print_log(public.lang("Successfully uploaded to {}").format(cloud_name_cn), "backup")
|
||||
except Exception as e:
|
||||
import traceback
|
||||
public.print_log(traceback.format_exc())
|
||||
self.print_log(
|
||||
public.lang("Error occurred while uploading to {}: {}").format(cloud_name_cn, str(e)),
|
||||
"backup"
|
||||
)
|
||||
|
||||
self.save_backup_conf(timestamp, backup_conf)
|
||||
|
||||
def get_backup_details(self, timestamp):
|
||||
history_info_file = self.history_info_path + '/' + str(timestamp) + '_backup.info'
|
||||
if not os.path.exists(history_info_file):
|
||||
get_info = self.get_backup_file_msg(timestamp)
|
||||
if not get_info:
|
||||
return public.fail_v2(public.lang("Backup info not found"))
|
||||
|
||||
backup_info = json.loads(public.ReadFile(history_info_file))
|
||||
result = self.process_detail(backup_info)
|
||||
return public.success_v2(result)
|
||||
|
||||
def get_backup_log(self, timestamp):
|
||||
backup_log_file = self.base_path + '/backup.log'
|
||||
history_log_file = self.history_log_path + '/' + str(timestamp) + '_backup.log'
|
||||
if os.path.exists(self.backup_pl_file):
|
||||
backup_timestamp = int(public.ReadFile(self.backup_pl_file))
|
||||
if int(backup_timestamp) == int(timestamp):
|
||||
return public.ReadFile(backup_log_file)
|
||||
if os.path.exists(history_log_file):
|
||||
return public.ReadFile(history_log_file)
|
||||
else:
|
||||
return None
|
||||
|
||||
    # TODO: deprecated
    def get_backup_progress(self, get=None):
        """
        Get backup progress information.
        @param get: object containing the request parameters (unused here)
        @return: dict with backup progress information, or a ReturnMsg payload
        """
        # Paths of the lock / log / success-marker files.
        backup_pl_file = self.base_path + '/backup.pl'
        backup_log_file = self.base_path + '/backup.log'
        backup_success_file = self.base_path + '/success.pl'

        # Helper that builds the "completed" result, to avoid duplication.
        def create_completed_result(backup_timestamp):
            if not backup_timestamp:
                return public.ReturnMsg(False, public.lang("Backup completed but unable to retrieve timestamp"))

            if not os.path.exists(self.bakcup_task_json):
                return public.ReturnMsg(False, public.lang("Backup configuration file does not exist"))

            backup_configs = json.loads(public.ReadFile(self.bakcup_task_json))
            # Find the task entry matching this timestamp (string-compared).
            success_data = next(
                (item for item in backup_configs if str(item.get('timestamp')) == str(backup_timestamp)), {}
            )
            return {
                "task_type": "backup",
                "task_status": 2,
                "backup_data": None,
                "backup_name": None,
                "data_backup_status": 2,
                "progress": 100,
                "msg": None,
                'exec_log': public.ReadFile(backup_log_file) if os.path.exists(backup_log_file) else "",
                'timestamp': backup_timestamp,
                'backup_file_info': success_data,
                'err_info': []
            }

        # Check whether the backup has just completed.
        if os.path.exists(backup_success_file):
            success_time = int(os.path.getctime(backup_success_file))
            local_time = int(time.time())
            # Success file created within the last 10 seconds: just finished.
            if success_time + 10 > local_time:
                try:
                    backup_timestamp = public.ReadFile(backup_success_file).strip()
                    return public.ReturnMsg(True, create_completed_result(backup_timestamp))
                except Exception as e:
                    public.ExecShell("rm -f {}".format(backup_success_file))
                    return public.ReturnMsg(False,
                                            public.lang("Error retrieving backup completion information: {}").format(
                                                str(e)))
            else:
                # Older than 10 seconds: drop the stale success marker.
                public.ExecShell("rm -f {}".format(backup_success_file))

        # Check whether a backup process is currently running.
        try:
            # The lock file holds the running backup's timestamp.
            if os.path.exists(backup_pl_file):
                timestamp = public.ReadFile(backup_pl_file).strip()
                if not timestamp:
                    return public.ReturnMsg(False,
                                            public.lang("Backup process is running, but unable to retrieve timestamp"))
            else:
                # Wait 2 seconds: the backup may have finished a moment ago.
                time.sleep(2)
                if os.path.exists(backup_success_file):
                    success_time = int(os.path.getctime(backup_success_file))
                    local_time = int(time.time())
                    if success_time + 10 > local_time:
                        backup_timestamp = public.ReadFile(backup_success_file).strip()
                        return public.ReturnMsg(True, create_completed_result(backup_timestamp))

                # Re-check for a running backup process.
                if os.path.exists(backup_pl_file):
                    timestamp = public.ReadFile(backup_pl_file).strip()
                    if not timestamp:
                        return public.ReturnMsg(False, public.lang(
                            "Backup process is running, but unable to retrieve timestamp"))
                else:
                    return public.ReturnMsg(False, public.lang(
                        "No ongoing backup tasks found. Please check the backup list to see if the backup is completed"))

            # Read the per-run backup configuration (retry up to 3 times,
            # since the running process may not have written it yet).
            backup_json_path = f"{self.base_path}/{timestamp}_backup/backup.json"
            count = 0
            while 1:
                if count >= 3:
                    return public.ReturnMsg(False, public.lang("Backup configuration file does not exist: {}").format(
                        backup_json_path))
                count += 1
                if not os.path.exists(backup_json_path):
                    time.sleep(1)
                else:
                    break

            conf_data = json.loads(public.ReadFile(backup_json_path))
        except Exception as e:
            return public.ReturnMsg(False,
                                    public.lang("Error retrieving backup progress information: {}").format(str(e)))

        # Read the live backup log.
        backup_log_data = public.ReadFile(backup_log_file) if os.path.exists(backup_log_file) else ""

        # Backup categories and the progress percentage reported for each.
        backup_types = [
            {
                'type': 'site',
                'data_key': 'site',
                'display_name': 'site',
                'progress': 30
            },
            {
                'type': 'database',
                'data_key': 'database',
                'display_name': 'database',
                'progress': 60
            },
            {
                'type': 'ftp',
                'data_key': 'ftp',
                'display_name': 'ftp',
                'progress': 70
            },
            {
                'type': 'terminal',
                'data_key': 'terminal',
                'display_name': 'terminal',
                'progress': 75
            },
            {
                'type': 'firewall',
                'data_key': 'firewall',
                'display_name': 'firewall',
                'progress': 90
            }
        ]

        # Report the first category item that is not finished yet (status != 2).
        for backup_type in backup_types:
            items = conf_data.get("data_list", {}).get(backup_type['data_key'], [])
            for item in items:
                try:
                    if item.get("status") == 2:
                        continue

                    return public.ReturnMsg(True, {
                        "task_type": "backup",
                        "task_status": 1,
                        "data_type": backup_type['type'],
                        "name": item.get("name", f"unknow {backup_type['display_name']}"),
                        "data_backup_status": item.get("status", 0),
                        "progress": backup_type['progress'],
                        "msg": item.get("msg"),
                        'exec_log': backup_log_data,
                        'timestamp': timestamp
                    })
                except:
                    # Malformed item: report a generic "server config" stage.
                    return public.ReturnMsg(True, {
                        "task_type": "backup",
                        "task_status": 1,
                        "data_type": public.lang("Server Configuration"),
                        "name": public.lang("Server Configuration"),
                        "data_backup_status": 1,
                        "progress": 80,
                        "msg": public.lang("Backing up server configuration"),
                        'exec_log': backup_log_data,
                        'timestamp': timestamp
                    })

        # Check the data-packaging stage.
        try:
            backup_status = conf_data.get('backup_status')
            if backup_status == 1:
                return public.ReturnMsg(True, {
                    "task_type": "backup",
                    "task_status": 1,
                    "data_type": "tar",
                    "name": public.lang("Data Packaging"),
                    "data_backup_status": 1,
                    "progress": 90,
                    'exec_log': backup_log_data,
                    'timestamp': timestamp
                })
        except Exception:
            # The backup_status field may be missing; carry on.
            pass

        # No in-progress task was found, but a backup process exists.
        if timestamp:
            return {
                "backup_data": "unknown",
                "backup_name": "unknow",
                "data_backup_status": 1,
                "progress": 10,
                'backup_msg': public.lang("Preparing backup data"),
                'backup_log': backup_log_data,
                'timestamp': timestamp
            }
        return public.ReturnMsg(False, public.lang(
            "No ongoing backup tasks found. Please check the backup list to see if the backup is completed"))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Command-line entry point: dispatch a BackupManager method by name.
    if len(sys.argv) < 3:
        print("Usage: btpython backup_manager.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]  # name of the method to invoke
    timestamp = sys.argv[2]    # backup timestamp passed through as-is
    backup_manager = BackupManager()
    if hasattr(backup_manager, method_name):
        getattr(backup_manager, method_name)(timestamp)
    else:
        print(f"Error: Method '{method_name}' does not exist")
|
||||
112
mod/project/backup_restore/base_util.py
Normal file
112
mod/project/backup_restore/base_util.py
Normal file
@@ -0,0 +1,112 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <wzz@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
import hashlib
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
os.chdir("/www/server/panel")
|
||||
import public
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class BaseUtil:
    """Shared helpers (paths, logging, hashing, sizing) for backup/restore."""

    def __init__(self):
        # Root backup directory; create it on first use.
        self.base_path = '/www/backup/backup_restore'
        if not os.path.exists(self.base_path):
            public.ExecShell('mkdir -p {}'.format(self.base_path))
        self.history_path = '/www/backup/backup_restore/history'
        self.history_info_path = '/www/backup/backup_restore/history/info'
        self.nginx_bin_path = '/www/server/nginx/sbin/nginx'
        self.overwrite = 0  # 0 = skip existing data, 1 = overwrite
        self.auto_exit = 0  # abort on error flag; defaults to 0 (keep going)
|
||||
|
||||
def print_log(self, log: str, type: str):
|
||||
time_str = time.strftime('%Y-%m-%d %H:%M:%S')
|
||||
log = "[{}] {}".format(time_str, log)
|
||||
log_file = self.base_path + '/{}.log'.format(type)
|
||||
public.writeFile(log_file, log + "\n", 'a+')
|
||||
|
||||
def replace_log(self, old_str: str, new_str: str, type: str):
|
||||
log_file = self.base_path + '/{}.log'.format(type)
|
||||
log_data = public.ReadFile(log_file)
|
||||
if old_str in log_data:
|
||||
log_data = log_data.replace(old_str, new_str)
|
||||
public.WriteFile(log_file, log_data)
|
||||
|
||||
def get_file_sha256(self, file_path) -> str:
|
||||
sha256 = hashlib.sha256()
|
||||
with open(file_path, "rb") as f:
|
||||
while True:
|
||||
chunk = f.read(4096) # 先读取再判断
|
||||
if not chunk:
|
||||
break
|
||||
sha256.update(chunk)
|
||||
return sha256.hexdigest()
|
||||
|
||||
def get_free_space(self):
|
||||
result = {}
|
||||
path = "/www"
|
||||
diskstat = os.statvfs(path)
|
||||
free_space = diskstat.f_bavail * diskstat.f_frsize
|
||||
# total_space = diskstat.f_blocks * diskstat.f_frsize
|
||||
# used_space = (diskstat.f_blocks - diskstat.f_bfree) * diskstat.f_frsize
|
||||
result['free_space'] = free_space
|
||||
return result
|
||||
|
||||
def get_file_size(self, path: str) -> int:
|
||||
try:
|
||||
if os.path.isfile(path):
|
||||
return os.path.getsize(path)
|
||||
elif os.path.isdir(path):
|
||||
return int(public.ExecShell(f"du -sb {path}")[0].split("\t")[0])
|
||||
return 0
|
||||
except:
|
||||
return 0
|
||||
|
||||
def format_size(self, size: int):
|
||||
if size < 1024:
|
||||
return f"{size}B"
|
||||
elif size < 1024 * 1024:
|
||||
return f"{size / 1024:.2f}KB"
|
||||
elif size < 1024 * 1024 * 1024:
|
||||
return f"{size / 1024 / 1024:.2f}MB"
|
||||
elif size < 1024 * 1024 * 1024 * 1024:
|
||||
return f"{size / 1024 / 1024 / 1024:.2f}GB"
|
||||
else:
|
||||
return f"{size / 1024 / 1024 / 1024 / 1024:.2f}TB"
|
||||
|
||||
def web_config_check(self):
|
||||
if os.path.exists(self.nginx_bin_path):
|
||||
nginx_conf_test = public.ExecShell("ulimit -n 8192 ;{} -t".format(self.nginx_bin_path))[1]
|
||||
if "successful" in nginx_conf_test:
|
||||
return {
|
||||
"status": True,
|
||||
"msg": None
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"status": False,
|
||||
"msg": public.lang("Nginx Configuration file error, inventory!:{}".format(nginx_conf_test))
|
||||
}
|
||||
else:
|
||||
return {
|
||||
"status": True,
|
||||
"msg": None
|
||||
}
|
||||
691
mod/project/backup_restore/comMod.py
Normal file
691
mod/project/backup_restore/comMod.py
Normal file
@@ -0,0 +1,691 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <wzz@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import socket
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
import public.validate
|
||||
from public import Param
|
||||
from public.exceptions import HintException
|
||||
from mod.project.backup_restore.data_manager import DataManager
|
||||
from mod.project.backup_restore.backup_manager import BackupManager
|
||||
from mod.project.backup_restore.restore_manager import RestoreManager
|
||||
from mod.project.backup_restore.ssh_manager import BtInstallManager
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class main(DataManager):
    """Entry-point API class for the backup/restore plugin."""

    def __init__(self):
        super().__init__()
        # Root backup directory plus the lock/task files for each workflow.
        root = '/www/backup/backup_restore'
        self.base_path = root
        self.bakcup_task_json = root + '/backup_task.json'
        self.backup_pl_file = root + '/backup.pl'
        self.restore_pl_file = root + '/restore.pl'
        self.migrate_task_json = root + '/migration_task.json'
        self.migrate_pl_file = root + '/migrate.pl'
        self.migrate_success_pl = root + '/migrate_success.pl'
|
||||
|
||||
def return_data(self, status: bool = None, msg: str = None, error_msg: str = None, data: list | dict = None):
|
||||
aa_status = 0 if status else -1
|
||||
result = None
|
||||
if not isinstance(data, type(None)):
|
||||
result = data
|
||||
elif msg:
|
||||
result = public.lang(msg)
|
||||
elif error_msg:
|
||||
result = public.lang(error_msg)
|
||||
return public.return_message(aa_status, 0, result)
|
||||
|
||||
    def add_backup(self, get):
        """Create a backup task, run immediately or scheduled via at(1).

        A ``timestamp`` of 0/"" means "run now"; any other value is treated
        as a future epoch second and queued with the system ``at`` daemon.
        Returns a wrapped success/failure message.
        """
        try:
            # Validate request parameters; trim surrounding whitespace first.
            get.validate([
                Param("backup_name").String().Require(),
                Param("storage_type").String().Require(),
                Param("timestamp").Integer().Require(),
                Param("auto_exit").Integer("in", [0, 1]).Require(),
            ], [
                public.validate.trim_filter(),
            ])
        except Exception as ex:
            public.print_log("error info: {}".format(ex))
            return public.fail_v2(str(ex))

        # A lingering backup.pl flag means a previous backup is still marked
        # active: stop it before queueing a new one.
        if os.path.exists(self.backup_pl_file):
            self.task_stop()

        # Throttle: refuse a new task within 10s of the last successful one.
        if os.path.exists(self.base_path + "/success.pl"):
            try:
                if int(os.path.getctime(self.base_path + "/success.pl")) + 10 > int(time.time()):
                    return public.fail_v2(public.lang("Please do not operate frequently, please wait a moment"))
            except:
                pass

        # Web server configuration must be sane before a backup is attempted.
        web_check = self.web_config_check()
        if web_check['status'] is False:
            return self.return_data(error_msg=web_check['msg'])

        # Load existing task list (if any) so the new task is appended.
        backup_config = []
        if os.path.exists(self.bakcup_task_json):
            backup_config = json.loads(public.ReadFile(self.bakcup_task_json))

        get.auto_exit = 0  # force non-interruption; interaction happens via error msg
        backup_now = False
        if not hasattr(get, "timestamp"):
            get_time = ""
        else:
            try:
                get_time = int(get.timestamp)
            except:
                # Keep the raw value; the emptiness check below handles "".
                get_time = get.timestamp

        local_timestamp = int(time.time())
        # Empty/zero timestamp -> immediate backup keyed by "now".
        if get_time == "" or get_time == "0" or get_time == 0:
            backup_timestamp = local_timestamp
            get_time = local_timestamp
            backup_now = True
        else:
            backup_timestamp = get_time

        # Task record persisted to backup_task.json; progress fields are
        # filled in later by the background backup process.
        backup_conf = {
            'backup_name': get.backup_name,
            'timestamp': get_time,
            'create_time': datetime.datetime.fromtimestamp(int(local_timestamp)).strftime('%Y-%m-%d %H:%M:%S'),
            'backup_time': datetime.datetime.fromtimestamp(int(backup_timestamp)).strftime('%Y-%m-%d %H:%M:%S'),
            'storage_type': get.storage_type,
            'auto_exit': int(get.auto_exit),
            'backup_status': 0 if not backup_now else 1,
            'restore_status': 0,
            'backup_path': self.base_path + "/" + str(get_time) + "_backup",
            'backup_file': "",
            'backup_file_sha256': "",
            'backup_file_size': "",
            'backup_count': {
                'success': None,
                'failed': None,
            },
            'total_time': None,
            'done_time': None,
        }
        backup_config.append(backup_conf)
        public.WriteFile(self.bakcup_task_json, json.dumps(backup_config))

        if backup_now:
            # Detach the backup worker so the request returns immediately.
            public.ExecShell(
                "nohup btpython /www/server/panel/mod/project/backup_restore/backup_manager.py backup_data {} > /dev/null 2>&1 &".format(
                    int(get_time)
                )
            )
        else:
            # todo at time
            # at(1) accepts e.g. "2024-05-20 14:00" — minute granularity only.
            # NOTE(review): this requires the atd service to be installed and
            # running on the host — confirm.
            at_time_str = time.strftime("%Y-%m-%d %H:%M", time.localtime(int(get_time)))
            exec_script = "btpython /www/server/panel/mod/project/backup_restore/backup_manager.py"
            exec_command = f"cd {public.get_panel_path()} && echo 'nohup {exec_script} backup_data {int(get_time)} > /dev/null 2>&1' | at {at_time_str}"
            public.print_log(f"{exec_command}")
            public.ExecShell(exec_command)
        public.set_module_logs('backup_restore', 'add_backup', 1)
        return self.return_data(True, public.lang("Added successfully"))
|
||||
|
||||
def get_backup_list(self, get=None):
|
||||
if not os.path.exists(self.base_path):
|
||||
public.ExecShell("mkdir -p {}".format(self.base_path))
|
||||
backup_config = BackupManager().get_local_backup()
|
||||
backup_config = sorted(backup_config, key=lambda x: int(x["timestamp"]), reverse=True)
|
||||
return self.return_data(True, public.lang("Successfully retrieved"), None, backup_config)
|
||||
|
||||
    def del_backup(self, get):
        """Delete one backup task (by timestamp) and its on-disk artifacts.

        First tries to remove a registered task from backup_task.json plus
        its archive/work directory and history info/log entries; if no task
        matches, falls back to removing any orphaned file/directory in the
        backup root whose name contains the timestamp.
        """
        if not hasattr(get, "timestamp"):
            return self.return_data(False, public.lang("Parameter error"), public.lang("Parameter error"))

        backup_config = []
        if os.path.exists(self.bakcup_task_json):
            backup_config = json.loads(public.ReadFile(self.bakcup_task_json))

        for backup_conf in backup_config:
            if backup_conf['timestamp'] == int(get.timestamp):
                # Remove the archive file and the working directory, if recorded.
                for i in [
                    backup_conf.get("backup_file", ""),
                    backup_conf.get("backup_path", ""),
                ]:
                    if i and os.path.exists(i):
                        public.ExecShell(f"rm -rf {i}")
                # Mutating the list mid-iteration is safe here only because
                # the function returns immediately afterwards.
                backup_config.remove(backup_conf)
                public.WriteFile(self.bakcup_task_json, json.dumps(backup_config))

                # Drop the per-task history info/log files as well.
                info_path = os.path.join(self.base_path, "history", "info")
                log_path = os.path.join(self.base_path, "history", "log")
                if os.path.exists(info_path):
                    for item in os.listdir(info_path):
                        if item.startswith(str(get.timestamp)):
                            public.ExecShell("rm -rf {}".format(os.path.join(info_path, item)))
                if os.path.exists(log_path):
                    for item in os.listdir(log_path):
                        if item.startswith(str(get.timestamp)):
                            public.ExecShell("rm -rf {}".format(os.path.join(log_path, item)))

                return self.return_data(True, public.lang("Deleted successfully"))

        # Fallback: no registered task matched — clean up orphaned artifacts.
        backup_file_list = os.listdir(self.base_path)
        for backup_file in backup_file_list:
            if backup_file.endswith(".tar.gz") or backup_file.endswith("_backup") or backup_file.endswith("_migration"):
                if str(get.timestamp) in backup_file:
                    if os.path.exists(os.path.join(self.base_path, backup_file)):
                        public.ExecShell("rm -rf {}".format(os.path.join(self.base_path, backup_file)))
                        return self.return_data(True, public.lang("Deleted successfully"))

        return self.return_data(False, public.lang("Deletion failed"))
|
||||
|
||||
def get_data_total(self, get=None):
|
||||
server_data = self.get_data_list()
|
||||
return self.return_data(status=True, data=server_data)
|
||||
|
||||
def get_progress(self, get=None):
|
||||
try:
|
||||
get.validate([
|
||||
Param("type").String(opt="in", length_or_list=["backup", "restore"]).Require(),
|
||||
], [
|
||||
public.validate.trim_filter(),
|
||||
])
|
||||
except Exception as ex:
|
||||
public.print_log("error info: {}".format(ex))
|
||||
return public.fail_v2(str(ex))
|
||||
|
||||
type = get.type
|
||||
progress_data = self.get_progress_with_type(type)
|
||||
if progress_data['status'] is True:
|
||||
return self.return_data(True, public.lang("Successfully retrieved"), data=progress_data.get('msg'))
|
||||
|
||||
return self.return_data(False, error_msg=progress_data.get('msg', public.lang("Failed to get progress")))
|
||||
|
||||
def get_details(self, get):
|
||||
""" 获取备份或还原任务的详细信息"""
|
||||
try:
|
||||
get.validate([
|
||||
Param("type").String(opt="in", length_or_list=["backup", "restore"]).Require(),
|
||||
Param("timestamp").Timestamp().Require(),
|
||||
], [
|
||||
public.validate.trim_filter(),
|
||||
])
|
||||
except Exception as ex:
|
||||
public.print_log("error info: {}".format(ex))
|
||||
return public.fail_v2(str(ex))
|
||||
|
||||
if get.type == "backup":
|
||||
return BackupManager().get_backup_details(get.timestamp)
|
||||
elif get.type == "restore":
|
||||
return RestoreManager().get_restore_details(get.timestamp)
|
||||
|
||||
raise HintException(public.lang("Unknown Type"))
|
||||
|
||||
def get_exec_logs(self, get=None):
|
||||
try:
|
||||
get.validate([
|
||||
Param("timestamp").Integer().Require(),
|
||||
Param("type").String("in", ["backup", "restore"]).Require(),
|
||||
], [
|
||||
public.validate.trim_filter(),
|
||||
])
|
||||
except Exception as ex:
|
||||
public.print_log("error info: {}".format(ex))
|
||||
return public.fail_v2(str(ex))
|
||||
|
||||
timestamp = get.timestamp
|
||||
type = get.type
|
||||
exec_logs = ""
|
||||
if type == "backup":
|
||||
exec_logs = BackupManager().get_backup_log(timestamp)
|
||||
elif type == "restore":
|
||||
exec_logs = RestoreManager().get_restore_log(timestamp)
|
||||
return self.return_data(True, public.lang("Successfully retrieved"), "", exec_logs)
|
||||
|
||||
    def task_stop(self, get=None):
        """Forcefully stop any running backup/restore worker and reset state.

        Kills matching worker processes, removes the in-flight flag files,
        and resets any task marked as running (status 1) back to 0 in
        backup_task.json.
        """
        # NOTE(review): if several backup_manager.py processes exist, the ps
        # output has multiple lines and stripping "\n" concatenates the PIDs
        # into one bogus number before kill — verify whether multiple workers
        # can coexist.
        backup_task_pid = public.ExecShell(
            "ps -ef|grep 'backup_manager.py'|grep -v grep|awk '{print $2}'"
        )[0].replace("\n", "")
        if backup_task_pid:
            public.ExecShell("kill {}".format(backup_task_pid))

        restore_task_pid = public.ExecShell(
            "ps -ef|grep 'restore_manager.py'|grep -v grep|awk '{print $2}'"
        )[0].replace("\n", "")
        if restore_task_pid:
            public.ExecShell("kill {}".format(restore_task_pid))

        # Clear the in-flight flag files so new tasks are not blocked.
        if os.path.exists(self.backup_pl_file):
            public.ExecShell("rm -f {}".format(self.backup_pl_file))

        if os.path.exists(self.restore_pl_file):
            public.ExecShell("rm -f {}".format(self.restore_pl_file))

        # Best effort: mark previously "running" tasks as idle again.
        try:
            task_json_data = json.loads(public.ReadFile(self.bakcup_task_json))
            for item in task_json_data:
                if 'backup_status' in item and item['backup_status'] == 1:
                    item['backup_status'] = 0
                if 'restore_status' in item and item['restore_status'] == 1:
                    item['restore_status'] = 0
            public.WriteFile(self.bakcup_task_json, json.dumps(task_json_data))
        except:
            pass

        # Also drop the global migration flag, if present.
        if os.path.exists("/www/server/panel/data/migration.pl"):
            public.ExecShell("rm -f /www/server/panel/data/migration.pl")
        return self.return_data(True, public.lang("Task stopped successfully"), None, None)
|
||||
|
||||
def get_backup_detail(self, get=None):
|
||||
try:
|
||||
get.validate([
|
||||
Param("timestamp").Integer().Require(),
|
||||
], [
|
||||
public.validate.trim_filter(),
|
||||
])
|
||||
except Exception as ex:
|
||||
public.print_log("error info: {}".format(ex))
|
||||
return public.fail_v2(str(ex))
|
||||
|
||||
timestamp = get.timestamp
|
||||
data = BackupManager().get_backup_file_msg(timestamp)
|
||||
return self.return_data(True, public.lang("Successfully retrieved"), "", data)
|
||||
|
||||
def exec_backup(self, get=None):
|
||||
if not hasattr(get, "timestamp"):
|
||||
return self.return_data(False, public.lang("Parameter error"), public.lang("Parameter error"))
|
||||
timestamp = get.timestamp
|
||||
public.ExecShell(
|
||||
"nohup btpython /www/server/panel/mod/project/backup_restore/backup_manager.py backup_data {} > /dev/null 2>&1 &".format(
|
||||
int(timestamp)
|
||||
)
|
||||
)
|
||||
return self.return_data(True, public.lang("Executed successfully"), public.lang("Executed successfully"))
|
||||
|
||||
    def add_restore(self, get=None):
        """
        Restore a backup identified by ``timestamp`` in a detached worker.

        ``force_restore`` = 1 overwrites existing data on conflict.
        ``auto_exit`` is validated but not forwarded to the worker here.
        """
        try:
            get.validate([
                Param("timestamp").Integer().Require(),
                Param("auto_exit").Integer("in", [0, 1]).Require(),  # interrupt the task
                Param("force_restore").Integer("in", [0, 1]).Require(),  # forced overwriting restore
            ], [
                public.validate.trim_filter(),
            ])
        except Exception as ex:
            public.print_log("error info: {}".format(ex))
            return public.fail_v2(str(ex))

        # A lingering restore.pl flag means a previous restore is still
        # marked active: stop it before starting a new one.
        if os.path.exists(self.restore_pl_file):
            self.task_stop()

        # Throttle: refuse a new restore within 10s of the last success.
        if os.path.exists(self.base_path + "/restore_success.pl"):
            try:
                if int(os.path.getctime(self.base_path + "/restore_success.pl")) + 10 > int(time.time()):
                    return public.fail_v2(public.lang("Please do not operate frequently, please wait a moment"))
            except:
                pass

        timestamp = get.timestamp

        # Detach the restore worker so the request returns immediately.
        public.ExecShell(
            "nohup btpython /www/server/panel/mod/project/backup_restore/restore_manager.py restore_data {} {} > /dev/null 2>&1 &".format(
                int(timestamp), int(get.force_restore)
            )
        )
        public.set_module_logs('backup_restore', 'add_restore', 1)
        return self.return_data(True, public.lang("Restore task added successfully"))
|
||||
|
||||
    def ssh_auth_check(self, get):
        """Verify that the supplied SSH connection information works.

        Performs a cheap TCP reachability probe first, then a full SSH
        authentication attempt via BtInstallManager.  For key auth, the
        ``password`` field carries the private key material.
        """
        try:
            get.validate([
                Param("server_ip").String().Require(),
                Param("ssh_port").Integer(),
                Param("ssh_user").String().Require(),
                Param("password").String(),
                Param("auth_type").String("in", ["password", "key"]).Require(),
            ], [
                public.validate.trim_filter(),
            ])
        except Exception as ex:
            public.print_log("error info: {}".format(ex))
            return public.fail_v2(str(ex))

        # Local web server config must be sane before attempting migration.
        web_check = self.web_config_check()
        if web_check['status'] is False:
            return self.return_data(error_msg="{}".format(web_check['msg']))

        # Default to the standard SSH port when none was supplied.
        port = int(get.ssh_port) if hasattr(get, "ssh_port") and get.ssh_port else 22
        ssh_client = self.ssh_net_client_check(get.server_ip, port)
        if not ssh_client:
            return self.return_data(
                error_msg=public.lang("SSH connection test failed, please check if the IP and port are correct"))

        password = None
        key_file = None
        # At least one of password / key file must be provided.
        if hasattr(get, "password") and get.password:
            password = get.password

        if get.auth_type == "password":
            key_file = None
        elif get.auth_type == "key":
            # Persist the private key so the SSH client can use it.
            # NOTE(review): the key file is left on disk (chmod 600) after
            # the check — confirm whether it should be removed afterwards.
            key_file = "/www/backup/backup_restore/key_file"
            public.WriteFile(key_file, get.password)
            public.ExecShell("chmod 600 {}".format(key_file))

        # Create the SSH manager instance and verify the connection.
        manager = BtInstallManager(
            host=get.server_ip,
            port=port,
            username=get.ssh_user,
            password=password,
            key_file=key_file
        )

        result = manager.verify_ssh_connection()
        if result["status"]:
            return self.return_data(True, public.lang("SSH connection verified successfully"), None, None)
        return self.return_data(error_msg=result["msg"])
|
||||
|
||||
    def add_migrate_task(self, get=None):
        """Start a server migration task against a remote host over SSH.

        Stops/cleans any previous migration first, persists the task state
        to migration_task.json, then launches ssh_manager.py detached.
        """
        try:
            get.validate([
                Param("server_ip").String().Require(),
                Param("ssh_port").Integer(),
                Param("ssh_user").String().Require(),
                Param("password").String(),
                Param("auth_type").String("in", ["password", "key"]).Require(),
            ], [
                public.validate.trim_filter(),
            ])
        except Exception as ex:
            public.print_log("error info: {}".format(ex))
            return public.fail_v2(str(ex))

        # Only one migration at a time: tear down any existing task and log.
        self.stop_migrate()
        if os.path.exists("/www/backup/backup_restore/migration.log"):
            public.ExecShell("rm -f /www/backup/backup_restore/migration.log")

        server_ip = get.server_ip
        ssh_port = get.ssh_port
        ssh_user = get.ssh_user
        auth_type = get.auth_type
        password = get.password

        if auth_type == "key":
            # For key auth the "password" field carries the private key.
            key_file = "/www/backup/backup_restore/key_file"
            public.WriteFile(key_file, password)
            public.ExecShell("chmod 600 {}".format(key_file))
        else:
            key_file = None

        # Initial task record; the worker updates run_type / progress fields.
        # NOTE(review): the credential is stored in plain text in this JSON
        # file — confirm that is acceptable for this deployment.
        timestamp = int(time.time())
        migrate_conf = {
            'server_ip': server_ip,
            'ssh_port': ssh_port,
            'ssh_user': ssh_user,
            'auth_type': auth_type,
            'password': password,
            'timestamp': timestamp,
            'run_type': "INIT",
            'run_status': 1,
            'confirm': 0,
            'step': 1,
            'migrate_progress': 5,
            'migrate_msg': public.lang("Migration task initializing"),
            'task_info': None,
        }
        public.WriteFile(self.migrate_task_json, json.dumps(migrate_conf))

        # SECURITY(review): the password is interpolated directly into the
        # shell command line (visible in `ps`, quoting-sensitive) — consider
        # passing it via the key-file/stdin mechanism instead.
        if auth_type == "password":
            public.ExecShell(
                "nohup btpython /www/server/panel/mod/project/backup_restore/ssh_manager.py --action migrate -H {server_ip} -P {ssh_port} -u {ssh_user} --password='{password}' --task-name '{task_name}' > /dev/null 2>&1 &".format(
                    server_ip=server_ip, ssh_port=ssh_port, ssh_user=ssh_user, password=password,
                    task_name=public.lang("My Migration Task")
                )
            )
        elif auth_type == "key":
            public.ExecShell(
                "nohup btpython /www/server/panel/mod/project/backup_restore/ssh_manager.py --action migrate -H {server_ip} -P {ssh_port} -u {ssh_user} --key-file {key_file} --task-name '{task_name}' > /dev/null 2>&1 &".format(
                    server_ip=server_ip, ssh_port=ssh_port, ssh_user=ssh_user, key_file=key_file,
                    task_name=public.lang("My Migration Task")
                )
            )
        public.set_module_logs('backup_restore', 'add_migrate_task', 1)
        return self.return_data(True, public.lang("Migration task added successfully"), None, None)
|
||||
|
||||
def get_migrate_status(self, get=None):
|
||||
if os.path.exists(self.migrate_task_json):
|
||||
migrate_config = json.loads(public.ReadFile(self.migrate_task_json))
|
||||
result = {
|
||||
"server_ip": migrate_config.get('server_ip', ''),
|
||||
"ssh_port": migrate_config.get('ssh_port', 22),
|
||||
"ssh_user": migrate_config.get('ssh_user', ''),
|
||||
"auth_type": migrate_config.get('auth_type', 'password'),
|
||||
"password": migrate_config.get('password', ''),
|
||||
"migrate_progress": migrate_config.get('migrate_progress', 0),
|
||||
"timestamp": migrate_config.get("timestamp", 0),
|
||||
"total_time": migrate_config.get("total_time", 0),
|
||||
"is_running": migrate_config['run_status'] == 1 or migrate_config.get("confirm", 0) == 0,
|
||||
}
|
||||
else:
|
||||
result = {
|
||||
"is_running": False,
|
||||
}
|
||||
return self.return_data(True, public.lang("Successfully retrieved"), None, result)
|
||||
|
||||
def close_migrate_popup(self, get=None):
|
||||
"""用户二次确认, 关闭迁移弹窗"""
|
||||
if os.path.exists(self.migrate_task_json):
|
||||
migrate_config = json.loads(public.ReadFile(self.migrate_task_json))
|
||||
if migrate_config.get("run_status") == 2:
|
||||
migrate_config['confirm'] = 1
|
||||
public.WriteFile(self.migrate_task_json, json.dumps(migrate_config))
|
||||
return self.return_data(True, public.lang("Successfully migrated"))
|
||||
self.stop_migrate()
|
||||
return self.return_data(True, public.lang("Successfully"))
|
||||
|
||||
    def stop_migrate(self, get=None):
        """Kill the running migration worker and remove its state files.

        Returns an error response when no ssh_manager.py process is found.
        """
        # NOTE(review): as in task_stop, multiple matching PIDs would be
        # concatenated by the newline strip — verify single-worker invariant.
        migrate_pid = public.ExecShell(
            "ps -ef|grep 'ssh_manager.py'|grep -v grep|awk '{print $2}'"
        )[0].replace("\n", "")
        if migrate_pid:
            public.ExecShell("kill {}".format(migrate_pid))
            # Remove the migration flag files so a new task can start cleanly.
            public.ExecShell("rm -f /www/backup/backup_restore/migrate_backup.pl")
            public.ExecShell("rm -f /www/backup/backup_restore/migration.pl")
            public.ExecShell("rm -f /www/backup/backup_restore/migrate_backup_success.pl")
            if os.path.exists(self.migrate_task_json):
                public.ExecShell("rm -f {}".format(self.migrate_task_json))
            return self.return_data(True, public.lang("Task stopped successfully"), None, None)
        else:
            return self.return_data(error_msg=public.lang("No migration task currently"))
|
||||
|
||||
    def get_migrate_progress(self, get=None):
        """Return the migration task state enriched with logs for the UI.

        Reads migration_task.json, attaches the relevant log text depending
        on ``run_type``, and post-processes the COMPLETED / error states.
        """
        if os.path.exists(self.migrate_task_json):
            try:
                migrate_config = json.loads(public.ReadFile(self.migrate_task_json))
            except:
                return self.return_data(error_msg=public.lang("read migration task fail, please try again later!"))

            # Default log; LOCAL_BACKUP additionally appends the backup log.
            migrate_config['migrate_log'] = public.ReadFile('/www/backup/backup_restore/migration.log')
            if migrate_config['run_type'] == "PANEL_INSTALL":
                migrate_config['migrate_log'] = public.ReadFile('/www/backup/backup_restore/migration.log')
            if migrate_config['run_type'] == "LOCAL_BACKUP":
                if os.path.exists('/www/backup/backup_restore/backup.log'):
                    backup_log_data = public.ReadFile('/www/backup/backup_restore/backup.log')
                else:
                    backup_log_data = public.lang("Starting backup task...")
                migration_log_data = public.ReadFile('/www/backup/backup_restore/migration.log')
                migrate_config['migrate_log'] = migration_log_data + "\n" + backup_log_data
            # run_status == 2 is the worker's terminal state.
            if migrate_config['run_status'] == 2:
                if migrate_config['run_type'] == "COMPLETED":
                    migrate_config['migrate_progress'] = 100
                    migrate_config['migrate_err_msg'] = None
                    migrate_config['migrate_msg'] = public.lang("yakpanel installation completed!")
                    try:
                        # Surface the remote panel credentials collected by
                        # the worker in task_info.
                        migrate_config['panel_addr'] = migrate_config['task_info']['panel_info']['panel_url']
                        migrate_config['panel_user'] = migrate_config['task_info']['panel_info']['username']
                        migrate_config['panel_password'] = migrate_config['task_info']['panel_info']['password']
                    except KeyError:
                        return self.return_data(error_msg=public.lang(
                            f"Remote panel info not found! please cancel the task and try again!"
                        ))
                    except Exception as e:
                        return self.return_data(error_msg=public.lang(f"Migration task failed! {e}"))
                else:
                    # Terminal but not COMPLETED: report as still running.
                    migrate_config['run_status'] = 1

            else:
                # Non-terminal state is surfaced as an error entry for the UI.
                # NOTE(review): this also triggers for run_status == 1
                # (in-progress) — confirm that is intended.
                migrate_config['migrate_err_msg'] = migrate_config['migrate_msg']
                run_name = public.lang("Migration Task")
                err_info = []
                if migrate_config['run_type'] == "PANEL_INSTALL":
                    run_name = public.lang("yakpanel Installation")
                elif migrate_config['run_type'] == "LOCAL_BACKUP":
                    run_name = public.lang("Local Backup")
                elif migrate_config['run_type'] == "UPLOAD_FILE":
                    run_name = public.lang("File Upload")
                elif migrate_config['run_type'] == "REMOTE":
                    run_name = public.lang("Restore Task")
                err_info_result = {
                    "name": run_name,
                    "type": public.lang("Environment"),
                    "msg": migrate_config['migrate_msg']
                }
                err_info.append(err_info_result)
                migrate_config['err_info'] = err_info

            return self.return_data(True, public.lang("Successfully retrieved"), None, migrate_config)
        else:
            return self.return_data(error_msg=public.lang("No migration task currently"))
|
||||
|
||||
def get_history_migrate_list(self, get=None):
|
||||
history_migrate = []
|
||||
if os.path.exists(self.base_path):
|
||||
for item in os.listdir(self.base_path):
|
||||
item_path = os.path.join(self.base_path, item)
|
||||
if os.path.isdir(item_path) and re.match(r'^(\d+)_migration$', item):
|
||||
timestamp = re.match(r'^(\d+)_migration$', item).group(1)
|
||||
if os.path.exists(os.path.join(item_path, "status.json")):
|
||||
status_data = json.loads(public.ReadFile(os.path.join(item_path, "status.json")))
|
||||
migrate_ip = status_data['server_ip']
|
||||
else:
|
||||
migrate_ip = None
|
||||
migrate_data = {
|
||||
"timestamp": int(timestamp),
|
||||
"migrate_time": int(timestamp),
|
||||
"migrate_path": item_path,
|
||||
"migrate_ip": migrate_ip
|
||||
}
|
||||
history_migrate.append(migrate_data)
|
||||
return history_migrate
|
||||
|
||||
def get_history_migrate_log(self, get=None):
|
||||
timestamp = get.timestamp
|
||||
history_migrate_log = self.base_path + "/" + str(timestamp) + "_migration/migration.log"
|
||||
if os.path.exists(history_migrate_log):
|
||||
return self.return_data(True, public.lang("Successfully retrieved"), None,
|
||||
public.ReadFile(history_migrate_log))
|
||||
else:
|
||||
return self.return_data(False, public.lang("Migration log does not exist"), None, None)
|
||||
|
||||
def get_history_migrate_info(self, get=None):
|
||||
try:
|
||||
get.validate([
|
||||
Param("timestamp").Timestamp().Require(),
|
||||
], [
|
||||
public.validate.trim_filter(),
|
||||
])
|
||||
except Exception as ex:
|
||||
public.print_log("error info: {}".format(ex))
|
||||
return public.fail_v2(str(ex))
|
||||
|
||||
timestamp = get.timestamp
|
||||
history_migrate_info = self.base_path + "/" + str(timestamp) + "_migration/status.json"
|
||||
if os.path.exists(history_migrate_info):
|
||||
return self.return_data(True, public.lang("Successfully retrieved"), None,
|
||||
json.loads(public.ReadFile(history_migrate_info)))
|
||||
else:
|
||||
return self.return_data(error_msg=public.lang("Migration log does not exist"))
|
||||
|
||||
def get_backup_log(self, get=None):
|
||||
if not hasattr(get, "timestamp"):
|
||||
return self.return_data(False, public.lang("Parameter error"), public.lang("Parameter error"))
|
||||
timestamp = get.timestamp
|
||||
return self.return_data(True, public.lang("Successfully retrieved"), "",
|
||||
BackupManager().get_backup_log(timestamp))
|
||||
|
||||
def ssh_net_client_check(self, server_ip, ssh_port):
|
||||
try:
|
||||
# 使用requests库测试SSH连接,设置3秒超时
|
||||
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
|
||||
sock.settimeout(3)
|
||||
result = sock.connect_ex((server_ip, int(ssh_port)))
|
||||
sock.close()
|
||||
|
||||
if result == 0:
|
||||
return True
|
||||
else:
|
||||
return False
|
||||
except Exception as e:
|
||||
public.print_log(public.lang("SSH connection test failed: {}").format(e))
|
||||
return False
|
||||
|
||||
def del_migrate_tips(self, get=None):
|
||||
if os.path.exists("/www/server/panel/data/migration.pl"):
|
||||
public.ExecShell("rm -f /www/server/panel/data/migration.pl")
|
||||
return public.returnMsg(True, public.lang("Migration reminder deleted successfully"))
|
||||
|
||||
def del_history_migrate(self, get=None):
|
||||
try:
|
||||
get.validate([
|
||||
Param("timestamp").Timestamp().Require(),
|
||||
], [
|
||||
public.validate.trim_filter(),
|
||||
])
|
||||
except Exception as ex:
|
||||
public.print_log("error info: {}".format(ex))
|
||||
return public.fail_v2(str(ex))
|
||||
|
||||
timestamp = get.timestamp
|
||||
if os.path.exists(self.base_path + "/" + str(timestamp) + "_migration"):
|
||||
public.ExecShell("rm -rf {}".format(self.base_path + "/" + str(timestamp) + "_migration"))
|
||||
return self.return_data(True, public.lang("Migration history deleted successfully"))
|
||||
else:
|
||||
return self.return_data(error_msg=public.lang("Migration history does not exist"))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry: dispatch "btpython backup_manager.py <method> <timestamp>".
    # Fix: require BOTH arguments — the old `< 2` check still crashed with
    # IndexError on sys.argv[2] when only the method name was given.  This
    # also matches the `< 3` check used by config_manager.py's entry point.
    if len(sys.argv) < 3:
        print("Usage: btpython backup_manager.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]  # method to invoke
    timestamp = sys.argv[2]  # task timestamp argument
    com_manager = main()  # instantiate the controller
    if hasattr(com_manager, method_name):  # check the method exists
        method = getattr(com_manager, method_name)
        method(timestamp)
    else:
        print(f"Error: {public.lang('Method')} '{method_name}' {public.lang('does not exist')}")
|
||||
112
mod/project/backup_restore/config_manager.py
Normal file
112
mod/project/backup_restore/config_manager.py
Normal file
@@ -0,0 +1,112 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <wzz@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class ConfigManager:
    """Read/write helpers for the backup/restore JSON state files.

    State lives under /www/backup/backup_restore: a global backup_task.json
    plus per-task ``<timestamp>_backup/{backup,restore}.json`` files.
    """

    def __init__(self):
        # Root directory for all backup/restore artifacts.
        self.base_path = '/www/backup/backup_restore'
        # NOTE: keeps the historical "bakcup" typo — other modules reference
        # this attribute name.
        self.bakcup_task_json = self.base_path + '/backup_task.json'

    def get_backup_conf(self, timestamp):
        """Return the task entry whose timestamp (string-compared) matches,
        or None when the file or entry is missing."""
        if not os.path.exists(self.bakcup_task_json):
            return None
        task_json_data = json.loads(public.ReadFile(self.bakcup_task_json))
        for item in task_json_data:
            if str(item['timestamp']) == timestamp:
                return item
        return None

    def save_backup_conf(self, timestamp, data):
        """Merge ``data`` into the matching task entry and persist the file."""
        if not os.path.exists(self.bakcup_task_json):
            return None
        task_json_data = json.loads(public.ReadFile(self.bakcup_task_json))
        for item in task_json_data:
            if str(item['timestamp']) == timestamp:
                item.update(data)
                break
        public.WriteFile(self.bakcup_task_json, json.dumps(task_json_data))

    def _state_file(self, timestamp, name):
        """Path of the per-task state file ``<timestamp>_backup/<name>.json``."""
        return self.base_path + '/{timestamp}_backup/{name}.json'.format(timestamp=timestamp, name=name)

    def _read_state(self, path):
        """Parse a state file; None when it does not exist."""
        if not os.path.exists(path):
            return None
        return json.loads(public.ReadFile(path))

    def _merge_write_state(self, path, data_list, log_tag):
        """Merge ``data_list`` into an existing state file (if any) and write.

        Returns True on success, False on any error (logged with log_tag).
        """
        try:
            if os.path.exists(path):
                current_data = json.loads(public.ReadFile(path))
                current_data.update(data_list)
                data_list = current_data
            public.WriteFile(path, json.dumps(data_list))
            return True
        except Exception as e:
            public.print_log("{} error: {}".format(log_tag, str(e)))
            return False

    def get_backup_data_list(self, timestamp):
        """Return the per-task backup.json content, or None if absent."""
        return self._read_state(self._state_file(timestamp, 'backup'))

    def update_backup_data_list(self, timestamp, data_list):
        """Merge ``data_list`` into the per-task backup.json; True on success."""
        return self._merge_write_state(self._state_file(timestamp, 'backup'),
                                       data_list, 'update_backup_data_list')

    def get_restore_data_list(self, timestamp):
        """Return the per-task restore.json content, or None if absent."""
        return self._read_state(self._state_file(timestamp, 'restore'))

    def update_restore_data_list(self, timestamp, data_list):
        """Merge ``data_list`` into the per-task restore.json; True on success."""
        return self._merge_write_state(self._state_file(timestamp, 'restore'),
                                       data_list, 'update_restore_data_list')
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI dispatcher: invoke a ConfigManager method by name with one argument.
    if len(sys.argv) < 3:
        print("Usage: btpython config_manager.py <method> <timestamp>")
        sys.exit(1)
    method_name, timestamp = sys.argv[1], sys.argv[2]
    handler = getattr(ConfigManager(), method_name, None)
    if handler is not None:
        handler(timestamp)
    else:
        print(f"Error: method '{method_name}' not found")
|
||||
1189
mod/project/backup_restore/data_manager.py
Normal file
1189
mod/project/backup_restore/data_manager.py
Normal file
File diff suppressed because it is too large
Load Diff
0
mod/project/backup_restore/modules/__init__.py
Normal file
0
mod/project/backup_restore/modules/__init__.py
Normal file
197
mod/project/backup_restore/modules/crontab_module.py
Normal file
197
mod/project/backup_restore/modules/crontab_module.py
Normal file
@@ -0,0 +1,197 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <miku@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from YakPanel import app
|
||||
import crontab_v2 as crontab
|
||||
from mod.project.backup_restore.base_util import BaseUtil
|
||||
from mod.project.backup_restore.config_manager import ConfigManager
|
||||
|
||||
|
||||
class CrontabModule(BaseUtil, ConfigManager):
    """Backup and restore of the panel's scheduled (crontab) tasks."""

    def __init__(self):
        super().__init__()
        # Root directory for all backup/restore artifacts.
        self.base_path = '/www/backup/backup_restore'
        # NOTE(review): "bakcup" is a typo, kept because sibling modules use the
        # same attribute name.
        self.bakcup_task_json = self.base_path + '/backup_task.json'

    def backup_crontab_data(self, timestamp):
        """Dump every crontab DB row to <backup>/crontab/crontab.json, copy the
        generated task scripts from /www/server/cron, and record the result in
        the backup task list.

        :param timestamp: identifier of the backup run (used in the dir name).
        """
        self.print_log("====================================================", "backup")
        self.print_log(public.lang("Starting a Backup Scheduled Task"), "backup")

        backup_path = self.base_path + "/{timestamp}_backup/crontab".format(timestamp=timestamp)
        if not os.path.exists(backup_path):
            public.ExecShell('mkdir -p {}'.format(backup_path))

        # Explicit column list so the dumped JSON schema is stable.
        field = ('id,name,type,where1,where_hour,where_minute,echo,addtime,status,'
                 'save,backupTo,sName,sBody,sType,urladdress,save_local,notice,'
                 'notice_channel,db_type,split_type,split_value,type_id,rname,'
                 'keyword,post_param,flock,time_set,backup_mode,db_backup_path,'
                 'time_type,special_time,log_cut_path,user_agent,version,table_list,result,second')
        crontab_data = public.M('crontab').order("id asc").field(field).select()
        for task in crontab_data:
            # type_id refers to rows on the source machine; blank it so the
            # restore side does not carry over a dangling reference.
            task['type_id'] = ""

        crontab_json_path = "{}/crontab.json".format(backup_path)
        public.WriteFile(crontab_json_path, json.dumps(crontab_data))
        for item in crontab_data:
            self.print_log(public.lang("Crontab Task {} ✓".format(item['name'])), "backup")

        # Copy the task shell scripts alongside the JSON dump.
        public.ExecShell(f"\cp -rpa /www/server/cron/* {backup_path}/")
        crontab_info = {
            'status': 2,
            'msg': None,
            'crontab_json': crontab_json_path,
            'file_sha256': self.get_file_sha256(crontab_json_path)
        }
        self.print_log(public.lang("Backup Crontab Task completion"), 'backup')

        data_list = self.get_backup_data_list(timestamp)
        data_list['data_list']['crontab'] = crontab_info
        self.update_backup_data_list(timestamp, data_list)

    def _add_crontab(self, crontab_item: dict, timestamp: int) -> None:
        """Re-create one backed-up crontab entry on this machine.

        Let's Encrypt renewal tasks are re-created through acme_v2 instead of
        being imported verbatim; the resource-manager traffic task is skipped
        because the panel maintains it itself.
        """
        if crontab_item['name'] in ("Domain SSL Renew Let's Encrypt Certificate", "Renew Let's Encrypt Certificate"):
            import acme_v2

            if crontab_item['name'] == "Domain SSL Renew Let's Encrypt Certificate":
                acme_v2.acme_v2().set_crond_v2()
            elif crontab_item['name'] == "Renew Let's Encrypt Certificate":
                acme_v2.acme_v2().set_crond()

            self.print_log(
                public.lang(f"Crontab Task: {crontab_item['name']} Add successfully ✓"),
                "restore"
            )
            return

        if crontab_item['name'] == "[Do not delete] Resource Manager - Get Process Traffic":
            return

        s_body = crontab_item['sBody']
        # Strip any "sudo -u ... bash -c '...'" wrapper added by the source
        # panel, keeping only the inner script body.
        s_body = re.sub(r'sudo -u .*? bash -c \'(.*?)\'', r'\1', s_body)
        new_crontab = {
            "name": crontab_item['name'],
            "echo": crontab_item['echo'],
            "type": crontab_item['type'],
            "where1": crontab_item['where1'],
            "hour": crontab_item['where_hour'],
            "minute": crontab_item['where_minute'],
            "status": crontab_item['status'],
            "save": crontab_item['save'],
            "backupTo": crontab_item['backupTo'],
            "sType": crontab_item['sType'],
            "sBody": s_body,
            "sName": crontab_item['sName'],
            "urladdress": crontab_item['urladdress'],
            "save_local": crontab_item['save_local'],
            "notice": crontab_item['notice'],
            "notice_channel": crontab_item['notice_channel'],
            "db_type": crontab_item['db_type'],
            "split_type": crontab_item['split_type'],
            "split_value": crontab_item['split_value'],
            "keyword": crontab_item['keyword'],
            "post_param": crontab_item['post_param'],
            "flock": crontab_item['flock'],
            "time_set": crontab_item['time_set'],
            "backup_mode": crontab_item['backup_mode'],
            "db_backup_path": crontab_item['db_backup_path'],
            "time_type": crontab_item['time_type'],
            "special_time": crontab_item['special_time'],
            "user_agent": crontab_item['user_agent'],
            "version": crontab_item['version'],
            "table_list": crontab_item['table_list'],
            "result": crontab_item['result'],
            "log_cut_path": crontab_item['log_cut_path'],
            "rname": crontab_item['rname'],
            "type_id": crontab_item['type_id'],
            # 'second' may be absent in dumps from older panel versions.
            "second": crontab_item.get('second', ''),
        }
        result = crontab.crontab().AddCrontab(new_crontab)

        crontab_backup_path = self.base_path + f"/{timestamp}_backup/crontab"

        # Restore the task's generated shell script as well; only overwrite an
        # existing script when the run is in overwrite mode.
        back_up_echo_file = os.path.join(crontab_backup_path, crontab_item['echo'])
        panel_echo_file = f"/www/server/cron/{crontab_item['echo']}"
        if self.overwrite or not os.path.exists(panel_echo_file):
            public.ExecShell(
                f"\cp -rpa {back_up_echo_file} {panel_echo_file}"
            )

        if result['status'] != 0:
            error_res = public.find_value_by_key(result, key="result", default="fail")
            self.print_log(
                public.lang(
                    f"Crontab Task: {crontab_item['name']} add fail, "
                    f"error: {error_res}, skip..."),
                "restore"
            )
        else:
            self.print_log(
                public.lang(f"Crontab Task: {crontab_item['name']} Add successfully ✓"),
                "restore"
            )

    def restore_crontab_data(self, timestamp):
        """Restore crontab tasks from a backup made by backup_crontab_data.

        In overwrite mode an existing task with the same id is deleted first;
        otherwise tasks whose name already exists are left untouched.

        :param timestamp: identifier of the backup run to restore from.
        """
        self.print_log("==================================", "restore")
        self.print_log(public.lang("Start restoring Crontab Task"), "restore")
        restore_data = self.get_restore_data_list(timestamp)
        cron_list = public.M('crontab').select()
        cron_name_list = [i['name'] for i in cron_list]
        crontab_data_json = restore_data['data_list']['crontab']['crontab_json']
        # Mark restore as in progress (1) before starting.
        restore_data['data_list']['crontab']['restore_status'] = 1
        self.update_restore_data_list(timestamp, restore_data)
        crontab_data = json.loads(public.ReadFile(crontab_data_json))
        with app.app_context():
            for crontab_item in crontab_data:
                if self.overwrite:
                    try:
                        crontab.crontab().DelCrontab(public.to_dict_obj({"id": crontab_item['id']}))
                    except:
                        # Best-effort delete: the id may not exist on this machine.
                        pass
                    self._add_crontab(crontab_item, timestamp)
                else:  # not overwrite
                    if crontab_item['name'] not in cron_name_list:
                        self._add_crontab(crontab_item, timestamp)
                    else:
                        self.print_log(public.lang(f"Crontab Task: {crontab_item['name']} ✓"), "restore")
        self.print_log(public.lang("Crontab Task complished"), "restore")
        # Mark restore as completed (2).
        restore_data['data_list']['crontab']['restore_status'] = 2
        self.update_restore_data_list(timestamp, restore_data)

    def reload_crontab(self):
        """Reload the system cron daemon, ignoring failures (best-effort)."""
        try:
            crontab.crontab().CrondReload()
        except:
            pass
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Command-line entry point.
    # BUGFIX: the original checked ``len(sys.argv) < 2`` but then read
    # sys.argv[2], raising IndexError when the timestamp was omitted;
    # both <method> and <timestamp> are required.
    if len(sys.argv) < 3:
        # BUGFIX: the usage line named backup_manager.py (copy/paste);
        # this file is crontab_module.py.
        print("Usage: btpython crontab_module.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]  # name of the CrontabModule method to invoke
    timestamp = sys.argv[2]  # backup/restore task timestamp
    crontab_manager = CrontabModule()
    if hasattr(crontab_manager, method_name):  # verify the method exists
        method = getattr(crontab_manager, method_name)
        method(timestamp)
    else:
        print(f"Error: method '{method_name}' not found")
|
||||
1052
mod/project/backup_restore/modules/database_module.py
Normal file
1052
mod/project/backup_restore/modules/database_module.py
Normal file
File diff suppressed because it is too large
Load Diff
192
mod/project/backup_restore/modules/firewall_module.py
Normal file
192
mod/project/backup_restore/modules/firewall_module.py
Normal file
@@ -0,0 +1,192 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <miku@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
|
||||
from YakPanel import app
|
||||
from mod.project.backup_restore.base_util import BaseUtil
|
||||
from mod.project.backup_restore.config_manager import ConfigManager
|
||||
from firewallModelV2.comModel import main as firewall_com
|
||||
from safeModelV2.firewallModel import main as safe_firewall_main
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class FirewallModule(BaseUtil, ConfigManager):
    """Backup and restore of panel firewall rules (port / IP / forward / region)."""

    def __init__(self):
        super().__init__()
        # Root directory for all backup/restore artifacts.
        self.base_path = '/www/backup/backup_restore'
        # NOTE(review): "bakcup" is a typo, kept because sibling modules use the
        # same attribute name.
        self.bakcup_task_json = self.base_path + '/backup_task.json'

    def backup_firewall_data(self, timestamp):
        """Export all firewall rule sets into <backup>/firewall and record the
        outcome (status 2 = ok, 3 = failed) in the backup task list.

        :param timestamp: identifier of the backup run (used in the dir name).
        """
        with app.app_context():
            # BUGFIX: fetched before the try block so the except handler can
            # always record a failure; the original assigned data_list inside
            # try, so an early exception caused a NameError in the handler.
            data_list = self.get_backup_data_list(timestamp)
            try:
                self.print_log("====================================================", "backup")
                self.print_log(public.lang("Starting backup of firewall data"), "backup")
                backup_path = self.base_path + "/{timestamp}_backup/firewall".format(timestamp=timestamp)
                if not os.path.exists(backup_path):
                    public.ExecShell('mkdir -p {}'.format(backup_path))

                # Each export_rules call writes a JSON file and returns its path
                # in message.result ('' on failure).
                port_data_path = firewall_com().export_rules(
                    public.to_dict_obj({"rule": 'port', 'chain': 'ALL'})
                )['message'].get('result', '')
                ip_data_path = firewall_com().export_rules(
                    public.to_dict_obj({"rule": 'ip', 'chain': 'ALL'})
                )['message'].get('result', '')
                forward_data_path = firewall_com().export_rules(
                    public.to_dict_obj({"rule": 'forward'})
                )['message'].get('result', '')
                country_data_path = safe_firewall_main().export_rules(
                    public.to_dict_obj({'rule_name': 'country_rule'})
                )['message'].get('result', '')

                firewall_info = {
                    "status": 2,
                    "err_msg": None
                }

                for data_path in [
                    port_data_path, ip_data_path, forward_data_path, country_data_path
                ]:
                    # Skip exports that failed (no ".json" in the returned path).
                    if "json" in data_path:
                        public.ExecShell('\cp -rpa {} {}'.format(data_path, backup_path))
                        file_name = data_path.split("/")[-1]
                        if "port_rule" in file_name:
                            self.print_log(public.lang("Firewall port rules ✓"), 'backup')
                            firewall_info["port_data_path"] = backup_path + "/" + file_name
                        elif "ip_rules" in file_name:
                            self.print_log(public.lang("Firewall IP rules ✓"), 'backup')
                            firewall_info["ip_data_path"] = backup_path + "/" + file_name
                        elif "port_forward" in file_name:
                            self.print_log(public.lang("Firewall forwarding rules ✓"), 'backup')
                            firewall_info["forward_data_path"] = backup_path + "/" + file_name
                        elif "country" in file_name:
                            self.print_log(public.lang("Firewall region rules ✓"), 'backup')
                            firewall_info["country_data_path"] = backup_path + "/" + file_name

                # Record the firewall info in the backup task list.
                data_list['data_list']['firewall'] = firewall_info
                self.update_backup_data_list(timestamp, data_list)
            except Exception as e:
                data_list['data_list']['firewall'] = {
                    "status": 3,
                    # BUGFIX: store a serializable message; the original stored
                    # the raw exception object, which json.dumps cannot handle.
                    "err_msg": str(e)
                }
                self.update_backup_data_list(timestamp, data_list)

            self.print_log(public.lang("Firewall data backup completed"), "backup")

    def init_firewall_data(self):
        """Ensure the panel firewall service and iptables chains exist,
        installing/initializing them when missing.

        :return: dict with 'status' and a localized 'msg'.
        """
        self.print_log(public.lang("Initializing firewall data"), "restore")
        if not os.path.exists('/etc/systemd/system/BT-FirewallServices.service'):
            panel_path = public.get_panel_path()
            exec_shell = '('
            if not os.path.exists('/usr/sbin/ipset'):
                exec_shell = exec_shell + '{} install ipset -y;'.format(public.get_sys_install_bin())
            exec_shell = exec_shell + 'sh {panel_path}/script/init_firewall.sh;btpython -u {panel_path}/script/upgrade_firewall.py )'.format(
                panel_path=panel_path
            )
            public.ExecShell(exec_shell)
            return {'status': True, 'msg': public.lang('Installed.')}
        elif public.ExecShell("iptables -C INPUT -j IN_BT")[1] != '':  # iptables chain missing; recreate it
            exec_shell = 'sh {}/script/init_firewall.sh'.format(public.get_panel_path())
            public.ExecShell(exec_shell)
            return {'status': True, 'msg': public.lang('Installed.')}
        else:
            return {'status': True, 'msg': public.lang('Installed.')}

    def restore_firewall_data(self, timestamp):
        """Import firewall rule sets saved by backup_firewall_data, then restart
        the firewall. Records status 2 on success, 3 (+err_msg) on failure.

        :param timestamp: identifier of the backup run to restore from.
        """
        with app.app_context():
            self.print_log("====================================================", "restore")
            self.print_log(public.lang("Starting restoration of firewall data"), "restore")
            self.init_firewall_data()
            restore_data = self.get_restore_data_list(timestamp)
            firewall_data = restore_data['data_list']['firewall']
            port_rule_file = firewall_data.get('port_data_path')
            try:
                if port_rule_file:
                    if os.path.exists(port_rule_file):
                        self.print_log(public.lang("Starting restoration of firewall port rules"), "restore")
                        result = firewall_com().import_rules(public.to_dict_obj({"rule": 'port', 'file': port_rule_file}))
                        if result['status'] == 0:
                            self.print_log(public.lang("Firewall port rules restored successfully ✓"), "restore")
                        else:
                            self.print_log(public.lang("Failed to restore firewall port rules"), "restore")
                ip_rule_file = firewall_data.get('ip_data_path')
                if ip_rule_file:
                    if os.path.exists(ip_rule_file):
                        self.print_log(public.lang("Starting restoration of firewall IP rules"), "restore")
                        result = firewall_com().import_rules(public.to_dict_obj({"rule": 'ip', 'file': ip_rule_file}))
                        if result['status'] == 0:
                            self.print_log(public.lang("Firewall IP rules restored successfully ✓"), "restore")
                        else:
                            self.print_log(public.lang("Failed to restore firewall IP rules"), "restore")

                forward_rule_file = firewall_data.get('forward_data_path')
                if forward_rule_file:
                    if os.path.exists(forward_rule_file):
                        self.print_log(public.lang("Starting restoration of firewall forwarding rules"), "restore")
                        result = firewall_com().import_rules(
                            public.to_dict_obj({"rule": 'forward', 'file': forward_rule_file}))
                        if result['status'] == 0:
                            self.print_log(public.lang("Firewall forwarding rules restored successfully ✓"), "restore")
                        else:
                            self.print_log(public.lang("Failed to restore firewall forwarding rules"), "restore")

                country_rule_file = firewall_data.get('country_data_path')
                if country_rule_file:
                    if os.path.exists(country_rule_file):
                        self.print_log(public.lang("Starting restoration of firewall region rules"), "restore")
                        # The importer expects the file inside the panel's data dir.
                        public.ExecShell('\cp -rpa {} /www/server/panel/data/firewall'.format(country_rule_file))
                        country_rule_file_last_path = country_rule_file.split("/")[-1]
                        result = safe_firewall_main().import_rules(
                            public.to_dict_obj({'rule_name': 'country_rule', 'file_name': country_rule_file_last_path}))
                        if result['status'] == 0:
                            self.print_log(public.lang("Firewall region rules restored successfully ✓"), "restore")
                        else:
                            self.print_log(public.lang("Failed to restore firewall region rules"), "restore")

                # Restart the firewall so imported rules take effect.
                self.print_log(public.lang("Starting firewall restart"), "restore")
                firewall_com().set_status(public.to_dict_obj({'status': 1}))
                self.print_log(public.lang("Firewall restart completed"), "restore")
                restore_data['data_list']['firewall']['status'] = 2
                self.update_restore_data_list(timestamp, restore_data)
            except Exception as e:
                self.print_log(public.lang("Failed to restore firewall data: {}").format(str(e)), "restore")
                restore_data['data_list']['firewall']['status'] = 3
                restore_data['data_list']['firewall']['err_msg'] = str(e)
                self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Command-line entry point.
    # BUGFIX: the original checked ``len(sys.argv) < 2`` but then read
    # sys.argv[2], raising IndexError when the timestamp was omitted;
    # both <method> and <timestamp> are required.
    if len(sys.argv) < 3:
        # BUGFIX: the usage line named backup_manager.py (copy/paste);
        # this file is firewall_module.py.
        print("Usage: btpython firewall_module.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]  # name of the FirewallModule method to invoke
    timestamp = sys.argv[2]  # backup/restore task timestamp
    firewall_manager = FirewallModule()
    if hasattr(firewall_manager, method_name):  # verify the method exists
        method = getattr(firewall_manager, method_name)
        method(timestamp)
    else:
        print(f"Error: method '{method_name}' not found")
|
||||
138
mod/project/backup_restore/modules/ftp_module.py
Normal file
138
mod/project/backup_restore/modules/ftp_module.py
Normal file
@@ -0,0 +1,138 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <miku@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import os.path
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from YakPanel import app
|
||||
import ftp_v2 as ftp_client
|
||||
from mod.project.backup_restore.data_manager import DataManager
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class FtpModule(DataManager):
    """Backup and restore of panel FTP accounts."""

    def __init__(self):
        super().__init__()
        # Root directory for all backup/restore artifacts.
        self.base_path = '/www/backup/backup_restore'
        # NOTE(review): "bakcup" is a typo, kept because sibling modules use the
        # same attribute name.
        self.bakcup_task_json = self.base_path + '/backup_task.json'

    def backup_ftp_data(self, timestamp=None):
        """Collect every FTP account row from the panel DB, annotating each
        with its related site name and a per-account backup status.

        :param timestamp: unused; kept for the common backup-method signature.
        :return: list of account dicts with 'related_site'/'status'/'msg' added
                 (status 2 = ok, 3 = failed).
        """
        self.print_log("==================================", "backup")
        self.print_log(public.lang("Start backing up FTP account information"), "backup")
        ftp_data = public.M('ftps').field('name,pid,id,password,path,ps').select()
        filtered_ftp = []
        for ftp in ftp_data:
            try:
                # pid == 0 means the account is not bound to any site.
                if ftp.get('pid', 0) == 0:
                    ftp['related_site'] = ''
                else:
                    ftp['related_site'] = self._get_current_site_name_by_pid(ftp.get('pid'))

                ftp['status'] = 2
                ftp['msg'] = None
                filtered_ftp.append(ftp)
                self.print_log(public.lang("{} user ✓").format(ftp['name']), "backup")
            except Exception as e:
                # Record the failure on the row but keep backing up the rest.
                self.print_log(public.lang("Failed to backup FTP account information: {}").format(str(e)), "backup")
                ftp['status'] = 3
                ftp['msg'] = str(e)
                filtered_ftp.append(ftp)
                continue

        self.print_log(public.lang("FTP account information backup completed"), "backup")
        return filtered_ftp

    def _add_ftp_user(self, ftp_client: ftp_client, ftp_data: dict) -> int:
        """Create one FTP account from a backed-up record.

        NOTE(review): the parameter shadows the module-level ``ftp_client``
        import with the same object; callers pass the module itself.

        :param ftp_client: the ftp_v2 module providing ftp().AddUser.
        :param ftp_data: backed-up account record (name/path/password/ps).
        :return: ftp_data restore_status (2 = ok, 3 = failed).
        """
        log_str = public.lang("Restoring {} account").format(ftp_data['name'])
        args = public.dict_obj()
        args.ftp_username = ftp_data['name']
        args.path = ftp_data['path']
        args.ftp_password = ftp_data['password']
        args.ps = ftp_data['ps']
        # Re-link to the site by directory name; the original pid is from the
        # source machine and is not valid here.
        args.pid = self._get_current_pid_by_site_name(
            os.path.basename(ftp_data.get('path', ''))
        )
        res = ftp_client.ftp().AddUser(args)
        if res['status'] is False:
            self.replace_log(log_str, public.lang("FTP creation failed: {}").format(res.get('message', 'create fail')),
                             "restore")
            return 3
        else:
            new_log_str = public.lang("{} account ✓").format(ftp_data['name'])
            self.replace_log(log_str, new_log_str, "restore")
            return 2

    def restore_ftp_data(self, timestamp=None):
        """Restore FTP accounts saved by backup_ftp_data.

        Existing accounts are kept unless the run is in overwrite mode, in
        which case they are deleted and re-created from the backup.

        :param timestamp: identifier of the backup run to restore from.
        """
        self.print_log("====================================================", "restore")
        self.print_log(public.lang("Start restoring FTP account configuration"), "restore")
        restore_data = self.get_restore_data_list(timestamp)
        with app.app_context():
            for ftp_data in restore_data['data_list']['ftp']:
                try:
                    if_exist = public.M('ftps').where('name=?', (ftp_data["name"],)).find()
                    log_str = public.lang("Restoring {} account").format(ftp_data['name'])
                    if if_exist:
                        self.print_log(log_str, "restore")
                        if not self.overwrite:
                            # Keep the existing account untouched.
                            self.replace_log(log_str, public.lang("{} account ✓").format(if_exist.get('name', 'ftp')),
                                             "restore")
                            continue
                        else:
                            # Overwrite mode: drop the current account first.
                            ftp_client.ftp().DeleteUser(public.to_dict_obj(
                                {"id": if_exist['id'], "username": if_exist['name']}
                            ))
                            # Give the FTP service a moment to process the delete.
                            time.sleep(0.5)
                            ftp_data['restore_status'] = self._add_ftp_user(ftp_client, ftp_data)

                    else:
                        ftp_data['restore_status'] = self._add_ftp_user(ftp_client, ftp_data)
                        self.replace_log(
                            log_str,
                            public.lang("ftp: [{}] account restored successfully ✓").format(
                                ftp_data.get('name', 'ftp')),
                            "restore"
                        )

                except Exception as e:
                    import traceback
                    public.print_log(traceback.format_exc())
                    self.print_log(public.lang("Failed to restore FTP account configuration: {}").format(str(e)),
                                   "restore")

        # Persist per-account restore_status values written above.
        self.update_restore_data_list(timestamp, restore_data)

        self.print_log(public.lang("FTP account configuration restoration completed"), "restore")
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Command-line entry point.
    if len(sys.argv) < 3:
        # BUGFIX: the usage line named backup_manager.py (copy/paste);
        # this file is ftp_module.py.
        print("Usage: btpython ftp_module.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]  # name of the FtpModule method to invoke
    timestamp = sys.argv[2]  # backup/restore task timestamp (original comment wrongly said "IP address")
    ftp_module = FtpModule()
    if hasattr(ftp_module, method_name):  # verify the method exists
        method = getattr(ftp_module, method_name)
        method(timestamp)
    else:
        print(f"Error: Method '{method_name}' not found")
|
||||
114
mod/project/backup_restore/modules/mail_module.py
Normal file
114
mod/project/backup_restore/modules/mail_module.py
Normal file
@@ -0,0 +1,114 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <miku@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from mod.project.backup_restore.base_util import BaseUtil
|
||||
from mod.project.backup_restore.config_manager import ConfigManager
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class MailModule(BaseUtil, ConfigManager):
    """Backup and restore of the mail server's /www/vmail data directory."""

    def __init__(self):
        super().__init__()
        # Root directory for all backup/restore artifacts.
        self.base_path = '/www/backup/backup_restore'
        # NOTE(review): "bakcup" is a typo, kept because sibling modules use the
        # same attribute name.
        self.bakcup_task_json = self.base_path + '/backup_task.json'
        # Mail server data directory (dovecot/postfix virtual mailboxes).
        self.vmail_data_path = '/www/vmail'

    def backup_vmail_data(self, timestamp):
        """Tar up /www/vmail into <backup>/vmail and record size/status in the
        backup task list.

        :param timestamp: identifier of the backup run (used in names).
        :return: False when /www/vmail does not exist, None when the backup
                 task list is missing, otherwise no return value.
        """
        if not os.path.exists(self.vmail_data_path):
            return False

        data_list = self.get_backup_data_list(timestamp)
        if not data_list:
            return None

        backup_path = self.base_path + "/{timestamp}_backup/vmail".format(timestamp=timestamp)
        if not os.path.exists(backup_path):
            public.ExecShell('mkdir -p {}'.format(backup_path))

        self.print_log("====================================================", "backup")
        self.print_log(public.lang("Start backing up mail server data"), 'backup')

        # Status 1 = in progress; persisted up front so a crash is visible.
        mail_info = {
            'status': 1,
            'msg': None,
            'vmail_backup_name': None,
        }

        data_list['data_list']['vmail'] = mail_info
        self.update_backup_data_list(timestamp, data_list)

        vmail_backup_name = "vmail_{timestamp}.tar.gz".format(timestamp=timestamp)
        # Archive relative to /www so the tar contains a top-level "vmail" dir.
        public.ExecShell("cd /www && tar -czvf {} vmail".format(vmail_backup_name))
        public.ExecShell("mv /www/{} {}".format(vmail_backup_name, backup_path))
        mail_info['vmail_backup_name'] = backup_path + "/" + vmail_backup_name
        mail_info['status'] = 2
        mail_info['msg'] = None
        mail_info['size'] = self.get_file_size(backup_path + "/" + vmail_backup_name)
        data_list['data_list']['vmail'] = mail_info
        self.update_backup_data_list(timestamp, data_list)
        backup_size = self.format_size(self.get_file_size(backup_path + "/" + vmail_backup_name))
        self.print_log(public.lang("Mail server data backup completed. Data size: {}").format(backup_size), 'backup')

    def restore_vmail_data(self, timestamp):
        """Restore /www/vmail from the archive made by backup_vmail_data.

        The current /www/vmail (if any) is moved aside to /www/vmail.bak
        before extraction.

        :param timestamp: identifier of the backup run to restore from.
        """
        restore_data = self.get_restore_data_list(timestamp)
        if not restore_data:
            return None

        vmail_data = restore_data['data_list']['vmail']
        if not vmail_data:
            return None

        self.print_log("==================================", "restore")
        self.print_log(public.lang("Start restoring mail server data"), "restore")

        restore_data['data_list']['vmail']['restore_status'] = 1
        self.update_restore_data_list(timestamp, restore_data)

        archive_path = vmail_data['vmail_backup_name']
        if not os.path.exists(archive_path):
            self.print_log(public.lang("Restoration failed, file does not exist"), "restore")
            # BUGFIX: mark the restore as failed (3); the original left it
            # stuck at 1 (in progress) forever.
            restore_data['data_list']['vmail']['restore_status'] = 3
            self.update_restore_data_list(timestamp, restore_data)
            return

        if os.path.exists(self.vmail_data_path):
            public.ExecShell("mv {} {}.bak".format(self.vmail_data_path, self.vmail_data_path))

        # BUGFIX: the original copied the tarball to /www/vmail (the basename
        # of the *data dir*) and then ran "tar -xzvf vmail", i.e. it clobbered
        # the restore target with the archive itself and extracted the wrong
        # name. Copy the archive under its own name and extract that file.
        archive_name = os.path.basename(archive_path)
        public.ExecShell("\cp -rpa {} /www/{}".format(archive_path, archive_name))
        public.ExecShell("cd /www && tar -xzvf {}".format(archive_name))

        restore_data['data_list']['vmail']['restore_status'] = 2
        self.update_restore_data_list(timestamp, restore_data)

        self.print_log(public.lang("Mail server data restoration completed"), "restore")
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Command-line entry point.
    if len(sys.argv) < 3:
        # BUGFIX: the usage line named backup_manager.py (copy/paste);
        # this file is mail_module.py.
        print("Usage: btpython mail_module.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]  # name of the MailModule method to invoke
    timestamp = sys.argv[2]  # backup/restore task timestamp (original comment wrongly said "IP address")
    mail_module = MailModule()
    if hasattr(mail_module, method_name):  # verify the method exists
        method = getattr(mail_module, method_name)
        method(timestamp)
    else:
        print(f"Error: Method '{method_name}' not found")
|
||||
541
mod/project/backup_restore/modules/plugin_module.py
Normal file
541
mod/project/backup_restore/modules/plugin_module.py
Normal file
@@ -0,0 +1,541 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <miku@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import json
|
||||
import os
|
||||
import subprocess
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from YakPanel import app
|
||||
from mod.project.backup_restore.base_util import BaseUtil
|
||||
from mod.project.backup_restore.config_manager import ConfigManager
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
def get_plugin_object():
    """Return a panelPlugin instance preloaded with the full plugin list.

    Imports are deferred to call time so this module can be loaded without
    the plugin subsystem being available.
    """
    import PluginLoader
    from panel_plugin_v2 import panelPlugin

    plugin_obj = panelPlugin()
    raw_list = PluginLoader.get_plugin_list(0)
    # Seed the private coexist list the same way the panel does internally.
    # noinspection PyTypeChecker
    setattr(plugin_obj, "_panelPlugin__plugin_s_list", panelPlugin.set_coexist(None, raw_list["list"]))
    return plugin_obj
|
||||
|
||||
|
||||
class PluginModule(BaseUtil, ConfigManager):
|
||||
def __init__(self):
    super().__init__()
    # Root directory for all backup/restore artifacts.
    self.base_path = '/www/backup/backup_restore'
    # NOTE(review): "bakcup" is a typo, kept because sibling modules use the
    # same attribute name.
    self.bakcup_task_json = self.base_path + '/backup_task.json'
    # Panel plugin installation directory.
    self.plugin_path = '/www/server/panel/plugin'
    # Internal flag; its consumers are not visible in this excerpt.
    self._safe_flag = False
|
||||
|
||||
def backup_plugin_data(self, timestamp):
    """Back up data of installed security plugins into <backup>/plugin.

    Checks which of btwaf / monitor / tamper_core / syssafe are installed
    and delegates to the matching per-plugin backup method, then records
    the collected per-plugin results in the backup task list.

    :param timestamp: identifier of the backup run (used in the dir name).
    """
    self.print_log("====================================================", "backup")
    self.print_log(public.lang("Start backing up plugin data"), "backup")

    backup_path = self.base_path + "/{timestamp}_backup/plugin".format(timestamp=timestamp)
    if not os.path.exists(backup_path):
        public.ExecShell('mkdir -p {}'.format(backup_path))

    plugin_info = {}
    # Presence of the plugin directory is the install check.
    btwaf_path = os.path.join(self.plugin_path, "btwaf")
    monitor_path = os.path.join(self.plugin_path, "monitor")
    tamper_core_path = os.path.join(self.plugin_path, "tamper_core")
    syssafe_path = os.path.join(self.plugin_path, "syssafe")

    if os.path.exists(btwaf_path):
        result = self.backup_btwaf_data(timestamp)
        plugin_info['btwaf'] = result
    if os.path.exists(monitor_path):
        result = self.backup_monitor_data(timestamp)
        plugin_info['monitor'] = result
    if os.path.exists(tamper_core_path):
        result = self.backup_tamper_core_data(timestamp)
        plugin_info['tamper_core'] = result
    if os.path.exists(syssafe_path):
        result = self.backup_syssafe_data(timestamp)
        plugin_info['syssafe'] = result
    data_list = self.get_backup_data_list(timestamp)
    data_list['data_list']['plugin'] = plugin_info
    self.update_backup_data_list(timestamp, data_list)
|
||||
|
||||
def backup_btwaf_data(self, timestamp):
    """Back up Nginx firewall (btwaf) configuration and rule sets.

    Copies /www/server/btwaf config.json, site.json and the rule directory
    into <backup>/plugin/btwaf.

    :param timestamp: identifier of the backup run (used in the dir name).
    :return: result dict (status 2, err_msg, version, size) on success, or
             None when the plugin info cannot be read.
    """
    backup_path = self.base_path + "/{timestamp}_backup/plugin/btwaf".format(timestamp=timestamp)
    if not os.path.exists(backup_path):
        public.ExecShell('mkdir -p {}'.format(backup_path))

    try:
        btwaf_info_json = json.loads(public.ReadFile(os.path.join(self.plugin_path, "btwaf", "info.json")))
        result = {
            "status": 2,
            "err_msg": None,
            "version": btwaf_info_json['versions'],
            "size": self.get_file_size(os.path.join(self.plugin_path, "btwaf"))
        }

        public.ExecShell(
            "\cp -rpa /www/server/btwaf/config.json {backup_path}/config.json".format(backup_path=backup_path))
        public.ExecShell(
            "\cp -rpa /www/server/btwaf/site.json {backup_path}/site.json".format(backup_path=backup_path))
        public.ExecShell("\cp -rpa /www/server/btwaf/rule {backup_path}/rule".format(backup_path=backup_path))
        backup_size = self.format_size(self.get_file_size(backup_path))
        self.print_log(public.lang("Nginx firewall ✓ ({})").format(backup_size), 'backup')
        return result
    except Exception:
        # BUGFIX: was a bare ``except:``, which also swallowed SystemExit and
        # KeyboardInterrupt; best-effort behavior (return None) is kept.
        return None
|
||||
|
||||
def backup_monitor_data(self, timestamp):
|
||||
backup_path = self.base_path + "/{timestamp}_backup/plugin/monitor".format(timestamp=timestamp)
|
||||
if not os.path.exists(backup_path):
|
||||
public.ExecShell('mkdir -p {}'.format(backup_path))
|
||||
|
||||
try:
|
||||
monitor_info_json = json.loads(public.ReadFile(os.path.join(self.plugin_path, "monitor", "info.json")))
|
||||
result = {
|
||||
"status": 2,
|
||||
"err_msg": None,
|
||||
"version": monitor_info_json['versions'],
|
||||
"size": self.get_file_size(os.path.join(self.plugin_path, "monitor"))
|
||||
}
|
||||
if os.path.exists("/www/server/panel/plugin/monitor/site_robots.json"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/monitor/site_robots.json {backup_path}/site_robots.json".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
if os.path.exists("/www/server/panel/plugin/monitor/site_sitemap_info.json"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/monitor/site_sitemap_info.json {backup_path}/site_sitemap_info.json".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
if os.path.exists("/www/server/panel/plugin/monitor/spider_api.config"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/monitor/spider_api.config {backup_path}/spider_api.config".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
if os.path.exists("/www/server/panel/plugin/monitor/baidu_user.config"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/monitor/baidu_user.config {backup_path}/baidu_user.config".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
if os.path.exists("/www/server/panel/plugin/monitor/360_user.config"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/monitor/360_user.config {backup_path}/360_user.config".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/monitor/config {backup_path}/config".format(backup_path=backup_path))
|
||||
backup_size = self.format_size(self.get_file_size(backup_path))
|
||||
self.print_log(public.lang("Website monitoring report ✓ ({})").format(backup_size), 'backup')
|
||||
return result
|
||||
except:
|
||||
return None
|
||||
|
||||
def backup_tamper_core_data(self, timestamp):
|
||||
backup_path = self.base_path + "/{timestamp}_backup/plugin/tamper_core".format(timestamp=timestamp)
|
||||
if not os.path.exists(backup_path):
|
||||
public.ExecShell('mkdir -p {}'.format(backup_path))
|
||||
|
||||
try:
|
||||
tamper_core_info_json = json.loads(
|
||||
public.ReadFile(os.path.join(self.plugin_path, "tamper_core", "info.json")))
|
||||
result = {
|
||||
"status": 2,
|
||||
"err_msg": None,
|
||||
"version": tamper_core_info_json['versions'],
|
||||
"size": self.get_file_size(os.path.join(self.plugin_path, "tamper_core"))
|
||||
}
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/tamper_core/tamper_push_template.json {backup_path}/tamper_push_template.json".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/tamper_core/rule.json {backup_path}/rule.json".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/tamper/config_ps.json {backup_path}/config_ps.json".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/tamper/tamper.conf {backup_path}/tamper.conf".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
backup_size = self.format_size(self.get_file_size(backup_path))
|
||||
self.print_log(public.lang("Enterprise tamper protection ✓ ({})").format(backup_size), 'backup')
|
||||
return result
|
||||
except:
|
||||
return None
|
||||
|
||||
def backup_syssafe_data(self, timestamp):
|
||||
backup_path = self.base_path + "/{timestamp}_backup/plugin/syssafe".format(timestamp=timestamp)
|
||||
if not os.path.exists(backup_path):
|
||||
public.ExecShell('mkdir -p {}'.format(backup_path))
|
||||
|
||||
try:
|
||||
syssafe_info_json = json.loads(public.ReadFile(os.path.join(self.plugin_path, "syssafe", "info.json")))
|
||||
result = {
|
||||
"status": 2,
|
||||
"err_msg": None,
|
||||
"version": syssafe_info_json['versions'],
|
||||
"size": self.get_file_size(os.path.join(self.plugin_path, "syssafe"))
|
||||
}
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/syssafe/config.json {backup_path}/config.json".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/syssafe/sys_process.json {backup_path}/sys_process.json".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/plugin/syssafe/config {backup_path}/".format(
|
||||
backup_path=backup_path
|
||||
)
|
||||
)
|
||||
backup_size = self.format_size(self.get_file_size(backup_path))
|
||||
self.print_log(public.lang("System hardening ✓ ({})").format(backup_size), 'backup')
|
||||
return result
|
||||
except:
|
||||
return None
|
||||
|
||||
def _sys_safe_pids(self) -> list:
|
||||
try:
|
||||
cmd = """ps aux |grep -E "(bt_syssafe|syssafe_main|syssafe_pub)"|
|
||||
grep -v grep|grep -v systemctl|grep -v "init.d"|awk '{print $2}'|xargs"""
|
||||
sysafe_output = subprocess.check_output(cmd, shell=True, text=True)
|
||||
pids = sysafe_output.strip().split()
|
||||
return pids if pids else []
|
||||
except Exception:
|
||||
return []
|
||||
|
||||
def restore_plugin_data(self, timestamp):
|
||||
""""
|
||||
恢复插件数据
|
||||
"""
|
||||
self.print_log("====================================================", "restore")
|
||||
self.print_log(public.lang("Start restoring plugin data"), "restore")
|
||||
self.print_log(public.lang("If the migrated machine is not bound to a yakpanel user and upgraded to Professional version, plugin restoration failures may occur"), "restore")
|
||||
|
||||
restore_data = self.get_restore_data_list(timestamp)
|
||||
plugin_info = restore_data['data_list']['plugin']
|
||||
# ============================= start =====================================
|
||||
self._safe_flag = True if self._sys_safe_pids() else False
|
||||
try:
|
||||
public.ExecShell("/etc/init.d/bt_syssafe stop")
|
||||
except:
|
||||
pass
|
||||
|
||||
# ============================== btwaf =====================================
|
||||
if 'btwaf' in plugin_info and plugin_info['btwaf']:
|
||||
log_str = public.lang("Start installing Nginx firewall")
|
||||
self.print_log(log_str, "restore")
|
||||
restore_data['data_list']['plugin']['btwaf']['restore_status'] = 1
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
plugin_version = plugin_info['btwaf']['version']
|
||||
# 检查btwaf目录下是否有正在运行的进程
|
||||
self.before_install_plugin('btwaf')
|
||||
install_result = self.install_plugin('btwaf', plugin_version)
|
||||
if install_result['status'] is True:
|
||||
new_log_str = public.lang("Nginx firewall ✓")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
log_str = public.lang("Start restoring Nginx firewall data")
|
||||
self.print_log(log_str, "restore")
|
||||
self.restore_btwaf_data(timestamp)
|
||||
restore_data['data_list']['plugin']['btwaf']['restore_status'] = 2
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
new_log_str = public.lang("Nginx firewall data ✓")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
else:
|
||||
restore_data['data_list']['plugin']['btwaf']['restore_status'] = 3
|
||||
restore_data['data_list']['plugin']['btwaf']['err_msg'] = install_result['msg']
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
new_log_str = public.lang("Nginx firewall ✗")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
|
||||
# ====================================== monitor ==============================================
|
||||
if 'monitor' in plugin_info and plugin_info['monitor']:
|
||||
log_str = public.lang("Start installing website monitoring report")
|
||||
self.print_log(log_str, "restore")
|
||||
restore_data['data_list']['plugin']['monitor']['restore_status'] = 1
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
plugin_version = plugin_info['monitor']['version']
|
||||
self.before_install_plugin('monitor')
|
||||
install_result = self.install_plugin('monitor', plugin_version)
|
||||
if install_result['status'] is True:
|
||||
new_log_str = public.lang("Website monitoring report ✓")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
log_str = public.lang("Start restoring website monitoring report data")
|
||||
self.print_log(log_str, "restore")
|
||||
self.restore_monitor_data(timestamp)
|
||||
restore_data['data_list']['plugin']['monitor']['restore_status'] = 2
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
new_log_str = public.lang("Website monitoring report data ✓")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
else:
|
||||
restore_data['data_list']['plugin']['monitor']['restore_status'] = 3
|
||||
restore_data['data_list']['plugin']['monitor']['err_msg'] = install_result['msg']
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
new_log_str = public.lang("Website monitoring report ✗")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
# ========================= tamper_core ======================================
|
||||
if 'tamper_core' in plugin_info and plugin_info['tamper_core']:
|
||||
log_str = public.lang("Start installing enterprise tamper protection")
|
||||
self.print_log(log_str, "restore")
|
||||
restore_data['data_list']['plugin']['tamper_core']['restore_status'] = 1
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
plugin_version = plugin_info['tamper_core']['version']
|
||||
self.before_install_plugin('tamper_core')
|
||||
install_result = self.install_plugin('tamper_core', plugin_version)
|
||||
if install_result['status'] is True:
|
||||
public.ExecShell("/etc/init.d/bt-tamper stop")
|
||||
new_log_str = public.lang("Enterprise tamper protection ✓")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
log_str = public.lang("Start restoring enterprise tamper protection data")
|
||||
self.print_log(log_str, "restore")
|
||||
self.restore_tamper_core_data(timestamp)
|
||||
restore_data['data_list']['plugin']['tamper_core']['restore_status'] = 2
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
new_log_str = public.lang("Enterprise tamper protection data ✓")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
else:
|
||||
restore_data['data_list']['plugin']['tamper_core']['restore_status'] = 3
|
||||
restore_data['data_list']['plugin']['tamper_core']['err_msg'] = install_result['msg']
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
new_log_str = public.lang("Enterprise tamper protection ✗")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
|
||||
# ========================= syssafe ===========================================
|
||||
if 'syssafe' in plugin_info and plugin_info['syssafe']:
|
||||
log_str = public.lang("Start installing system hardening")
|
||||
self.print_log(log_str, "restore")
|
||||
restore_data['data_list']['plugin']['syssafe']['restore_status'] = 1
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
plugin_version = plugin_info['syssafe']['version']
|
||||
self.before_install_plugin('syssafe')
|
||||
install_result = self.install_plugin('syssafe', plugin_version)
|
||||
if install_result['status'] is True:
|
||||
public.ExecShell("/etc/init.d/bt_syssafe stop")
|
||||
new_log_str = public.lang("System hardening ✓")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
log_str = public.lang("Start restoring system hardening data")
|
||||
self.print_log(log_str, "restore")
|
||||
self.restore_syssafe_data(timestamp)
|
||||
restore_data['data_list']['plugin']['syssafe']['restore_status'] = 2
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
new_log_str = public.lang("System hardening data ✓")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
else:
|
||||
restore_data['data_list']['plugin']['syssafe']['restore_status'] = 3
|
||||
restore_data['data_list']['plugin']['syssafe']['err_msg'] = install_result['msg']
|
||||
self.update_restore_data_list(timestamp, restore_data)
|
||||
new_log_str = public.lang("System hardening ✗")
|
||||
self.replace_log(log_str, new_log_str, "restore")
|
||||
|
||||
# ============================= end =====================================
|
||||
if self._safe_flag is True:
|
||||
try:
|
||||
if os.path.exists("/www/server/panel/plugin/syssafe/syssafe_main.py"):
|
||||
from plugin.syssafe.syssafe_main import syssafe_main as safe_main
|
||||
res = safe_main().set_open(None)
|
||||
public.print_log("Syssafe set_open result: {}".format(res))
|
||||
except Exception as e:
|
||||
public.print_log("Failed to start syssafe: {}".format(str(e)))
|
||||
|
||||
self.print_log(public.lang("Plugin data restoration complete"), "restore")
|
||||
|
||||
def restore_btwaf_data(self, timestamp):
|
||||
restore_path = self.base_path + "/{timestamp}_backup/plugin/btwaf".format(timestamp=timestamp)
|
||||
plugin_path = "/www/server/btwaf"
|
||||
if os.path.exists(restore_path) and os.path.exists(plugin_path):
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/config.json /www/server/btwaf/config.json".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/site.json /www/server/btwaf/site.json".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/rule/* /www/server/btwaf/rule".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
|
||||
def restore_monitor_data(self, timestamp):
|
||||
restore_path = self.base_path + "/{timestamp}_backup/plugin/monitor".format(timestamp=timestamp)
|
||||
plugin_path = "/www/server/panel/plugin/monitor/"
|
||||
if os.path.exists(restore_path) and os.path.exists(plugin_path):
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/site_robots.json /www/server/panel/plugin/monitor/site_robots.json".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/site_sitemap_info.json /www/server/panel/plugin/monitor/site_sitemap_info.json".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/spider_api.config /www/server/panel/plugin/monitor/spider_api.config".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/baidu_user.config /www/server/panel/plugin/monitor/baidu_user.config".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/360_user.config /www/server/panel/plugin/monitor/360_user.config".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/config/* /www/server/monitor/config".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
|
||||
def restore_tamper_core_data(self, timestamp):
|
||||
restore_path = self.base_path + "/{timestamp}_backup/plugin/tamper_core".format(timestamp=timestamp)
|
||||
plugin_path = "/www/server/panel/plugin/tamper_core/"
|
||||
if os.path.exists(restore_path) and os.path.exists(plugin_path):
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/tamper_push_template.json /www/server/panel/plugin/tamper_core/tamper_push_template.json".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/rule.json /www/server/panel/plugin/tamper_core/rule.json".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/config_ps.json /www/server/tamper/config_ps.json".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/tamper.conf /www/server/tamper/tamper.conf".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell("/etc/init.d/bt-tamper stop")
|
||||
|
||||
def restore_syssafe_data(self, timestamp):
|
||||
restore_path = self.base_path + "/{timestamp}_backup/plugin/syssafe".format(timestamp=timestamp)
|
||||
plugin_path = "/www/server/panel/plugin/syssafe/"
|
||||
if os.path.exists(restore_path) and os.path.exists(plugin_path):
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/config.json /www/server/panel/plugin/syssafe/config.json".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/sys_process.json /www/server/panel/plugin/syssafe/sys_process.json".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell(
|
||||
"\cp -rpa {restore_path}/config/* /www/server/panel/plugin/syssafe/config".format(
|
||||
restore_path=restore_path
|
||||
)
|
||||
)
|
||||
public.ExecShell("/etc/init.d/bt_syssafe stop")
|
||||
|
||||
def before_install_plugin(self, plugin_name: str):
|
||||
try:
|
||||
if plugin_name == "btwaf":
|
||||
cmd = "ps aux | grep 'BT-WAF' | grep -v grep | awk '{print $2}'"
|
||||
cmd_output = subprocess.check_output(cmd, shell=True, text=True)
|
||||
pids1 = cmd_output.strip()
|
||||
if pids1:
|
||||
subprocess.run(["kill", "-9", str(pids1)], check=True)
|
||||
xss_path = "/www/server/panel/plugin/btwaf/nginx_btwaf_xss"
|
||||
lsof_output = subprocess.check_output(f"lsof -t {xss_path}", shell=True, text=True)
|
||||
pids2 = lsof_output.strip().split()
|
||||
for p2 in pids2:
|
||||
subprocess.run(["kill", "-9", str(p2)], check=True)
|
||||
time.sleep(1)
|
||||
elif plugin_name == "syssafe":
|
||||
public.ExecShell("/etc/init.d/bt_syssafe stop")
|
||||
time.sleep(1)
|
||||
elif plugin_name == "monitor":
|
||||
pass
|
||||
elif plugin_name == "tamper_core":
|
||||
pass
|
||||
time.sleep(1)
|
||||
except:
|
||||
pass
|
||||
|
||||
def install_plugin(self, sName, plugin_version):
|
||||
with app.app_context():
|
||||
try:
|
||||
plugin = get_plugin_object()
|
||||
version_parts = plugin_version.split(".", 1)
|
||||
sVersion = version_parts[0]
|
||||
sMin_version = version_parts[1] if len(version_parts) > 1 else ""
|
||||
get = public.dict_obj()
|
||||
get.sName = sName
|
||||
get.version = sVersion
|
||||
get.min_version = sMin_version
|
||||
info = plugin.install_plugin(get)["message"]
|
||||
args = public.dict_obj()
|
||||
args.tmp_path = info.get("tmp_path")
|
||||
args.plugin_name = sName
|
||||
args.install_opt = info.get("install_opt")
|
||||
info = plugin.input_package(args)
|
||||
return info
|
||||
except Exception:
|
||||
import traceback
|
||||
print(traceback.format_exc())
|
||||
return {
|
||||
'status': False,
|
||||
'msg': public.lang('Installation failed')
|
||||
}
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: btpython backup_manager.py <method> <timestamp>
    # BUGFIX: both <method> and <timestamp> are required, so argv must have
    # at least 3 entries — the old `< 2` check crashed with IndexError when
    # only the method name was supplied.
    if len(sys.argv) < 3:
        print("Usage: btpython backup_manager.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]   # name of the PluginModule method to run
    timestamp = sys.argv[2]     # backup-set identifier
    plugin_manager = PluginModule()
    if hasattr(plugin_manager, method_name):
        # Dispatch dynamically so new backup/restore methods need no CLI changes.
        getattr(plugin_manager, method_name)(timestamp)
    else:
        print(f"Error: Method '{method_name}' not found")
|
||||
911
mod/project/backup_restore/modules/site_module.py
Normal file
911
mod/project/backup_restore/modules/site_module.py
Normal file
@@ -0,0 +1,911 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <miku@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from public.hook_import import hook_import
|
||||
|
||||
hook_import()
|
||||
from wp_toolkit import wpbackup
|
||||
import db
|
||||
from YakPanel import app
|
||||
import panel_site_v2 as panelSite
|
||||
from mod.project.backup_restore.data_manager import DataManager
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class SiteModule(DataManager):
|
||||
    def __init__(self):
        """Initialise site backup/restore paths on top of DataManager."""
        super().__init__()
        # Root directory holding all backup/restore artefacts.
        self.base_path = '/www/backup/backup_restore'
        # NOTE(review): "bakcup" is a typo, but the attribute name is part of
        # this object's public surface — renaming could break callers.
        self.bakcup_task_json = self.base_path + '/backup_task.json'
        # Panel data files captured alongside the site content.
        self.site_dir_auth_path = "/www/server/panel/data/site_dir_auth.json"
        self.redirect_conf_path = "/www/server/panel/data/redirect.conf"
        self.proxy_conf_path = "/www/server/panel/data/proxyfile.json"
||||
|
||||
@staticmethod
|
||||
def copy_directory(src: str, dst: str, overwrite: bool = False) -> None:
|
||||
"""
|
||||
site 复制文件夹
|
||||
src: 源路径
|
||||
dst: 目标路径
|
||||
overwrite: 是否覆盖
|
||||
"""
|
||||
if not src or not dst:
|
||||
return
|
||||
if src == dst:
|
||||
return
|
||||
if isinstance(overwrite, int):
|
||||
overwrite = False if overwrite == 0 else True
|
||||
|
||||
def _copy2(src_file: str, dst_file: str):
|
||||
if overwrite or not os.path.exists(dst_file):
|
||||
try:
|
||||
shutil.copy2(src_file, dst_file)
|
||||
except:
|
||||
try:
|
||||
public.ExecShell("chattr -i {}".format(dst_file))
|
||||
shutil.copy2(src_file, dst_file)
|
||||
except:
|
||||
pass
|
||||
|
||||
# 如果源路径不存在,直接返回
|
||||
if not os.path.exists(src):
|
||||
return
|
||||
|
||||
# 确保目标目录存在
|
||||
if not os.path.exists(dst):
|
||||
os.makedirs(dst, 0o755, exist_ok=True)
|
||||
|
||||
# 复制源目录下的所有内容到目标目录
|
||||
for item in os.listdir(src):
|
||||
src_item = os.path.join(src, item)
|
||||
dst_item = os.path.join(dst, item)
|
||||
|
||||
if os.path.isdir(src_item):
|
||||
# 递归调用自身来复制子目录内容
|
||||
SiteModule.copy_directory(src_item, dst_item, overwrite)
|
||||
else:
|
||||
# 复制文件
|
||||
_copy2(src_item, dst_item)
|
||||
|
||||
    @staticmethod
    def _db_cp(source_db: str, target_db: str, tables: list = None) -> None:
        """
        Copy tables from one sqlite database file into another via the
        sqlite3 CLI (dump/replay through a shell pipeline).

        source_db: path of the source sqlite file
        target_db: path of the target sqlite file
        tables: table names to copy; None/empty copies the whole database
        """
        if not source_db:
            return
        # Quote the paths once so they survive every shell command below.
        source_db = f"'{source_db}'"
        target_db = f"'{target_db}'"
        if not tables:
            # Whole-database copy: dump everything and replay it.
            public.ExecShell(f"sqlite3 {source_db} .dump | sqlite3 {target_db}")
            return

        # Drop the tables in the target first so the dump can recreate them.
        for table in tables:
            public.ExecShell(f"sqlite3 {target_db} 'DROP TABLE IF EXISTS {table};'")

        # Dump only the selected tables from the source ...
        tables_for_dump = " ".join(tables)
        tables_cmd = f"sqlite3 {source_db} '.dump {tables_for_dump}'"

        # ... plus the triggers attached to those tables ...
        tables_for_sql_in = ", ".join([f"'{t}'" for t in tables])
        triggers_cmd = f"""sqlite3 {source_db} \"
        SELECT sql || ';' FROM sqlite_master WHERE type='trigger' AND tbl_name IN ({tables_for_sql_in}) AND sql IS NOT NULL;\"
        """
        # ... and replay both into the target in a single pipeline.
        res_cmd = f"({tables_cmd}; {triggers_cmd}) | sqlite3 {target_db}"
        public.ExecShell(res_cmd)

        # NOTE(review): this final step dumps the target into itself; most
        # statements would fail with "already exists" and its purpose is
        # unclear from this code alone — confirm whether it can be removed.
        public.ExecShell(f"sqlite3 {target_db} '.dump' | sqlite3 {target_db}")
|
||||
|
||||
@staticmethod
|
||||
def chmod_dir_file(path: str, dir_mode: int = 0o755, file_mode: int = 0o644):
|
||||
if not path:
|
||||
return
|
||||
for root, dirs, files in os.walk(path):
|
||||
for d in dirs:
|
||||
try:
|
||||
os.chmod(os.path.join(root, d), dir_mode)
|
||||
except:
|
||||
continue
|
||||
for f in files:
|
||||
try:
|
||||
os.chmod(os.path.join(root, f), file_mode)
|
||||
except:
|
||||
continue
|
||||
if os.path.isdir(path):
|
||||
try:
|
||||
os.chmod(path, dir_mode)
|
||||
except:
|
||||
pass
|
||||
elif os.path.isfile(path):
|
||||
try:
|
||||
os.chmod(path, file_mode)
|
||||
except:
|
||||
pass
|
||||
|
||||
def get_site_backup_conf(self, timestamp=None):
|
||||
# todo node, 待优化
|
||||
site_data = public.M('sites').where("project_type != ?", "Node").field('name,path,project_type,id,ps').select()
|
||||
domian_data = public.M('domain').field('name,id,pid,id,port').select()
|
||||
wp_onekey = public.M('wordpress_onekey').field('s_id,prefix,user,pass').select()
|
||||
|
||||
filtered_sites = [site for site in site_data]
|
||||
filtered_domain = [name for name in domian_data]
|
||||
|
||||
pid_map = {}
|
||||
for domain in filtered_domain:
|
||||
pid = domain["pid"]
|
||||
if pid not in pid_map:
|
||||
pid_map[pid] = []
|
||||
|
||||
pid_map[pid].append(
|
||||
{
|
||||
"name": domain["name"],
|
||||
"port": domain["port"],
|
||||
}
|
||||
)
|
||||
|
||||
for site in filtered_sites:
|
||||
# domain
|
||||
site_id = site["id"]
|
||||
if site_id in pid_map:
|
||||
site["domains"] = pid_map[site_id]
|
||||
|
||||
# wp prefix
|
||||
hit = False
|
||||
for p in wp_onekey:
|
||||
try: # wp may be not exist
|
||||
if p["s_id"] == site["id"] and p.get('prefix'):
|
||||
site["wp_onekey"] = {
|
||||
"prefix": p['prefix'],
|
||||
"user": p.get('user', ''),
|
||||
"pass": p.get('pass', ''),
|
||||
}
|
||||
hit = True
|
||||
break
|
||||
except:
|
||||
pass
|
||||
if not hit:
|
||||
site["wp_onekey"] = {}
|
||||
|
||||
site["data_type"] = "backup"
|
||||
site["status"] = 0
|
||||
site["msg"] = None
|
||||
|
||||
return filtered_sites
|
||||
|
||||
def backup_site_data(self, timestamp):
|
||||
data_list = self.get_backup_data_list(timestamp)
|
||||
if not data_list:
|
||||
return None
|
||||
data_backup_path = data_list['backup_path']
|
||||
site_backup_path = data_backup_path + '/site/'
|
||||
if not os.path.exists(site_backup_path):
|
||||
public.ExecShell('mkdir -p {}'.format(site_backup_path))
|
||||
self.print_log("====================================================", 'backup')
|
||||
self.print_log(public.lang("Start backing up site data"), 'backup')
|
||||
|
||||
self.backup_site_config(site_backup_path)
|
||||
|
||||
site_sql = db.Sql()
|
||||
site_sql.table('sites')
|
||||
domain_sql = db.Sql()
|
||||
domain_sql.table('domain')
|
||||
|
||||
for site in data_list['data_list']['site']:
|
||||
# 备份db数据库数据
|
||||
site_id = site['id']
|
||||
site_db_record = site_sql.where('id=?', (site_id,)).find()
|
||||
site['site_db_record'] = site_db_record
|
||||
|
||||
# 备份网站数据
|
||||
site['path'] = str(site['path']).rstrip('/')
|
||||
last_path = os.path.basename(site['path'])
|
||||
site["last_path"] = last_path
|
||||
site_path = site_backup_path + last_path
|
||||
|
||||
if site["project_type"] == "PHP":
|
||||
try:
|
||||
site["php_ver"] = panelSite.panelSite().GetSitePHPVersion(
|
||||
public.to_dict_obj({'siteName': site['name']})
|
||||
)['phpversion']
|
||||
except:
|
||||
site["php_ver"] = None
|
||||
|
||||
site['status'] = 1
|
||||
log_str = public.lang("Backing up {} project: {}").format(site['project_type'], site['name'])
|
||||
self.print_log(log_str, "backup")
|
||||
self.update_backup_data_list(timestamp, data_list)
|
||||
|
||||
# 备份网站项目
|
||||
public.ExecShell("cp -rpa {} {}".format(site['path'], site_path))
|
||||
site_zip = site_backup_path + last_path + ".zip"
|
||||
public.ExecShell("cd {} && zip -r {}.zip {}".format(site_backup_path, last_path, last_path))
|
||||
if os.path.exists(site_zip):
|
||||
site_zip_size = public.ExecShell("du -sb {}".format(site_zip))[0].split("\t")[0]
|
||||
site['data_file_name'] = site_zip
|
||||
site['size'] = site_zip_size
|
||||
site['zip_sha256'] = self.get_file_sha256(site_zip)
|
||||
|
||||
# 创建配置文件备份目录
|
||||
webserver_conf_path = ["apache", "cert", "config", "nginx", "open_basedir",
|
||||
"openlitespeed", "other_php", "rewrite", "ssl",
|
||||
"ssl_saved", "template", "tomcat"]
|
||||
conf_backup_path = site_backup_path + site['name'] + "_conf/"
|
||||
public.ExecShell(f"mkdir -p '{conf_backup_path}'")
|
||||
|
||||
# 创建子目录
|
||||
for wpath in webserver_conf_path:
|
||||
web_conf_backup_path = conf_backup_path + wpath
|
||||
public.ExecShell(f"mkdir -p '{web_conf_backup_path}'")
|
||||
|
||||
# 备份网站配置文件
|
||||
self.backup_web_conf(site['name'], conf_backup_path)
|
||||
|
||||
# 打包网站配置文件
|
||||
site_name = site['name']
|
||||
site_conf_zip = site_backup_path + site_name + "_conf.zip"
|
||||
public.ExecShell("cd {} && zip -r {}_conf.zip {}_conf".format(site_backup_path, site_name, site_name))
|
||||
if os.path.exists(site_conf_zip):
|
||||
site['conf_file_name'] = site_conf_zip
|
||||
site['zip_sha256'] = self.get_file_sha256(site_conf_zip)
|
||||
site['conf_sha256'] = self.get_file_sha256(site_conf_zip)
|
||||
|
||||
site['status'] = 2
|
||||
format_backup_file_size = self.format_size(int(site['size']))
|
||||
new_log_str = public.lang("{} project {} ✓ ({})").format(
|
||||
site['project_type'], site['name'], format_backup_file_size
|
||||
)
|
||||
self.replace_log(log_str, new_log_str, 'backup')
|
||||
|
||||
self.update_backup_data_list(timestamp, data_list)
|
||||
|
||||
self.print_log(public.lang("Site data backup completed"), 'backup')
|
||||
|
||||
def backup_site_config(self, site_backup_path):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/data/default.db {site_backup_path}default.db".format(
|
||||
site_backup_path=site_backup_path
|
||||
)
|
||||
)
|
||||
|
||||
# 备份加密访问配置文件
|
||||
if os.path.exists("/www/server/panel/data/site_dir_auth.json"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/data/site_dir_auth.json {site_backup_path}site_dir_auth.json".format(
|
||||
site_backup_path=site_backup_path
|
||||
)
|
||||
)
|
||||
# 备份加密密码
|
||||
if os.path.exists("/www/server/pass/"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/pass/ {site_backup_path}pass/".format(
|
||||
site_backup_path=site_backup_path
|
||||
)
|
||||
)
|
||||
|
||||
# 备份反代配置
|
||||
if os.path.exists("/www/server/proxy_project/sites"):
|
||||
public.ExecShell("mkdir -p {site_backup_path}proxy_project/".format(site_backup_path=site_backup_path))
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/proxy_project/sites {site_backup_path}proxy_project/sites/".format(
|
||||
site_backup_path=site_backup_path
|
||||
)
|
||||
)
|
||||
|
||||
# 备份重定向配置
|
||||
if os.path.exists("/www/server/panel/data/redirect.conf"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/data/redirect.conf {site_backup_path}redirect.conf".format(
|
||||
site_backup_path=site_backup_path
|
||||
)
|
||||
)
|
||||
|
||||
if os.path.exists("/www/server/panel/data/proxyfile.json"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/data/proxyfile.json {site_backup_path}proxyfile.json".format(
|
||||
site_backup_path=site_backup_path
|
||||
)
|
||||
)
|
||||
|
||||
# 备份wp加速配置文件
|
||||
if os.path.exists("/www/server/nginx/conf/"):
|
||||
nginx_conf_list = os.listdir("/www/server/nginx/conf/")
|
||||
for nginx_conf_name in nginx_conf_list:
|
||||
if "wpfastcgi" in nginx_conf_name:
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/nginx/conf/{nginx_conf_name} {site_backup_path}{nginx_conf_name}".format(
|
||||
nginx_conf_name=nginx_conf_name, site_backup_path=site_backup_path
|
||||
)
|
||||
)
|
||||
|
||||
# 备份well-known文件
|
||||
if os.path.exists("/www/server/panel/vhost/nginx/well-known"):
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/vhost/nginx/well-known {site_backup_path}/well-known".format(
|
||||
site_backup_path=site_backup_path
|
||||
)
|
||||
)
|
||||
|
||||
public.ExecShell("mkdir -p {site_backup_path}/monitor_conf/".format(site_backup_path=site_backup_path))
|
||||
public.ExecShell(
|
||||
"\cp -rpa /www/server/panel/vhost/nginx/0.monitor*.conf {site_backup_path}/monitor_conf/".format(
|
||||
site_backup_path=site_backup_path
|
||||
)
|
||||
)
|
||||
|
||||
def restore_site_config(self, backup_path):
    """Restore panel/site global configuration files from a backup directory.

    Copies back the panel default.db, password directory, proxy-project
    configs, directory-auth / redirect / proxyfile JSON, wpfastcgi nginx
    confs, well-known files and monitor confs, honouring self.overwrite.

    Args:
        backup_path: backup directory path (expected to end with '/').
    """
    default_db_file = backup_path + "default.db"
    dir_auth_file = backup_path + "site_dir_auth.json"
    pass_path = backup_path + "pass"
    proxy_project_path = backup_path + "proxy_project"
    redirect_file = backup_path + "redirect.conf"
    proxyfile_file = backup_path + "proxyfile.json"
    # Restore the panel database, but re-apply the CURRENT panel user row
    # afterwards so login credentials are not clobbered by the backup.
    if os.path.exists(default_db_file) and self.overwrite:
        panel_current = public.S("users").find()
        public.ExecShell(f"\cp -rpa {default_db_file} /www/server/panel/data/default.db")
        os.chmod("/www/server/panel/data/default.db", 0o600)
        if "id" in panel_current:
            del panel_current["id"]
        public.S("users").where("id=?", (1,)).update(panel_current)
    # Restore site password-protection directory
    if os.path.exists(pass_path):
        self.copy_directory(
            src=pass_path,
            dst="/www/server/pass",
            overwrite=self.overwrite,
        )
        self.chmod_dir_file("/www/server/pass", file_mode=0o644)

    # Restore reverse-proxy project configs
    if os.path.exists(proxy_project_path):
        target = "/www/server/proxy_project"
        self.copy_directory(
            src=proxy_project_path,
            dst=target,
            overwrite=self.overwrite,
        )
        self.chmod_dir_file(target, file_mode=0o644)

    # Restore directory-auth config (only if absent, unless overwriting)
    if os.path.exists(dir_auth_file):
        target = "/www/server/panel/data/site_dir_auth.json"
        if not os.path.exists(target) or self.overwrite:
            public.ExecShell(f"\cp -rpa {dir_auth_file} /www/server/panel/data/site_dir_auth.json")
            self.chmod_dir_file(target, file_mode=0o600)

    # Restore redirect config
    if os.path.exists(redirect_file):
        target = "/www/server/panel/data/redirect.conf"
        if not os.path.exists(target) or self.overwrite:
            public.ExecShell(f"\cp -rpa {redirect_file} /www/server/panel/data/redirect.conf")
            self.chmod_dir_file(target, file_mode=0o600)

    # Restore proxy-file config
    if os.path.exists(proxyfile_file):
        target = "/www/server/panel/data/proxyfile.json"
        if not os.path.exists(target) or self.overwrite:
            public.ExecShell(f"\cp -rpa {proxyfile_file} /www/server/panel/data/proxyfile.json")
            self.chmod_dir_file(target, file_mode=0o600)

    # Restore wp-accelerate (wpfastcgi) configs; the glob may match nothing,
    # in which case cp's error output is ignored by ExecShell.
    public.ExecShell(f"\cp -rpa {backup_path}/*wpfastcgi.conf /www/server/nginx/conf/")
    self.chmod_dir_file("/www/server/nginx/conf", file_mode=0o644)

    # Restore well-known files
    if os.path.exists(backup_path + "well-known"):
        target = "/www/server/panel/vhost/nginx/well-known"
        if not os.path.exists(target):
            public.ExecShell(f"mkdir -p {target}")
        public.ExecShell(f"\cp -rpa {backup_path}well-known/* /www/server/panel/vhost/nginx/well-known/")
        self.chmod_dir_file(target, dir_mode=0o600, file_mode=0o600)

    # Restore 0.monitor*.conf files
    public.ExecShell(f"\cp -rpa {backup_path}monitor_conf/* /www/server/panel/vhost/nginx/")
    self.chmod_dir_file("/www/server/panel/vhost/nginx", dir_mode=0o600, file_mode=0o600)
||||
def restore_site_python_env(self, timestamp):
    """Reinstall pip requirements for every Python site in a restore set.

    For each restored Python project, runs `<venv>/bin/pip3 install -r
    <requirement_path>` (falling back to pip2) when a requirements file
    was recorded in the project config.

    Args:
        timestamp: backup timestamp used to locate the restore data list.
    """
    self.print_log("================================================", "restore")
    self.print_log(public.lang("Starting to restore site Python dependencies..."), 'restore')
    restore_data = self.get_restore_data_list(timestamp)
    site_data = restore_data['data_list']['site']
    for site in site_data:
        if site['project_type'] != 'Python':
            continue
        try:
            # Parse the project config once (the original parsed it twice).
            project_config = json.loads(site['site_db_record']['project_config'])
        except (KeyError, TypeError, ValueError):
            # Malformed/absent config record: skip instead of aborting all sites.
            continue
        requirement_path = project_config.get('requirement_path')
        vpath = project_config.get('vpath')
        if not requirement_path or not vpath:
            continue
        # Prefer the virtualenv's pip3, fall back to pip2.
        for pip_name in ("pip3", "pip2"):
            pip_path = vpath + "/bin/" + pip_name
            if os.path.exists(pip_path):
                public.ExecShell("{} install -r {}".format(pip_path, requirement_path))
                break
    self.print_log(public.lang("Site Python dependencies restoration completed"), 'restore')
||||
def backup_web_conf(self, site_name: str, conf_backup_path: str) -> None:
    """Back up a single site's web-server configuration files.

    Copies the site's SSL certs, rewrite rules and the nginx / apache /
    openlitespeed vhost configs (main conf plus redirect / proxy /
    dir_auth sub-configs) into `conf_backup_path`.

    Args:
        site_name: site name.
        conf_backup_path: destination directory for the config backup
            (expected to end with '/').
    """
    # Map of config files/directories to back up, keyed by component.
    conf_paths = {
        'cert': "/www/server/panel/vhost/cert/{site_name}".format(site_name=site_name),
        'rewrite': "/www/server/panel/vhost/rewrite/{site_name}.conf".format(site_name=site_name),
        'nginx': {
            'main': "/www/server/panel/vhost/nginx/{site_name}.conf".format(site_name=site_name),
            'redirect': "/www/server/panel/vhost/nginx/redirect/{site_name}".format(site_name=site_name),
            'proxy': "/www/server/panel/vhost/nginx/proxy/{site_name}".format(site_name=site_name),
            'dir_auth': "/www/server/panel/vhost/nginx/dir_auth/{site_name}".format(site_name=site_name)
        },
        'apache': {
            'main': "/www/server/panel/vhost/apache/{site_name}.conf".format(site_name=site_name),
            'redirect': "/www/server/panel/vhost/apache/redirect/{site_name}".format(site_name=site_name),
            'proxy': "/www/server/panel/vhost/apache/proxy/{site_name}".format(site_name=site_name),
            'dir_auth': "/www/server/panel/vhost/apache/dir_auth/{site_name}".format(site_name=site_name)
        },

        'openlitespeed': {
            'main': '/www/server/panel/vhost/openlitespeed',
            'detail': '/www/server/panel/vhost/openlitespeed/detail',
            'listen': '/www/server/panel/vhost/openlitespeed/listen',
            'ssl': '/www/server/panel/vhost/openlitespeed/detail/ssl',
        },
    }

    # Back up SSL certificates
    if os.path.exists(conf_paths['cert']):
        public.ExecShell(f"mkdir -p {conf_backup_path}cert/")
        public.ExecShell(f"\cp -rpa {conf_paths['cert']} {conf_backup_path}cert/")

    # Back up rewrite (pseudo-static) rules
    if os.path.exists(conf_paths['rewrite']):
        public.ExecShell(f"\cp -rpa {conf_paths['rewrite']} {conf_backup_path}rewrite")

    # Also pick up any additional rewrite confs whose filename contains
    # the site name (e.g. per-subdomain rules).
    rewrite_file_list = os.listdir("/www/server/panel/vhost/rewrite/")
    for rewrite_file in rewrite_file_list:
        if rewrite_file.endswith(".conf"):
            if site_name in rewrite_file:
                public.ExecShell(
                    f"\cp -rpa /www/server/panel/vhost/rewrite/{rewrite_file} {conf_backup_path}rewrite"
                )

    # Back up nginx configs
    nginx_paths = conf_paths['nginx']
    if os.path.exists(nginx_paths['main']):
        public.ExecShell(f"\cp -rpa {nginx_paths['main']} {conf_backup_path}nginx/")

    # No exact <site>.conf: fall back to any conf containing the site name
    if not os.path.exists(nginx_paths['main']):
        web_conf_list = os.listdir("/www/server/panel/vhost/nginx/")
        for web_conf_name in web_conf_list:
            if web_conf_name.endswith(".conf"):
                if site_name in web_conf_name:
                    public.ExecShell(
                        f"\cp -rpa /www/server/panel/vhost/nginx/{web_conf_name} {conf_backup_path}nginx/"
                    )

    if os.path.exists(nginx_paths['redirect']):
        public.ExecShell(f"mkdir -p {conf_backup_path}nginx/redirect/{site_name}/")
        public.ExecShell(f"\cp -rpa {nginx_paths['redirect']}/* {conf_backup_path}nginx/redirect/{site_name}/")

    if os.path.exists(nginx_paths['proxy']):
        public.ExecShell(f"mkdir -p {conf_backup_path}nginx/proxy/{site_name}/")
        public.ExecShell(f"\cp -rpa {nginx_paths['proxy']}/* {conf_backup_path}nginx/proxy/{site_name}/")

    if os.path.exists(nginx_paths['dir_auth']):
        public.ExecShell(f"mkdir -p {conf_backup_path}nginx/dir_auth/{site_name}/")
        public.ExecShell(f"\cp -rpa {nginx_paths['dir_auth']}/* {conf_backup_path}nginx/dir_auth/{site_name}/")

    # Back up apache configs (same layout as nginx)
    apache_paths = conf_paths['apache']
    if os.path.exists(apache_paths['main']):
        public.ExecShell(f"\cp -rpa {apache_paths['main']} {conf_backup_path}apache/")

    if not os.path.exists(apache_paths['main']):
        web_conf_list = os.listdir("/www/server/panel/vhost/apache/")
        for web_conf_name in web_conf_list:
            if web_conf_name.endswith(".conf"):
                if site_name in web_conf_name:
                    public.ExecShell(
                        f"\cp -rpa /www/server/panel/vhost/apache/{web_conf_name} {conf_backup_path}apache/"
                    )

    if os.path.exists(apache_paths['redirect']):
        public.ExecShell(f"mkdir -p {conf_backup_path}apache/redirect/{site_name}/")
        public.ExecShell(f"\cp -rpa {apache_paths['redirect']}/* {conf_backup_path}apache/redirect/{site_name}/")

    if os.path.exists(apache_paths['proxy']):
        public.ExecShell(f"mkdir -p {conf_backup_path}apache/proxy/{site_name}/")
        public.ExecShell(f"\cp -rpa {apache_paths['proxy']}/* {conf_backup_path}apache/proxy/{site_name}/")

    if os.path.exists(apache_paths['dir_auth']):
        public.ExecShell(f"mkdir -p {conf_backup_path}apache/dir_auth/{site_name}/")
        public.ExecShell(f"\cp -rpa {apache_paths['dir_auth']}/* {conf_backup_path}apache/dir_auth/{site_name}/")

    # Back up openlitespeed configs (matched by site name in the filename)
    ols_paths = conf_paths['openlitespeed']
    if os.path.exists(ols_paths['main']):
        for web_conf_name in os.listdir(ols_paths['main']):
            if site_name in web_conf_name:
                public.ExecShell(
                    f"\cp -rpa /www/server/panel/vhost/openlitespeed/{web_conf_name} {conf_backup_path}openlitespeed/"
                )

    if os.path.exists(conf_paths['openlitespeed']['detail']):
        public.ExecShell(f"mkdir -p {conf_backup_path}openlitespeed/detail")
        for detail in os.listdir(conf_paths['openlitespeed']['detail']):
            if site_name in detail:
                public.ExecShell(
                    f"\cp -rpa {ols_paths['main']}/detail/{detail} {conf_backup_path}openlitespeed/detail/"
                )

    if os.path.exists(conf_paths['openlitespeed']['listen']):
        # NOTE(review): this cp lacks the '\' alias-bypass prefix used
        # everywhere else — confirm whether that is intentional.
        public.ExecShell(
            f"cp -rpa {conf_paths['openlitespeed']['listen']} {conf_backup_path}openlitespeed/listen"
        )

    if os.path.exists(conf_paths['openlitespeed']['ssl']):
        public.ExecShell(f"mkdir -p {conf_backup_path}openlitespeed/detail/ssl")
        for ssl in os.listdir(conf_paths['openlitespeed']['ssl']):
            if site_name in ssl:
                public.ExecShell(
                    f"cp -rpa {conf_paths['openlitespeed']['ssl']}/{ssl} {conf_backup_path}openlitespeed/detail/ssl/{ssl}"
                )
||||
def restore_web_conf(self, site_name: str, conf_backup_path: str) -> None:
    """Restore a single site's web-server configuration files.

    Mirrors backup_web_conf: copies certs, rewrite rules and
    nginx / apache / openlitespeed vhost configs from `conf_backup_path`
    back to their panel locations.

    Args:
        site_name: site name.
        conf_backup_path: source backup directory (expected to end with '/').
    """
    # Target paths for each restorable component.
    conf_paths = {
        'cert': "/www/server/panel/vhost/cert/{site_name}".format(site_name=site_name),
        'rewrite': "/www/server/panel/vhost/rewrite/{site_name}.conf".format(site_name=site_name),
        'nginx': {
            'main': "/www/server/panel/vhost/nginx/{site_name}.conf".format(site_name=site_name),
            'redirect': "/www/server/panel/vhost/nginx/redirect/{site_name}".format(site_name=site_name),
            'proxy': "/www/server/panel/vhost/nginx/proxy/{site_name}".format(site_name=site_name)
        },
        'apache': {
            'main': "/www/server/panel/vhost/apache/{site_name}.conf".format(site_name=site_name),
            'redirect': "/www/server/panel/vhost/apache/redirect/{site_name}".format(site_name=site_name),
            'proxy': "/www/server/panel/vhost/apache/proxy/{site_name}".format(site_name=site_name)
        },
        'openlitespeed': {
            'main': '/www/server/panel/vhost/openlitespeed',
            'detail': '/www/server/panel/vhost/openlitespeed/detail',
            'listen': '/www/server/panel/vhost/openlitespeed/listen',
            'ssl': '/www/server/panel/vhost/openlitespeed/detail/ssl',
        },
    }

    # Restore SSL certificates
    if os.path.exists(f"{conf_backup_path}cert"):
        public.ExecShell(f"\cp -rpa {conf_backup_path}cert {conf_paths['cert']}")

    # Restore rewrite (pseudo-static) rules
    if os.path.exists(f"{conf_backup_path}rewrite"):
        public.ExecShell(f"\cp -rpa {conf_backup_path}rewrite {conf_paths['rewrite']}")

    # Restore nginx configs
    if os.path.exists(f"{conf_backup_path}nginx"):
        public.ExecShell(f"\cp -rpa {conf_backup_path}nginx {conf_paths['nginx']['main']}")
        if os.path.exists(f"{conf_backup_path}nginx/redirect"):
            public.ExecShell(f"\cp -rpa {conf_backup_path}nginx/redirect {conf_paths['nginx']['redirect']}")
        if os.path.exists(f"{conf_backup_path}nginx/proxy"):
            public.ExecShell(f"\cp -rpa {conf_backup_path}nginx/proxy {conf_paths['nginx']['proxy']}")

    # Restore apache configs
    if os.path.exists(f"{conf_backup_path}apache"):
        public.ExecShell(f"\cp -rpa {conf_backup_path}apache {conf_paths['apache']['main']}")
        if os.path.exists(f"{conf_backup_path}apache/redirect"):
            public.ExecShell(f"\cp -rpa {conf_backup_path}apache/redirect {conf_paths['apache']['redirect']}")
        if os.path.exists(f"{conf_backup_path}apache/proxy"):
            public.ExecShell(f"\cp -rpa {conf_backup_path}apache/proxy {conf_paths['apache']['proxy']}")

    # Restore openlitespeed configs (matched by site name in the filename)
    if os.path.exists(f"{conf_backup_path}openlitespeed"):
        for web_cf_name in os.listdir(f"{conf_backup_path}openlitespeed"):
            if site_name in web_cf_name:
                public.ExecShell(
                    f"\cp -rpa {conf_backup_path}openlitespeed/{web_cf_name} {conf_paths['openlitespeed']['main']}/{web_cf_name}"
                )

        detail_path = f"{conf_backup_path}openlitespeed/detail"
        if os.path.exists(detail_path):
            if not os.path.exists(conf_paths['openlitespeed']['detail']):
                public.ExecShell(f"mkdir -p {conf_paths['openlitespeed']['detail']}")
            for detail in os.listdir(detail_path):
                if site_name in detail:
                    public.ExecShell(
                        f"\cp -rpa {detail_path}/{detail} {conf_paths['openlitespeed']['detail']}/{detail}"
                    )

        listen_path = f"{conf_backup_path}openlitespeed/listen"
        if os.path.exists(listen_path):
            # NOTE(review): the destination resolves to .../listen/listen —
            # confirm this nesting is intentional.
            public.ExecShell(
                f"\cp -rpa {listen_path} {conf_paths['openlitespeed']['listen']}/listen"
            )

        ssl_path = f"{conf_backup_path}openlitespeed/detail/ssl"
        if os.path.exists(ssl_path):
            if not os.path.exists(conf_paths['openlitespeed']['ssl']):
                public.ExecShell(f"mkdir -p {conf_paths['openlitespeed']['ssl']}")
            for ssl in os.listdir(ssl_path):
                if site_name in ssl:
                    public.ExecShell(
                        f"\cp -rpa {ssl_path}/{ssl} {conf_paths['openlitespeed']['ssl']}/{ssl}"
                    )
||||
def _restore_site_db_data(self, site_db_record: dict) -> int:
    """Insert a backed-up site row into the `sites` table.

    Args:
        site_db_record: site row from the backup (mutated in place:
            'id' and 'index' keys are stripped).

    Returns:
        The existing row id when a site with the same name and
        project_type is already present, otherwise the new row id.

    Raises:
        Exception: when the insert fails.
    """
    sql = db.Sql()
    sql.table('sites')
    # Drop the backup's primary key so SQLite assigns a fresh one.
    if 'id' in site_db_record:
        del site_db_record['id']
    # 'index' is an SQL keyword and would break the generated statement.
    if 'index' in site_db_record:
        del site_db_record['index']
    if_exist = sql.where(
        'name=? AND project_type=?',
        (site_db_record['name'], site_db_record['project_type'])
    ).find()
    if if_exist:
        return if_exist['id']

    # insert db record
    try:
        new_id = sql.insert(site_db_record)
        return new_id
    except Exception as e:
        # BUG FIX: the original raised a plain str (which itself raises
        # TypeError: exceptions must derive from BaseException).
        raise Exception(public.lang("Site database insert failed: {}").format(str(e))) from e
||||
def _restore_site_domian_db_data(self, pid: int, domains: list) -> None:
    """Insert domain rows for site `pid`, skipping names that already exist.

    Insert failures are logged and do not abort the remaining domains.
    (Method name keeps the historical 'domian' spelling for callers.)
    """
    domain_table = db.Sql()
    domain_table.table('domain')
    for entry in domains:
        try:
            already_present = domain_table.where('name=?', (entry['name'],)).count()
            if already_present:
                continue
            domain_table.add(
                'pid, name, port, addtime',
                (pid, entry['name'], int(entry['port']), public.getDate())
            )
        except Exception as err:
            public.print_log("Domain database insert failed: {}".format(str(err)))
||||
def _backup_site(self, site: dict, backupPath: str) -> None:
    """Best-effort backup of an existing site before an overwrite restore.

    PHP sites are zipped into `backupPath` and recorded in the `backup`
    table; WP sites use wpbackup's full backup. Failures are logged and
    swallowed so the restore can proceed.

    Args:
        site: site dict containing at least 'id' and 'project_type'.
        backupPath: directory to place the zip archive in.
    """
    try:
        project_type = site.get("project_type", "").lower()
        if project_type == "php":
            find = public.M('sites').where("id=?", (site['id'],)).field('name,path,id').find()
            fileName = find['name'] + '_' + time.strftime(
                '%Y%m%d_%H%M%S', time.localtime()
            ) + '.zip'
            zipName = backupPath + '/' + fileName
            # Idiomatic replacement for exists()+makedirs (race-free).
            os.makedirs(backupPath, exist_ok=True)
            tmps = '/tmp/panelExec.log'
            # .user.ini is excluded: it is root-owned and chattr +i protected.
            execStr = f"cd '{find['path']}' && zip '{zipName}' . -x .user.ini > {tmps} 2>&1"
            public.ExecShell(execStr)
            public.M('backup').add(
                'type,name,pid,filename,size,addtime',
                (0, fileName, find['id'], zipName, 0, public.getDate())
            )
        elif "wp" in project_type:
            bak_obj = wpbackup(int(site['id']))
            bak_obj.backup_full()
    except Exception as e:
        # Deliberately best-effort, but log instead of the original
        # bare `except: pass` which hid every failure.
        public.print_log("Pre-restore site backup failed: {}".format(str(e)))
||||
def restore_site_data(self, timestamp: str) -> None:
    """Restore all site data (files, DB records, vhost configs) for a backup set.

    For each site in the restore list: optionally backs up the existing
    site (overwrite mode), restores site/domain DB rows (non-overwrite
    mode), unzips and copies the site files, reinstates .user.ini
    protection, and restores the site's vhost config archive.

    Args:
        timestamp: backup timestamp identifying the backup set.
    """
    restore_data = self.get_restore_data_list(timestamp)
    site_backup_path = self.base_path + "/{timestamp}_backup/site/".format(timestamp=timestamp)
    # Restore global site/panel configuration first
    self.restore_site_config(site_backup_path)

    if not os.path.exists(site_backup_path):
        self.print_log(public.lang("Site backup directory does not exist: {}").format(site_backup_path), 'restore')
        return
    self.print_log("====================================================", "restore")
    self.print_log(public.lang("Start restoring site data"), 'restore')

    # Pre-restore backups of existing sites go to the panel's backup dir.
    backupPath = public.M('config').where('id=?', (1,)).getField('backup_path')
    backupPath = backupPath + '/site/' if backupPath else "/www/backup/site/"
    with app.app_context():
        for site in restore_data['data_list']['site']:
            log_str = public.lang("Restoring {} project: {}").format(site.get("project_type"), site.get("name"))
            try:
                site_name = site['name']
                # restore_status: 1 = in progress, 2 = done, 3 = failed
                site['restore_status'] = 1
                self.update_restore_data_list(timestamp, restore_data)
                self.print_log(log_str, 'restore')
                if self.overwrite:
                    # site backup if overwrite
                    self._backup_site(site, backupPath)

                # DB records are only inserted when NOT overwriting an
                # existing site.
                if not self.overwrite and 'site_db_record' in site:
                    site_id = self._restore_site_db_data(site['site_db_record'])
                    if site_id and 'domains' in site:
                        # Restore domain records for the new site id
                        self._restore_site_domian_db_data(site_id, site['domains'])

                # site file
                site_path = str(site['path']).rstrip('/')
                last_path: str = os.path.basename(site_path) if site['last_path'] == '' else site['last_path']  # site name
                # site abs path
                site_zip = site_backup_path + last_path + ".zip"

                if os.path.exists(site_zip):
                    public.ExecShell(f"cd {site_backup_path} && unzip -o {last_path}.zip")

                site_data_path = site_backup_path + last_path  # site unzip file
                if os.path.exists(site_data_path):
                    site_parent_path = os.path.dirname(site_path)  # /www/wwwroot
                    if not os.path.exists(site_parent_path):
                        public.ExecShell("mkdir -p {}".format(site_parent_path))
                        public.ExecShell("chown -R www:www {}".format(site_parent_path))
                        public.ExecShell("chmod -R 755 {}".format(site_parent_path))

                    src_site = os.path.join(site_backup_path, last_path)
                    dst_site = os.path.join(site_parent_path, last_path)

                    public.print_log('copying site directory from {} to {}'.format(src_site, dst_site))

                    self.copy_directory(
                        src=src_site,
                        dst=dst_site,
                        overwrite=self.overwrite,
                    )
                    # The unzipped copy is no longer needed; failure to
                    # delete it is non-fatal.
                    try:
                        shutil.rmtree(src_site)
                    except Exception as e:
                        public.print_log(public.lang("Failed to delete source site directory: {}").format(str(e)))

                    # makesure: ownership and immutable .user.ini protection
                    public.ExecShell(f"chown -R www:www {dst_site}")
                    user_ini = dst_site + "/.user.ini"
                    if not os.path.exists(user_ini) or self.overwrite:
                        public.writeFile(user_ini, f"open_basedir={site_parent_path}/:/tmp/")
                        public.ExecShell("chmod 644 " + user_ini)
                        public.ExecShell("chown root:root " + user_ini)
                        public.ExecShell("chattr +i " + user_ini)

                # site config archive (vhost confs produced by backup_web_conf)
                site_conf_zip = site_backup_path + site_name + "_conf.zip"
                if os.path.exists(site_conf_zip):
                    public.ExecShell(
                        "cd {site_backup_path} && unzip -o {site_name}_conf.zip".format(
                            site_backup_path=site_backup_path, site_name=site_name
                        )
                    )
                    public.ExecShell(
                        "cd {site_backup_path} && \cp -rpa {site_name}_conf/* /www/server/panel/vhost".format(
                            site_backup_path=site_backup_path, site_name=site_name
                        )
                    )

                new_log_str = public.lang("{} project: {} ✓").format(site['project_type'], site['name'])
                self.replace_log(log_str, new_log_str, 'restore')
                site['restore_status'] = 2
                self.update_restore_data_list(timestamp, restore_data)
            except Exception as e:
                site['restore_status'] = 3
                self.update_restore_data_list(timestamp, restore_data)
                new_log_str = public.lang(f"{site['project_type']} project: {site['name']} Reason: {str(e)}")
                self.replace_log(log_str, new_log_str, 'restore')
                continue

    self.print_log(public.lang("Site data restoration completed"), 'restore')
    # Restoring every site's Python environment is currently disabled:
    # self.restore_site_python_env(timestamp)
||||
def backup_site_dir_auth(self, site_name: str):
    """Return {site_name: dir-auth config} from the panel config, else False."""
    if not os.path.exists(self.site_dir_auth_path):
        return False
    all_auth = json.loads(public.ReadFile(self.site_dir_auth_path))
    if site_name not in all_auth:
        return False
    return {site_name: all_auth[site_name]}
||||
def restore_site_dir_auth(self, site_name: str, backup_data_path: str):
    """Merge one site's directory-auth config from a backup into the panel config.

    Args:
        site_name: site whose auth entry should be restored.
        backup_data_path: JSON file produced by backup_site_dir_auth.
    """
    if not os.path.exists(backup_data_path):
        return
    dir_auth_backup_data = json.loads(public.ReadFile(backup_data_path))
    if site_name not in dir_auth_backup_data:
        # Nothing recorded for this site (previously a KeyError).
        return
    # FIX: start from an empty config when the local file is missing,
    # instead of silently skipping the restore.
    site_dir_auth_data = {}
    if os.path.exists(self.site_dir_auth_path):
        site_dir_auth_data = json.loads(public.ReadFile(self.site_dir_auth_path))
    site_dir_auth_data[site_name] = dir_auth_backup_data[site_name]
    public.WriteFile(self.site_dir_auth_path, json.dumps(site_dir_auth_data))
||||
def backup_dir_pass(self, site_name: str, backup_data_path: str):
    """Return {site_name: dir-auth entry} when present, else {}.

    (`backup_data_path` is accepted for interface compatibility but unused.)
    """
    result = {}
    if os.path.exists(self.site_dir_auth_path):
        auth_map = json.loads(public.ReadFile(self.site_dir_auth_path))
        if site_name in auth_map:
            result = {site_name: auth_map[site_name]}
    return result
||||
def backup_redirect_conf(self, site_name: str):
    """Return the first redirect entry whose 'sitename' contains site_name, else False."""
    if os.path.exists(self.redirect_conf_path):
        for entry in json.loads(public.ReadFile(self.redirect_conf_path)):
            if site_name in entry['sitename']:
                return entry
    return False
||||
def restore_redirect_conf(self, site_name: str, backup_data_path: str):
    """Append the backed-up redirect entry unless one with the same sitename exists.

    Always returns False (kept for interface compatibility).
    """
    if os.path.exists(backup_data_path):
        entry = json.loads(public.ReadFile(backup_data_path))
        local_entries = []
        if os.path.exists(self.redirect_conf_path):
            local_entries = json.loads(public.ReadFile(self.redirect_conf_path))
        already_present = any(
            item['sitename'] == entry['sitename'] for item in local_entries
        )
        if not already_present:
            local_entries.append(entry)
            public.WriteFile(self.redirect_conf_path, json.dumps(local_entries))
    return False
||||
def backup_proxy_conf(self, site_name: str):
    """Return the first proxy entry whose 'sitename' contains site_name, else False."""
    if os.path.exists(self.proxy_conf_path):
        for entry in json.loads(public.ReadFile(self.proxy_conf_path)):
            if site_name in entry['sitename']:
                return entry
    return False
||||
def restore_proxy_conf(self, site_name: str, backup_data_path: str):
    """Append the backed-up proxy entry unless one with the same sitename exists.

    Always returns False (kept for interface compatibility).
    """
    if os.path.exists(backup_data_path):
        entry = json.loads(public.ReadFile(backup_data_path))
        local_entries = []
        if os.path.exists(self.proxy_conf_path):
            local_entries = json.loads(public.ReadFile(self.proxy_conf_path))
        already_present = any(
            item['sitename'] == entry['sitename'] for item in local_entries
        )
        if not already_present:
            local_entries.append(entry)
            public.WriteFile(self.proxy_conf_path, json.dumps(local_entries))
    return False
||||
if __name__ == '__main__':
    # CLI entry point: btpython backup_manager.py <method> <timestamp>
    if len(sys.argv) < 3:
        print("Usage: btpython backup_manager.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]  # SiteModule method to invoke
    timestamp = sys.argv[2]    # backup timestamp (old comment wrongly said "IP address")
    site_module = SiteModule()
    if hasattr(site_module, method_name):
        method = getattr(site_module, method_name)
        method(timestamp)
    else:
        # FIX: message previously contained stray quotes
        # ("'{m}' 'does not exist'") and the script exited 0 on error.
        print(f"Error: Method '{method_name}' does not exist")
        sys.exit(1)
||||
829
mod/project/backup_restore/modules/soft_module.py
Normal file
829
mod/project/backup_restore/modules/soft_module.py
Normal file
@@ -0,0 +1,829 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <miku@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from YakPanel import app
|
||||
from mod.project.backup_restore.base_util import BaseUtil
|
||||
from mod.project.backup_restore.config_manager import ConfigManager
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
OFFICIAL_URL = public.OfficialDownloadBase()
|
||||
|
||||
|
||||
class SoftModule(BaseUtil, ConfigManager):
|
||||
def __init__(self):
    """Initialise backup paths for the software-stack backup module."""
    super().__init__()
    # Root directory for all backup_restore data
    self.base_path = '/www/backup/backup_restore'
    # NOTE: attribute name keeps the original 'bakcup' typo — renaming it
    # would break existing callers.
    self.bakcup_task_json = self.base_path + '/backup_task.json'
    # When True, binaries (e.g. nodejs) are also copied into the backup
    self.packet = False
||||
def get_install_type(self):
    """Detect the system package manager.

    Returns:
        1 for yum/dnf (RHEL family), 4 for apt (Debian family), 0 unknown.
    """
    yum_candidates = ("/usr/bin/yum", "/usr/bin/dnf", "/usr/sbin/yum")
    apt_candidates = ("/usr/bin/apt", "/usr/sbin/apt-get", "/usr/bin/apt-get")
    if any(os.path.exists(path) for path in yum_candidates):
        return 1
    if any(os.path.exists(path) for path in apt_candidates):
        return 4
    return 0
||||
def get_web_server(self):
    """Detect the installed web server and return its info dict, or None.

    Probes nginx, apache and openlitespeed in that order; the first one
    whose binary exists wins. The dict contains name/version/size/status.
    """
    # (name, binary path, version command, version regex, install dir)
    probes = (
        ("nginx", "/www/server/nginx/sbin/nginx",
         "nginx -v 2>&1", r'nginx/(\d+\.\d+)', "/www/server/nginx/"),
        ("apache", "/www/server/apache/bin/httpd",
         "httpd -v 2>&1", r'Apache/(\d+\.\d+)', "/www/server/apache/"),
        ("openlitespeed", "/usr/local/lsws/bin/openlitespeed",
         "/usr/local/lsws/bin/openlitespeed -v 2>&1",
         r'LiteSpeed/(\d+\.\d+\.\d+) Open', "/usr/local/lsws/"),
    )
    for name, binary, version_cmd, pattern, install_dir in probes:
        if not os.path.exists(binary):
            continue
        raw_version = public.ExecShell(version_cmd)[0].replace("\n", "")
        matched = re.search(pattern, raw_version)
        # Fall back to the raw command output when parsing fails.
        version = matched.group(1) if matched else raw_version
        info = {
            "name": name,
            "version": version,
            "size": BaseUtil().get_file_size(install_dir),
            "status": 2,
        }
        self.print_log("{} {} ✓".format(name, version), 'backup')
        return info
||||
def get_php_server(self):
    """List installed PHP versions with their loaded extensions and sizes.

    Returns:
        A list of dicts (name/version/php_ext/size/status) with `version`
        rendered in dotted form (e.g. "7.4"), or None when
        /www/server/php does not exist.
    """
    php_dir = "/www/server/php"
    if not os.path.exists(php_dir):
        return None

    phplist = []
    for dir_name in os.listdir(php_dir):
        # FIX: removed duplicated `dir_path = dir_path = ...` assignment.
        dir_path = os.path.join(php_dir, dir_name)
        # Only numeric version dirs containing a php binary (guards the
        # int() below against stray non-version directories).
        if dir_name.isdigit() and os.path.isdir(dir_path) and os.path.exists(os.path.join(dir_path, 'bin/php')):
            phplist.append(int(dir_name))

    result = []
    for php_ver in phplist:
        php_ext = public.ExecShell("/www/server/php/{}/bin/php -m".format(php_ver))[0].split("\n")
        # Strip section headers and blank lines from `php -m` output.
        filtered_data = [item for item in php_ext if item not in ('[PHP Modules]', '[Zend Modules]', '')]
        php_result = {
            "name": "php",
            "version": php_ver,
            "php_ext": filtered_data,
            "size": BaseUtil().get_file_size("/www/server/php/{}".format(php_ver)),
            "status": 2,
        }
        # Render the directory name as a dotted version string.
        ver_str = str(php_ver)
        if len(ver_str) == 2:
            # e.g. 54 -> 5.4, 82 -> 8.2
            php_result['version'] = f"{ver_str[0]}.{ver_str[1]}"
        elif len(ver_str) == 3:
            # e.g. 810 -> 8.10 (first digit is major, rest is minor)
            php_result['version'] = f"{ver_str[0]}.{ver_str[1:]}"
        result.append(php_result)
        self.print_log("php {} ✓".format(php_result['version']), 'backup')
    return result
||||
def get_mysql_server(self):
    """Detect the installed MySQL/MariaDB server.

    Returns:
        A dict with type ("mysql"/"mariadb"), version, size and status,
        or False when no server (or no readable version) is found.
    """
    if not os.path.exists("/www/server/mysql/bin/mysql"):
        return False

    mysql_version = None
    if os.path.exists("/www/server/mysql/version.pl"):
        mysql_version = public.ReadFile("/www/server/mysql/version.pl").replace("\n", "")
    elif os.path.exists("/www/server/mysql/version_check.pl"):
        mysql_version = public.ExecShell("/www/server/mysql/version_check.pl")[0].replace("\n", "")

    if not mysql_version:
        # BUG FIX: re.search(pattern, None) raised TypeError when neither
        # version file existed.
        return False

    # MariaDB reports 10.x version numbers.
    match = re.search(r'10\.\d+', mysql_version)
    if match:
        db_type = "mariadb"
        mysql_version = match.group()
    else:
        db_type = "mysql"
        mysql_version = mysql_version[0:3]

    result = {
        "type": db_type,
        "version": mysql_version,
        "size": BaseUtil().get_file_size("/www/server/mysql/"),
        "status": 2,
    }
    self.print_log("mysql {} ✓".format(mysql_version), 'backup')
    return result
||||
def get_ftp_server(self, get=None):
    """Return pure-ftpd info (name/version/size/port/status), or None if absent.

    The listen port is parsed from the Bind line of pure-ftpd.conf and
    defaults to 21 when it cannot be determined.
    """
    if not os.path.exists("/www/server/pure-ftpd/bin/pure-pw"):
        return None
    size = BaseUtil().get_file_size("/www/server/pure-ftpd/")
    try:
        raw_port = public.ExecShell(
            "cat /www/server/pure-ftpd/etc/pure-ftpd.conf | grep Bind|awk '{print $2}'"
        )[0].replace("\n", "").replace("0.0.0.0,", "")
        pure_ftp_port = int(raw_port)
    except:
        pure_ftp_port = 21
    self.print_log("pure-ftpd {} ✓".format(pure_ftp_port), 'backup')
    return {
        "name": "pure-ftpd",
        "version": "1.0.49",
        "size": size,
        "port": int(pure_ftp_port),
        "status": 2,
    }
||||
def get_node_list(self, timestamp):
    """Collect installed Node.js versions and their global npm modules.

    Args:
        timestamp: backup-task timestamp used to locate the package dir.
    Returns:
        list of per-version metadata dicts, or ``None`` when Node.js is
        not installed. When ``self.packet`` is truthy the nodejs tree is
        also copied into the backup package.
    """
    node_dir = "/www/server/nodejs"
    if not os.path.exists(node_dir):
        return None

    # Version directories look like "v16.20.0".
    node_list = [d for d in os.listdir(node_dir)
                 if re.match(r"^v[1-9]\d*(\.\d+)*$", d)]

    result = []
    for node_ver in node_list:
        node_mod_path = os.path.join(node_dir, node_ver, "lib", "node_modules")
        mod_list = os.listdir(node_mod_path) if os.path.isdir(node_mod_path) else []
        result.append({
            "name": "node",
            "version": node_ver,
            "mod_list": mod_list,
            "size": BaseUtil().get_file_size("/www/server/nodejs/{}".format(node_ver)),
            "status": 2,
        })
        self.print_log("node {} ✓".format(node_ver), 'backup')

    if result and self.packet:
        backup_path = os.path.join(self.base_path, f"{timestamp}_backup/plugin")
        node_backup_path = os.path.join(backup_path, "nodejs")
        # Bug fix: the original copied to "{backup_path}/nodejs/*" — a glob
        # is not a valid cp destination — and never created the target dir.
        # "\\cp" bypasses any shell alias on cp; -rpa preserves attributes.
        public.ExecShell(f"mkdir -p {node_backup_path}")
        public.ExecShell(f"\\cp -rpa /www/server/nodejs/* {node_backup_path}/")
    return result
|
||||
|
||||
def get_redis_server(self):
    """Detect an installed Redis server and collect its backup metadata.

    Returns a metadata dict, or ``None`` when the server binary or the
    version marker file is missing.
    """
    server_bin = "/www/server/redis/src/redis-server"
    version_file = "/www/server/redis/version.pl"
    if not (os.path.exists(server_bin) and os.path.exists(version_file)):
        return None

    redis_version = public.ReadFile(version_file)
    dir_size = BaseUtil().get_file_size("/www/server/redis/")
    short_version = redis_version[0:3]  # e.g. "7.0.12" -> "7.0"
    self.print_log("redis {} ✓".format(short_version), 'backup')
    return {
        "name": "redis",
        "version": short_version,
        "size": dir_size,
        "status": 2,
    }
|
||||
|
||||
def get_memcached_server(self):
    """Detect an installed memcached server and collect its backup metadata.

    The version string is hard-coded here (no version file is read).
    Returns a metadata dict, or ``None`` when memcached is not installed.
    """
    if not os.path.exists("/usr/local/memcached/bin/memcached"):
        return None

    memcached_version = "1.6.12"
    dir_size = BaseUtil().get_file_size("/usr/local/memcached/")
    self.print_log("memcached {} ✓".format(memcached_version), 'backup')
    return {
        "name": "memcached",
        "version": memcached_version,
        "size": dir_size,
        "status": 2,
    }
|
||||
|
||||
def get_mongodb_server(self):
    """Detect an installed MongoDB server and collect its backup metadata.

    Requires both the version marker file and at least one of the
    ``mongod``/``mongo`` binaries; otherwise returns ``None``.
    """
    version_file = "/www/server/mongodb/version.pl"
    has_binary = (os.path.exists("/www/server/mongodb/bin/mongod")
                  or os.path.exists("/www/server/mongodb/bin/mongo"))
    if not (os.path.exists(version_file) and has_binary):
        return None

    mongodb_version = public.ReadFile(version_file)
    dir_size = BaseUtil().get_file_size("/www/server/mongodb/")
    short_version = mongodb_version[0:3]  # e.g. "4.4.18" -> "4.4"
    self.print_log("mongodb {} ✓".format(short_version), 'backup')
    return {
        "name": "mongodb",
        "version": short_version,
        "size": dir_size,
        "status": 2,
    }
|
||||
|
||||
def get_pgsql_server(self):
    """Detect an installed PostgreSQL server and collect its backup metadata.

    Returns a metadata dict, or ``None`` when pg_config is absent.
    """
    if not os.path.exists("/www/server/pgsql/bin/pg_config"):
        return None

    # pg_config prints e.g. "PostgreSQL 15.2"; the second token is the version.
    raw_output = public.ExecShell("/www/server/pgsql/bin/pg_config --version")[0]
    pgsql_version = raw_output.replace("\n", "").split(" ")[1]
    dir_size = BaseUtil().get_file_size("/www/server/pgsql/")
    self.print_log("pgsql {} ✓".format(pgsql_version), 'backup')
    return {
        "name": "pgsql",
        "version": pgsql_version,
        "size": dir_size,
        "status": 2,
    }
|
||||
|
||||
def get_phpmyadmin_version(self):
    """Detect an installed phpMyAdmin and collect its backup metadata.

    Returns a metadata dict, or ``None`` when the version marker file is
    missing.
    """
    version_file = "/www/server/phpmyadmin/version.pl"
    if not os.path.exists(version_file):
        return None

    phpmyadmin_version = public.ReadFile(version_file).replace("\n", "")
    dir_size = BaseUtil().get_file_size("/www/server/phpmyadmin/")
    self.print_log("phpmyadmin {} ✓".format(phpmyadmin_version), 'backup')
    return {
        "name": "phpmyadmin",
        "version": phpmyadmin_version,
        "size": dir_size,
        "status": 2,
    }
|
||||
|
||||
def get_soft_data(self, timestamp=None, packet: bool = False):
    """Collect metadata for every supported service and persist it.

    Args:
        timestamp: backup-task timestamp (currently unused here; the
            node collection that used it is disabled).
        packet: stored on ``self.packet`` for collectors that copy files
            into the backup package.
    Side effects: writes the collected data to ``/root/soft.json``.
    Returns the collected dict.
    """
    self.print_log("====================================================", "backup")
    self.print_log(public.lang("Start backing up software information"), "backup")
    self.packet = packet

    # Each collector logs its own progress line, so the order of this
    # table is also the order of the log output.
    collectors = [
        ("web_server", self.get_web_server),
        ("php_server", self.get_php_server),
        ("mysql_server", self.get_mysql_server),
        ("ftp_server", self.get_ftp_server),
        # ("node_list", lambda: self.get_node_list(timestamp)),  # disabled upstream
        ("redis_server", self.get_redis_server),
        ("memcached_server", self.get_memcached_server),
        ("mongodb_server", self.get_mongodb_server),
        ("pgsql_server", self.get_pgsql_server),
        ("phpmyadmin_version", self.get_phpmyadmin_version),
    ]
    result = {key: collect() for key, collect in collectors}

    public.WriteFile("/root/soft.json", json.dumps(result))
    self.print_log(public.lang("Software information backup completed"), 'backup')
    return result
|
||||
|
||||
# ======================== install software ========================
|
||||
def install_web_server(self, timestamp):
    """Reinstall the web server (nginx or apache) recorded in the restore data.

    Success is judged by the installed binary appearing on disk, not by the
    installer's exit status. restore_status convention: 1 = in progress,
    2 = success, 3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    web_server = restore_data['data_list']['soft']['web_server']
    install_type = self.get_install_type()
    # NOTE(review): this progress line always says "nginx-<ver>" even when
    # the recorded server is apache; the apache branch prints its own start
    # line, so replace_log(log_str, ...) will not match it — confirm intent.
    log_str = public.lang("Start installing nginx-{}").format(web_server.get('version', 'latest'))
    try:
        result = None
        if web_server['name'] == 'nginx':
            self.print_log(log_str, "restore")
            web_server['restore_status'] = 1
            web_server['msg'] = None
            self.update_restore_data_list(timestamp, restore_data)
            # Download and run the panel's nginx installer for the recorded version.
            result = public.ExecShell(
                "cd /www/server/panel/install && wget -O nginx.sh {}/install/{}/nginx.sh && bash nginx.sh install {}".format(
                    OFFICIAL_URL, install_type, web_server['version']
                )
            )
        elif web_server['name'] == 'apache':
            self.print_log(public.lang("Start installing apache service"), "restore")
            web_server['restore_status'] = 1
            web_server['msg'] = None
            self.update_restore_data_list(timestamp, restore_data)
            result = public.ExecShell(
                "cd /www/server/panel/install && wget -O apache.sh {}/install/{}/apache.sh && bash apache.sh install {}".format(
                    OFFICIAL_URL, install_type, web_server['version']
                )
            )
        # Verify the installation by probing for the server binary.
        if web_server['name'] == 'nginx' and os.path.exists("/www/server/nginx/sbin/nginx"):
            new_log_str = "{}-{} ✓".format(web_server['name'], web_server['version'])
            self.replace_log(log_str, new_log_str, "restore")
            web_server['restore_status'] = 2
            web_server['msg'] = None
            self.update_restore_data_list(timestamp, restore_data)
        elif web_server['name'] == 'apache' and os.path.exists("/www/server/apache/bin/httpd"):
            new_log_str = "{}-{} ✓".format(web_server['name'], web_server['version'])
            self.replace_log(log_str, new_log_str, "restore")
            web_server['restore_status'] = 2
            web_server['msg'] = None
            self.update_restore_data_list(timestamp, restore_data)
        else:
            # NOTE(review): if web_server['name'] is neither "nginx" nor
            # "apache", `result` is still None here and result[0] raises
            # TypeError — it is caught by the except below, which records
            # the failure, but the error text is then misleading.
            combined_output = (result[0] + result[1]).splitlines()
            err_msg = '\n'.join(combined_output[-10:])  # last 10 installer lines
            new_log_str = public.lang(
                "{}-{} ✗ Installation failed Reason: {} \n Please try to reinstall the web server in the software store after the restore task ends").format(
                web_server['name'], web_server['version'], err_msg
            )
            self.replace_log(log_str, new_log_str, "restore")
            web_server['restore_status'] = 3
            web_server['msg'] = new_log_str
            self.update_restore_data_list(timestamp, restore_data)
    except Exception as e:
        # Any unexpected failure is recorded on the restore entry so the
        # task can continue with the other services.
        err_msg = public.lang(
            "{}-{} ✗ Installation failed Reason: {} \n Please try to reinstall the web server in the software store after the restore task ends").format(
            web_server['name'], web_server['version'], str(e)
        )
        web_server['restore_status'] = 3
        web_server['msg'] = err_msg
        self.update_restore_data_list(timestamp, restore_data)

    self.print_log(public.lang("Web server installation completed"), "restore")
|
||||
|
||||
def install_php_server(self, timestamp):
    """Reinstall every PHP version recorded in the restore data.

    Versions already present under ``/www/server/php/<ver>`` are skipped.
    restore_status convention: 2 = success, 3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    php_server = restore_data['data_list']['soft']['php_server']
    install_type = self.get_install_type()
    for php in php_server:
        php_ver = php['version']
        self.update_restore_data_list(timestamp, restore_data)
        log_str = public.lang("Start installing php-{}").format(php_ver)
        self.print_log(log_str, "restore")
        path_ver = php_ver.replace('.', '')  # "8.2" -> "82" (install dir name)
        if os.path.exists("/www/server/php/{}".format(path_ver)):
            # Already installed: mark the log line done and continue.
            new_log_str = "php-{} ✓".format(php_ver)
            self.replace_log(log_str, new_log_str, "restore")
            continue

        # Download and run the panel's PHP installer for this version.
        result = public.ExecShell(
            "cd /www/server/panel/install && wget -O php.sh {}/install/{}/php.sh && bash php.sh install {}".format(
                OFFICIAL_URL, install_type, php_ver
            )
        )
        # Success is judged by the install directory appearing.
        if not os.path.exists("/www/server/php/{}".format(path_ver)):
            combined_output = (result[0] + result[1]).splitlines()
            err_msg = '\n'.join(combined_output[-10:])  # last 10 installer lines
            new_log_str = public.lang(
                "php-{} ✗ Installation failed Reason: {} \n Please try to reinstall php in the software store after the restore task ends").format(
                php_ver,
                err_msg)
            php["restore_status"] = 3
            php["msg"] = err_msg
            self.replace_log(log_str, new_log_str, "restore")
        else:
            php["restore_status"] = 2
            php["msg"] = "success"
            new_log_str = "php-{} ✓".format(php_ver)
            self.replace_log(log_str, new_log_str, "restore")

    # Persist the per-version statuses accumulated in the loop.
    self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
def install_node(self, timestamp):
    """Reinstall Node.js versions and their global npm modules from restore data.

    Versions already present under ``/www/server/nodejs/<ver>`` are skipped
    (their global modules are not re-checked). restore_status convention:
    2 = success, 3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    node_list = restore_data['data_list']['soft']['node_list']
    for node_data in node_list:
        node_ver = node_data['version']
        log_str = public.lang("Start installing node-{}").format(node_ver)
        self.print_log(log_str, "restore")
        if os.path.exists("/www/server/nodejs/{}".format(node_ver)):
            new_log_str = "node-{} ✓".format(node_ver)
            self.replace_log(log_str, new_log_str, "restore")
            continue
        # Download and run the panel's Node.js installer for this version.
        result = public.ExecShell(
            "cd /www/server/panel/install && wget -O node_plugin_install.sh {}/install/0/node_plugin_install.sh && bash node_plugin_install.sh {}".format(
                OFFICIAL_URL, node_ver
            )
        )

        # Reinstall each recorded global module with this version's npm.
        for mod_list in node_data['mod_list']:
            mod_name = mod_list
            # Shell script pins registry/prefix/cache to this Node version's
            # tree before installing the module globally.
            mod_shell = '''
            export PATH
            export HOME=/root
            export NODE_PATH="/www/server/nodejs/{node_ver}/etc/node_modules"
            /www/server/nodejs/{node_ver}//bin/npm config set registry https://registry.npmmirror.com/
            /www/server/nodejs/{node_ver}//bin/npm config set prefix /www/server/nodejs/{node_ver}/
            /www/server/nodejs/{node_ver}//bin/npm config set cache /www/server/nodejs/{node_ver}//cache
            /www/server/nodejs/{node_ver}//bin/npm config set strict-ssl false
            /www/server/nodejs/{node_ver}//bin/yarn config set registry https://registry.npmmirror.com/
            /www/server/nodejs/{node_ver}/bin/npm install {mod_name} -g &> /www/server/panel/plugin/nodejs/exec.log
            '''.format(node_ver=node_ver, mod_name=mod_name)
            result = public.ExecShell(mod_shell)
        # Success is judged by the version directory appearing.
        # NOTE(review): `result` here is the output of the LAST shell call
        # (the final module install when mod_list is non-empty), so the
        # failure excerpt below may not reflect the installer itself.
        if os.path.exists("/www/server/nodejs/{}".format(node_ver)):
            new_log_str = "node-{} ✓".format(node_ver)
            self.replace_log(log_str, new_log_str, "restore")
            node_data["restore_status"] = 2
            node_data["msg"] = None
        else:
            combined_output = (result[0] + result[1]).splitlines()
            err_msg = '\n'.join(combined_output[-10:])  # last 10 output lines
            new_log_str = public.lang(
                "node-{} ✗ Installation failed Reason: {} \n Please try to reinstall node in the software store after the restore task ends").format(
                node_ver, err_msg)
            self.replace_log(log_str, new_log_str, "restore")
            node_data["restore_status"] = 3
            node_data["msg"] = err_msg
    # Persist the per-version statuses accumulated in the loop.
    self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
def install_mysql_server(self, timestamp):
    """Reinstall the MySQL/MariaDB server recorded in the restore data.

    Both flavors install under ``/www/server/mysql``, so an existing
    ``bin/mysql`` means the step is skipped. restore_status convention:
    2 = success, 3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    mysql_server = restore_data['data_list']['soft']['mysql_server']
    install_type = self.get_install_type()
    if mysql_server['type'] == 'mariadb':
        log_str = public.lang("Start installing mariadb-{}").format(mysql_server['version'])
        self.print_log(log_str, "restore")
        if os.path.exists("/www/server/mysql/bin/mysql"):
            # Already installed — nothing to do.
            new_log_str = "mariadb-{} ✓".format(mysql_server['version'])
            self.replace_log(log_str, new_log_str, "restore")
            return
        # The shared installer script selects MariaDB via "mariadb_<version>".
        result = public.ExecShell(
            "cd /www/server/panel/install && wget -O mysql.sh {}/install/{}/mysql.sh && bash mysql.sh install {}".format(
                OFFICIAL_URL, install_type, f"mariadb_{mysql_server['version']}"
            )
        )
        # Success is judged by the installed binary appearing.
        if os.path.exists("/www/server/mysql/bin/mysql"):
            new_log_str = "mariadb-{} ✓".format(mysql_server['version'])
            self.replace_log(log_str, new_log_str, "restore")
            mysql_server["restore_status"] = 2
            mysql_server["msg"] = None
        else:
            combined_output = (result[0] + result[1]).splitlines()
            err_msg = '\n'.join(combined_output[-10:])  # last 10 installer lines
            new_log_str = public.lang(
                "mariadb-{} ✗ Installation failed Reason: {} \n Please try to reinstall mariadb in the software store after the restore task ends").format(
                mysql_server['version'], err_msg
            )
            self.replace_log(log_str, new_log_str, "restore")
            mysql_server["restore_status"] = 3
            mysql_server["msg"] = err_msg
        self.update_restore_data_list(timestamp, restore_data)
        return

    if mysql_server['type'] == 'mysql':
        log_str = public.lang("Start installing mysql-{}").format(mysql_server['version'])
        self.print_log(log_str, "restore")
        if os.path.exists("/www/server/mysql/bin/mysql"):
            # Already installed — nothing to do.
            new_log_str = "mysql-{} ✓".format(mysql_server['version'])
            self.replace_log(log_str, new_log_str, "restore")
            return
        result = public.ExecShell(
            "cd /www/server/panel/install && wget -O mysql.sh {}/install/{}/mysql.sh && bash mysql.sh install {}".format(
                OFFICIAL_URL, install_type, mysql_server['version']
            )
        )
        # Success is judged by the installed binary appearing.
        if os.path.exists("/www/server/mysql/bin/mysql"):
            new_log_str = "mysql-{} ✓".format(mysql_server['version'])
            self.replace_log(log_str, new_log_str, "restore")
            mysql_server["restore_status"] = 2
            mysql_server["msg"] = None
        else:
            combined_output = (result[0] + result[1]).splitlines()
            err_msg = '\n'.join(combined_output[-10:])
            new_log_str = public.lang(
                "mysql-{} ✗ Installation failed Reason: {} \n Please try to reinstall mysql in the software store after the restore task ends").format(
                mysql_server['version'], err_msg
            )
            self.replace_log(log_str, new_log_str, "restore")
            mysql_server["restore_status"] = 3
            mysql_server["msg"] = err_msg
        self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
def install_mongodb_server(self, timestamp):
    """Reinstall the MongoDB server recorded in the restore data.

    Skips the install when a client or daemon binary plus the version
    marker already exist. restore_status convention: 2 = success, 3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    mongodb_server = restore_data['data_list']['soft']['mongodb_server']
    # install_type = self.get_install_type()
    log_str = public.lang("Start installing mongodb-{}").format(mongodb_server['version'])
    self.print_log(log_str, "restore")
    mongo = "/www/server/mongodb/bin/mongo"
    mongod = "/www/server/mongodb/bin/mongod"
    if (os.path.exists(mongo) or os.path.exists(mongod)) and os.path.exists("/www/server/mongodb/version.pl"):
        # Already installed — nothing to do.
        new_log_str = "mongodb-{} ✓".format(mongodb_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        return

    # Download and run the panel's MongoDB installer for the recorded version.
    result = public.ExecShell(
        "cd /www/server/panel/install && wget -O mongodb.sh {}/install/0/mongodb.sh && bash mongodb.sh install {}".format(
            OFFICIAL_URL, mongodb_server['version']
        )
    )
    # Success is judged by either binary appearing.
    if os.path.exists(mongo) or os.path.exists(mongod):
        new_log_str = "mongodb-{} ✓".format(mongodb_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        mongodb_server["restore_status"] = 2
        mongodb_server["msg"] = None
    else:
        combined_output = (result[0] + result[1]).splitlines()
        err_msg = '\n'.join(combined_output[-10:])  # last 10 installer lines
        new_log_str = public.lang(
            "mongodb-{} ✗ Installation failed Reason: {} \n Please try to reinstall mongodb in the software store after the restore task ends").format(
            mongodb_server['version'], err_msg)
        self.replace_log(log_str, new_log_str, "restore")
        mongodb_server["restore_status"] = 3
        mongodb_server["msg"] = err_msg

    self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
def install_memcached_server(self, timestamp):
    """Reinstall the memcached server recorded in the restore data.

    restore_status convention: 2 = success, 3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    memcached_server = restore_data['data_list']['soft']['memcached_server']
    # install_type = self.get_install_type()
    log_str = public.lang("Start installing memcached-{}").format(memcached_server['version'])
    self.print_log(log_str, "restore")
    if os.path.exists("/usr/local/memcached/bin/memcached"):
        # Already installed — nothing to do.
        new_log_str = "memcached-{} ✓".format(memcached_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        return
    # Download and run the panel's memcached installer (no version argument).
    result = public.ExecShell(
        f"cd /www/server/panel/install && wget -O memcached.sh {OFFICIAL_URL}/install/0/memcached.sh && bash memcached.sh install"
    )
    # Success is judged by the installed binary appearing.
    if os.path.exists("/usr/local/memcached/bin/memcached"):
        new_log_str = "memcached-{} ✓".format(memcached_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        memcached_server["restore_status"] = 2
        memcached_server["msg"] = None
    else:
        combined_output = (result[0] + result[1]).splitlines()
        err_msg = '\n'.join(combined_output[-10:])  # last 10 installer lines
        new_log_str = public.lang(
            "memcached-{} ✗ Installation failed Reason: {} \n Please try to reinstall memcached in the software store after the restore task ends").format(
            memcached_server['version'], err_msg)
        self.replace_log(log_str, new_log_str, "restore")
        memcached_server["restore_status"] = 3
        memcached_server["msg"] = err_msg

    self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
def install_redis_server(self, timestamp):
    """Reinstall the Redis server recorded in the restore data.

    restore_status convention: 2 = success, 3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    redis_server = restore_data['data_list']['soft']['redis_server']
    # install_type = self.get_install_type()
    log_str = public.lang("Start installing redis-{}").format(redis_server['version'])
    self.print_log(log_str, "restore")
    if os.path.exists("/www/server/redis/src/redis-server") and os.path.exists("/www/server/redis/version.pl"):
        # Already installed — nothing to do.
        new_log_str = "redis-{} ✓".format(redis_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        return
    # Download and run the panel's Redis installer for the recorded version.
    result = public.ExecShell(
        "cd /www/server/panel/install && wget -O redis.sh {}/install/0/redis.sh && bash redis.sh install {}".format(
            OFFICIAL_URL, redis_server['version']
        )
    )
    # NOTE(review): the skip-check above probes redis-server but success is
    # judged here by redis-cli — confirm this asymmetry is intentional.
    if os.path.exists("/www/server/redis/src/redis-cli"):
        new_log_str = "redis-{} ✓".format(redis_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        redis_server["restore_status"] = 2
        redis_server["msg"] = None
    else:
        combined_output = (result[0] + result[1]).splitlines()
        err_msg = '\n'.join(combined_output[-10:])  # last 10 installer lines
        new_log_str = "redis-{} ✗ {}".format(redis_server['version'], err_msg)
        self.replace_log(log_str, new_log_str, "restore")
        redis_server["restore_status"] = 3
        redis_server["msg"] = err_msg

    self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
def install_pgsql_server(self, timestamp):
    """Reinstall the PostgreSQL server recorded in the restore data.

    Unlike the other installers this one passes a source tarball name and
    URL to the install script. restore_status convention: 2 = success,
    3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    pgsql_server = restore_data['data_list']['soft']['pgsql_server']
    log_str = public.lang("Start installing pgsql-{}").format(pgsql_server['version'])
    self.print_log(log_str, "restore")
    if os.path.exists("/www/server/pgsql/bin/pg_config"):
        # Already installed — nothing to do.
        new_log_str = "pgsql-{} ✓".format(pgsql_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        return
    self.update_restore_data_list(timestamp, restore_data)

    # Source tarball name and download URL for the recorded version.
    down_file = "postgresql-{pgsql_version}.tar.gz".format(pgsql_version=pgsql_server['version'])
    down_url = "{}/src/postgresql-{}.tar.gz".format(
        OFFICIAL_URL, pgsql_server['version']
    )

    result = public.ExecShell(
        "cd /www/server/panel/install && wget -O pgsql_install.sh {}/install/0/pgsql_install.sh && bash pgsql_install.sh {} {}".format(
            OFFICIAL_URL, down_file, down_url
        )
    )
    # Success is judged by the psql client appearing.
    if os.path.exists("/www/server/pgsql/bin/psql"):
        new_log_str = "pgsql-{} ✓".format(pgsql_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        pgsql_server["restore_status"] = 2
        pgsql_server["msg"] = None
    else:
        combined_output = (result[0] + result[1]).splitlines()
        err_msg = '\n'.join(combined_output[-10:])  # last 10 installer lines
        new_log_str = "pgsql-{} ✗ {}".format(pgsql_server['version'], err_msg)
        self.replace_log(log_str, new_log_str, "restore")
        pgsql_server["restore_status"] = 3
        pgsql_server["msg"] = err_msg

    self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
def install_phpmyadmin(self, timestamp):
    """Reinstall phpMyAdmin at the version recorded in the restore data.

    NOTE(review): unlike the other installers there is no "already
    installed" skip-check — the installer always runs; confirm intent.
    restore_status convention: 2 = success, 3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    phpmyadmin_server = restore_data['data_list']['soft']['phpmyadmin_version']
    log_str = public.lang("Start installing phpmyadmin-{}").format(phpmyadmin_server['version'])
    self.print_log(log_str, "restore")
    # Download and run the panel's phpMyAdmin installer for the recorded version.
    result = public.ExecShell(
        "cd /www/server/panel/install && wget -O phpmyadmin.sh {}/install/0/phpmyadmin.sh && bash phpmyadmin.sh install {}".format(
            OFFICIAL_URL, phpmyadmin_server['version']
        )
    )
    # Success is judged by the version marker file appearing.
    if os.path.exists("/www/server/phpmyadmin/version.pl"):
        phpmyadmin_server["restore_status"] = 2
        phpmyadmin_server["msg"] = None
        new_log_str = "phpmyadmin-{} ✓".format(phpmyadmin_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
    else:
        combined_output = (result[0] + result[1]).splitlines()
        err_msg = '\n'.join(combined_output[-10:])  # last 10 installer lines
        new_log_str = public.lang(
            "phpmyadmin-{} ✗ Installation failed Reason: {} \n Please try to reinstall phpmyadmin in the software store after the restore task ends").format(
            phpmyadmin_server['version'], err_msg
        )
        self.replace_log(log_str, new_log_str, "restore")
        phpmyadmin_server["restore_status"] = 3
        phpmyadmin_server["msg"] = err_msg

    self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
def install_ftp_server(self, timestamp):
    """Reinstall pure-ftpd as recorded in the restore data and re-apply its port.

    After a fresh install the default user database files are removed so
    the restored FTP accounts can be written cleanly. restore_status
    convention: 2 = success, 3 = failed.
    """
    restore_data = self.get_restore_data_list(timestamp)
    ftp_server = restore_data['data_list']['soft']['ftp_server']
    log_str = public.lang("Start installing ftp-{}").format(ftp_server['version'])
    self.print_log(log_str, "restore")

    if os.path.exists("/www/server/pure-ftpd/bin/pure-pw"):
        # Already installed — mark done and skip (port is NOT re-applied
        # on this path).
        new_log_str = "ftp-{} ✓".format(ftp_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        ftp_server["restore_status"] = 2
        ftp_server["msg"] = None
        self.update_restore_data_list(timestamp, restore_data)
        return

    # Download and run the panel's pure-ftpd installer.
    result = public.ExecShell(
        f"cd /www/server/panel/install && wget -O pureftpd.sh {OFFICIAL_URL}/install/0/pureftpd.sh && bash pureftpd.sh install"
    )
    # Drop the installer's default FTP user database; restored accounts
    # will be written later.
    public.ExecShell("rm -f /www/server/pure-ftpd/etc/pureftpd.passwd")
    public.ExecShell("rm -f /www/server/pure-ftpd/etc/pureftpd.pdb")
    # Success is judged by the pure-pw tool appearing.
    if not os.path.exists("/www/server/pure-ftpd/bin/pure-pw"):
        combined_output = (result[0] + result[1]).splitlines()
        err_msg = '\n'.join(combined_output[-10:])  # last 10 installer lines
        new_log_str = public.lang(
            "ftp-{} ✗ Installation failed Reason: {} \nPlease try to reinstall ftp in the software store after the restore task ends").format(
            ftp_server['version'], err_msg)
        self.replace_log(log_str, new_log_str, "restore")
        ftp_server["restore_status"] = 3
        ftp_server["msg"] = err_msg
    else:
        new_log_str = "ftp-{} ✓".format(ftp_server['version'])
        self.replace_log(log_str, new_log_str, "restore")
        ftp_server["restore_status"] = 2
        ftp_server["msg"] = None

    self.update_restore_data_list(timestamp, restore_data)

    # Re-apply a non-default FTP port through the panel's ftp plugin.
    # NOTE(review): `app` is not defined in this file's visible scope —
    # presumably the panel Flask app imported elsewhere; confirm.
    import ftp
    if ftp_server['port'] != 21:
        with app.app_context():
            args = public.dict_obj()
            args.port = str(ftp_server['port'])
            ftp.ftp().setPort(args)
|
||||
|
||||
def restore_env(self, timestamp):
    """Restore the panel runtime environment by reinstalling each recorded service.

    Each installer runs independently: a failure (including a missing key
    in the recorded soft data) is logged and does not stop the remaining
    installations — the same best-effort behavior as the original
    hand-written sequence of try/except blocks, now table-driven.
    """
    self.print_log("==================================", "restore")
    self.print_log(public.lang("Start restoring panel running environment"), "restore")
    self.print_log(public.lang("Will skip installation if the same environment exists"), "restore")
    restore_data = self.get_restore_data_list(timestamp)
    self.update_restore_data_list(timestamp, restore_data)

    soft_json_data = restore_data['data_list']['soft']

    # (soft-data key, installer, label used in the error log) —
    # order preserved from the original sequence.
    steps = [
        ('web_server', self.install_web_server, "web server"),
        ('php_server', self.install_php_server, "PHP server"),
        ('mysql_server', self.install_mysql_server, "MySQL server"),
        ('ftp_server', self.install_ftp_server, "FTP server"),
        # ('node_list', self.install_node, "Node.js"),  # disabled upstream
        ('redis_server', self.install_redis_server, "Redis server"),
        ('memcached_server', self.install_memcached_server, "Memcached server"),
        ('mongodb_server', self.install_mongodb_server, "MongoDB server"),
        ('pgsql_server', self.install_pgsql_server, "PostgreSQL server"),
        ('phpmyadmin_version', self.install_phpmyadmin, "phpMyAdmin"),
    ]
    for key, installer, label in steps:
        try:
            # The key lookup stays inside the try so a missing key is
            # logged like any other failure (matches original semantics).
            if soft_json_data[key]:
                installer(timestamp)
        except Exception as e:
            import traceback
            public.print_log(traceback.format_exc())
            public.print_log("Error installing {}: {}".format(label, str(e)))
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry: btpython backup_manager.py <method> <timestamp>
    # Bug fix: the original checked `len(sys.argv) < 2` but then read
    # sys.argv[2], so a missing timestamp crashed with IndexError instead
    # of printing the usage line.
    if len(sys.argv) < 3:
        print("Usage: btpython backup_manager.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]   # name of the SoftModule method to run
    timestamp = sys.argv[2]     # backup/restore task timestamp
    soft_manager = SoftModule()
    if hasattr(soft_manager, method_name):
        method = getattr(soft_manager, method_name)
        method(timestamp)
    else:
        print(f"Error: {public.lang('Method')} '{method_name}' {public.lang('does not exist')}")
|
||||
92
mod/project/backup_restore/modules/ssh_module.py
Normal file
92
mod/project/backup_restore/modules/ssh_module.py
Normal file
@@ -0,0 +1,92 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <miku@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import os
|
||||
import sys
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from mod.project.backup_restore.base_util import BaseUtil
|
||||
from mod.project.backup_restore.config_manager import ConfigManager
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class SshModule(BaseUtil, ConfigManager):
    """Backs up and restores the panel's SSH/terminal configuration directory."""

    def __init__(self):
        super().__init__()
        # Root directory for all backup/restore tasks.
        self.base_path = '/www/backup/backup_restore'
        # NOTE(review): keeps the historical "bakcup" typo — other modules
        # (e.g. ConfigManager) may reference this attribute name.
        self.bakcup_task_json = self.base_path + '/backup_task.json'
        # Panel directory holding saved SSH connection info.
        self.ssh_path = "/www/server/panel/config/ssh_info"

    def backup_ssh_data(self, timestamp):
        """Copy the panel's ssh_info directory into this task's backup tree
        and record the result in the task's data list.

        Args:
            timestamp: backup-task timestamp identifying the target dir.
        """
        self.print_log("====================================================", "backup")
        self.print_log(public.lang("Start backing up terminal data"), "backup")

        ssh_backup_path = self.base_path + "/{timestamp}_backup/ssh".format(timestamp=timestamp)
        if not os.path.exists(ssh_backup_path):
            public.ExecShell('mkdir -p {}'.format(ssh_backup_path))
        # "\\cp" bypasses any shell alias on cp; -rpa preserves attributes.
        # (Fixed: removed a leftover debug print() of the source path and
        # the invalid "\c" escape sequence — the shell command is unchanged.)
        public.ExecShell("\\cp -rpa {} {}".format(self.ssh_path, ssh_backup_path))

        ssh_info = {
            'status': 2,  # 2 = backed up successfully (module convention)
            'msg': None,
            'ssh_info_path': ssh_backup_path,
        }

        backup_size = self.format_size(self.get_file_size(ssh_backup_path))
        self.print_log(public.lang("Terminal data backup completed. Data size: {}").format(backup_size), 'backup')

        data_list = self.get_backup_data_list(timestamp)
        data_list['data_list']['ssh'] = ssh_info
        self.update_backup_data_list(timestamp, data_list)

    def restore_ssh_data(self, timestamp):
        """Restore the ssh_info directory from this task's backup tree.

        restore_status convention: 1 = in progress, 2 = success.
        """
        self.print_log("==================================", "restore")
        self.print_log(public.lang("Start restoring terminal data"), "restore")

        restore_data = self.get_restore_data_list(timestamp)
        ssh_data = restore_data['data_list']['ssh']
        ssh_info_path = ssh_data['ssh_info_path'] + "/ssh_info"

        restore_data['data_list']['ssh']['restore_status'] = 1
        self.update_restore_data_list(timestamp, restore_data)

        if not os.path.exists(ssh_info_path):
            # NOTE(review): restore_status is left at 1 (in progress) on
            # this failure path — confirm whether it should become a
            # failure status instead.
            self.print_log(public.lang("Restore failed, file does not exist"), "restore")
            return
        if not os.path.exists(self.ssh_path):
            public.ExecShell("mkdir -p {}".format(self.ssh_path))
        public.ExecShell("\\cp -rpa {}/* {}".format(ssh_info_path, self.ssh_path))
        self.print_log(public.lang("Terminal data restoration completed"), "restore")

        restore_data['data_list']['ssh']['restore_status'] = 2
        self.update_restore_data_list(timestamp, restore_data)
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: dispatch an SshModule method by name.
    # BUGFIX: both sys.argv[1] and sys.argv[2] are read below, so at least
    # 3 argv entries are required — the old "< 2" check let a bare method
    # name through and crashed with IndexError on sys.argv[2].
    if len(sys.argv) < 3:
        print("Usage: btpython backup_manager.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]  # method name to invoke
    timestamp = sys.argv[2]  # backup/restore timestamp
    ssh_manager = SshModule()  # dispatcher instance
    if hasattr(ssh_manager, method_name):  # verify the method exists
        method = getattr(ssh_manager, method_name)
        method(timestamp)
    else:
        print(f"Error: Method '{method_name}' does not exist")
|
||||
343
mod/project/backup_restore/modules/ssl_model.py
Normal file
343
mod/project/backup_restore/modules/ssl_model.py
Normal file
@@ -0,0 +1,343 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from pathlib import Path
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
import public
|
||||
from public.hook_import import hook_import
|
||||
|
||||
hook_import()
|
||||
from YakPanel import app
|
||||
from ssl_domainModelV2.api import DomainObject
|
||||
from ssl_domainModelV2.service import CertHandler
|
||||
from ssl_domainModelV2.config import UserFor
|
||||
from ssl_domainModelV2.model import DnsDomainSSL, DnsDomainProvider
|
||||
from mod.project.backup_restore.data_manager import DataManager
|
||||
|
||||
|
||||
class SSLModel(DataManager):
    """Backup/restore of the domain management center: SSL certificates
    (DnsDomainSSL) and DNS API provider accounts (DnsDomainProvider).

    Status bookkeeping used throughout: status/restore_status 0 = pending,
    1 = in progress, 2 = success, 3 = failed.
    """

    def __init__(self):
        super().__init__()
        # Root directory for all backup/restore artifacts.
        self.base_path = '/www/backup/backup_restore'
        # (sic: "bakcup") name kept for compatibility with other modules.
        self.bakcup_task_json = self.base_path + '/backup_task.json'

    def get_ssl_backup_conf(self, timestamp: int = None) -> dict:
        """
        Get SSL certificate and DNS API provider backup configuration.

        :param timestamp: backup task timestamp (currently unused here).
        :return: dict with "ssl_list" and "provider_list" snapshots; every
                 entry is tagged with data_type/status/msg fields (status 0
                 = pending).
        """
        ssl_list = [
            {
                **ssl.as_dict(),
                "data_type": "backup",
                "status": 0,
                "msg": None,
            } for ssl in DnsDomainSSL.objects.filter(is_order=0)  # filter out commercial (purchased) certificates
        ]
        provider_list = [
            {
                **x.as_dict(),
                # conflicts with the backup "status" field, so expose the
                # account state as "account_status" instead
                "account_status": x.status,
                "data_type": "backup",
                "status": 0,
                "msg": None,
            } for x in DnsDomainProvider.objects.all()
        ]
        res = {
            "ssl_list": ssl_list,
            "provider_list": provider_list,
        }
        return res

    def backup_ssl_data(self, timestamp) -> None:
        """
        Backup domain management center: copy each non-expired certificate
        directory into <backup_path>/ssl/<hash> and mark per-item status in
        the shared backup manifest.
        """
        # overall backup manifest for this task
        data_list = self.get_backup_data_list(timestamp)
        if not data_list:
            return None

        data_backup_path = data_list.get("backup_path")
        ssl_backup_path = Path(data_backup_path) / "ssl"
        ssl_backup_path.mkdir(parents=True, exist_ok=True)
        self.print_log("==================================", "backup")
        self.print_log(public.lang("Start backing up SSL certificate information"), "backup")

        for ssl in data_list['data_list']['ssl'].get("ssl_list", []):  # SSL in the general configuration
            try:
                # skip entries whose on-disk certificate directory is gone
                if not ssl.get("path") or not os.path.exists(ssl.get("path")):
                    err = public.lang("{} {} Certificate file does not exist ✗").format(
                        ssl['info'].get("issuer_O", ""), ssl['dns']
                    )
                    self.print_log(err, "backup")
                    ssl["status"] = 3
                    ssl["msg"] = err
                    continue
                # not_after_ts is compared in milliseconds — skip expired certs
                if ssl.get("not_after_ts") < time.time() * 1000:
                    err = public.lang("{} [{}] Certificate has expired ✗").format(
                        ssl['info'].get("issuer_O", ""), str(ssl['dns'])
                    )
                    self.print_log(err, "backup")
                    ssl["status"] = 3
                    ssl["msg"] = err
                    continue
                # (sic: "domian") destination dir keyed by certificate hash
                domian_path = ssl_backup_path / ssl.get("hash")
                CertHandler.make_last_info(domian_path, force=True)
                public.ExecShell(f"\cp -rpa {ssl['path']} {domian_path}")
                ssl["status"] = 2
                self.print_log(public.lang("{} {} ✓").format(
                    ssl['info'].get("issuer_O", ""), ssl['dns']
                ), "backup")
            except Exception as e:
                err = public.lang("{} {} Backup failed: {} ✗").format(
                    ssl['info'].get('issuer_O', ''), ssl['dns'], str(e)
                )
                ssl["status"] = 3
                ssl["msg"] = err
                self.print_log(err, "backup")
                continue

        # provider records are pure DB data; mark them all as backed up
        new_provider_info = [
            {**x, "status": 2} for x in data_list['data_list']['ssl'].get("provider_list", [])
        ]
        data_list['data_list']['ssl']['provider_list'] = new_provider_info
        self.print_log(public.lang("DNS API provider information backup completed"), "backup")

        self.update_backup_data_list(timestamp, data_list)
        self.print_log(public.lang("SSL certificate information backup completed"), "backup")

    def _rebuild_deploy(self, ssl_obj: DnsDomainSSL, backup_ssl: dict) -> None:
        """Re-apply the backed-up deployment targets (sites/mails/panel) of
        *backup_ssl* onto *ssl_obj*, after detaching those targets from every
        other certificate. Best-effort: any failure is logged, not raised.
        """
        try:
            def r_log(log_str: str, new_log: str):
                # replace an in-progress log line with its final message
                self.replace_log(log_str, new_log, "restore")

            used = backup_ssl.get("user_for", {})
            if not ssl_obj or not used:
                return

            # pre clear: remove our deployment targets from all other certs
            for other_ssl in DnsDomainSSL.objects.filter(hash__ne=ssl_obj.hash):
                is_change = False

                for site_name in used.get(UserFor.sites, []):
                    if site_name in other_ssl.sites_uf:
                        other_ssl.sites_uf.remove(site_name)
                        is_change = True

                for mail_name in used.get(UserFor.mails, []):
                    if mail_name in other_ssl.mails_uf:
                        other_ssl.mails_uf.remove(mail_name)
                        is_change = True

                for panel_name in used.get(UserFor.panel, []):
                    if panel_name in other_ssl.panel_uf:
                        # panel deployment is exclusive — clear the whole list
                        other_ssl.panel_uf = []
                        is_change = True

                if is_change:
                    other_ssl.save()

            if used.get(UserFor.sites):
                log_str = public.lang("Restoring deployment sites for certificate {}...").format(backup_ssl['subject'])
                self.print_log(log_str, "restore")
                build_sites = ssl_obj.deploy_sites(
                    site_names=used[UserFor.sites], replace=True
                )
                r_log(log_str, public.lang("Restored deployment sites for certificate {}: {}").format(
                    backup_ssl['subject'], build_sites.get('msg')
                ))

            if used.get(UserFor.mails):
                log_str = public.lang("Restoring deployment mailboxes for certificate {}...").format(
                    backup_ssl['subject'])
                self.print_log(log_str, "restore")
                build_mails = ssl_obj.deploy_mails(
                    mail_names=used[UserFor.mails]
                )
                r_log(log_str, public.lang("Restored deployment mailboxes for certificate {}: {}").format(
                    backup_ssl['subject'], build_mails.get('msg')
                ))

            if used.get(UserFor.panel):
                log_str = public.lang("Restoring deployment panel for certificate {}...").format(backup_ssl['subject'])
                self.print_log(log_str, "restore")
                build_panel = ssl_obj.deploy_panel(
                    recover=0
                )
                r_log(log_str, public.lang("Restored deployment panel for certificate {}: {}").format(
                    backup_ssl['subject'], build_panel.get('msg')
                ))
        except Exception as e:
            public.print_log("rebuild deploy error: {}".format(str(e)))

    def _restore_ssl(self, backup_ssl: dict, pem: str, key: str) -> None:
        """Insert or (when self.overwrite is truthy) update one certificate
        record from its backup entry and PEM material.

        NOTE(review): self.overwrite is set by the consuming class (e.g.
        RestoreManager) — this mixin assumes it exists; confirm for other
        consumers.

        :param backup_ssl: backed-up certificate record (dict form).
        :param pem: full certificate chain (PEM text).
        :param key: private key (PEM text).
        :raises Exception: when inserting a new certificate fails.
        """
        exist_obj = DnsDomainSSL.objects.filter(
            hash=CertHandler.get_hash(cert_pem=pem)
        ).first()
        if exist_obj:
            if not self.overwrite:
                return
            # overwrite
            # exist_obj.provider_id = backup_ssl["provider_id"]
            # exist_obj.not_after = backup_ssl["not_after"]
            # exist_obj.not_after_ts = backup_ssl["not_after_ts"]
            exist_obj.user_for = backup_ssl["user_for"]
            exist_obj.info = backup_ssl["info"]
            exist_obj.alarm = backup_ssl["alarm"]
            exist_obj.auto_renew = backup_ssl["auto_renew"]
            exist_obj.auth_info = backup_ssl["auth_info"]
            exist_obj.log = backup_ssl["log"]
            exist_obj.save()
            # ssl_obj = exist_obj
        else:
            try:
                insert = CertHandler().save_by_data(
                    cert_pem=pem,
                    private_key=key,
                    new_auth_info=backup_ssl["auth_info"],
                )
                if not insert:
                    raise Exception(public.lang("Certificate insertion failed, please check the log"))
            except Exception as e:
                raise Exception(public.lang(f"Certificate Restore Failed: {str(e)}"))

        # ssl_obj = DnsDomainSSL.objects.filter(hash=insert["hash"]).first()

        # if ssl_obj:
        #     # it will update the ssl field 'user_for'
        #     self._rebuild_deploy(
        #         ssl_obj=ssl_obj,
        #         backup_ssl=backup_ssl,
        #     )

    def _restore_provider(self, provider: dict):
        """Re-create one DNS API provider account from its backup entry.

        :param provider: backed-up provider record (dict form).
        :raises Exception: when create_dns_api reports a non-zero status.
        """
        if_exist = DnsDomainProvider.objects.filter(
            name=provider["name"],
            api_user=provider.get("api_user", ""),
            api_key=provider["api_key"],
        ).first()
        if if_exist:
            # NOTE(review): both branches return, so an existing provider is
            # never updated — even with overwrite set. This looks inconsistent
            # with _restore_ssl's overwrite handling; confirm it is intended.
            if self.overwrite:
                return
            return

        res = DomainObject().create_dns_api(
            public.to_dict_obj({
                "name": provider["name"],
                "api_user": provider.get("api_user", ""),
                "api_key": provider["api_key"],
                "status": provider.get("account_status", 1),
                "permission": provider.get("permission", "-"),
                "alias": provider["alias"],
                "ps": provider["ps"],
            })
        )
        if res.get("status", 0) != 0:
            raise Exception(public.lang(
                f"Restore DNS API provider failed: {res.get('message', 'create dns api error')}"
            ))

    def restore_ssl_data(self, timestamp: int) -> None:
        """Restore domain management center: re-import every backed-up
        certificate and DNS API provider, tracking per-item restore_status
        in the restore manifest. Runs inside the Flask app context so the
        ORM models are usable.
        """
        self.print_log("====================================================", "restore")
        self.print_log(public.lang("Start restoring domain SSL certificate configuration"), "restore")
        restore_data = self.get_restore_data_list(timestamp)
        if not restore_data:
            self.print_log(public.lang("No restore data found"), "restore")
            return
        ssl_cert_path = Path(restore_data.get("backup_path")) / "ssl"
        if not ssl_cert_path.exists():
            self.print_log(
                public.lang("Backup directory {} does not exist, unable to restore SSL certificate information").format(
                    ssl_cert_path
                ), "restore")
            return
        ssl_info = restore_data["data_list"].get("ssl", {})
        with app.app_context():
            # ======================= ssl =============================
            for ssl in ssl_info.get("ssl_list", []):
                log_str = public.lang("Restoring {} Subject: {}").format(
                    ssl['info'].get('issuer_O'), ssl['subject']
                )
                try:
                    self.print_log(log_str, "restore")
                    ssl["restore_status"] = 1
                    ssl_path = ssl_cert_path / ssl["hash"]
                    if not ssl_path.exists():
                        raise Exception(public.lang("Certificate file does not exist"))
                    pem = ssl_path / "fullchain.pem"
                    key = ssl_path / "privkey.pem"
                    if not pem.exists() or not key.exists():
                        raise Exception(public.lang("Missing certificate or private key file"))
                    self._restore_ssl(
                        backup_ssl=ssl,
                        pem=public.readFile(str(pem)),
                        key=public.readFile(str(key)),
                    )
                    ssl["restore_status"] = 2
                    self.replace_log(
                        log_str,
                        public.lang(f"Restored {ssl['info'].get('issuer_O')} Subject: {ssl['subject']} ✓ "),
                        "restore"
                    )
                except Exception as e:
                    err_msg = public.lang(
                        f"Restoring {ssl['info'].get('issuer_O', '')} Subject: {ssl['subject']} failed: {str(e)}"
                    )
                    ssl["restore_status"] = 3
                    ssl["msg"] = str(e)
                    self.replace_log(log_str, err_msg, "restore")

            # persist certificate results before moving on to providers
            self.update_restore_data_list(timestamp, restore_data)

            # ======================= dns provider =============================
            for provider in ssl_info.get("provider_list", []):
                log_str = public.lang(f"Restoring DNS API {provider['name']}: {provider['alias']}...")
                try:
                    self.print_log(log_str, "restore")
                    self._restore_provider(provider)
                    time.sleep(1)
                    provider["restore_status"] = 2
                    self.replace_log(
                        log_str,
                        public.lang(f"Restored DNS API {provider['name']}: {provider['alias']} ✓ "),
                        "restore"
                    )
                except Exception as e:
                    err_msg = public.lang(f"Restoring DNS API provider: {provider['name']} failed: {str(e)}")
                    provider["restore_status"] = 3
                    provider["msg"] = str(e)
                    self.replace_log(log_str, err_msg, "restore")
                # persist after each provider so progress survives a crash
                self.update_restore_data_list(timestamp, restore_data)

            self.update_restore_data_list(timestamp, restore_data)

        self.print_log(public.lang("SSL certificate information restoration completed"), "restore")
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: invoke an SSLModel method by name with a timestamp.
    if len(sys.argv) < 3:
        print("Usage: btpython backup_manager.py <method> <timestamp>")
        sys.exit(1)

    method_name, timestamp = sys.argv[1], sys.argv[2]
    ssl_module = SSLModel()
    if not hasattr(ssl_module, method_name):
        print(f"Error: method '{method_name}' not found")
    else:
        # look the method up and dispatch with the timestamp argument
        getattr(ssl_module, method_name)(timestamp)
|
||||
504
mod/project/backup_restore/restore_manager.py
Normal file
504
mod/project/backup_restore/restore_manager.py
Normal file
@@ -0,0 +1,504 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: miku <wzz@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import datetime
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
import warnings
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
if "/www/server/panel/class_v2" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class_v2")
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from mod.project.backup_restore.modules.site_module import SiteModule
|
||||
from mod.project.backup_restore.modules.database_module import DatabaseModule
|
||||
from mod.project.backup_restore.modules.ftp_module import FtpModule
|
||||
|
||||
from mod.project.backup_restore.modules.crontab_module import CrontabModule
|
||||
from mod.project.backup_restore.modules.ssh_module import SshModule
|
||||
from mod.project.backup_restore.modules.firewall_module import FirewallModule
|
||||
from mod.project.backup_restore.modules.mail_module import MailModule
|
||||
from mod.project.backup_restore.modules.ssl_model import SSLModel
|
||||
from mod.project.backup_restore.modules.plugin_module import PluginModule
|
||||
|
||||
warnings.filterwarnings("ignore", category=SyntaxWarning)
|
||||
|
||||
|
||||
class RestoreManager(SiteModule, DatabaseModule, FtpModule, SSLModel, CrontabModule, SshModule,
                     FirewallModule, MailModule, PluginModule):
    """Orchestrates a full panel restore: unpacks a backup archive, then
    runs each module's restore step (site, ftp, database, ssl, cron,
    firewall, mail, plugin) best-effort, and finally archives the logs.

    restore_status codes used throughout: 1 = in progress, 2 = done.
    """

    def __init__(self, overwrite: int = 0):
        super().__init__()
        # Root directory for backup/restore artifacts and state files.
        self.base_path = '/www/backup/backup_restore'
        # (sic: "bakcup") name kept for compatibility with other modules.
        self.bakcup_task_json = self.base_path + '/backup_task.json'
        self.backup_log_file = self.base_path + '/backup.log'
        self.backup_pl_file = self.base_path + '/backup.pl'
        self.backup_success_file = self.base_path + '/success.pl'
        self.backup_save_config = self.base_path + '/backup_save_config.json'
        self.history_log_path = '/www/backup/backup_restore/history/log'
        self.history_info_path = '/www/backup/backup_restore/history/info'
        self.restore_log_file = self.base_path + '/restore.log'
        self.restore_pl_file = self.base_path + '/restore.pl'          # lock file: a restore is running
        self.restore_success_file = self.base_path + '/restore_success.pl'
        self.migrate_backup_info_path = '/www/backup/backup_restore/migrate_backup_info.json'
        self.overwrite = overwrite  # force-restore (overwrite existing data) flag

    def restore_data(self, timestamp):
        """
        Restore all data for the backup identified by *timestamp*.

        Uses restore.pl as a lock file so only one restore runs at a time;
        each module restore is best-effort (errors are logged, not fatal).
        Restarts the panel service on success.
        """
        if os.path.exists(self.restore_log_file):
            public.ExecShell("rm -f {}".format(self.restore_log_file))

        # refuse to start if another restore holds the lock file
        if os.path.exists(self.restore_pl_file):
            return public.returnMsg(False, public.lang("A restore process is already running!"))

        try:
            public.WriteFile(self.restore_pl_file, timestamp)
            backup_file = str(timestamp) + "_backup.tar.gz"
            # the archive name may carry extra suffixes — match by substring
            file_names = os.listdir(self.base_path)
            for file in file_names:
                if backup_file in file:
                    backup_file = file

            # merge a pending migration's backup info into the task list
            if os.path.exists(self.migrate_backup_info_path):
                backup_list = []
                if os.path.exists(self.bakcup_task_json):
                    backup_list = json.loads(public.ReadFile(self.bakcup_task_json))
                migrate_backup_info = json.loads(public.ReadFile(self.migrate_backup_info_path))
                backup_list.append(migrate_backup_info)
                public.ExecShell("rm -f {}".format(self.migrate_backup_info_path))
                public.WriteFile(self.bakcup_task_json, json.dumps(backup_list))
                time.sleep(1)

            backup_conf = self.get_backup_conf(timestamp)
            backup_conf['restore_status'] = 1
            self.save_backup_conf(timestamp, backup_conf)

            self.print_log("==================================", "restore")
            self.print_log(public.lang("Start decompressing the data package"), "restore")
            try:
                # drop any stale extraction from a previous attempt
                public.ExecShell("rm -rf {}/{}_backup".format(self.base_path, timestamp))
            except:
                pass

            if not os.path.exists(self.base_path + "/{timestamp}_backup".format(timestamp=timestamp)):
                public.ExecShell("cd {}/ && tar -xvf {}".format(self.base_path, backup_file))

            # restore.json starts as a copy of backup.json and tracks progress
            restore_data_path = self.base_path + "/{timestamp}_backup".format(timestamp=timestamp)
            public.ExecShell("\cp -rpa {}/backup.json {}/restore.json".format(restore_data_path, restore_data_path))

            restore_info = self.get_restore_data_list(timestamp)
            restore_info["force_restore"] = self.overwrite  # overwrite flag
            restore_info['restore_status'] = 1  # mark restore as in progress
            self.update_restore_data_list(timestamp, restore_info)

            start_time = int(time.time())
            # ============================= START ==================================
            self.print_log(public.lang("Start restoring data"), "restore")
            # ============================= env ====================================
            try:
                self.restore_env(timestamp)
            except Exception as e:
                public.print_log(f"restore env error: {str(e)}")

            # ============================= site ====================================
            try:
                self.restore_site_data(timestamp)
            except Exception as e:
                public.print_log("restore site error: {}".format(str(e)))
            finally:
                # always normalize web-root permissions, even on failure
                self.chmod_dir_file("/www/wwwroot", dir_mode=0o755, file_mode=0o644)

            # ============================= ftp =====================================
            try:
                self.restore_ftp_data(timestamp)
            except Exception as e:
                public.print_log("restore ftp error: {}".format(str(e)))

            # ============================= database =================================
            try:
                self.restore_database_data(timestamp)
            except Exception as e:
                public.print_log("restore database error: {}".format(str(e)))
            finally:
                if not self.overwrite:
                    try:  # repair site/database relations after a non-forced restore
                        self.fix_wp_onekey(timestamp)
                    except Exception as e:
                        public.print_log("fix forign key error: {}".format(str(e)))

            # ============================= ssl ======================================
            try:
                self.restore_ssl_data(timestamp)
            except Exception as e:
                public.print_log("restore ssl error: {}".format(str(e)))

            # ============================= cron task ================================
            try:
                self.restore_crontab_data(timestamp)
            except Exception as e:
                public.print_log("restore cron task error: {}".format(str(e)))
            finally:
                self.reload_crontab()

            # ============================== ssh ======================================
            # TODO: known issue — fix in the next release
            # try:
            #     SshModule().restore_ssh_data(timestamp)
            # except Exception as e:
            #     public.print_log("restore ssh error: {}".format(str(e)))

            # ============================= firewall ==================================
            try:
                self.restore_firewall_data(timestamp)
            except Exception as e:
                public.print_log("restore firewall error: {}".format(str(e)))

            # ============================= mail ======================================
            try:
                self.restore_vmail_data(timestamp)
            except Exception as e:
                public.print_log("restore mail error: {}".format(str(e)))

            # ============================= plugin ====================================
            try:
                self.restore_plugin_data(timestamp)
            except Exception as e:
                public.print_log("restore plugin error: {}".format(str(e)))

            # ============================= END =======================================

            end_time = int(time.time())
            done_time = datetime.datetime.fromtimestamp(int(end_time)).strftime('%Y-%m-%d %H:%M:%S')
            total_time = end_time - start_time
            backup_conf = self.get_backup_conf(timestamp)
            backup_conf['restore_status'] = 2
            backup_conf['restore_done_time'] = done_time
            backup_conf['restore_total_time'] = total_time
            self.save_backup_conf(timestamp, backup_conf)

            restore_info['restore_status'] = 2
            restore_info['restore_done_time'] = done_time
            restore_info['restore_total_time'] = total_time
            self.update_restore_data_list(timestamp, restore_info)

            self.print_log("==================================", "restore")
            self.print_log(public.lang("Data restoration completed"), "restore")

            public.WriteFile(self.restore_success_file, timestamp)
            public.ExecShell("rm -f {}".format(self.restore_pl_file))
            if not os.path.exists(self.history_log_path):
                public.ExecShell("mkdir -p {}".format(self.history_log_path))
            if not os.path.exists(self.history_info_path):
                public.ExecShell("mkdir -p {}".format(self.history_info_path))

            # archive the run's log and manifest under history/ (sic: "hitory")
            hitory_log_file = self.history_log_path + '/' + str(timestamp) + '_restore.log'
            history_info_file = self.history_info_path + '/' + str(timestamp) + '_restore.info'
            public.WriteFile(
                hitory_log_file,
                # NOTE(review): .format(timestamp) is a no-op here — the
                # string has no placeholder; the fixed path appears intended.
                public.ReadFile("/www/backup/backup_restore/restore.log".format(timestamp))
            )
            public.WriteFile(
                history_info_file,
                public.ReadFile("/www/backup/backup_restore/{}_backup/restore.json".format(timestamp))
            )
            if os.path.exists("/www/server/panel/data/migration.pl"):
                public.ExecShell("rm -f /www/server/panel/data/migration.pl")

            # restart services
            public.ServiceReload()
            time.sleep(2)
            public.ExecShell("/etc/init.d/bt restart")
        except Exception as e:
            return public.returnMsg(False, public.lang("Data restoration failed: {}").format(str(e)))
        finally:
            # always release the lock file
            if os.path.exists(self.restore_pl_file):
                public.ExecShell("rm -f {}".format(self.restore_pl_file))

    def get_restore_log(self, timestamp):
        """Return the restore log text for *timestamp*: the live log when
        that restore is currently running, otherwise its archived history
        log, otherwise an empty string."""
        restore_log_file = self.base_path + '/restore.log'
        history_log_file = self.history_log_path + '/' + str(timestamp) + '_restore.log'
        if os.path.exists(self.restore_pl_file):
            restore_timestamp = int(public.ReadFile(self.restore_pl_file))
            if int(restore_timestamp) == int(timestamp):
                return public.ReadFile(restore_log_file)
        if os.path.exists(history_log_file):
            return public.ReadFile(history_log_file)
        else:
            return ""

    def get_restore_details(self, timestamp):
        """Return the archived restore manifest for *timestamp*, merged with
        the matching backup-task metadata, wrapped in success_v2/fail_v2."""
        history_info_file = self.history_info_path + '/' + str(timestamp) + '_restore.info'
        if not os.path.exists(history_info_file):
            return public.fail_v2(public.lang("File does not exist"))

        restore_info = json.loads(public.ReadFile(history_info_file))
        restore_info = self.process_detail(restore_info)

        # enrich with metadata from the backup task configuration
        restore_task_info = self.get_backup_conf(timestamp)
        restore_info['backup_file_size'] = restore_task_info['backup_file_size']
        restore_info['backup_file_sha256'] = restore_task_info['backup_file_sha256']
        restore_info['create_time'] = restore_task_info['create_time']
        restore_info['backup_time'] = restore_task_info['backup_time']
        restore_info['backup_file'] = restore_task_info['backup_file']
        restore_info['backup_path'] = restore_task_info['backup_path']
        restore_info['restore_done_time'] = restore_task_info['restore_done_time']
        restore_info['restore_total_time'] = restore_task_info['restore_total_time']
        restore_info['type'] = "restore"
        return public.success_v2(restore_info)

    # TODO: deprecated
    def get_restore_progress(self, get=None):
        """
        Get restore progress information.

        @param get: object carrying the request parameters (unused)
        @return: dict with restore progress information
        """
        # state-file paths for the running restore
        restore_pl_file = self.base_path + '/restore.pl'
        restore_log_file = self.base_path + '/restore.log'
        restore_success_file = self.base_path + '/restore_success.pl'

        # helper that builds the "restore finished" payload (avoids duplication)
        def create_completed_result(restore_timestamp):
            if not restore_timestamp:
                return public.ReturnMsg(False, public.lang("Restore completed but unable to get restore timestamp"))

            if not os.path.exists(self.bakcup_task_json):
                return public.ReturnMsg(False, public.lang("Restore configuration file does not exist"))

            restore_configs = json.loads(public.ReadFile(self.bakcup_task_json))
            success_data = next(
                (item for item in restore_configs if str(item.get('timestamp')) == str(restore_timestamp)), {})

            return {
                "task_type": "restore",
                "task_status": 2,
                "backup_data": None,
                "backup_name": None,
                "data_backup_status": 2,
                "progress": 100,
                "msg": None,
                'exec_log': public.ReadFile(restore_log_file) if os.path.exists(restore_log_file) else "",
                'timestamp': restore_timestamp,
                'backup_file_info': success_data
            }

        # check whether the restore already finished
        if os.path.exists(restore_success_file):
            success_time = int(os.path.getctime(restore_success_file))
            local_time = int(time.time())
            # a success file created within the last 10 seconds means the
            # restore has just completed
            if success_time + 10 > local_time:
                try:
                    restore_timestamp = public.ReadFile(restore_success_file).strip()
                    return public.ReturnMsg(True, create_completed_result(restore_timestamp))
                except Exception as e:
                    public.ExecShell("rm -f {}".format(restore_success_file))
                    return public.ReturnMsg(False,
                                            public.lang("Error getting restore completion information: {}").format(
                                                str(e)))
            else:
                # older than 10 seconds — remove the stale success file
                public.ExecShell("rm -f {}".format(restore_success_file))

        # check whether a restore process is running
        try:
            # check the restore lock file
            if os.path.exists(restore_pl_file):
                timestamp = public.ReadFile(restore_pl_file).strip()
                if not timestamp:
                    return public.ReturnMsg(False, public.lang(
                        "Restore process is running, but unable to get restore timestamp"))
            else:
                # wait 2 seconds — the restore may have just finished
                time.sleep(2)
                if os.path.exists(restore_success_file):
                    success_time = int(os.path.getctime(restore_success_file))
                    local_time = int(time.time())
                    if success_time + 10 > local_time:
                        restore_timestamp = public.ReadFile(restore_success_file).strip()
                        return public.ReturnMsg(True, create_completed_result(restore_timestamp))

                # re-check for a restore process
                # NOTE(review): this re-checks restore_success_file, not the
                # lock file restore_pl_file — looks like a copy-paste slip;
                # confirm which file was intended.
                if os.path.exists(restore_success_file):
                    timestamp = public.ReadFile(restore_success_file).strip()
                    if not timestamp:
                        return public.ReturnMsg(False, public.lang(
                            "Restore process is running, but unable to get restore timestamp"))
                else:
                    return public.ReturnMsg(False, public.lang(
                        "No restore task found, please check the restore list to see if the restore is complete"))

            # read the restore configuration file (retry up to 3 times)
            restore_json_path = f"{self.base_path}/{timestamp}_backup/restore.json"
            count = 0
            while 1:
                if count >= 3:
                    return public.ReturnMsg(False, public.lang("Restore configuration file does not exist: {}").format(
                        restore_json_path))
                count += 1
                if not os.path.exists(restore_json_path):
                    time.sleep(1)
                else:
                    break

            conf_data = json.loads(public.ReadFile(restore_json_path))
        except Exception as e:
            return public.ReturnMsg(False, public.lang("Error getting restore progress information: {}").format(str(e)))

        # read the restore log
        restore_log_data = public.ReadFile(restore_log_file) if os.path.exists(restore_log_file) else ""

        # restore phases in order, with the progress percentage reported
        # while each phase is still incomplete
        restore_types = [
            {
                'type': 'site',
                'data_key': 'site',
                'display_name': 'site',
                'progress': 30
            },
            {
                'type': 'database',
                'data_key': 'database',
                'display_name': 'database',
                'progress': 60
            },
            {
                'type': 'ftp',
                'data_key': 'ftp',
                'display_name': 'ftp',
                'progress': 70
            },
            {
                'type': 'ssh',
                'data_key': 'ssh',
                'display_name': 'ssh',
                'progress': 75
            },
            {
                'type': 'firewall',
                'data_key': 'firewall',
                'display_name': 'firewall',
                'progress': 90
            }
        ]

        # check the software environment status
        if "data_list" in conf_data and "soft" in conf_data["data_list"]:
            soft_data = conf_data["data_list"]["soft"]
            if "status" in soft_data and soft_data["status"].get("status") == 1:
                name = soft_data["status"].get("name", "unknown")
                # version = soft_data["status"].get("version", "unknown")
                return public.ReturnMsg(True, {
                    "task_type": "restore",
                    "task_status": 1,
                    "data_type": "soft",
                    "name": name,
                    "data_backup_status": 1,
                    "progress": 20,
                    "msg": soft_data["status"].get("err_msg"),
                    'exec_log': restore_log_data,
                    'timestamp': timestamp
                })

        # check the progress of each restore phase
        for restore_type in restore_types:
            items = conf_data.get("data_list", {}).get(restore_type['data_key'], [])
            for item in items:
                try:
                    if item.get("restore_status") == 2:
                        continue

                    # first incomplete item determines the reported progress
                    return public.ReturnMsg(True, {
                        "task_type": "restore",
                        "task_status": 1,
                        "data_type": restore_type['type'],
                        "name": item.get("name", public.lang("Unknown {}").format(restore_type['display_name'])),
                        "data_backup_status": item.get("status", 0),
                        "progress": restore_type['progress'],
                        "msg": item.get("msg"),
                        'exec_log': restore_log_data,
                        'timestamp': timestamp
                    })
                except:
                    return public.ReturnMsg(True, {
                        "task_type": "restore",
                        "task_status": 1,
                        "data_type": public.lang("Server Configuration"),
                        "name": public.lang("Server Configuration"),
                        "data_backup_status": 2,
                        "progress": 90,
                        "msg": None,
                        'exec_log': restore_log_data,
                        'timestamp': timestamp
                    })

        # check the data unpacking progress
        try:
            restore_status = conf_data.get('restore_status')
            if restore_status == 1:
                return public.ReturnMsg(True, {
                    "task_type": "restore",
                    "task_status": 1,
                    "data_type": "tar",
                    "name": public.lang("Data Decompression"),
                    "data_backup_status": 1,
                    "progress": 10,
                    'exec_log': restore_log_data,
                    'timestamp': timestamp
                })
        except Exception:
            # the backup_status field may be absent — keep going
            pass

        # no in-progress task found, but a restore process exists
        if timestamp:
            return {
                "backup_data": "unknown",
                "backup_name": public.lang("Unknown Task"),
                "data_backup_status": 1,
                "progress": 10,
                'backup_msg': public.lang("Preparing to restore data"),
                'backup_log': restore_log_data,
                'timestamp': timestamp
            }

        return public.ReturnMsg(
            False,
            public.lang(
                "No ongoing restore task found, please check the restore list to see if the restore is complete"
            )
        )
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # CLI entry point: dispatch a RestoreManager method by name.
    # Usage: btpython restore_manager.py <method> <timestamp> [overwrite]
    if len(sys.argv) < 3:
        print("Usage: btpython restore_manager.py <method> <timestamp>")
        sys.exit(1)
    method_name = sys.argv[1]  # method name to invoke
    timestamp = sys.argv[2]  # backup timestamp
    # BUGFIX: the optional overwrite flag lives at sys.argv[3], which only
    # exists when len(sys.argv) > 3 — the old ">= 3" test raised IndexError
    # whenever the flag was omitted.
    overwrite = sys.argv[3] if len(sys.argv) > 3 else 0
    try:
        overwrite = int(overwrite)
    except ValueError:
        print(public.lang("Error: force parameter must be 0 or 1"))
        sys.exit(1)
    restore_manager = RestoreManager(overwrite)  # dispatcher instance
    if hasattr(restore_manager, method_name):  # verify the method exists
        method = getattr(restore_manager, method_name)
        method(timestamp)
    else:
        print(f"Error: {public.lang('Method')} '{method_name}' {public.lang('does not exist')}")
|
||||
1751
mod/project/backup_restore/ssh_manager.py
Normal file
1751
mod/project/backup_restore/ssh_manager.py
Normal file
File diff suppressed because it is too large
Load Diff
0
mod/project/docker/__init__.py
Normal file
0
mod/project/docker/__init__.py
Normal file
0
mod/project/docker/app/__init__.py
Normal file
0
mod/project/docker/app/__init__.py
Normal file
1784
mod/project/docker/app/appManageMod.py
Normal file
1784
mod/project/docker/app/appManageMod.py
Normal file
File diff suppressed because it is too large
Load Diff
1159
mod/project/docker/app/base.py
Normal file
1159
mod/project/docker/app/base.py
Normal file
File diff suppressed because it is too large
Load Diff
23
mod/project/docker/app/gpu/__init__.py
Normal file
23
mod/project/docker/app/gpu/__init__.py
Normal file
@@ -0,0 +1,23 @@
|
||||
from typing import List
|
||||
|
||||
from .base import GPUBase
|
||||
from .nvidia import NVIDIA
|
||||
from .amd import AMD
|
||||
|
||||
class Driver:
    """Aggregates every GPU vendor backend supported on this host."""

    # Declared for type checkers; the list itself is created per instance.
    drivers: List[GPUBase]

    def __init__(self):
        # BUGFIX: `drivers` used to be a class-level list, so every new
        # Driver() appended to the same shared list and backends accumulated
        # across instances. An instance attribute keeps each Driver isolated.
        self.drivers = []

        if NVIDIA.is_support():
            self.drivers.append(NVIDIA())

        if AMD.is_support():
            self.drivers.append(AMD())

    @property
    def support(self):
        # True when at least one backend detected usable hardware.
        return len(self.drivers) > 0

    def get_all_device_info(self, get):
        # Placeholder: per-backend info collection is not implemented yet.
        for _driver in self.drivers:
            pass
36
mod/project/docker/app/gpu/amd.py
Normal file
36
mod/project/docker/app/gpu/amd.py
Normal file
@@ -0,0 +1,36 @@
|
||||
from mod.project.docker.app.gpu.base import GPUBase
|
||||
|
||||
class AMD(GPUBase):
    """AMD GPU backend — placeholder implementation.

    Every method is an unimplemented stub returning None; in particular
    `is_support()` is falsy, so Driver never registers this backend yet.
    """

    @classmethod
    def is_support(cls):
        # Stub — returns None (falsy): AMD detection is not implemented.
        pass

    def _get_device_version(self, *args, **kwargs):
        # Stub: driver/runtime version lookup not implemented.
        pass

    def _get_device_name(self, *args, **kwargs):
        # Stub: device name lookup not implemented.
        pass

    def _get_fan_info(self, *args, **kwargs):
        # Stub: fan telemetry not implemented.
        pass

    def main(self):
        # Stub entry point — not part of the GPUBase interface.
        pass

    def get_info(self, gpu_id=0):
        # Stub: aggregated info for GPU `gpu_id` not implemented.
        pass

    def _get_mem_info(self):
        # Stub: memory usage not implemented.
        pass

    def _get_clock_info(self):
        # Stub: clock frequencies not implemented.
        pass

    def _get_temp_info(self):
        # Stub: temperature not implemented.
        pass

    def _get_uti_info(self):
        # Stub: utilization not implemented.
        pass

    def _get_proc_uti(self, proc_name='', proc_pid=0):
        # Stub: per-process utilization not implemented.
        pass
70
mod/project/docker/app/gpu/base.py
Normal file
70
mod/project/docker/app/gpu/base.py
Normal file
@@ -0,0 +1,70 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class GPUBase(ABC):
    """Abstract interface every GPU vendor backend must implement."""

    name = 'base'    # backend identifier; overridden by each subclass
    support = None   # capability flag cached by is_support()

    @abstractmethod
    def _get_mem_info(self, *args, **kwargs):
        """
        Return VRAM usage information.

        Returns:

        """
        pass

    @abstractmethod
    def _get_clock_info(self, *args, **kwargs):
        """
        Return clock-frequency information.

        Returns:

        """

        pass

    @abstractmethod
    def _get_temp_info(self, *args, **kwargs):
        """
        Return device temperature.

        Returns:

        """
        pass

    @abstractmethod
    def _get_uti_info(self, *args, **kwargs):
        """
        Return utilization figures.


        Returns:

        """
        pass

    @abstractmethod
    def _get_proc_uti(self, *args, **kwargs):
        """
        Return per-process utilization.

        Returns:

        """
        pass

    @abstractmethod
    def _get_fan_info(self, *args, **kwargs):
        # Return fan telemetry for the device.
        pass

    @abstractmethod
    def _get_device_name(self, *args, **kwargs):
        # Return the marketing/product name of the device.
        pass

    @abstractmethod
    def _get_device_version(self, *args, **kwargs):
        # Return driver/runtime version information.
        pass

    @classmethod
    @abstractmethod
    def is_support(cls):
        # Probe whether this backend's hardware/driver stack is usable.
        pass
27
mod/project/docker/app/gpu/constants.py
Normal file
27
mod/project/docker/app/gpu/constants.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
class CMD:
    """Namespace of shell command strings used by the docker GPU tooling."""

    @dataclass
    class CTK:
        """Commands for installing/configuring the NVIDIA Container Toolkit."""

        @dataclass
        class APT:
            # Debian/Ubuntu install pipeline: GPG key -> repo list -> update -> install.
            GetGPGKey = "curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg"
            AddSourcesList = "curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list"
            APTUpdate = "sudo apt-get update"
            Install = "sudo apt-get install -y nvidia-container-toolkit"
            # Full pipeline as a single ';'-joined command line.
            OneInstall = GetGPGKey + ';' + AddSourcesList + ';' + APTUpdate + ';' + Install

        @dataclass
        class YUM:
            # RHEL/CentOS install pipeline: add repo -> install.
            AddRepo = "curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo"
            Install = "sudo yum install -y nvidia-container-toolkit"
            OneInstall = AddRepo + ';' + Install

        @dataclass
        class ConfigureDocker:
            # Register the nvidia runtime with docker, then restart the daemon.
            Runtime = "sudo nvidia-ctk runtime configure --runtime=docker"
            Restart = "sudo systemctl restart docker"

        # Prints the nvidia-ctk version; used to detect whether the toolkit is installed.
        CheckVersion = "nvidia-ctk -v"
199
mod/project/docker/app/gpu/nvidia.py
Normal file
199
mod/project/docker/app/gpu/nvidia.py
Normal file
@@ -0,0 +1,199 @@
|
||||
import sys
|
||||
from collections import defaultdict
|
||||
from functools import wraps
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.append('/www/server/panel/class')
|
||||
|
||||
import public
|
||||
|
||||
try:
|
||||
import pynvml
|
||||
except:
|
||||
public.ExecShell("btpip install nvidia-ml-py")
|
||||
import pynvml
|
||||
|
||||
try:
|
||||
from mod.project.docker.app.gpu.base import GPUBase
|
||||
except:
|
||||
class GPUBase:
|
||||
pass
|
||||
|
||||
# Registries mapping collector name -> collector function; populated by the
# @register_task decorator below and consumed generically by NVIDIA.
device_tasks = defaultdict()
system_tasks = defaultdict()


def register_task(name: str):
    """Decorator factory registering a metric collector.

    *name* has the form ``'device:<label>'`` or ``'system:<label>'``; the
    decorated function is stored in the matching registry under ``<label>``.
    The function itself is returned unchanged in behavior (via a transparent
    wrapper), so it still works as a normal method.
    """

    def task_decorator(task_func):
        registry_kind, task_label = name.split(':')
        if registry_kind == 'device':
            device_tasks[task_label] = task_func
        elif registry_kind == 'system':
            system_tasks[task_label] = task_func

        @wraps(task_func)
        def func_wrapper(*args, **kwargs):
            return task_func(*args, **kwargs)

        return func_wrapper

    return task_decorator
||||
|
||||
|
||||
class NVIDIA(GPUBase):
    """NVIDIA backend: collects device/system metrics through pynvml (NVML).

    Collector methods register themselves in the module-level `device_tasks`
    / `system_tasks` registries via @register_task and are invoked
    generically by get_info_by_index() / get_system_info().
    """

    name = 'nvidia'
    support = None  # NVML availability flag, cached by is_support()

    def __init__(self):
        # is_support() also initialises the NVML library on success.
        self.device_count = 0
        if self.is_support():
            self.device_count = pynvml.nvmlDeviceGetCount()

    def __del__(self):
        if self.is_support():
            pynvml.nvmlShutdown()

    def get_all_device_info(self):
        """Return {'system': {...}, 0: {...}, 1: {...}, ...} for all GPUs."""
        all_info = defaultdict()
        all_info['system'] = self.get_system_info()
        for index in range(self.device_count):
            all_info[index] = self.get_info_by_index(index)
        return all_info

    def get_info_by_index(self, index=0):
        """Run every registered device collector against GPU *index*."""
        info = defaultdict()
        handle = pynvml.nvmlDeviceGetHandleByIndex(index)

        for t_name, t_func in device_tasks.items():
            try:
                info[t_name] = t_func(self, handle)
            except Exception:
                # A single failing metric must not break the whole report;
                # narrowed from a bare `except:` so Ctrl-C still propagates.
                info[t_name] = None

        return info

    def get_system_info(self):
        """Run every registered system-wide collector (driver version, count...)."""
        info = defaultdict()
        for t_name, t_func in system_tasks.items():
            try:
                info[t_name] = t_func(self)
            except Exception:
                info[t_name] = None
        return info

    @classmethod
    def is_support(cls):
        """Probe NVML availability; caches the result in cls.support."""
        try:
            pynvml.nvmlInit()
            cls.support = True
            return True

        except pynvml.NVMLError:
            cls.support = False
            return False

    @register_task('device:memory')
    def _get_mem_info(self, handle):
        # Query NVML once (the original issued the same call three times).
        mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
        info = defaultdict()
        info['size'] = int(mem.total) / 1024 ** 3  # GiB
        info['free'] = int(mem.free) / 1024 ** 3
        info['used'] = int(mem.used) / 1024 ** 3
        return info

    @register_task('device:clock')
    def _get_clock_info(self, handle):
        info = defaultdict()
        info['graphics'] = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_GRAPHICS)
        info['sm'] = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_SM)
        info['memory'] = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_MEM)
        info['video'] = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_VIDEO)
        return info

    @register_task('device:temperature')
    def _get_temp_info(self, handle):
        try:
            info = pynvml.nvmlDeviceGetTemperature(handle, pynvml.NVML_TEMPERATURE_GPU)
        except (pynvml.NVMLError, AttributeError):
            # BUGFIX: the original wrote `except pynvml.NVMLError or
            # AttributeError:` — `or` of two classes evaluates to the first,
            # so AttributeError (older pynvml builds) was never caught.
            info = pynvml.nvmlDeviceGetTemperatureV1(handle, pynvml.NVML_TEMPERATURE_GPU)
        return info

    @register_task('device:utilization')
    def _get_uti_info(self, handle):
        # Query NVML once (the original issued the same call twice).
        rates = pynvml.nvmlDeviceGetUtilizationRates(handle)
        info = defaultdict()
        info['gpu'] = rates.gpu
        info['memory'] = rates.memory
        return info

    @register_task('device:processes')
    def _get_proc_uti(self, handle):
        """List compute/graphics/MPS processes currently using the GPU."""
        info = list()
        process_sources = (
            ('Compute', pynvml.nvmlDeviceGetComputeRunningProcesses),
            ('Graphics', pynvml.nvmlDeviceGetGraphicsRunningProcesses),
            ('MPS', pynvml.nvmlDeviceGetMPSComputeRunningProcesses),
        )
        for proc_type, getter in process_sources:
            for p in getter(handle):
                p.__dict__['name'] = pynvml.nvmlSystemGetProcessName(p.pid)
                p.__dict__['type'] = proc_type
                info.append(p.__dict__)
        return info

    @register_task('device:fan')
    def _get_fan_info(self, handle):
        # Try progressively older fan APIs; default to 0 when none work.
        info = defaultdict()
        try:
            info['speed'] = pynvml.nvmlDeviceGetFanSpeedRPM(handle).speed
        except AttributeError:
            info['speed'] = pynvml.nvmlDeviceGetFanSpeed(handle)
        except pynvml.NVMLError:
            info['speed'] = pynvml.nvmlDeviceGetFanSpeed_v2(handle, 0)
        except Exception:
            info['speed'] = 0
        return info

    @register_task('device:name')
    def _get_device_name(self, handle):
        return pynvml.nvmlDeviceGetName(handle)

    @register_task('device:power')
    def _get_device_power(self, handle):
        info = defaultdict()
        info['current'] = pynvml.nvmlDeviceGetPowerUsage(handle)
        info['max'] = pynvml.nvmlDeviceGetPowerManagementLimit(handle)
        return info

    @register_task('system:version')
    def _get_device_version(self):
        info = defaultdict()
        info['driver'] = pynvml.nvmlSystemGetDriverVersion()

        try:
            info['cuda'] = pynvml.nvmlSystemGetCudaDriverVersion()
        except (pynvml.NVMLError, AttributeError):
            # BUGFIX: same `except A or B` defect as _get_temp_info.
            info['cuda'] = pynvml.nvmlSystemGetCudaDriverVersion_v2()

        return info

    @register_task('system:count')
    def _get_device_count(self):
        # Number of NVML-visible devices.
        return pynvml.nvmlDeviceGetCount()
||||
|
||||
|
||||
if __name__ == '__main__':
    # Manual smoke test: dump every detected NVIDIA device's metrics.
    nvidia = NVIDIA()
    print(nvidia.get_all_device_info())
158
mod/project/docker/app/gpu/tools.py
Normal file
158
mod/project/docker/app/gpu/tools.py
Normal file
@@ -0,0 +1,158 @@
|
||||
import os
|
||||
import sys
|
||||
from typing import Tuple
|
||||
|
||||
from mod.project.docker.app.gpu.constants import CMD
|
||||
from mod.project.docker.app.gpu.nvidia import NVIDIA
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.append('/www/server/panel/class')
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class GPUTool:
    """Helpers for GPU option detection and NVIDIA Container Toolkit installs."""

    # Class-level cache for __gpu_default_setting() (computed once per process).
    gpu_option = None
    option_default = None

    @staticmethod
    def __get_linux_distribution():
        """Classify the host as 'debian' or 'centos' family; None when unknown."""
        try:
            # Prefer parsing /etc/os-release.
            with open("/etc/os-release", "r", encoding="utf-8") as f:
                os_release = {}
                for line in f:
                    line = line.strip()
                    if line and "=" in line:
                        key, value = line.split("=", 1)
                        os_release[key] = value.strip('"')

            dist_id = os_release.get("ID", "").lower()
            id_like = os_release.get("ID_LIKE", "").lower()

            # Decide by ID first, then fall back to ID_LIKE.
            if dist_id in ["debian", "ubuntu"]:
                return "debian"
            elif dist_id in ["centos", "rhel", "fedora"]:
                return "centos"
            elif "debian" in id_like:
                return "debian"
            elif "rhel" in id_like or "fedora" in id_like:
                return "centos"

        except FileNotFoundError:
            # /etc/os-release missing — check the legacy marker files.
            if os.path.exists("/etc/debian_version"):
                return "debian"
            elif os.path.exists("/etc/redhat-release"):
                return "centos"

        except Exception:
            raise ValueError("System Distribution Is Unknown")

        # Explicit "unknown" (the original fell off the end, returning None
        # implicitly; callers treat None as "build no command").
        return None

    @classmethod
    def __gpu_default_setting(cls) -> Tuple[bool, bool]:
        """
        Detect whether the GPU option should be offered and its default value.

        Returns:
            gpu_option: whether the GPU choice is shown at all
            option_default: whether the GPU choice defaults to enabled
        """
        if cls.gpu_option is not None and cls.option_default is not None:
            return cls.gpu_option, cls.option_default

        driver = NVIDIA()
        # No NVML support -> hide the option entirely.
        if driver.support is None or driver.support is False:
            cls.gpu_option = False
            cls.option_default = False
            return cls.gpu_option, cls.option_default

        # Supported: sum VRAM (GiB) across devices; the 'system' entry has no
        # 'memory' key and contributes 0.
        device_info = driver.get_all_device_info()
        mem_size = 0
        for _, _device in device_info.items():
            # BUGFIX: a failed memory probe stores None under 'memory';
            # `.get('memory', {})` then returned None and `.get('size')`
            # raised AttributeError. `or {}` keeps the sum robust.
            mem_size = mem_size + (_device.get('memory') or {}).get('size', 0)
        if mem_size > 3:
            cls.gpu_option = True
            cls.option_default = True
        else:
            cls.gpu_option = True
            cls.option_default = False

        return cls.gpu_option, cls.option_default

    @classmethod
    def register_app_gpu_option(cls, app):
        """Strip or pre-fill the 'gpu' field of an app template in place."""
        option, default = cls.__gpu_default_setting()
        # BUGFIX: iterate a copy — removing from the list being iterated
        # silently skips the element following each removal.
        for field in list(app.get('field', [])):
            if option == False and field.get('attr', '') == 'gpu':
                app['field'].remove(field)
            elif option == True and field.get('attr', '') == 'gpu':
                field['default'] = default
                field['suffix'] = field['suffix'] + ' | 已默认设置为{}'.format(default)
        return app

    @staticmethod
    def is_install_ctk():
        """Return True when the nvidia-ctk CLI is installed and reports a version."""
        stdout, stderr = public.ExecShell(CMD.CTK.CheckVersion)
        if len(stderr) != 0:
            return False
        # BUGFIX: the original used `if not stdout.lower().find('version')`,
        # which is True exactly when 'version' sits at index 0 and False when
        # it is absent (find() returns -1) — the inverse of the intent.
        if 'version' not in stdout.lower():
            public.print_log("Not Nvidia Container Toolkit")
            return False
        return True

    @classmethod
    def __ctk_install_cmd_apt(cls, app_log):
        # APT pipeline: key -> repo list -> update -> install; output appended to app_log.
        return ("{get_gpg_key} >> {app_log};"
                "{add_sources_list} >> {app_log};"
                "{apt_update} >> {app_log};"
                "{install} >> {app_log}"
                .format(get_gpg_key=CMD.CTK.APT.GetGPGKey,
                        add_sources_list=CMD.CTK.APT.AddSourcesList,
                        apt_update=CMD.CTK.APT.APTUpdate,
                        install=CMD.CTK.APT.Install,
                        app_log=app_log
                        ))

    @classmethod
    def __ctk_install_cmd_yum(cls, app_log):
        # YUM pipeline: add repo -> install; output appended to app_log.
        return ("{add_repo} >> {app_log};"
                "{install} >> {app_log}"
                .format(add_repo=CMD.CTK.YUM.AddRepo,
                        install=CMD.CTK.YUM.Install,
                        app_log=app_log
                        ))

    @classmethod
    def __config_docker(cls, app_log):
        # Register the nvidia runtime with docker, then restart the daemon.
        return ("{runtime} >> {app_log};"
                "{restart} >> {app_log}"
                .format(runtime=CMD.CTK.ConfigureDocker.Runtime,
                        restart=CMD.CTK.ConfigureDocker.Restart,
                        app_log=app_log))

    @classmethod
    def ctk_install_cmd(cls, app_log):
        """Build the toolkit install command line for this distro ('' if unknown)."""
        dtb = cls.__get_linux_distribution()
        cmd = ''
        if dtb == 'debian':
            cmd = (
                "{install_cmd};"
                "{config_docker}"
                .format(
                    install_cmd=cls.__ctk_install_cmd_apt(app_log),
                    config_docker=cls.__config_docker(app_log),
                ))
        elif dtb == 'centos':
            cmd = (
                "{install_cmd};"
                "{config_docker}"
                .format(
                    install_cmd=cls.__ctk_install_cmd_yum(app_log),
                    config_docker=cls.__config_docker(app_log),
                ))
        return cmd
0
mod/project/docker/app/gpu/type.py
Normal file
0
mod/project/docker/app/gpu/type.py
Normal file
0
mod/project/docker/app/sub_app/__init__.py
Normal file
0
mod/project/docker/app/sub_app/__init__.py
Normal file
16
mod/project/docker/app/sub_app/base.py
Normal file
16
mod/project/docker/app/sub_app/base.py
Normal file
@@ -0,0 +1,16 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# +-------------------------------------------------------------------
|
||||
# | YakPanel
|
||||
# +-------------------------------------------------------------------
|
||||
# | Copyleft (c) 2015-2099 YakPanel(www.yakpanel.com) All lefts reserved.
|
||||
# +-------------------------------------------------------------------
|
||||
# | Author: wzz
|
||||
# | email : wzz@yakpanel.com
|
||||
# +-------------------------------------------------------------------
|
||||
# +-------------------------------------------------------------------
|
||||
# | docker sub_app 管理模型 -
|
||||
# +-------------------------------------------------------------------
|
||||
|
||||
class base():
    """Base type for docker sub_app management models.

    Currently stateless; exists so concrete sub-app models share a common
    ancestor.
    """

    def __init__(self):
        # No shared initialisation yet.
        pass
198
mod/project/docker/app/sub_app/downModel.py
Normal file
198
mod/project/docker/app/sub_app/downModel.py
Normal file
@@ -0,0 +1,198 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# +-------------------------------------------------------------------
|
||||
# | YakPanel
|
||||
# +-------------------------------------------------------------------
|
||||
# | Copyleft (c) 2015-2099 YakPanel(www.yakpanel.com) All lefts reserved.
|
||||
# +-------------------------------------------------------------------
|
||||
# | Author: wzz
|
||||
# | email : wzz@yakpanel.com
|
||||
# +-------------------------------------------------------------------
|
||||
# +-------------------------------------------------------------------
|
||||
# | docker sub_app 管理模型 -
|
||||
# +-------------------------------------------------------------------
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
from collections import deque
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.append('/www/server/panel/class')
|
||||
|
||||
import public
|
||||
|
||||
def download_model(service_name, model_name, model_version, ollama_url, app_cmd_log):
    """
    Concrete implementation of downloading an Ollama model.

    Streams Ollama's /api/pull endpoint, writes progress/speed lines to
    *app_cmd_log*, aborts and retries (up to 30 times) when throughput looks
    degraded, and maintains the /tmp/<model>:<version>.pl / .failed and
    /tmp/nocandown.pl marker files that get_models_list() in ollamaMod reads
    to report status.

    @param service_name: docker-compose service name
    @param model_name: model name
    @param model_version: model version/tag
    @param ollama_url: Ollama API URL
    @param app_cmd_log: log file path
    """
    def start_download():
        # One download attempt; returns True on verified success, False to
        # signal the outer retry loop to try again.
        url = ollama_url + "/api/pull"

        # Request payload: streamed pull of "<name>:<version>".
        data = {
            "model": "{}:{}".format(model_name, model_version),
            "stream": True
        }

        try:
            import requests
            response = requests.post(url, json=data, stream=True)

            with open(app_cmd_log, 'a') as log_file:
                log_file.write('{} model is being downloaded, and may need to wait more than 1-30 minutes...\n'.format(model_name))

                download_tag = None       # layer/file currently being pulled
                last_completed = 0        # bytes completed at last sample
                last_time = time.time()   # wall time of last sample
                # Ring buffer of recent per-second speed samples (deque).
                speed_history = deque(maxlen=60)

                count_sum = 0             # samples seen; first 5 are warm-up
                for line in response.iter_lines():
                    if line:
                        json_response = json.loads(line)
                        status = json_response.get("status", "")

                        # Progress lines look like "pulling <tag> ...".
                        if "pulling" in status:
                            status = status.split(" ")
                            # New layer started: reset the speed tracking.
                            if download_tag is None or status[1] != download_tag:
                                download_tag = status[1]
                                last_completed = 0
                                last_time = time.time()
                                speed_history.clear()

                            completed = json_response.get("completed", 0)
                            total = json_response.get("total", 0)

                            if total > 0:
                                # Sample the download speed about once per second.
                                current_time = time.time()
                                time_diff = current_time - last_time
                                if time_diff >= 1:  # update once per second
                                    bytes_diff = completed - last_completed
                                    speed = bytes_diff / time_diff  # bytes per second

                                    # Record the sample (skip the first 5 as warm-up).
                                    count_sum += 1
                                    if count_sum > 5:
                                        speed_history.append(speed)

                                    # Detect abnormal throughput once enough samples exist.
                                    avg_speed = None
                                    if len(speed_history) >= 10:
                                        avg_speed = sum(list(speed_history)[:-1]) / (len(speed_history) - 1)
                                        current_speed = speed_history[-1]

                                        if current_speed < 1024000 and avg_speed < 1536000:  # current < 1.2MB/s and average < 1.5MB/s
                                            log_file.write('Detected that the download speed is too low and is trying to reset the download...\n')
                                            log_file.flush()
                                            return False  # False => caller restarts the download

                                        if current_speed < (avg_speed / 4) and avg_speed > 1024:  # sharp drop vs. an established average
                                            log_file.write('Abnormal download speed or CF slowdown detected, trying to reset the download...\n')
                                            log_file.flush()
                                            return False  # False => caller restarts the download

                                    # Format the current speed for the log line.
                                    speed_str = ""
                                    if speed < 1024:
                                        speed_str = "{:.2f} B/s".format(speed)
                                    elif speed < 1024 * 1024:
                                        speed_str = "{:.2f} KB/s".format(speed / 1024)
                                    else:
                                        speed_str = "{:.2f} MB/s".format(speed / (1024 * 1024))

                                    # Format the average speed (may still be None early on).
                                    avg_speed_str = ""
                                    if not avg_speed is None:
                                        if avg_speed < 1024:
                                            avg_speed_str = "{:.2f} B/s".format(avg_speed)
                                        elif avg_speed < 1024 * 1024:
                                            avg_speed_str = "{:.2f} KB/s".format(avg_speed / 1024)
                                        else:
                                            avg_speed_str = "{:.2f} MB/s".format(avg_speed / (1024 * 1024))

                                    progress = (completed / total) * 100
                                    log_file.write('File: {}, Download Progress: {:.2f}%, Average Speed: {}, Current Speed: {}\n'.format(
                                        download_tag,
                                        progress,
                                        avg_speed_str,
                                        speed_str
                                    ))
                                    log_file.flush()

                                    # Roll the sampling window forward.
                                    last_completed = completed
                                    last_time = current_time
                        else:
                            # Non-progress status lines are logged verbatim.
                            log_file.write(status + '\n')
                            log_file.flush()

                # After the stream ends, verify the model actually exists in
                # the container via `ollama list`.
                verify_cmd = "docker-compose -p {service_name} exec -it {service_name_} ollama list | grep {model_name}:{model_version}".format(
                    service_name=service_name.lower(),
                    service_name_=service_name,
                    model_name=model_name,
                    model_version=model_version
                )
                result = public.ExecShell(verify_cmd)[0]

                if model_name in result:
                    log_file.write('bt_successful\n')
                    return True
                else:
                    # Leave a failure marker for the UI status poller.
                    public.writeFile("/tmp/{model_name}:{model_version}.failed".format(
                        model_name=model_name,
                        model_version=model_version,
                    ), "failed")
                    log_file.write('bt_failed\n')
                    return False

        except Exception as e:
            # On any exception, record the error and mark the attempt failed.
            with open(app_cmd_log, 'a') as log_file:
                log_file.write('Download failed: {}\n'.format(str(e)))
                log_file.write('bt_failed\n')
            public.writeFile("/tmp/{model_name}:{model_version}.failed".format(
                model_name=model_name,
                model_version=model_version,
            ), "failed")
            return False

    # Set "downloading" status markers and clear any stale failure marker.
    public.ExecShell("echo 'downloading' > /tmp/{model_name}:{model_version}.pl".format(
        model_name=model_name,
        model_version=model_version
    ))
    public.ExecShell("echo 'downloading' > /tmp/nocandown.pl")
    public.ExecShell("rm -f /tmp/{model_name}:{model_version}.failed".format(
        model_name=model_name,
        model_version=model_version,
    ))

    try:
        max_retries = 30
        retry_count = 0

        while retry_count < max_retries:
            if retry_count > 0:
                with open(app_cmd_log, 'a') as log_file:
                    log_file.write('\n{} retry in progress...\n'.format(retry_count + 1))

            if start_download():
                break

            retry_count += 1
            time.sleep(3)  # wait 3 seconds before retrying

    finally:
        # Always clear the status marker files, even on failure/exception.
        public.ExecShell("rm -f /tmp/{model_name}:{model_version}.pl".format(
            model_name=model_name,
            model_version=model_version,
        ))
        public.ExecShell("rm -f /tmp/nocandown.pl")
324
mod/project/docker/app/sub_app/ollamaMod.py
Normal file
324
mod/project/docker/app/sub_app/ollamaMod.py
Normal file
@@ -0,0 +1,324 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# +-------------------------------------------------------------------
|
||||
# | YakPanel
|
||||
# +-------------------------------------------------------------------
|
||||
# | Copyleft (c) 2015-2099 YakPanel(www.yakpanel.com) All lefts reserved.
|
||||
# +-------------------------------------------------------------------
|
||||
# | Author: wzz
|
||||
# | email : wzz@yakpanel.com
|
||||
# +-------------------------------------------------------------------
|
||||
# +-------------------------------------------------------------------
|
||||
# | docker sub_app 管理模型 -
|
||||
# +-------------------------------------------------------------------
|
||||
import json
|
||||
import os.path
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.append('/www/server/panel/class')
|
||||
|
||||
import public
|
||||
from mod.project.docker.app.base import App
|
||||
|
||||
class OllamaBase(App):
    """Shared configuration for Ollama sub-app handlers (local API endpoint)."""

    def __init__(self):
        super(OllamaBase, self).__init__()
        self.ollama_port = "11434"  # default Ollama REST port
        self.ollama_local_url = "http://127.0.0.1:{}".format(self.ollama_port)

    def set_ollama_port(self, port):
        """Set the port and keep the cached URL in sync; returns self (fluent)."""
        self.ollama_port = port
        # BUGFIX: the original did `self.ollama_local_url.format(port)`, but
        # after __init__ the URL no longer contains a '{}' placeholder, so
        # format() was a no-op and the URL silently kept the old port.
        self.ollama_local_url = "http://127.0.0.1:{}".format(port)
        return self

    def set_ollama_local_url(self, port):
        """Rebuild the local API URL for *port*; returns self (fluent)."""
        self.ollama_local_url = "http://127.0.0.1:{}".format(port)
        return self
|
||||
|
||||
class OllamaMod(OllamaBase):
|
||||
|
||||
def __init__(self):
|
||||
super(OllamaMod, self).__init__()
|
||||
|
||||
# 2025/2/8 11:47 获取本地所有的models
|
||||
# https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models
|
||||
def list_local_models(self):
|
||||
uri = "/api/tags"
|
||||
|
||||
ps_json, stderr = public.ExecShell("docker-compose -p {service_name} ps --format json | {grep_v}".format(
|
||||
service_name=self.service_name.lower(),
|
||||
grep_v=self.grep_version,
|
||||
))
|
||||
if "Segmentation fault" in ps_json:
|
||||
return []
|
||||
|
||||
if not ps_json.startswith("["):
|
||||
ps = json.loads("[" + ps_json.strip().replace("\n", ",") + "]")
|
||||
else:
|
||||
ps = json.loads(ps_json.strip().replace("\n", ","))
|
||||
|
||||
try:
|
||||
p_port = "11434"
|
||||
for i in ps:
|
||||
if "ollama/ollama" in i["Image"]:
|
||||
if len(i["Publishers"]) == 0: break
|
||||
p_port = i["Publishers"][0]["PublishedPort"]
|
||||
except:
|
||||
p_port = "11434"
|
||||
|
||||
self.set_ollama_local_url(p_port)
|
||||
url = self.ollama_local_url + uri
|
||||
response = public.HttpGet(url)
|
||||
if not response: return []
|
||||
response = json.loads(response)
|
||||
|
||||
if "models" in response:
|
||||
models = response["models"]
|
||||
for i in models:
|
||||
i["version"] = i["name"].split(":")[-1] if ":" in i["name"] else i["name"]
|
||||
i["l_name"] = i["name"].split(":")[0] if ":" in i["name"] else i["name"]
|
||||
return models
|
||||
return []
|
||||
|
||||
# 2025/2/10 15:52 获取指定模型的信息
|
||||
# https://github.com/ollama/ollama/blob/main/docs/api.md#show-model-information
|
||||
def show_model_info(self, get):
|
||||
'''
|
||||
@name 获取指定模型的信息
|
||||
'''
|
||||
get.model_name = get.get("model_name", None)
|
||||
if get.model_name is None:
|
||||
return public.return_message(-1, 0, public.lang("model_name parameter cannot be null"))
|
||||
get.model_version = get.get("model_version", None)
|
||||
if get.model_version is None:
|
||||
return public.return_message(-1, 0, public.lang("model_version parameter cannot be null"))
|
||||
get.service_name = get.get("service_name", None)
|
||||
if get.service_name is None:
|
||||
return public.return_message(-1, 0, public.lang("service_name parameter cannot be null"))
|
||||
|
||||
self.set_service_name(get.service_name)
|
||||
uri = "/api/show"
|
||||
ps_json, stderr = public.ExecShell("docker-compose -p {service_name} ps --format json | {grep_v}".format(
|
||||
service_name=self.service_name.lower(),
|
||||
grep_v=self.grep_version,
|
||||
))
|
||||
if "Segmentation fault" in ps_json:
|
||||
return []
|
||||
|
||||
if not ps_json.startswith("["):
|
||||
ps = json.loads("[" + ps_json.strip().replace("\n", ",") + "]")
|
||||
else:
|
||||
ps = json.loads(ps_json.strip().replace("\n", ","))
|
||||
|
||||
try:
|
||||
p_port = "11434"
|
||||
for i in ps:
|
||||
if "ollama/ollama" in i["Image"]:
|
||||
if len(i["Publishers"]) == 0: break
|
||||
p_port = i["Publishers"][0]["PublishedPort"]
|
||||
except:
|
||||
p_port = "11434"
|
||||
|
||||
self.set_ollama_local_url(p_port)
|
||||
|
||||
url = self.ollama_local_url + uri
|
||||
param = {"model": "{}:{}".format(get.model_name, get.model_version)}
|
||||
|
||||
import requests
|
||||
response = requests.post(url, data=json.dumps(param), timeout=10)
|
||||
|
||||
return public.return_message(0, 0, response.json())
|
||||
|
||||
# 2025/2/10 14:51 获取在线的所有models
|
||||
def list_online_models(self):
|
||||
'''
|
||||
@name 获取在线的所有models
|
||||
'''
|
||||
if not os.path.exists(self.ollama_online_models_file):
|
||||
public.downloadFile(public.get_url() + '/src/dk_app/yakpanel/apps/ollama_model.json', self.ollama_online_models_file)
|
||||
|
||||
try:
|
||||
models = json.loads(public.readFile(self.ollama_online_models_file))
|
||||
|
||||
res = []
|
||||
for i in models:
|
||||
res.append({
|
||||
"name": i["name"],
|
||||
"description": i["zh_cn_msg"],
|
||||
"version": i["parameters"],
|
||||
"size": i["size"],
|
||||
"can_down": True,
|
||||
})
|
||||
|
||||
return res
|
||||
except:
|
||||
return []
|
||||
|
||||
# 2025/2/10 14:54 获取模型列表
|
||||
def get_models_list(self, get):
|
||||
'''
|
||||
@name 获取模型列表
|
||||
'''
|
||||
get.search = get.get("search", "")
|
||||
get.p = get.get("p/d", 1)
|
||||
get.row = get.get("limit/d", 20)
|
||||
get.service_name = get.get("service_name", None)
|
||||
if get.service_name is None:
|
||||
return public.return_message(-1, 0, public.lang("service_name parameter cannot be null"))
|
||||
get.status = get.get("status", "all")
|
||||
self.set_service_name(get.service_name)
|
||||
|
||||
local_models = self.list_local_models()
|
||||
public.print_log(local_models)
|
||||
online_models = self.list_online_models()
|
||||
res = []
|
||||
can_down = True
|
||||
if os.path.exists("/tmp/nocandown.pl"):
|
||||
can_down = False
|
||||
|
||||
# 2025/2/10 14:55 合并两个列表,增加status字段,已经安装了值为installed
|
||||
for i in online_models:
|
||||
i["can_down"] = can_down
|
||||
|
||||
i["status"] = "uninstall"
|
||||
for j in local_models:
|
||||
if i["name"] == j["l_name"]:
|
||||
i["status"] = "installed" if i["version"] == j["version"] else "uninstall"
|
||||
|
||||
if os.path.exists("/tmp/{model_name}:{model_version}.failed".format(
|
||||
model_name=i["name"],
|
||||
model_version=i["version"],
|
||||
)):
|
||||
i["status"] = "failed"
|
||||
|
||||
if os.path.exists("/tmp/{model_name}:{model_version}.pl".format(
|
||||
model_name=i["name"],
|
||||
model_version=i["version"],
|
||||
)):
|
||||
i["status"] = "downloading"
|
||||
|
||||
if i["status"] in ("installed", "failed", "downloading"):
|
||||
break
|
||||
|
||||
if get.status != "all":
|
||||
if get.status != i["status"]: continue
|
||||
if get.search != "":
|
||||
if get.search not in i["name"] and get.search not in i["description"]: continue
|
||||
|
||||
res.append(i)
|
||||
|
||||
page_data = self.get_page(res, get)
|
||||
return self.pageResult(True, data=page_data["data"], page=page_data["page"])
|
||||
|
||||
# 2025/2/17 16:34 Install the given model for the specified app.
def down_models(self, get):
    '''
    @name Install the given model for the specified app (asynchronous download).
    @param service_name  service (compose project) name
    @param model_name    model name
    @param model_version model tag/version
    @return public.return_message dict; the actual download runs in a
            background daemon thread and logs to self.app_cmd_log.
    '''
    # Validate the three mandatory request parameters.
    get.service_name = get.get("service_name", None)
    if get.service_name is None:
        return public.return_message(-1, 0, public.lang("service_name parameter cannot be null"))
    get.model_name = get.get("model_name", None)
    if get.model_name is None:
        return public.return_message(-1, 0, public.lang("model_name parameter cannot be null"))
    get.model_version = get.get("model_version", None)
    if get.model_version is None:
        return public.return_message(-1, 0, public.lang("model_version parameter cannot be null"))

    self.set_service_name(get.service_name)

    # Fetch container information for the compose project (stderr is ignored).
    ps_json, stderr = public.ExecShell("docker-compose -p {service_name} ps --format json | {grep_v}".format(
        service_name=self.service_name.lower(),
        grep_v=self.grep_version,
    ))
    if "Segmentation fault" in ps_json:
        return public.return_message(-1, 0, public.lang("Failed to get container information, docker-compose execution is exceptional!"))

    # Older docker-compose prints one JSON object per line; wrap those lines
    # into a JSON array before parsing.
    if not ps_json.startswith("["):
        ps = json.loads("[" + ps_json.strip().replace("\n", ",") + "]")
    else:
        ps = json.loads(ps_json.strip().replace("\n", ","))

    # Locate the host port published by the ollama container; fall back to
    # the default 11434 on any lookup problem.
    try:
        p_port = "11434"
        for i in ps:
            if "ollama/ollama" in i["Image"]:
                if len(i["Publishers"]) == 0: break
                p_port = i["Publishers"][0]["PublishedPort"]
    except:  # NOTE(review): bare except also masks KeyboardInterrupt/SystemExit
        p_port = "11434"

    self.set_ollama_local_url(p_port)

    # Prepare (truncate) the download log file.
    self.set_cmd_log()
    public.ExecShell("echo > {}".format(self.app_cmd_log))

    # Import the download module lazily and run the download in the background.
    from mod.project.docker.app.sub_app.downModel import download_model
    import threading

    # Daemon thread: the panel process will not block on the download at exit.
    download_thread = threading.Thread(
        target=download_model,
        args=(
            get.service_name,
            get.model_name,
            get.model_version,
            self.ollama_local_url,
            self.app_cmd_log
        )
    )
    download_thread.daemon = True
    download_thread.start()

    return public.return_message(0, 0, public.lang("The model is being downloaded, please check the logs later"))
|
||||
|
||||
# 2025/2/10 15:50 Delete the given model from the specified app.
def del_models(self, get):
    '''
    @name Delete the given model from the specified app.
    @param service_name  service (compose project) name
    @param model_name    model name
    @param model_version model tag/version
    @return public.return_message dict
    '''
    import shlex  # stdlib; quotes user-supplied values before shelling out

    # Validate the three mandatory request parameters.
    get.service_name = get.get("service_name", None)
    if get.service_name is None:
        return public.return_message(-1, 0, public.lang("service_name parameter cannot be null"))
    get.model_name = get.get("model_name", None)
    if get.model_name is None:
        return public.return_message(-1, 0, public.lang("model_name parameter cannot be null"))
    get.model_version = get.get("model_version", None)
    if get.model_version is None:
        return public.return_message(-1, 0, public.lang("model_version parameter cannot be null"))

    self.set_service_name(get.service_name)

    # Query compose for the project's containers (stderr is ignored).
    ps_json, stderr = public.ExecShell("docker-compose -p {service_name} ps --format json | {grep_v}".format(
        service_name=shlex.quote(self.service_name.lower()),
        grep_v=self.grep_version,
    ))
    if "Segmentation fault" in ps_json:
        # FIX: this failure path used to return status 0 (success); use -1
        # like down_models so callers can actually detect the error.
        return public.return_message(-1, 0, public.lang("Failed to delete model, docker-compose execution exception!"))

    # Older docker-compose prints one JSON object per line; wrap into an array.
    if not ps_json.startswith("["):
        ps = json.loads("[" + ps_json.strip().replace("\n", ",") + "]")
    else:
        ps = json.loads(ps_json.strip().replace("\n", ","))

    # Inherited heuristic: with exactly two services the ollama service is
    # named "ollama", otherwise it matches the project name.
    # TODO(review): confirm against the compose templates.
    serviceName = get.service_name
    if len(ps) == 2:
        serviceName = "ollama"

    # SECURITY FIX: service/model name and version come straight from the
    # request — quote them so shell metacharacters cannot be injected.
    # NOTE(review): `-it` allocates a TTY, which can fail when the panel has
    # none — confirm whether `-T` is required here.
    cmd = ("docker-compose -p {service_name} exec -it {serviceName} ollama rm {model}".format(
        service_name=shlex.quote(get.service_name.lower()),
        serviceName=shlex.quote(serviceName),
        model=shlex.quote("{}:{}".format(get.model_name, get.model_version)),
    ))
    public.ExecShell(cmd)
    return public.return_message(0, 0, public.lang("Successful deletion of model!"))
|
||||
0
mod/project/docker/apphub/__init__.py
Normal file
0
mod/project/docker/apphub/__init__.py
Normal file
258
mod/project/docker/apphub/apphubManage.py
Normal file
258
mod/project/docker/apphub/apphubManage.py
Normal file
@@ -0,0 +1,258 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# YakPanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 YakPanel(www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: csj <csj@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
# ------------------------------
|
||||
# docker应用商店 apphub 业务类
|
||||
# ------------------------------
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import re
|
||||
import time
|
||||
from datetime import datetime, timedelta
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
from mod.base.git_tool import GitTool
|
||||
from mod.project.docker.app.base import App
|
||||
import mod.base.git_tool.install as GitInstall
|
||||
|
||||
class AppHub():
    '''
    @name Business class for the docker app store ("apphub") external apps.

    Implemented as a singleton: __new__ always hands back one shared
    instance. All paths are derived from App.dk_project_path.
    '''
    _instance = None
    # /www/dk_project/dk_app/apphub_config.json
    hub_config_path = os.path.join(App.dk_project_path, 'dk_app', 'apphub_config.json')
    # /www/dk_project/dk_app/apphub/apphub
    hub_home_path = os.path.join(App.dk_project_path, 'dk_app', 'apphub', 'apphub')
    # /www/dk_project/dk_app/apphub/apphub/apps.json
    hub_apps_path = os.path.join(hub_home_path, 'apps.json')

    def __new__(cls, *args, **kwargs):
        # Create the shared instance on first use only.
        if not hasattr(cls, "_instance") or cls._instance is None:
            cls._instance = super(AppHub, cls).__new__(cls)
        return cls._instance

    @classmethod
    def get_config(cls):
        '''
        @name Read the apphub configuration, writing a default skeleton first
              if the file does not exist yet.
        @return dict parsed from hub_config_path
        '''
        if not os.path.exists(cls.hub_config_path):
            apphub_config = {
                "git_config": {
                    "git_url": "",
                    "git_branch": "main",
                    "user_config": {
                        "name": "",
                        "password": ""
                    }
                }
            }
            public.writeFile(cls.hub_config_path, json.dumps(apphub_config))
        return json.loads(public.readFile(cls.hub_config_path))

    def install_apphub(self, get):
        '''
        @name Install the environment (git) that apphub needs.
        '''
        git_install = GitInstall.install_git()
        if not git_install:
            return public.return_message(-1, 0, public.lang("If the installation fails, check the network or install git manually"))

        return public.return_message(0, 0, public.lang("The environment was successfully installed"))

    def get_hub_apps(self):
        '''
        @name Return the cached external-app list (apps.json); [] on any error.
        '''
        res = []
        try:
            # FIX: consistently use self.hub_apps_path (the original mixed
            # AppHub.hub_apps_path and self.hub_apps_path).
            if os.path.exists(self.hub_apps_path):
                res = json.loads(public.readFile(self.hub_apps_path))
        except Exception:
            # FIX: narrowed from a bare `except:` so SystemExit and
            # KeyboardInterrupt are no longer swallowed.
            pass
        return res

    def generate_apphub(self, get):
        '''
        @name Scan hub_home_path, normalise each app.json and rewrite apps.json.
        @return public.return_message dict
        '''
        apps = []
        if not os.path.isdir(self.hub_home_path):
            return public.return_message(-1, 0, public.lang("The apphub directory does not exist"))
        for name in os.listdir(self.hub_home_path):
            app_dir = os.path.join(self.hub_home_path, name)
            if not os.path.isdir(app_dir): continue

            app_info = public.readFile(os.path.join(app_dir, 'app.json'))
            if not app_info: continue

            try:
                app_info = json.loads(app_info)
            except Exception:
                # Skip apps whose app.json is not valid JSON.
                continue

            # Fill in defaults for the optional fields.
            if "reuse" not in app_info: app_info["reuse"] = True
            if "icon" not in app_info: app_info["icon"] = ""
            if "sort" not in app_info: app_info["sort"] = 999
            if "cpu" not in app_info: app_info["cpu"] = 0
            if "mem" not in app_info: app_info["mem"] = 0
            if "disk" not in app_info: app_info["disk"] = 10240
            if "installed" not in app_info: app_info["installed"] = False
            if "updateat" not in app_info: app_info["updateat"] = 0

            apps.append(app_info)

        self.apphub_apps = apps

        public.writeFile(self.hub_apps_path, json.dumps(apps, indent=4, ensure_ascii=False))

        self.generate_apphub_icon()

        return public.return_message(0, 0, public.lang("The resolution was successful"))

    def generate_apphub_icon(self):
        '''
        @name Copy each app's icon.png into the panel's static directory.
              Target: /static/img/soft_ico/apphub/ico-apphub_xxx.png
        '''
        apphub_ico_path = "{}/YakPanel/static/img/soft_ico/apphub/".format(public.get_panel_path())
        # Recreate the icon directory from scratch on every run.
        if os.path.exists(apphub_ico_path):
            public.ExecShell("rm -rf {}".format(apphub_ico_path))
        public.ExecShell("mkdir -p {}".format(apphub_ico_path))

        for name in os.listdir(self.hub_home_path):
            app_dir = os.path.join(self.hub_home_path, name, 'icon.png')
            if not os.path.exists(app_dir): continue

            app_icon_path = os.path.join(apphub_ico_path, "ico-apphub_{}.png".format(name))
            public.ExecShell("cp {} {}".format(app_dir, app_icon_path))
        return True

    def set_apphub_git(self, get):
        '''
        @name Persist the git settings used for external apps.
        @param get: git_url, git_branch, name, password
        '''
        config = self.get_config()
        git_config = config.get("git_config", {})
        git_config["git_url"] = get.git_url.strip()
        git_config["git_branch"] = get.git_branch.strip()
        # Credentials are optional and only replaced when both are supplied.
        if "name" in get and "password" in get:
            git_config["user_config"] = {
                "name": get.get("name", ""),
                "password": get.get("password", "")
            }
        config["git_config"] = git_config
        public.writeFile(self.hub_config_path, json.dumps(config, indent=4, ensure_ascii=False))
        return public.return_message(0, 0, public.lang("GIT INFORMATION IS SUCCESSFULLY CONFIGURED"))

    def import_git_apphub(self, get):
        '''
        @name Pull the external-app repository and parse its contents.
        @param None
        '''
        if not GitInstall.installed():
            return public.return_message(-1, 0, public.lang("If you don't have a git environment, please install git first"))

        abs_path = os.path.dirname(self.hub_home_path)
        if not os.path.exists(abs_path): os.makedirs(abs_path)

        gitconfig = self.get_config()
        if not gitconfig or not gitconfig.get("git_config", {}).get("git_url", ""):
            return public.return_message(-1, 0, public.lang("Please set the git information first"))

        git_url = gitconfig.get("git_config", {}).get("git_url", "")
        git_user = gitconfig.get("git_config", {}).get("user_config", {})
        # FIX: the fallback used to be {} (wrong type for a branch name);
        # default to "main", matching the default written by get_config().
        git_branch = gitconfig.get("git_config", {}).get("git_branch", "main")

        git = GitTool(project_path=abs_path, git_url=git_url, user_config=git_user, git_id="-1")
        public.ExecShell("rm -rf /tmp/git_-1_log.log")
        res = git.pull(git_branch)

        # GitTool.pull returns None on success, an error payload otherwise.
        if res is not None:
            return public.return_message(-1, 0, public.lang("Import from git failed"))

        # Parse every app.
        res = self.generate_apphub(get)
        if not res["status"]:
            return public.return_message(-1, 0, res["msg"])
        # Remove the template directory.
        public.ExecShell("rm -rf {}".format(os.path.join(AppHub.hub_home_path, "templates")))

        public.set_module_logs('apphub', 'import_git_apphub', 1)
        return public.return_message(0, 0, public.lang("Import from git successful"))

    def import_zip_apphub(self, get):
        '''
        @name Import external applications from compressed package(s).
        @param get: sfile: comma-separated zip/gz file paths
        '''
        sfile = get.sfile.strip()
        files = sfile.split(",")

        for sfile in files:

            if not sfile.endswith(('.zip', '.gz')):
                return public.return_message(-1, 0, public.lang("The file format is incorrect, please select the zip or gz file"))

            if not os.path.exists(self.hub_home_path):
                os.makedirs(self.hub_home_path)

            if sfile.endswith('.zip'):
                res, err = public.ExecShell("unzip -o {} -d {}".format(sfile, self.hub_home_path))
            elif sfile.endswith('.gz'):
                res, err = public.ExecShell("tar -xzvf {} -C {}".format(sfile, self.hub_home_path))
            else:
                err = "{},Unsupported file formats".format(sfile)

            if err:
                return public.return_message(-1, 0, public.lang("Import failure:{}", str(err)))

        # Re-parse the whole app directory after all archives are extracted.
        res = self.generate_apphub(get)
        if not res["status"]:
            return public.return_message(-1, 0, res["msg"])

        public.set_module_logs('apphub', 'import_zip_apphub', 1)

        return public.return_message(0, 0, public.lang("Successful import"))

    def parser_zip_apphub(self, get):
        '''
        @name Inspect an archive and collect the app.json entries it contains.
        @param get: sfile: archive path (.zip or .gz)
        @return list of app dicts on success, or a public.return_message dict
                on error — callers must check the type.
        '''
        sfile = get.sfile.strip()

        app_list = []

        from mod.project.docker.apphub.tool import GzHandler, ZipHandler
        if sfile.endswith(".gz"):
            handler = GzHandler()
        else:
            handler = ZipHandler()

        files = handler.get_files(sfile)

        # get_files returns a message dict (containing 'status') on failure.
        if 'status' in files:
            return public.return_message(-1, 0, files['msg'])

        for file, file_struck in files.items():
            if 'app.json' in file_struck and file_struck['app.json']['is_dir'] == 0:

                filename = file_struck['app.json']['fullpath']

                appinfo = handler.get_file_info(sfile, filename)
                if 'status' in appinfo and appinfo['status'] == False:
                    return public.return_message(-1, 0, appinfo['msg'])

                try:
                    appinfo = json.loads(appinfo['data'])
                    appinfo["parser_from"] = sfile
                    app_list.append(appinfo)
                except Exception:
                    # FIX: narrowed from a bare `except:`; entries whose
                    # app.json cannot be read/parsed are skipped.
                    pass

        return app_list
|
||||
190
mod/project/docker/apphub/tool.py
Normal file
190
mod/project/docker/apphub/tool.py
Normal file
@@ -0,0 +1,190 @@
|
||||
import os
|
||||
import json
|
||||
import shutil
|
||||
import datetime
|
||||
import tarfile
|
||||
import zipfile
|
||||
import public
|
||||
|
||||
|
||||
class BaseCompressHandler:
    """Common interface for archive handlers (zip / tar.gz).

    Subclasses override get_files() and get_file_info(); the base
    implementations are intentional no-ops returning None.
    """

    def __init__(self):
        pass

    def get_files(self, sfile):
        """List the entries of an archive. Overridden by subclasses."""
        pass

    def get_file_info(self, sfile, filename):
        """Read a single entry of an archive. Overridden by subclasses."""
        pass

    def check_file_exists(self, file_path):
        """Return True when *file_path* exists on disk, else False."""
        return os.path.exists(file_path)
|
||||
|
||||
|
||||
class GzHandler(BaseCompressHandler):
    """Handler for tar.gz archive files."""

    def get_filename(self, item):
        """Return the member's name, re-decoded cp437->gbk when possible.

        Directories get a trailing '/' so the tree builder below can tell
        them apart from files.
        """
        filename = item.name
        try:
            # Legacy archives may carry GBK names mangled through cp437.
            filename = item.name.encode('cp437').decode('gbk')
        except:
            pass
        if item.isdir():
            filename += '/'
        return filename

    def check_file_type(self, file_path):
        """Return (ok, message): ok is False when the path is not a tar archive."""
        if not tarfile.is_tarfile(file_path):
            if file_path[-3:] == ".gz":
                # Plain .gz (not tar.gz) cannot be previewed, only extracted.
                return False, 'This is not tar.gz archive file, the gz archive file does not support preview, only decompression'
            return False, 'Not a valid tar.gz archive file'
        return True, ''

    def get_files(self, sfile):
        """Build a nested dict describing the archive's directory tree.

        Leaf entries are metadata dicts (file_size/filename/fullpath/...);
        intermediate directories are plain dicts keyed by component name.
        On error returns public.returnMsg(False, ...) instead.
        """
        if not self.check_file_exists(sfile):
            return public.returnMsg(False, 'FILE_NOT_EXISTS')

        is_valid, message = self.check_file_type(sfile)
        if not is_valid:
            return public.returnMsg(False, message)

        zip_file = tarfile.open(sfile)
        data = {}
        for item in zip_file.getmembers():
            file_name = self.get_filename(item)

            temp_list = file_name.split("/")

            # Walk/create the nested dict one path component at a time.
            sub_data = data
            for name in temp_list:
                if not name: continue
                if name not in sub_data:
                    # NOTE(review): this condition treats the last component
                    # as a leaf only when ".<name>" does not also occur in
                    # the full path — presumably to dodge dotted duplicates;
                    # confirm the intent.
                    if file_name.endswith(name) and not ".{}".format(name) in file_name:
                        sub_data[name] = {
                            'file_size': item.size,
                            'filename': name,
                            'fullpath': file_name,
                            'date_time': public.format_date(times=item.mtime),
                            'is_dir': 1 if item.isdir() else 0
                        }
                    else:
                        sub_data[name] = {}
                sub_data = sub_data[name]

        zip_file.close()
        return data

    def get_file_info(self, sfile, filename):
        """Extract one member to a temp dir and return its text content.

        Returns {'status': True, 'data': <content or ''>}; extraction errors
        are silently ignored, leaving 'data' empty. The temp dir is removed
        afterwards.
        """
        if not self.check_file_exists(sfile):
            return public.returnMsg(False, 'FILE_NOT_EXISTS')

        # Per-request temp dir keyed by md5(archive + member name).
        tmp_path = '{}/tmp/{}'.format(public.get_panel_path(), public.md5(sfile + filename))
        result = {}
        result['status'] = True
        result['data'] = ''
        with tarfile.open(sfile, 'r') as zip_file:
            try:
                # NOTE(review): `filename` may have been re-decoded by
                # get_filename() and so differ from the raw member name —
                # extract() would then fail and fall into the except.
                zip_file.extract(filename, tmp_path)
                result['data'] = public.readFile('{}/{}'.format(tmp_path, filename))
            except:
                pass
        public.ExecShell("rm -rf {}".format(tmp_path))
        return result
|
||||
|
||||
|
||||
class ZipHandler(BaseCompressHandler):
    """Handler for zip archive files."""

    def check_file_type(self, sfile, is_close=False):
        """Try to open *sfile* as a zip; return the ZipFile or None.

        When is_close is True the handle is closed before returning, so the
        truthiness of the result is all the caller should rely on.
        """
        zip_file = None
        try:
            zip_file = zipfile.ZipFile(sfile)
        except:
            pass

        if is_close and zip_file:
            zip_file.close()

        return zip_file

    def get_filename(self, item):
        """Return the entry name, re-decoding cp437 to utf-8 or gbk.

        Zip stores names as cp437 unless the utf-8 flag is set; legacy
        archives created on Chinese Windows are often really GBK.
        """
        path = item.filename
        try:
            path_name = path.encode('cp437').decode('utf-8')
        except:
            try:
                path_name = path.encode('cp437').decode('gbk')
                path_name = path_name.encode('utf-8').decode('utf-8')
            except:
                path_name = path

        return path_name

    def get_files(self, sfile):
        """Build a nested dict describing the archive's directory tree.

        Leaf entries are metadata dicts; intermediate directories are plain
        dicts keyed by component name. On error returns
        public.returnMsg(False, ...) instead.
        """
        if not self.check_file_exists(sfile):
            return public.returnMsg(False, 'FILE_NOT_EXISTS')

        zip_file = self.check_file_type(sfile)
        if not zip_file:
            return public.returnMsg(False, 'NOT_ZIP_FILE')

        data = {}
        for item in zip_file.infolist():
            file_name = self.get_filename(item)

            # NOTE(review): lstrip("./") strips *characters*, not the "./"
            # prefix — a name like "..foo" would lose its leading dots;
            # confirm this is acceptable for the expected archives.
            temp_list = file_name.lstrip("./").split("/")

            # Walk/create the nested dict one path component at a time.
            sub_data = data
            for name in temp_list:
                if not name: continue
                if name not in sub_data:
                    if file_name.endswith(name):
                        sub_data[name] = {
                            'file_size': item.file_size,
                            'compress_size': item.compress_size,
                            'compress_type': item.compress_type,
                            'filename': name,
                            'fullpath': file_name,
                            'date_time': datetime.datetime(*item.date_time).strftime("%Y-%m-%d %H:%M:%S"),
                            'is_dir': 1 if item.is_dir() else 0
                        }
                    else:
                        sub_data[name] = {}
                sub_data = sub_data[name]

        zip_file.close()
        return data

    def get_file_info(self, sfile,filename):
        """Read one entry (matched by decoded name) and return its content.

        Returns {'status': True, 'data': <decoded text or ''>, 'encoding': ...};
        'encoding' is only present when the entry was found.
        """
        if not self.check_file_exists(sfile):
            return public.returnMsg(False, 'FILE_NOT_EXISTS')

        result = {}
        result['status'] = True
        result['data'] = ''
        with zipfile.ZipFile(sfile, 'r') as zip_file:
            for item in zip_file.infolist():
                z_filename = self.get_filename(item)
                if z_filename == filename:
                    # Read with the raw (undecoded) member name.
                    buff = zip_file.read(item.filename)
                    encoding, srcBody = public.decode_data(buff)
                    result['encoding'] = encoding
                    result['data'] = srcBody
                    break
        return result
|
||||
397
mod/project/docker/comMod.py
Normal file
397
mod/project/docker/comMod.py
Normal file
@@ -0,0 +1,397 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# YakPanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 YakPanel(www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: wzz <wzz@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
# ------------------------------
|
||||
# docker模型
|
||||
# ------------------------------
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
os.chdir("/www/server/panel")
|
||||
import public
|
||||
|
||||
from mod.project.docker.app.appManageMod import AppManage
|
||||
# from mod.project.docker.runtime.runtimeManage import RuntimeManage
|
||||
# from mod.project.docker.sites.sitesManage import SitesManage
|
||||
from mod.project.docker.app.sub_app.ollamaMod import OllamaMod
|
||||
from mod.project.docker.apphub.apphubManage import AppHub
|
||||
from btdockerModelV2 import dk_public as dp
|
||||
|
||||
|
||||
class main(AppManage, OllamaMod):
|
||||
|
||||
def __init__(self):
    # Initialise the co-operative MRO chain (AppManage first).
    super(main, self).__init__()
    # Explicitly initialise OllamaMod too. NOTE(review): if AppManage's
    # __init__ already chains to super().__init__(), OllamaMod.__init__
    # may run twice here — confirm the intended MRO behaviour.
    OllamaMod.__init__(self)
|
||||
|
||||
# 2024/6/26 17:49 Stream the list of all deployed compose projects.
def get_project_list(self, get):
    '''
    @name Stream the list of all deployed compose projects over WebSocket.
    @author wzz <2024/6/26 下午5:49>
    @param get._ws  optional WebSocket; when present the list is pushed
                    every 2 seconds until the connection drops.
    @return dict{"status":True/False,"msg":"error text"} only on exception;
            the normal path loops forever pushing data.
    '''
    try:
        if self.def_name is None: self.set_def_name(get.def_name)
        # A previous subscription already exists on this socket — bail out.
        if hasattr(get, '_ws') and hasattr(get._ws, 'btws_get_project_list'):
            return

        # NOTE(review): when get has no _ws this loop never sends nor
        # terminates — confirm the endpoint is WebSocket-only.
        while True:
            compose_list = self.ls(get)
            if len(compose_list) == 0:
                if hasattr(get, '_ws'):
                    get._ws.send(json.dumps(self.wsResult(
                        True,
                        data=[],
                    )))

            # Panel-side records for deployed stacks.
            stacks_info = dp.sql("stacks").select()

            compose_project = []

            for j in compose_list:
                # "Status" looks like "running(2), exited(1)"; sum the
                # parenthesised counts to get the container total.
                t_status = j["Status"].split(",")
                container_count = 0
                for ts in t_status:
                    container_count += int(ts.split("(")[1].split(")")[0])

                # Map internal "bt_compose_*" names back to display names.
                j_name = j['Name']
                if "bt_compose_" in j_name:
                    config_path = "{}/config/name_map.json".format(public.get_panel_path())
                    name_map = json.loads(public.readFile(config_path))
                    if j_name in name_map:
                        j_name = name_map[j_name]
                    else:
                        j_name = j_name.replace("bt_compose_", "")

                tmp = {
                    "id": None,
                    "name": j_name,
                    "status": "1",
                    "path": j['ConfigFiles'],
                    "template_id": None,
                    "time": None,
                    "remark": "",
                    "run_status": j['Status'].split("(")[0].lower(),
                    "container_count": container_count,
                }
                # Enrich from the stacks table: match by md5(name) substring
                # first, then by exact name.
                for i in stacks_info:
                    if public.md5(i['name']) in j['Name']:

                        tmp["name"] = i['name']
                        tmp["run_status"] = j['Status'].split("(")[0].lower()
                        tmp["template_id"] = i['template_id']
                        tmp["time"] = i['time']
                        tmp["remark"] = i["remark"]
                        tmp["id"] = i['id']
                        break

                    if i['name'] == j['Name']:
                        tmp["run_status"] = j['Status'].split("(")[0].lower()
                        tmp["template_id"] = i['template_id']
                        tmp["time"] = i['time']
                        tmp["remark"] = i["remark"]
                        tmp["id"] = i['id']
                        break

                # Fallback creation time: first container's CreatedAt.
                if tmp["time"] is None:
                    if os.path.exists(j['ConfigFiles']):
                        get.path = j['ConfigFiles']
                        compose_ps = self.ps(get)
                        if len(compose_ps) > 0 and "CreatedAt" in compose_ps[0]:
                            tmp["time"] = dp.convert_timezone_str_to_timestamp(compose_ps[0]['CreatedAt'])

                compose_project.append(tmp)

            # Push newest-first; projects without a time sort last.
            if hasattr(get, '_ws'):
                setattr(get._ws, 'btws_get_project_list', True)
                get._ws.send(json.dumps(self.wsResult(
                    True,
                    data=sorted(compose_project, key=lambda x: x["time"] if x["time"] is not None else float('-inf'), reverse=True),
                )))

            time.sleep(2)
    except Exception as e:
        # Any failure (including a dropped WebSocket) ends the stream.
        return public.return_message(-1, 0, str(e))
|
||||
|
||||
# 2026/2/26 20:55 docker-compose ps for a given compose.yml, with exception
# handling and type checks so WebSocket push errors cannot crash the loop.
def get_project_ps(self, get):
    '''
    @name Stream docker-compose ps for one compose.yml over WebSocket.
    @author wzz <2026/2/26>
    @param get.path: compose project path
    @return dict {"status": True/False, "msg": "..."}
    '''
    try:
        if self.def_name is None:
            self.set_def_name(get.def_name)

        # Ensure path is present.
        if not hasattr(get, 'path') or not get.path:
            return public.return_message(-1, 0, "Missing 'path' parameter")

        # One subscription flag per compose path on the socket.
        ws_flag_attr = f'btws_get_project_ps_{get.path}'

        # Prevent duplicate subscriptions.
        if hasattr(get, '_ws') and hasattr(get._ws, ws_flag_attr):
            result = self.wsResult(True, data=[])
            try:
                get._ws.send(json.dumps(result))
            except:
                pass
            return result

        from btdockerModelV2.dockerSock import container
        sk_container = container.dockerContainer()

        while True:
            try:
                compose_list = self.ps(get)
                # Force it to be a list.
                if not isinstance(compose_list, list):
                    compose_list = []

                # No containers: send an empty result and stop.
                if len(compose_list) == 0:
                    if hasattr(get, '_ws'):
                        try:
                            get._ws.send(json.dumps(self.wsResult(True, data=[])))
                        except:
                            pass
                    break

                # Process every container entry.
                for l in compose_list:
                    if not isinstance(l, dict):
                        continue

                    # Fill in Image from the container inspect data.
                    if "Image" not in l:
                        l["Image"] = ""
                    if l.get("ID"):
                        try:
                            inspect = sk_container.get_container_inspect(l["ID"])
                            l["Image"] = inspect.get("Config", {}).get("Image", "")
                        except:
                            pass  # ignore inspect failures

                    # Build the human-readable Ports string.
                    if "Ports" not in l:
                        l["Ports"] = ""
                    publishers = l.get("Publishers")
                    if publishers and isinstance(publishers, list):
                        for p in publishers:
                            if not isinstance(p, dict):
                                continue
                            url = p.get("URL", "")
                            target = p.get("TargetPort", "")
                            proto = p.get("Protocol", "")
                            pub_port = p.get("PublishedPort", "")
                            if url == "":
                                l["Ports"] += f"{target}/{proto},"
                            else:
                                l["Ports"] += f"{url}:{pub_port}->{target}/{proto},"

                    # Build structured ports (compatible with
                    # containerModel.struct_container_ports).
                    ports_data = {}
                    publishers = l.get("Publishers")
                    if publishers and isinstance(publishers, list):
                        for port in publishers:
                            if not isinstance(port, dict):
                                continue
                            key = f"{port.get('TargetPort', '')}/{port.get('Protocol', '')}"
                            host_ip = port.get("URL", "")
                            host_port = str(port.get("PublishedPort", ""))
                            entry = {"HostIp": host_ip, "HostPort": host_port}
                            if key not in ports_data:
                                ports_data[key] = [entry] if host_ip else None
                            elif ports_data[key] is not None:
                                ports_data[key].append(entry)
                    l["ports"] = ports_data

                # Push the enriched list.
                if hasattr(get, '_ws'):
                    setattr(get._ws, ws_flag_attr, True)
                    try:
                        get._ws.send(json.dumps(self.wsResult(True, data=compose_list)))
                    except:
                        # WebSocket has gone away — leave the loop.
                        break

                time.sleep(2)

            except Exception:
                # Inner-loop failure: exit safely instead of crashing.
                break

        return self.wsResult(True, data=[])

    except Exception as e:
        return public.return_message(-1, 0, str(e))
|
||||
|
||||
# 2024/11/11 14:34 Running-container info or installed-app info.
def get_some_info(self, get):
    '''
    @name Get all running containers, or the installed apps, depending on type.
    @param type "container" (default) or "app"
    @return public.return_message wrapping either a list of container dicts
            or the filtered installed-app payload.
    '''
    get.type = get.get("type", "container")
    if not get.type in ("container", "app"):
        return public.return_message(-1, 0, public.lang("Only container and app types are supported"))

    if get.type == "container":
        from btdockerModelV2.dockerSock import container
        sk_container = container.dockerContainer()
        sk_container_list = sk_container.get_container()

        data = []
        # FIX: the loop variable was previously named `container`, shadowing
        # the module imported just above.
        for item in sk_container_list:
            if not "running" in item["State"]: continue

            # Unique list of published host ports.
            port_list = []
            for p in item["Ports"]:
                if not "PublicPort" in p: continue
                if not p["PublicPort"] in port_list:
                    port_list.append(p["PublicPort"])

            data.append({
                "id": item["Id"],
                "name": item["Names"][0].replace("/", ""),
                "status": item["State"],
                "image": item["Image"],
                "created_time": item["Created"],
                "ports": port_list,
            })

        return public.return_message(0, 0, data)
    else:
        get.row = 10000
        installed_apps = self.get_installed_apps(get)['message']
        # Apps in these categories are never reported.
        not_allow_category = ("Database", "System")
        if installed_apps and installed_apps.get('data', []):
            # FIX: the original removed items from the list while iterating
            # over it, which skips the element after every removal. Rebuild
            # the list with a single filter instead.
            installed_apps["data"] = [
                app for app in installed_apps["data"]
                if "running" in app["status"] and app["apptype"] not in not_allow_category
            ]

        return public.return_message(0, 0, installed_apps)
|
||||
|
||||
def generate_apphub(self, get):
    '''
    @name Re-parse the external (apphub) application list.
    @author csj <2025/7/9>
    @return dict{"status":True/False,"msg":"提示信息"}
    '''
    hub = AppHub()
    return hub.generate_apphub(get)
|
||||
|
||||
def create_app(self,get):
    '''
    @name Create an application; appid == "-1" means "create from apphub".
    @author csj <2025/7/9>
    @param appid    "-1" selects the apphub template path; any other value
                    falls straight through to the parent implementation
    @param version  app version directory (default "latest")
    @param app_name application name (directory under hub_home_path)
    @return dict{"status":True/False,"msg":"提示信息"}
    '''
    if get.get("appid","0") == "-1": # create the app from apphub
        # Point the template paths at the apphub checkout.
        self.templates_path = os.path.join(AppHub.hub_home_path, "templates")
        self.apps_json_file = os.path.join(AppHub.hub_home_path, "apps.json")

        version = get.get("version","latest")
        app_name = get.get("app_name","")

        if not os.path.exists(self.templates_path):
            os.makedirs(self.templates_path)

        # /www/dk_project/dk_app/apphub/apphub/templates/app_name/version
        app_version_path = os.path.join(AppHub.hub_home_path, app_name, version)
        if not os.path.exists(app_version_path):
            return public.return_message(-1, 0, public.lang("Version {} for applying {} does not exist", (version, app_name)))

        # /www/dk_project/dk_app/apphub/apphub/templates/app_name
        app_template_path = os.path.join(self.templates_path, app_name)

        # FIX: raw string — "\c" is an invalid escape sequence and raises a
        # SyntaxWarning on modern Python (same bytes at runtime; the leading
        # backslash bypasses any shell alias for cp).
        public.ExecShell(r"\cp -r {} {}".format(app_version_path,app_template_path))

    return super().create_app(get)
|
||||
|
||||
def get_apphub_config(self, get):
    '''
    @name Fetch the apphub configuration.
    @author csj <2025/7/9>
    @return dict{"status":True/False,"data":{}}
    '''
    config = AppHub.get_config()
    return public.return_message(0, 0, config)
|
||||
|
||||
def set_apphub_git(self,get):
    '''
    @name Store the git address/branch used for external apps.
    @author csj <2025/7/9>
    @param get: git_url, git_branch, user, password
    '''
    # Validate the two mandatory fields before delegating.
    missing_url = not hasattr(get, 'git_url') or not get.git_url
    if missing_url:
        return public.return_message(-1, 0, public.lang("GIT ADDRESS IS NOT SET"))

    missing_branch = not hasattr(get, 'git_branch') or not get.git_branch
    if missing_branch:
        return public.return_message(-1, 0, public.lang("The branch name is not set"))

    return AppHub().set_apphub_git(get)
|
||||
|
||||
def import_git_apphub(self,get):
    '''
    @name Import external applications from the configured git repository.
    @author csj <2025/7/9>
    '''
    hub = AppHub()
    return hub.import_git_apphub(get)
|
||||
|
||||
def install_apphub(self,get):
    '''
    @name Install the environment that apphub requires.
    @author csj <2025/7/9>
    '''
    hub = AppHub()
    return hub.install_apphub(get)
|
||||
|
||||
def import_zip_apphub(self,get):
    '''
    @name Import external applications from archive file(s).
    @author csj <2025/7/9>
    @param get: sfile: zip文件路径
    '''
    # The archive path is mandatory.
    has_sfile = hasattr(get, 'sfile') and get.sfile
    if not has_sfile:
        return public.return_message(-1, 0, public.lang("The zip file path is not set"))

    return AppHub().import_zip_apphub(get)
|
||||
|
||||
def parser_zip_apphub(self,get):
    '''
    @name Parse one or more archive files and list the apps they contain.
    @author csj <2025/7/9>
    @param get: sfile: comma-separated archive paths
    @return dict{"status":True/False,"data":[]}
    '''
    if not hasattr(get, 'sfile') or not get.sfile:
        return public.return_message(-1, 0, public.lang("Please select the file path"))

    app_list = []
    files = get.sfile.split(',')
    for sfile in files:
        get.sfile = sfile

        apps = AppHub().parser_zip_apphub(get)
        # FIX: on failure AppHub.parser_zip_apphub returns a message dict,
        # not a list; extending with it would spill its keys into app_list.
        # Propagate the error instead.
        if not isinstance(apps, list):
            return apps
        app_list.extend(apps)
    public.print_log(app_list)
    return public.return_message(0, 0, app_list)
|
||||
|
||||
|
||||
|
||||
# Module is import-only; nothing to do when executed directly.
if __name__ == '__main__':
    pass
|
||||
747
mod/project/docker/composeMod.py
Normal file
747
mod/project/docker/composeMod.py
Normal file
@@ -0,0 +1,747 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# YakPanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 YakPanel(www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: wzz <wzz@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
# ------------------------------
|
||||
# docker模型 - docker compose
|
||||
# ------------------------------
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
import public
|
||||
|
||||
os.chdir("/www/server/panel")
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
from mod.project.docker.docker_compose.base import Compose
|
||||
|
||||
|
||||
# 2024/6/25 Decorator validating the compose file path before running the handler.
def check_file(func):
    '''
    @name Decorator that validates get.path before calling the wrapped handler
    @author wzz <2024/6/25 下午2:30>
    @param get.path : absolute path of the docker-compose.yaml
           get.def_name : name of the API method being executed, e.g. get_log
    @return None; errors and the final status are pushed over get._ws
    '''
    import functools

    # Fix: functools.wraps preserves the wrapped handler's __name__/__doc__,
    # which the original wrapper discarded.
    @functools.wraps(func)
    def wrapper(self, get, *args, **kwargs):
        try:
            get.path = get.get("path/s", None)
            if get.path is None:
                get._ws.send(json.dumps(self.wsResult(False, public.lang("The path parameter cannot be empty"), code=1)))
                return

            if not os.path.exists(get.path):
                get._ws.send(
                    json.dumps(self.wsResult(False, public.lang("[{}] file does not exist",get.path), code=2)))
                return

            func(self, get, *args, **kwargs)

            # Mutating operations get a final "completed" frame so the front
            # end knows it may close the log window.
            if get.def_name in ("create", "up", "update", "start", "stop", "restart","rebuild"):
                get._ws.send(
                    json.dumps(self.wsResult(True, public.lang(" {} completed, if the log no exception to close this window!\r\n",get.option), data=-1, code=-1)))
        except Exception:
            # Best effort: the websocket may already be closed, in which case
            # there is nobody left to report the failure to.
            return

    return wrapper
|
||||
|
||||
|
||||
class main(Compose):
|
||||
|
||||
def __init__(self):
|
||||
super(main, self).__init__()
|
||||
|
||||
    # 2024/6/25 Run a docker-compose command in a pty and stream its live output.
    def exec_cmd(self, get, command):
        '''
        @name Execute a docker-compose command and forward its output in real time
        @author wzz <2024/6/25 下午2:41>
        @param get: request object; get._ws (optional websocket) receives the
                    output frames, get.def_name names the API method running
        @param command: argv list handed to os.execvp
        @return None; all output travels over the websocket
        '''
        if self.def_name is None: self.set_def_name(get.def_name)
        # pty is POSIX-only; imported lazily inside the method.
        import pty

        try:
            def read_output(fd, ws):
                # Pump the child's pty output to the websocket until EOF.
                while True:
                    output = os.read(fd, 1024)
                    if not output:
                        break

                    if hasattr(get, '_ws'):
                        ws.send(json.dumps(self.wsResult(
                            True,
                            output.decode(),
                        )))

            # A pty (rather than a plain pipe) keeps docker-compose's
            # progress rendering intact.
            pid, fd = pty.fork()
            if pid == 0:
                # Child process: replace ourselves with the compose command.
                os.execvp(command[0], command)
            else:
                read_output(fd, get._ws)
        except:
            # On Linux os.read raises once the child exits, so landing here is
            # the normal way out of the loop; log streams get a final empty
            # frame so the client can finish cleanly.
            if self.def_name in ("get_logs", "get_project_container_logs"):
                if hasattr(get, '_ws'):
                    get._ws.send(json.dumps(self.wsResult(
                        True,
                        "",
                    )))
            return
|
||||
|
||||
# 2024/6/25 下午2:44 更新指定docker-compose里面的镜像
|
||||
@check_file
|
||||
def update(self, get):
|
||||
'''
|
||||
@name 更新指定docker-compose里面的镜像
|
||||
@param get
|
||||
@return dict{"status":True/False,"msg":"提示信息"}
|
||||
'''
|
||||
get.option = "Update"
|
||||
if hasattr(get, '_ws'):
|
||||
get._ws.send(json.dumps(self.wsResult(
|
||||
True,
|
||||
"",
|
||||
)))
|
||||
command = self.set_type(1).set_path(get.path).get_compose_pull()
|
||||
self.status_exec_logs(get, command)
|
||||
command = self.set_type(1).set_path(get.path).get_compose_up_remove_orphans()
|
||||
self.status_exec_logs(get, command)
|
||||
|
||||
# 2024/6/28 下午2:19 重建指定docker-compose项目
|
||||
@check_file
|
||||
def rebuild(self, get):
|
||||
'''
|
||||
@name 重建指定docker-compose项目
|
||||
@param get
|
||||
@return dict{"status":True/False,"msg":"提示信息"}
|
||||
'''
|
||||
get.option = "Rebuild"
|
||||
command = self.set_type(1).set_path(get.path).get_compose_down()
|
||||
self.status_exec_logs(get, command)
|
||||
command = self.set_type(1).set_path(get.path).get_compose_up_remove_orphans()
|
||||
self.status_exec_logs(get, command)
|
||||
|
||||
# 2024/6/24 下午10:54 停止指定docker-compose项目
|
||||
@check_file
|
||||
def stop(self, get):
|
||||
'''
|
||||
@name 停止指定docker-compose项目
|
||||
@author wzz <2024/6/24 下午10:54>
|
||||
@param "data":{"参数名":""} <数据类型> 参数描述
|
||||
@return dict{"status":True/False,"msg":"提示信息"}
|
||||
'''
|
||||
get.option = "Stop"
|
||||
command = self.set_type(1).set_path(get.path).get_compose_stop()
|
||||
self.status_exec_logs(get, command)
|
||||
|
||||
# 2024/6/24 下午10:54 启动指定docker-compose项目
|
||||
@check_file
|
||||
def start(self, get):
|
||||
'''
|
||||
@name 启动指定docker-compose项目
|
||||
'''
|
||||
get.option = "Start"
|
||||
command = self.set_type(1).set_path(get.path).get_compose_up_remove_orphans()
|
||||
self.status_exec_logs(get, command)
|
||||
|
||||
# 2024/6/24 下午11:23 down指定docker-compose项目
|
||||
@check_file
|
||||
def down(self, get):
|
||||
'''
|
||||
@name 停止指定docker-compose项目,并删除容器、网络、镜像等
|
||||
'''
|
||||
get.option = "Stop"
|
||||
command = self.set_type(1).set_path(get.path).get_compose_down()
|
||||
self.status_exec_logs(get, command)
|
||||
|
||||
# 2024/6/24 下午11:23 部署指定docker-compose项目
|
||||
@check_file
|
||||
def up(self, get):
|
||||
'''
|
||||
@name 部署指定docker-compose项目
|
||||
'''
|
||||
get.option = "Add container orchestration"
|
||||
command = self.set_type(1).set_path(get.path).get_compose_up_remove_orphans()
|
||||
self.status_exec_logs(get, command)
|
||||
|
||||
# 2024/6/24 下午11:23 重启指定docker-compose项目
|
||||
@check_file
|
||||
def restart(self, get):
|
||||
'''
|
||||
@name 重启指定docker-compose项目
|
||||
'''
|
||||
get.option = "Reboot"
|
||||
command = self.set_type(1).set_path(get.path).get_compose_restart()
|
||||
# self.exec_logs(get, command)
|
||||
self.status_exec_logs(get, command)
|
||||
|
||||
# 2024/6/26 下午4:28 获取docker-compose ls -a --format json
|
||||
def ls(self, get):
|
||||
'''
|
||||
@name 获取docker-compose ls -a --format json
|
||||
'''
|
||||
get.option = "Get the orchestration list"
|
||||
command = self.get_compose_ls()
|
||||
|
||||
try:
|
||||
cmd_result = public.ExecShell(command)[0]
|
||||
if "Segmentation fault" in cmd_result:
|
||||
return []
|
||||
return json.loads(cmd_result)
|
||||
except:
|
||||
return []
|
||||
|
||||
# 2024/6/26 下午8:38 获取指定compose.yaml的docker-compose ps
|
||||
def ps(self, get):
|
||||
'''
|
||||
@name 获取指定compose.yaml的docker-compose ps
|
||||
@author wzz <2024/6/26 下午8:38>
|
||||
@param "data":{"参数名":""} <数据类型> 参数描述
|
||||
@return dict{"status":True/False,"msg":"提示信息"}
|
||||
'''
|
||||
get.path = get.get("path/s", None)
|
||||
if get.path is None:
|
||||
get._ws.send(json.dumps(self.wsResult(False, public.lang("The path parameter cannot be empty"), code=1)))
|
||||
return self.wsResult(False, public.lang("The path parameter cannot be empty"), code=1)
|
||||
|
||||
if not os.path.exists(get.path):
|
||||
get._ws.send(
|
||||
json.dumps(self.wsResult(False, public.lang("[{}] file does not exist",get.path), code=2)))
|
||||
return self.wsResult(False, public.lang("[{}] file does not exist",get.path), code=1)
|
||||
|
||||
get.option = "Obtain the container information of the specified orchestration"
|
||||
command = self.set_path(get.path, rep=True).get_compose_ps()
|
||||
|
||||
try:
|
||||
cmd_result = public.ExecShell(command)[0]
|
||||
if "Segmentation fault" in cmd_result:
|
||||
return []
|
||||
|
||||
if not cmd_result.startswith("["):
|
||||
return json.loads("[" + cmd_result.strip().replace("\n", ",") + "]")
|
||||
else:
|
||||
return json.loads(cmd_result.strip().replace("\n", ","))
|
||||
except:
|
||||
self.ps_count += 1
|
||||
if self.ps_count < 5:
|
||||
time.sleep(0.5)
|
||||
return self.ps(get)
|
||||
return []
|
||||
|
||||
# 2024/6/24 下午10:53 获取指定docker-compose的运行日志
|
||||
@check_file
|
||||
def get_logs(self, get):
|
||||
'''
|
||||
@name websocket接口,执行docker-compose命令,返回结果:执行self.get_compose_logs()命令
|
||||
@param get
|
||||
@return dict{"status":True/False,"msg":"提示信息"}
|
||||
'''
|
||||
self.set_tail("10")
|
||||
get.option = "Read the logs"
|
||||
|
||||
command = self.set_type(1).set_path(get.path).get_compose_logs()
|
||||
# public.print_log(" 获取日志 ,命令 --{}".format(command))
|
||||
# public.print_log(" 获取日志 ,get --{}".format(get))
|
||||
self.exec_logs(get, command)
|
||||
|
||||
# 2024/6/26 下午9:24 获取指定compose.yaml的内容
|
||||
def get_config(self, get):
|
||||
'''
|
||||
@name 获取指定compose.yaml的内容
|
||||
@author wzz <2024/6/26 下午9:25>
|
||||
@param "data":{"参数名":""} <数据类型> 参数描述
|
||||
@return dict{"status":True/False,"msg":"提示信息"}
|
||||
'''
|
||||
if self.def_name is None: self.set_def_name(get.def_name)
|
||||
get.path = get.get("path/s", None)
|
||||
if get.path is None:
|
||||
if hasattr(get, '_ws'):
|
||||
get._ws.send(json.dumps(self.wsResult(False, public.lang("The path parameter cannot be empty"), code=1)))
|
||||
return
|
||||
|
||||
if not os.path.exists(get.path):
|
||||
if hasattr(get, '_ws'):
|
||||
get._ws.send(
|
||||
json.dumps(self.wsResult(False, public.lang("[{}] file does not exist",get.path), code=2)))
|
||||
return
|
||||
|
||||
try:
|
||||
config_body = public.readFile(get.path)
|
||||
# env_body = public.readFile(get.path.replace("docker-compose.yaml", ".env").replace("docker-compose.yml", ".env"))
|
||||
# 获取文件路径 有些情况不是用标准文件名进行启动容器的
|
||||
file_path = os.path.dirname(get.path)
|
||||
env_path = os.path.join(file_path, ".env")
|
||||
# 判断路径下.env 文件是否存在
|
||||
env_body = public.readFile(env_path) if os.path.exists(env_path) else ""
|
||||
if hasattr(get, '_ws'):
|
||||
get._ws.send(json.dumps(self.wsResult(True, public.lang("Get ahead"), data={
|
||||
"config": config_body if config_body else "",
|
||||
"env": env_body if env_body else "",
|
||||
})))
|
||||
return
|
||||
except:
|
||||
if hasattr(get, '_ws'):
|
||||
get._ws.send(json.dumps(self.wsResult(False, public.lang("Failed to get"), data={}, code=3)))
|
||||
return
|
||||
|
||||
    # 2024/6/26 Persist an edited compose yaml + .env after validating the yaml.
    def save_config(self, get):
        '''
        @name Save the content of a compose file and its companion .env
        @param get: path - compose yaml path; config - yaml text; env - .env text
        @return None; success/failure is reported over the websocket only
        '''
        if self.def_name is None: self.set_def_name(get.def_name)
        get.path = get.get("path/s", None)
        get.config = get.get("config/s", None)
        get.env = get.get("env/s", None)
        if get.path is None:
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(False, public.lang("The path parameter cannot be empty"), code=1)))
            return

        if not os.path.exists(get.path):
            if hasattr(get, '_ws'):
                get._ws.send(
                    json.dumps(self.wsResult(False, public.lang("[{}] file does not exist",get.path), code=2)))
            return

        # Chinese characters in the path break the docker-compose CLI.
        if public.check_chinese(get.path):
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(False, public.lang("The file path cannot contain Chinese!"), code=3)))
            return

        if get.config is None:
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(False, public.lang("The config parameter cannot be empty"), code=3)))
            return

        if get.env is None:
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(False, public.lang("The env parameter cannot be empty"), code=3)))
            return

        try:
            # Validate with "docker-compose config" before touching the files.
            stdout, stderr = self.check_config(get)
            if stderr:
                if hasattr(get, '_ws'):
                    get._ws.send(json.dumps(self.wsResult(
                        False,
                        public.lang("Saving failed, please check whether the compose.yaml file format is correct: 【{}】",stderr),
                        code=4,
                    )))
                return
            # A segfaulting binary means the installed compose is too old.
            if "Segmentation fault" in stdout:
                if hasattr(get, '_ws'):
                    get._ws.send(json.dumps(self.wsResult(
                        False,
                        public.lang("The save failed. The docker-compose version is too low. Please upgrade to the latest version!"),
                        code=4,
                    )))
                return

            public.writeFile(get.path, get.config)
            # The .env always lives next to the compose file.
            env_path = os.path.join(os.path.dirname(get.path), ".env")
            public.writeFile(env_path,get.env)

            # self.up(get)

            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(
                    True,
                    public.lang("The save was successful"),
                )))

            return
        except:
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(
                    False,
                    public.lang("Save failed"),
                )))
            return
|
||||
|
||||
# 2024/6/27 上午10:25 检查compose内容是否正确
|
||||
def check_config(self, get):
|
||||
'''
|
||||
@name 检查compose内容是否正确
|
||||
@author wzz <2024/6/27 上午10:26>
|
||||
@param "data":{"参数名":""} <数据类型> 参数描述
|
||||
@return dict{"status":True/False,"msg":"提示信息"}
|
||||
'''
|
||||
if not os.path.exists("/tmp/btdk"):
|
||||
os.makedirs("/tmp/btdk", 0o755, True)
|
||||
|
||||
tmp_path = "/tmp/btdk/{}".format(os.path.basename(public.GetRandomString(10).lower()))
|
||||
public.writeFile(tmp_path, get.config)
|
||||
public.writeFile("/tmp/btdk/.env", get.env)
|
||||
command = self.set_path(tmp_path, rep=True).get_compose_config()
|
||||
|
||||
stdout, stderr = public.ExecShell(command)
|
||||
if "`version` is obsolete" in stderr:
|
||||
public.ExecShell("sed -i '/version/d' {}".format(tmp_path))
|
||||
get.config = public.readFile(tmp_path)
|
||||
return self.check_config(get)
|
||||
|
||||
public.ExecShell("rm -f {}".format(tmp_path))
|
||||
return stdout, stderr
|
||||
|
||||
    # 2024/6/27 Create a docker-compose project from raw yaml content.
    def create(self, get):
        '''
        @name Create a docker-compose project from get.config
        @author wzz <2024/6/27 上午10:07>
        @param get: project_name - new project name; config - compose yaml text;
                    env - .env text; add_template - 1 to also save as template;
                    template_name - template name when add_template == 1;
                    remark - optional project remark
        @return None; progress and errors are streamed over the websocket
        '''

        if self.def_name is None: self.set_def_name(get.def_name)
        get.project_name = get.get("project_name/s", None)
        if get.project_name is None:
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(
                    False,
                    public.lang("The project_name parameter cannot be empty"),
                    code=1,
                )))
            return

        get.config = get.get("config/s", None)
        if get.config is None:
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(
                    False,
                    public.lang("The config parameter cannot be empty"),
                    code=2,
                )))
            return

        # Validate the yaml before anything is written to disk or the DB.
        stdout, stderr = self.check_config(get)
        if stderr:
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(
                    False,
                    public.lang("Creation failed, please check whether the compose.yaml file format is correct: \r\n{}",stderr.replace("\n", "\r\n")),
                    code=4,
                )))
            return
        if "Segmentation fault" in stdout:
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(
                    False,
                    public.lang("Creation failed, the docker-compose version is too low, please upgrade to the latest version!"),
                    code=4,
                )))
            return
        # 2024/2/20 Chinese project names are replaced by a random ascii alias
        # (docker-compose rejects them); the alias -> original mapping is kept
        # in config/name_map.json.
        config_path = "{}/config/name_map.json".format(public.get_panel_path())
        try:
            name_map = json.loads(public.readFile(config_path))
            import re
            if re.findall(r"[\u4e00-\u9fa5]", get.project_name):
                name_str = 'bt_compose_' + public.GetRandomString(10).lower()
                name_map[name_str] = get.project_name
                get.project_name = name_str
                public.writeFile(config_path, json.dumps(name_map))
        except:
            pass

        if not os.path.exists(self.compose_project_path): os.makedirs(self.compose_project_path, 0o755, True)
        if not os.path.exists(os.path.join(self.compose_project_path, get.project_name)):
            os.makedirs(os.path.join(self.compose_project_path, get.project_name), 0o755, True)

        get.path = os.path.join(self.compose_project_path, "{}/docker-compose.yaml".format(get.project_name))

        public.writeFile(get.path, get.config)
        public.writeFile(get.path.replace("docker-compose.yaml", ".env").replace("docker-compose.yml", ".env"), get.env)

        get.add_template = get.get("add_template/d", 0)
        template_id = None
        from btdockerModelV2 import dk_public as dp
        if get.add_template == 1:
            get.template_name = get.get("template_name/s", None)
            if get.template_name is None:
                if hasattr(get, '_ws'):
                    get._ws.send(json.dumps(self.wsResult(
                        False,
                        public.lang("template_name parameter cannot be empty"),
                        code=1,
                    )))
                return

            # Refuse duplicate template names.
            from btdockerModelV2 import composeModel as cm
            template_list = cm.main()._template_list(get)
            for template in template_list:
                if get.template_name == template['name']:
                    if hasattr(get, '_ws'):
                        get._ws.send(json.dumps(self.wsResult(
                            False,
                            public.lang("The template name already exists, please delete the template and add it again!"),
                            code=2,
                        )))
                    return

            # Save the content as a reusable template (could reuse
            # composeModel.add_template directly).
            template_path = os.path.join(self.compose_project_path, "{}".format(get.template_name))
            compose_path = os.path.join(template_path,"docker-compose.yaml")
            env_path = os.path.join(template_path,".env")
            pdata = {
                "name": get.template_name,
                "remark": "",
                "path": template_path,
                "add_in_path":1
            }
            template_id = dp.sql("templates").insert(pdata)
            if not os.path.exists(template_path):
                os.makedirs(template_path, 0o755, True)
            public.writeFile(compose_path, get.config)
            public.writeFile(env_path,get.env)

        get.remark = get.get("remark/s", "")
        stacks_info = dp.sql("stacks").where("name=?", (public.xsssec(get.project_name))).find()
        if not stacks_info:
            pdata = {
                "name": public.xsssec(get.project_name),
                "status": "1",
                "path": get.path,
                "template_id": template_id,
                "time": time.time(),
                "remark": public.xsssec(get.remark)
            }
            dp.sql("stacks").insert(pdata)
        else:
            # Row exists: keep it only if the project is actually running,
            # otherwise replace the stale record.
            check_status = public.ExecShell("docker-compose ls |grep {}".format(get.path))[0]
            if not check_status:
                dp.sql("stacks").where("name=?", (public.xsssec(get.project_name))).delete()
            else:
                if hasattr(get, '_ws'):
                    get._ws.send(json.dumps(self.wsResult(
                        False,
                        public.lang("The project name already exists, please delete it before adding it!"),
                        code=3,
                    )))
                return

        self.up(get)
|
||||
|
||||
    # 2024/6/27 Delete a docker-compose project and its database records.
    def delete(self, get):
        '''
        @name Remove the docker-compose project described by get.path / get.project_name
        @author wzz <2024/6/27 上午11:42>
        @param get: project_name - project name; path - compose yaml path
        @return None; the result is pushed over the websocket
        '''
        if self.def_name is None: self.set_def_name(get.def_name)
        get.project_name = get.get("project_name/s", None)
        if get.project_name is None:
            get._ws.send(json.dumps(self.wsResult(False, public.lang("The project_name parameter cannot be empty"), code=1)))
            return

        get.path = get.get("path/s", None)
        if get.path is None:
            get._ws.send(json.dumps(self.wsResult(False, public.lang("The path parameter cannot be empty"), code=1)))
            return

        from btdockerModelV2 import dk_public as dp
        # Drop the panel-side record of the stack, if any.
        stacks_info = dp.sql("stacks").where("path=? or name=?", (get.path, get.project_name)).find()
        if stacks_info:
            dp.sql("stacks").where("path=? or name=?", (get.path, get.project_name)).delete()

        # Projects created with a Chinese name use a generated "bt_compose_*"
        # alias; remove the alias entry from name_map.json as well.
        if "bt_compose_" in get.path:
            config_path = "{}/config/name_map.json".format(public.get_panel_path())
            name_map = json.loads(public.readFile(config_path))
            bt_compose_name = os.path.dirname(get.path).split("/")[-1]
            if bt_compose_name in name_map:
                name_map.pop(bt_compose_name)
                public.writeFile(config_path, json.dumps(name_map))

        # Garbage-collect DB rows whose project no longer appears in
        # "docker-compose ls" (for-else: delete only when no match was found).
        stacks_list = dp.sql("stacks").select()
        compose_list = self.ls(get)
        for i in stacks_list:
            for j in compose_list:
                if i['name'] == j['Name']:
                    break

                if public.md5(i['name']) in j['Name']:
                    break
            else:
                dp.sql("stacks").where("name=?", (i['name'])).delete()

        # Prefer "-f <file> down"; fall back to "-p <name> down" when the
        # yaml is gone or compose rejects the project file.
        if not os.path.exists(get.path):
            command = self.set_type(0).set_compose_name(get.project_name).get_compose_delete_for_ps()
        else:
            command = self.set_type(0).set_path(get.path).get_compose_delete()
        stdout, stderr = public.ExecShell(command)
        if "invalid compose project" in stderr:
            command = self.set_type(0).set_compose_name(get.project_name).get_compose_delete_for_ps()
            stdout, stderr = public.ExecShell(command)

        if stderr and "Error" in stderr:
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(
                    False,
                    "Removal fails, check if the compose.yaml file format is correct:\r\n{}".format(stderr.replace("\n", "\r\n")),
                    data=-1,
                    code=4,
                )))
            return

        if hasattr(get, '_ws'):
            get._ws.send(json.dumps(self.wsResult(
                True,
                public.lang("Delete container orchestration"),
                data=-1,
                code=0
            )))
|
||||
    # 2024/6/27 Delete several docker-compose projects in one call.
    def batch_delete(self, get):
        '''
        @name Batch variant of delete(): remove every project in get.project_list
        @param get: project_list - list of {"project_name": ..., "path": ...}
        @return None on the websocket path; an error wsResult dict when the
                list itself is missing or empty
        '''
        if self.def_name is None: self.set_def_name(get.def_name)
        get.project_list = get.get("project_list", None)
        if get.project_list is None or len(get.project_list) == 0:
            return self.wsResult(False, public.lang("The project_list parameter cannot be empty"), code=1)

        # name_map.json holds the Chinese-name aliases; edited in memory and
        # written back once after the loop.
        config_path = "{}/config/name_map.json".format(public.get_panel_path())
        try:
            name_map = json.loads(public.readFile(config_path))
        except:
            name_map = {}

        for project in get.project_list:
            # Malformed entries are reported and skipped, not fatal.
            if not isinstance(project, dict):
                if hasattr(get, '_ws'):
                    get._ws.send(json.dumps(self.wsResult(
                        False,
                        public.lang("project_list parameter format error: {}",project),
                        code=1,
                    )))
                continue

            if project["project_name"] is None or project["project_name"] == "":
                get._ws.send(
                    json.dumps(self.wsResult(False, public.lang("The project_name parameter cannot be empty"), code=1)))
                continue

            if project["path"] is None or project["path"] == "":
                get._ws.send(json.dumps(self.wsResult(False, public.lang("The path parameter cannot be empty"), code=1)))
                continue

            from btdockerModelV2 import dk_public as dp
            stacks_info = dp.sql("stacks").where("path=? or name=?", (project["path"], project["project_name"])).find()
            if stacks_info:
                dp.sql("stacks").where("path=? or name=?", (project["path"], project["project_name"])).delete()

            if "bt_compose_" in project["path"]:
                bt_compose_name = os.path.dirname(project["path"]).split("/")[-1]
                if bt_compose_name in name_map:
                    name_map.pop(bt_compose_name)

            # Prefer "-f <file> down"; fall back to "-p <name>" when the
            # compose file is already gone.
            if not os.path.exists(project["path"]):
                command = self.set_type(0).set_compose_name(project["project_name"]).get_compose_delete_for_ps()
            else:
                command = self.set_type(0).set_path(project["path"], rep=True).get_compose_delete()

            stdout, stderr = public.ExecShell(command)
            if "Segmentation fault" in stdout:
                if hasattr(get, '_ws'):
                    get._ws.send(json.dumps(self.wsResult(
                        False,
                        public.lang("Deletion failed, docker-compose version is too low, please upgrade to the latest version!"),
                        code=4,
                    )))
                return

            # public.ExecShell("rm -rf {}".format(os.path.dirname(project["path"])))
            # Per-project progress frame for the front end.
            if hasattr(get, '_ws'):
                get._ws.send(json.dumps(self.wsResult(
                    True,
                    data={
                        "project_name": project["project_name"],
                        "status": True
                    }
                )))

        public.writeFile(config_path, json.dumps(name_map))
        # Final frame (data=-1) tells the client the batch is finished.
        if hasattr(get, '_ws'):
            get._ws.send(json.dumps(self.wsResult(True, data=-1)))
|
||||
|
||||
    # 2024/6/28 Fetch the tail of one container's log by container id.
    def get_project_container_logs(self, get):
        '''
        @name Return the last 200 log lines of the container get.container_id
        @author wzz <2024/6/28 下午3:16>
        @param get: container_id - docker container id or name
        @return public.return_message dict; the payload uses \r\n line endings
        '''
        get.container_id = get.get("container_id/s", None)
        if get.container_id is None:
            return public.return_message(-1, 0, public.lang("The container_id parameter cannot be empty"))

        self.set_tail("200")
        self.set_container_id(get.container_id)
        command = self.get_container_logs()
        stdout, stderr = public.ExecShell(command)
        # NOTE(review): get_container_logs() appends "2>&1", so stderr is
        # normally empty and this check likely never matches — a missing
        # container's error text would arrive in stdout instead; verify
        # against the actual docker CLI output.
        if "invalid compose project" in stderr:
            return public.return_message(-1, 0, public.lang("The container does not exist"))

        return public.return_message(0, 0, stdout.replace("\n", "\r\n"))
|
||||
|
||||
# 2024/7/18 上午10:13 修改指定项目备注
|
||||
def edit_remark(self, get):
|
||||
'''
|
||||
@name 修改指定项目备注
|
||||
'''
|
||||
try:
|
||||
get.name = get.get("name", None)
|
||||
get.remark = get.get("remark", "")
|
||||
if get.name is None:
|
||||
return public.return_message(-1, 0, public.lang("Please pass the name parameter!"))
|
||||
old_remark = ""
|
||||
|
||||
from btdockerModelV2 import dk_public as dp
|
||||
stacks_info = dp.sql("stacks").where("name=?", (public.xsssec(get.name))).find()
|
||||
if not stacks_info:
|
||||
get.path = get.get("path", None)
|
||||
if get.path is None:
|
||||
return public.return_message(-1, 0, public.lang("Please pass the path parameter!"))
|
||||
|
||||
pdata = {
|
||||
"name": public.xsssec(get.name),
|
||||
"status": "1",
|
||||
"path": get.path,
|
||||
"template_id": None,
|
||||
"time": time.time(),
|
||||
"remark": public.xsssec(get.remark)
|
||||
}
|
||||
dp.sql("stacks").insert(pdata)
|
||||
else:
|
||||
old_remark = stacks_info['remark']
|
||||
dp.sql("stacks").where("name=?", (public.xsssec(get.name))).update({"remark": public.xsssec(get.remark)})
|
||||
|
||||
dp.write_log("Comments for project [{}] changed successfully [{}] --> [{}]!".format(
|
||||
get.name,
|
||||
old_remark,
|
||||
public.xsssec(get.remark)))
|
||||
return public.return_message(0, 0, public.lang("Modify successfully!"))
|
||||
except:
|
||||
public.print_log(public.get_error_info())
|
||||
0
mod/project/docker/docker_compose/__init__.py
Normal file
0
mod/project/docker/docker_compose/__init__.py
Normal file
317
mod/project/docker/docker_compose/base.py
Normal file
317
mod/project/docker/docker_compose/base.py
Normal file
@@ -0,0 +1,317 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# YakPanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 YakPanel(www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: wzz <wzz@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
# ------------------------------
|
||||
# docker模型 - docker compose 基类
|
||||
# ------------------------------
|
||||
import sys
|
||||
import time
|
||||
from typing import List
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class Compose():
|
||||
|
||||
    def __init__(self):
        # Compose executable invoked by every generated command.
        self.cmd = 'docker-compose'
        # Absolute path of the compose yaml currently targeted (set_path()).
        self.path = None
        # Default number of log lines used by the "logs" commands.
        self.tail = "100"
        # Command flavour: 0 -> shell string, 1 -> argv list (see getters).
        self.type = 0
        # Project name used by the "-p" based delete command.
        self.compose_name = None
        # Panel-local directory where generated compose projects are stored.
        self.compose_project_path = "{}/data/compose".format(public.get_panel_path())
        # Shell filter that hides the legacy "`version` is obsolete" warning.
        self.grep_version = 'grep -v "\`version\` is obsolete"'
        # Name of the API method currently executing (set_def_name()).
        self.def_name = None
        # Container id consumed by get_container_logs().
        self.container_id = None
        # Retry counter used by main.ps() when JSON parsing fails.
        self.ps_count = 0
|
||||
|
||||
    def set_container_id(self, container_id: str) -> 'Compose':
        # Target container for get_container_logs(); returns self (fluent API).
        self.container_id = container_id
        return self
|
||||
|
||||
    def get_cmd(self) -> str:
        # Current compose executable (default "docker-compose").
        return self.cmd
|
||||
|
||||
    def set_cmd(self, cmd: str) -> 'Compose':
        # Override the compose executable; returns self (fluent API).
        self.cmd = cmd
        return self
|
||||
|
||||
def set_path(self, path: str, rep: bool = False) -> 'Compose':
|
||||
if rep:
|
||||
self.path = path.replace("\'", "\\'").replace("\"", "\\\"").replace(" ", "\\ ").replace("|", "\\|")
|
||||
else:
|
||||
self.path = path
|
||||
return self
|
||||
|
||||
    def set_tail(self, tail: str) -> 'Compose':
        # Number of log lines (as a string) for the logs commands; fluent API.
        self.tail = tail
        return self
|
||||
|
||||
    def set_type(self, type: int) -> 'Compose':
        # Select command flavour: 0 = shell string, 1 = argv list.
        # (The parameter shadows the builtin `type`; name kept for caller
        # compatibility with keyword arguments.)
        self.type = type
        return self
|
||||
|
||||
    def set_compose_name(self, compose_name: str) -> 'Compose':
        # Project name used by "-p" based commands; fluent API.
        self.compose_name = compose_name
        return self
|
||||
|
||||
    def set_def_name(self, def_name: str) -> 'Compose':
        # Record which API method is executing (drives per-method behaviour
        # in exec_cmd / check_file); fluent API.
        self.def_name = def_name
        return self
|
||||
|
||||
def get_compose_up(self) -> List[str] or str:
|
||||
return self.cmd + ' -f {} up -d| {}'.format(self.path, self.grep_version)
|
||||
|
||||
def get_compose_up_remove_orphans(self) -> List[str] or str:
|
||||
if self.type == 0:
|
||||
return self.cmd + ' -f {} up -d --remove-orphans'.format(self.path)
|
||||
else:
|
||||
return [self.cmd, '-f', self.path, 'up', '-d', '--remove-orphans']
|
||||
|
||||
def get_compose_down(self) -> List[str] or str:
|
||||
if self.type == 0:
|
||||
return self.cmd + ' -f {} down'.format(self.path)
|
||||
else:
|
||||
return [self.cmd, '-f', self.path, 'down']
|
||||
|
||||
def kill_compose(self) -> List[str] or str:
|
||||
if self.type == 0:
|
||||
return self.cmd + ' -f {} kill'.format(self.path)
|
||||
else:
|
||||
return [self.cmd, '-f', self.path, 'kill']
|
||||
|
||||
def rm_compose(self) -> List[str] or str:
|
||||
if self.type == 0:
|
||||
return self.cmd + ' -f {} rm -f'.format(self.path)
|
||||
else:
|
||||
return [self.cmd, '-f', self.path, 'rm', '-f']
|
||||
|
||||
def get_compose_delete(self) -> List[str] or str:
|
||||
if self.type == 0:
|
||||
return self.cmd + ' -f {} down --volumes --remove-orphans'.format(self.path)
|
||||
else:
|
||||
return [self.cmd, '-f', self.path, 'down', '--volumes', '--remove-orphans']
|
||||
|
||||
def get_compose_delete_for_ps(self) -> List[str] or str:
|
||||
if self.type == 0:
|
||||
return self.cmd + ' -p {} down --volumes --remove-orphans'.format(self.compose_name)
|
||||
else:
|
||||
return [self.cmd, '-p', self.compose_name, 'down', '--volumes', '--remove-orphans']
|
||||
|
||||
def get_compose_restart(self) -> List[str] or str:
|
||||
if self.type == 0:
|
||||
return self.cmd + ' -f {} restart'.format(self.path)
|
||||
else:
|
||||
return [self.cmd, '-f', self.path, 'restart']
|
||||
|
||||
def get_compose_stop(self) -> List[str] or str:
    """'stop' command: shell string when type == 0, argv list otherwise."""
    if self.type != 0:
        return [self.cmd, '-f', self.path, 'stop']
    return '{} -f {} stop'.format(self.cmd, self.path)
|
||||
|
||||
def get_compose_start(self) -> List[str] or str:
    """'start' command: shell string when type == 0, argv list otherwise."""
    if self.type != 0:
        return [self.cmd, '-f', self.path, 'start']
    return '{} -f {} start'.format(self.cmd, self.path)
|
||||
|
||||
def get_compose_pull(self) -> List[str] or str:
    """'pull' command: shell string when type == 0, argv list otherwise."""
    if self.type != 0:
        return [self.cmd, '-f', self.path, 'pull']
    return '{} -f {} pull'.format(self.cmd, self.path)
|
||||
|
||||
def get_compose_logs(self) -> List[str] or str:
    """Follow-mode logs ('logs -f') limited to the last self.tail lines."""
    if self.type != 0:
        return [self.cmd, '-f', self.path, 'logs', '-f', '--tail', self.tail]
    return '{} -f {} logs -f --tail {}'.format(self.cmd, self.path, self.tail)
|
||||
|
||||
def get_tail_compose_log(self) -> List[str] or str:
    """One-shot logs (no -f) limited to the last self.tail lines."""
    if self.type != 0:
        return [self.cmd, '-f', self.path, 'logs', '--tail', self.tail]
    return '{} -f {} logs --tail {}'.format(self.cmd, self.path, self.tail)
|
||||
|
||||
def get_compose_ls(self) -> List[str] or str:
    """List all compose projects as JSON, piped through the version grep."""
    return '{} ls -a --format json| {}'.format(self.cmd, self.grep_version)
|
||||
|
||||
def get_compose_ps(self) -> List[str] or str:
    """List this project's containers as JSON, piped through the version grep."""
    return '{} -f {} ps -a --format json| {}'.format(self.cmd, self.path, self.grep_version)
|
||||
|
||||
def get_compose_config(self) -> List[str] or str:
    """Render the resolved compose config, piped through the version grep."""
    return '{} -f {} config| {}'.format(self.cmd, self.path, self.grep_version)
|
||||
|
||||
def get_container_logs(self) -> List[str] or str:
    """Tail a single container's logs; 2>&1 merges stderr into the stream."""
    return "docker logs {} --tail {} 2>&1".format(self.container_id, self.tail)
|
||||
|
||||
def wsResult(self, status: bool = True, msg: str = "", data: any = None, timestamp: int = None, code: int = 0,
             args: any = None):
    """Build the envelope dict pushed to websocket clients.

    `args` is accepted only for signature compatibility and is not used.
    `def_name` comes from the instance so the client can route the message
    back to the originating call.
    """
    import time
    if timestamp is None:
        timestamp = int(time.time())
    if msg is None:
        msg = "OK"
    return {
        "code": code,
        "status": status,
        "msg": msg,
        "data": data,
        "timestamp": timestamp,
        "def_name": self.def_name,
    }
|
||||
|
||||
# 2024/8/1 3:29 PM Build a paginated slice of *data*
def get_page(self, data, get):
    """Paginate *data* with the panel's page helper; row/p default to 20/1."""
    get.row = get.get("row", 20)
    get.p = get.get("p", 1)
    import page
    pager = page.Page()  # renamed local so the module name is not shadowed
    info = {'count': len(data), 'row': int(get.row), 'p': int(get.p), 'uri': {}, 'return_js': ''}

    result = {'page': pager.GetPage(info), 'data': []}
    taken = 0
    for idx in range(info['count']):
        if taken >= pager.ROW:
            break
        if idx < pager.SHIFT:
            continue
        taken += 1
        result['data'].append(data[idx])
    return result
|
||||
|
||||
# 2024/7/29 4:22 PM Check whether the web service is usable for domain access
def check_web_status(self):
    '''
    @name Check whether the web server is healthy enough for domain access
    @return dict{"status":True/False,"msg":"..."}
    '''
    from mod.base.web_conf import util
    webserver = util.webserver()
    # Domain access is implemented with Nginx vhosts only (None is also != "nginx").
    if webserver != "nginx":
        return public.returnResult(status=False, msg="Domain name access only supports Nginx. Please go to the software store to install Nginx or choose not to use domain name access!")

    from panelSite import panelSite
    site_obj = panelSite()
    site_obj.check_default()

    wc_err = public.checkWebConfig()
    # BUG FIX: checkWebConfig() reports failure as a non-empty error string
    # (elsewhere in this codebase "not isinstance(x, str)" means OK), so the
    # old `if not wc_err:` never fired on errors. Test for a string instead.
    if isinstance(wc_err, str):
        return public.returnResult(
            status=False,
            msg='ERROR: An error in the configuration file has been detected. Please eliminate it before proceeding. <br><br><a style="color:red;">' +
                wc_err.replace("\n", '<br>') + '</a>'
        )

    return public.return_message(0, 0, '')
|
||||
def pageResult(self, status: bool = True,
               msg: str = "",
               data: any = None,
               timestamp: int = None,
               code: int = 0,
               args: any = None,
               page: any = None,
               cpu: any = None,
               mem: any = None):
    """Build a list-style response and wrap it with public.return_message.

    Only msg/data are always included; def_name, maximum_cpu, maximum_memory
    and page are added when available. `timestamp`, `code` and `args` are
    accepted for signature compatibility but are not used.
    """
    if msg is None:
        msg = "OK"
    rs = {"msg": msg, "data": data}
    # `x is not None` instead of `not x is None` (PEP 8 idiom).
    if self.def_name is not None:
        rs["def_name"] = self.def_name
    if cpu is not None:
        rs["maximum_cpu"] = cpu
    if mem is not None:
        rs["maximum_memory"] = mem
    if page is not None:
        rs["page"] = page
    st = 0 if status else -1
    return public.return_message(st, 0, rs)
|
||||
|
||||
# 2024/6/25 2:40 PM Stream log-style command output over the websocket
def exec_logs(self, get, command, cwd=None):
    '''
    @name Stream a command's output to the websocket client line by line
    @author wzz <2024/6/25 下午2:41>
    @param get      panel request object; must carry the `_ws` websocket handle
    @param command  argv list or shell string handed to Popen
    @param cwd      working directory for the child process
    @return None -- output is pushed through get._ws, nothing is returned
    '''
    import json,select
    if self.def_name is None: self.set_def_name(get.def_name)
    from subprocess import Popen, PIPE, STDOUT

    # No websocket on the request: nothing to stream to.
    if not hasattr(get, '_ws'):
        return

    # stderr is merged into stdout so the client sees a single stream.
    p = Popen(command, stdout=PIPE, stderr=STDOUT, cwd=cwd)
    try:
        while True:
            # Check the connection first so a disconnect stops the loop promptly.
            if not get._ws.connected:
                break

            # Poll with a 10ms timeout instead of blocking on readline so the
            # disconnect check above keeps running while the child is quiet.
            readable, _, _ = select.select([p.stdout], [], [], 0.01)

            if p.stdout in readable:
                line = p.stdout.readline()
                if line:
                    try:
                        get._ws.send(json.dumps(self.wsResult(True, "{}".format(line.decode('utf-8').rstrip()))))
                    except:
                        # Send failed -- client most likely gone; stop streaming.
                        break
            elif p.poll() is not None:
                # No pending output and the child has exited: we are done.
                break

    finally:
        # Ensure the child never outlives the websocket session.
        if p.poll() is None:
            p.kill()
|
||||
|
||||
|
||||
# 2024/6/25 2:40 PM Stream log-style command output over the websocket (blocking variant)
def status_exec_logs(self, get, command, cwd=None):
    '''
    @name Stream a command's output over the websocket until the process exits
    @author wzz <2024/6/25 下午2:41>
    @param get      panel request object; the `_ws` handle is optional here
    @param command  argv list or shell string handed to Popen
    @param cwd      working directory for the child process
    @return None
    '''
    import json
    if self.def_name is None: self.set_def_name(get.def_name)
    from subprocess import Popen, PIPE, STDOUT

    # stderr is merged into stdout so the client sees a single stream.
    p = Popen(command, stdout=PIPE, stderr=STDOUT, cwd=cwd)

    while True:
        if p.poll() is not None:
            break

        # NOTE(review): despite the original "non-blocking read" comment,
        # readline() blocks here until a full line (or EOF) is available.
        line = p.stdout.readline()
        if line:
            try:
                if hasattr(get, '_ws'):
                    get._ws.send(json.dumps(self.wsResult(
                        True,
                        "{}\r\n".format(line.decode('utf-8').rstrip()),
                    )))
            except:
                # Send failure is ignored; keep draining the child's output.
                continue
        else:
            # Empty read means EOF: the child closed its output.
            break
|
||||
65
mod/project/docker/gpuMod.py
Normal file
65
mod/project/docker/gpuMod.py
Normal file
@@ -0,0 +1,65 @@
|
||||
import sys
|
||||
|
||||
from mod.project.docker.app.gpu import nvidia
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.append('/www/server/panel/class')
|
||||
|
||||
import public
|
||||
|
||||
def gpu_class():
    """Name of the GPU vendor backend in use; only 'nvidia' is implemented."""
    return 'nvidia'
|
||||
|
||||
|
||||
class main:
    """Panel-facing GPU query endpoints backed by a vendor driver."""

    def __init__(self):
        # Only NVIDIA is wired up; other vendors (e.g. AMD) would plug in here.
        self.driver = None
        if gpu_class() == 'nvidia':
            self.driver = nvidia.NVIDIA()
        # elif gpu_class() == 'amd':
        #     self.driver = amd.AMD()

    def get_all_device_info(self, get):
        """Return information for every GPU, or {} when GPUs are unsupported.

        Args:
            get: panel request object (unused).
        Returns:
            dict: common response message wrapping the driver's device list.
        """
        public.print_log('gpu info')
        if not self.driver.support:
            return public.return_message(0, 0, {})
        return public.return_message(0, 0, self.driver.get_all_device_info())

    def get_info_by_index(self, get):
        """Return driver info for the GPU selected by get.index.

        Args:
            get: panel request object carrying an integer-like `index`.
        Returns:
            dict: common response message, or an error result for a bad index.
        """
        index = 0
        if not self.driver.support:
            return public.return_message(0, 0, {})
        try:
            index = int(get.index)
        except ValueError as e:
            # BUG FIX: the error result was built but never returned, so a bad
            # index silently fell through and queried GPU 0 instead.
            return public.returnResult(False, "{} need an int: {}".format(self.get_info_by_index.__name__, e))
        return public.return_message(0, 0, self.driver.get_info_by_index(index))

    def get_system_info(self, get):
        """Return driver/system-level GPU info, or {} when unsupported.

        Args:
            get: panel request object (unused).
        Returns:
            dict: common response message wrapping the driver's system info.
        """
        if not self.driver.support:
            return public.return_message(0, 0, {})
        return public.return_message(0, 0, self.driver.get_system_info())
|
||||
0
mod/project/docker/proxy/__init__.py
Normal file
0
mod/project/docker/proxy/__init__.py
Normal file
3426
mod/project/docker/proxy/base.py
Normal file
3426
mod/project/docker/proxy/base.py
Normal file
File diff suppressed because it is too large
Load Diff
65
mod/project/docker/routetestMod.py
Normal file
65
mod/project/docker/routetestMod.py
Normal file
@@ -0,0 +1,65 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2099 yakpanel(http://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: wzz <wzz@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
import os
|
||||
# ------------------------------
|
||||
# Docker模型
|
||||
# ------------------------------
|
||||
import sys
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
os.chdir("/www/server/panel")
|
||||
import public
|
||||
|
||||
|
||||
class main():
    """Routing smoke-test model used to verify HTTP and websocket plumbing."""

    def returnResult(self, get):
        '''
        @name Model test endpoint, request path:
            /mod/docker/routetestMod/returnResult
            Accepts both form-data and json.

            Returns json built with the common response helper.
        @author wzz <2024/2/19 上午 10:37>
        @param "data":{"参数名":""} <数据类型> parameter description
        @return dict{"status":True/False,"msg":"message"}
        '''
        print(public.returnResult(msg="hello"))
        return public.returnResult(msg="hello")

    def wsRequest(self, get):
        """
        Websocket test endpoint: connect to ws://192.168.x.x:8888/ws_mod,
        send {"x-http-token":"token"} first, then a routing message such as
        {"mod_name":"docker","sub_mod_name":"routetest","def_name":"wsRequest","ws_callback":"111"}
        ({"mod_name":"model","sub_mod_name":"sub module","def_name":"function","ws_callback":"required, pass 111", ...}).

        Note: when testing with apipost, comment out the comReturn and csrf
        checks on the ws model route in __init__.py first.
        @param get: panel request object; `_ws` must be attached by the router
        @return: True
        """
        if not hasattr(get, "_ws"):
            return True

        import time
        sent = 0  # renamed from `sum` to avoid shadowing the builtin
        while sent < 10:
            time.sleep(0.2)
            get._ws.send("hello\r\n")
            sent += 1

        return True
|
||||
|
||||
|
||||
# Manual smoke test: run this module directly to exercise returnResult.
if __name__ == '__main__':
    main().returnResult({})
|
||||
0
mod/project/java/__init__.py
Normal file
0
mod/project/java/__init__.py
Normal file
1169
mod/project/java/groupMod.py
Normal file
1169
mod/project/java/groupMod.py
Normal file
File diff suppressed because it is too large
Load Diff
34
mod/project/java/group_script.py
Normal file
34
mod/project/java/group_script.py
Normal file
@@ -0,0 +1,34 @@
|
||||
import sys
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
from mod.project.java.groupMod import Group
|
||||
|
||||
|
||||
def start_group(g_id: str):
    """Start every project in the Java project group *g_id*."""
    Group(g_id).real_run_start()
|
||||
|
||||
|
||||
def stop_group(g_id: str):
    """Stop every project in the Java project group *g_id*."""
    Group(g_id).real_run_stop()
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Usage: python group_script.py <start|stop> <group_id>
    if len(sys.argv) >= 3:
        action = sys.argv[1]
        group_id = sys.argv[2]
    else:
        print("Parameter error")
        # sys.exit instead of the site-provided exit(): exit() is only
        # injected by the site module and may be absent (e.g. python -S).
        sys.exit(1)

    if action == "start":
        start_group(group_id)
    else:
        # Any action other than "start" is treated as stop (original behavior).
        stop_group(group_id)
|
||||
|
||||
|
||||
575
mod/project/java/java_web_conf.py
Normal file
575
mod/project/java/java_web_conf.py
Normal file
@@ -0,0 +1,575 @@
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import sys
|
||||
from typing import List, Optional, Union, Tuple
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
from mod.base.web_conf.util import listen_ipv6, get_log_path, GET_CLASS, service_reload
|
||||
from mod.base.web_conf import NginxDomainTool, ApacheDomainTool
|
||||
|
||||
|
||||
class JavaNginxTool:
    """Generates and maintains the per-project Nginx vhost files for Java projects."""

    def __init__(self):
        self._panel_path = "/www/server/panel"
        self._vhost_path = "{}/vhost".format(self._panel_path)
        self._nginx_bak_path = "/var/tmp/springboot/nginx_conf_backup"
        if not os.path.exists(self._nginx_bak_path):
            # BUG FIX: 0o700 instead of 0o600 -- a directory needs the execute
            # bit to be traversable, otherwise even the owner cannot copy the
            # backup files kept inside it.
            os.makedirs(self._nginx_bak_path, 0o700)

    def set_nginx_config(self, project_data: dict, domains: List[Tuple[str, Union[str, int]]],
                         use_ssl: bool = False, force_ssl=False):
        """Render the project's Nginx conf from the panel template and write it.

        @param project_data  project row incl. name / path / project_config
        @param domains       (domain, port) pairs to listen on
        @param use_ssl       also emit the 443 ssl/http2/http3 listeners
        @param force_ssl     add the HTTP->HTTPS redirect block
        @return None on success, error string on failure
        """
        if use_ssl:
            use_http2_on = public.is_change_nginx_http2()
            use_http3 = public.is_nginx_http3()
        else:
            use_http2_on = False
            use_http3 = False

        project_config = project_data["project_config"]
        # springboot serves relative to the jar's directory; others use the project path.
        if project_config['java_type'] == "springboot":
            project_path = project_data["project_config"]["jar_path"]
        else:
            project_path = project_data["path"]
        if os.path.isfile(project_path):
            project_path = os.path.dirname(project_path)

        port_set = set()
        domain_set = set()
        use_ipv6 = listen_ipv6()
        listen_ports_list = []
        for d, p in domains:
            if str(p) == "443":  # 443 is emitted separately with the SSL listeners below
                continue
            if str(p) not in port_set:
                listen_ports_list.append(" listen {};".format(str(p)))
                if use_ipv6:
                    listen_ports_list.append(" listen [::]:{};".format(str(p)))

            port_set.add(str(p))
            domain_set.add(d)

        if use_ssl:
            # Newer nginx deprecates "listen ... http2" in favor of "http2 on;".
            if not use_http2_on:
                http2 = " http2"
            else:
                http2 = ""
                listen_ports_list.append(" http2 on;")

            listen_ports_list.append(" listen 443 ssl{};".format(http2))
            if use_ipv6:
                listen_ports_list.append(" listen [::]:443 ssl{};".format(http2))

            if use_http3:
                listen_ports_list.append(" listen 443 quic;")
                if use_ipv6:
                    listen_ports_list.append(" listen [::]:443 quic;")

        listen_ports = "\n".join(listen_ports_list).strip()

        static_conf = self._build_static_conf(project_config, project_path)
        proxy_conf = self._build_proxy_conf(project_config)
        ssl_conf = "#error_page 404/404.html;"
        if use_ssl:
            ssl_conf += "\n" + self._build_ssl_conf(project_config, use_http3=use_http3, force_ssl=force_ssl)

        nginx_template_file = "{}/template/nginx/java_mod_http.conf".format(self._vhost_path)
        nginx_conf_file = "{}/nginx/java_{}.conf".format(self._vhost_path, project_data["name"])

        nginx_template = public.ReadFile(nginx_template_file)
        if not isinstance(nginx_template, str):
            return "读取模版文件失败"

        nginx_conf = nginx_template.format(
            listen_ports=listen_ports,
            domains=" ".join(domain_set),
            site_path=project_path,
            site_name=project_data["name"],
            panel_path=self._panel_path,
            log_path=get_log_path(),
            ssl_conf=ssl_conf,
            static_conf=static_conf,
            proxy_conf=proxy_conf,
        )
        rewrite_file = "{}/rewrite/java_{}.conf".format(self._vhost_path, project_data["name"])
        if not os.path.exists(rewrite_file):
            public.writeFile(rewrite_file, '# 请将伪静态规则或自定义NGINX配置填写到此处\n')
        apply_check = "{}/nginx/well-known/{}.conf".format(self._vhost_path, project_data["name"])
        if not os.path.exists(os.path.dirname(apply_check)):
            # BUG FIX: same 0o600 -> 0o700 directory-permission fix as __init__.
            os.makedirs(os.path.dirname(apply_check), 0o700)
        if not os.path.exists(apply_check):
            public.writeFile(apply_check, '')

        public.writeFile(nginx_conf_file, nginx_conf)
        return None

    @staticmethod
    def _build_proxy_conf(project_config: dict) -> str:
        """Render the #PROXY-START/#PROXY-END location blocks for each proxy entry."""
        if "proxy_info" not in project_config:
            return ""

        proxy_info = project_config["proxy_info"]
        proxy_conf_list = []
        if not proxy_info:
            return ""
        ng_proxy = ''' #PROXY-START{proxy_dir}
location {proxy_dir} {{{rewrite}
proxy_pass {proxy_url};
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;{add_headers}
proxy_set_header REMOTE-HOST $remote_addr;
add_header X-Cache $upstream_cache_status;
proxy_set_header X-Host $host:$server_port;
proxy_set_header X-Scheme $scheme;
proxy_connect_timeout 30s;
proxy_read_timeout 86400s;
proxy_send_timeout 30s;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
}}
#PROXY-END{proxy_dir}'''
        for i in proxy_info:
            # NOTE(review): entries with a truthy "status" are skipped --
            # presumably it flags a disabled/removed proxy; confirm with callers.
            if i.get("status", False):
                continue
            rewrite = ""
            if "rewrite" in i and i["rewrite"].get("status", False):
                rewrite = i["rewrite"]
                src_path = i["src_path"]
                if not src_path.endswith("/"):
                    src_path += "/"
                target_path = rewrite["target_path"]
                if target_path.endswith("/"):
                    # BUG FIX: was `target_path += target_path[:-1]`, which
                    # duplicated the path ("/a/" -> "/a//a") instead of
                    # stripping the trailing slash.
                    target_path = target_path[:-1]

                rewrite = "\n rewrite ^{}(.*)$ {}/$1 break;".format(src_path, target_path)

            add_headers = ""
            if "add_headers" in i:
                header_tmp = " add_header {} {};"
                add_headers_list = [header_tmp.format(h["k"], h["v"]) for h in i["add_headers"] if
                                    "k" in h and "v" in h]
                add_headers = "\n".join(add_headers_list)
                if add_headers:
                    add_headers = "\n" + add_headers

            proxy_conf_list.append(ng_proxy.format(
                proxy_dir=i["proxy_dir"],
                rewrite=rewrite,
                add_headers=add_headers,
                proxy_url="http://127.0.0.1:{}".format(i["proxy_port"]),
            ))

        return ("\n".join(proxy_conf_list) + "\n").lstrip()

    @staticmethod
    def _build_static_conf(project_config: dict, default_path: str) -> str:
        """Render the `location /` static-files block for springboot projects.

        Returns "" when static serving is disabled or not configured.
        """
        if project_config['java_type'] == "springboot" and "static_info" in project_config:
            static_info = project_config["static_info"]
            if not static_info.get("status", False):
                return ""
            index_str = "index.html"
            index = static_info.get("index", "")
            if index:
                if isinstance(index, list):
                    index_str = " ".join(index)
                elif isinstance(index, str):
                    index_str = " ".join([i.strip() for i in index.split(",") if i.strip()])

            path = static_info.get("path")
            if not path:
                path = default_path
            try_file = ''
            # try_files enables SPA-style fallback to /index.html.
            if static_info.get("use_try_file", True):
                try_file = " try_files $uri $uri/ /index.html;\n"
            static_conf = (
                "location / {\n"
                " root %s;\n"
                " index %s;\n%s"
                " }"
            ) % (path, index_str, try_file)

            return static_conf
        return ""

    def _build_ssl_conf(self, project_config: dict, use_http3=False, force_ssl=False) -> str:
        """Render the certificate/cipher stanza (plus optional redirect and HTTP/3 header)."""
        force_ssl_str = ""
        if force_ssl:
            force_ssl_str = '''
#HTTP_TO_HTTPS_START
if ($server_port !~ 443){
rewrite ^(/.*)$ https://$host$1 permanent;
}
#HTTP_TO_HTTPS_END'''
        http3_header = ""
        if use_http3:
            http3_header = '''\n add_header Alt-Svc 'quic=":443"; h3=":443"; h3-27=":443";h3-29=":443";h3-25=":443"; h3-T050=":443"; h3-Q050=":443";h3-Q049=":443";h3-Q048=":443"; h3-Q046=":443"; h3-Q043=":443"';'''

        return ''' ssl_certificate {vhost_path}/cert/{project_name}/fullchain.pem;
ssl_certificate_key {vhost_path}/cert/{project_name}/privkey.pem;
ssl_protocols TLSv1.1 TLSv1.2 TLSv1.3;
ssl_ciphers EECDH+CHACHA20:EECDH+CHACHA20-draft:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5;
ssl_prefer_server_ciphers on;
ssl_session_cache shared:SSL:10m;
ssl_session_timeout 10m;
add_header Strict-Transport-Security "max-age=31536000";{http3_header}
error_page 497 https://$host$request_uri;{force_ssl}'''.format(
            vhost_path=self._vhost_path,
            project_name=project_config["project_name"],
            http3_header=http3_header,
            force_ssl=force_ssl_str,
        )

    def open_nginx_config_file(self, project_data: dict, domains: List[Tuple[str, str]], ) -> Optional[str]:
        """Re-enable the project's vhost: restore the backup if valid, else regenerate."""
        project_name = project_data["name"]
        back_path = "{}/{}".format(self._nginx_bak_path, project_name)
        target_file = "{}/nginx/java_{}.conf".format(self._vhost_path, project_name)
        if os.path.isfile(target_file):
            return

        if os.path.isfile(back_path):
            shutil.copyfile(back_path, target_file)
        if os.path.isfile(target_file):
            NginxDomainTool("java_").nginx_set_domain(project_name, *domains)
            error_msg = public.checkWebConfig()
            if not isinstance(error_msg, str):  # non-string means the config passed the check
                service_reload()
                return

        # Backup missing or invalid: rebuild the config from scratch.
        res = self.set_nginx_config(project_data, domains, use_ssl=False)
        if not res:
            service_reload()
        return res

    def close_nginx_config_file(self, project_data: dict) -> None:
        """Disable the project's vhost by moving it into the backup directory."""
        project_name = project_data["name"]
        back_path = "{}/{}".format(self._nginx_bak_path, project_name)
        target_file = "{}/nginx/java_{}.conf".format(self._vhost_path, project_name)
        if not os.path.isfile(target_file):
            return

        if os.path.isfile(back_path):
            os.remove(back_path)

        shutil.move(target_file, back_path)
        service_reload()

    def exists_nginx_ssl(self, project_name):
        """
        Report whether the project's Nginx conf has SSL / forced-SSL enabled.

        @return (is_ssl, is_force_ssl)
        """
        config_file = "{}/nginx/java_{}.conf".format(self._vhost_path, project_name)
        if not os.path.exists(config_file):
            return False, False

        config_body = public.readFile(config_file)
        # BUG FIX: the check was inverted (`if isinstance(...)`), which bailed
        # out on every *successful* read and always reported (False, False).
        # exists_apache_ssl below shows the intended non-string-means-failure test.
        if not isinstance(config_body, str):
            return False, False

        is_ssl, is_force_ssl = False, False
        if config_body.find('ssl_certificate') != -1:
            is_ssl = True
        if config_body.find('HTTP_TO_HTTPS_START') != -1:
            is_force_ssl = True
        return is_ssl, is_force_ssl

    def set_static_path(self, project_data: dict) -> Optional[Union[bool, str]]:
        """Insert or replace the #STATIC-START/#STATIC-END section of the conf.

        @return None on success, error string when the replacement fails the
                syntax check, False when no insertion anchor is found.
        """
        project_path = project_data["project_config"]["jar_path"]
        static_str = self._build_static_conf(project_data["project_config"], project_path)
        ng_file = "{}/nginx/java_{}.conf".format(self._vhost_path, project_data["name"])
        ng_conf = public.readFile(ng_file)
        if not isinstance(ng_conf, str):
            return "配置文件读取错误"

        static_conf = "#STATIC-START 静态资源相关配置\n {}\n #STATIC-END".format(static_str)
        rep_static = re.compile(r"#STATIC-START(.*\n){2,9}\s*#STATIC-END.*")
        res = rep_static.search(ng_conf)
        if res:
            # An existing static section: replace it in place.
            new_ng_conf = ng_conf.replace(res.group(), static_conf)
            public.writeFile(ng_file, new_ng_conf)
            error_msg = public.checkWebConfig()
            if not isinstance(error_msg, str):  # non-string means the config passed the check
                service_reload()
                return None
            else:
                # Roll back to the previous config and surface the checker output.
                public.writeFile(ng_file, ng_conf)
                return 'WEB服务器配置配置文件错误ERROR:<br><font style="color:red;">' + \
                       error_msg.replace("\n", '<br>') + '</font>'

        # No existing section: try candidate anchors in order of preference.
        rep_list = [
            (re.compile(r"\s*#PROXY-LOCAl-START.*", re.M), True),  # before the local proxy marker
            (re.compile(r"\s*#REWRITE-END.*", re.M), False),  # after the rewrite section
            (re.compile(r"\s*#SSL-END.*", re.M), False),  # after the SSL section
        ]

        # Insert at the anchor; roll back if nginx rejects the resulting file.
        def set_by_rep_idx(tmp_rep: re.Pattern, use_start: bool) -> bool:
            tmp_res = tmp_rep.search(ng_conf)
            if not tmp_res:
                return False
            if use_start:
                new_conf = ng_conf[:tmp_res.start()] + static_conf + tmp_res.group() + ng_conf[tmp_res.end():]
            else:
                new_conf = ng_conf[:tmp_res.start()] + tmp_res.group() + static_conf + ng_conf[tmp_res.end():]

            public.writeFile(ng_file, new_conf)
            if public.get_webserver() == "nginx" and isinstance(public.checkWebConfig(), str):
                public.writeFile(ng_file, ng_conf)
                return False
            return True

        for r, s in rep_list:
            if set_by_rep_idx(r, s):
                service_reload()
                return None
        else:
            # No anchor matched anywhere in the file.
            return False
|
||||
|
||||
|
||||
class JavaApacheTool:
    """Generates and maintains the per-project Apache vhost files for Java projects."""

    def __init__(self):
        self._panel_path = "/www/server/panel"
        self._vhost_path = "{}/vhost".format(self._panel_path)
        self._apache_bak_path = "/var/tmp/springboot/httpd_conf_backup"
        if not os.path.exists(self._apache_bak_path):
            # NOTE(review): 0o600 on a directory lacks the execute bit, so the
            # backup dir cannot be traversed even by its owner -- confirm
            # whether 0o700 was intended.
            os.makedirs(self._apache_bak_path, 0o600)

    def set_apache_config_for_ssl(self, project_data):
        """Re-render the Apache conf with SSL enabled, using the project's current domains."""
        domains = public.M('domain').where('pid=?', (project_data["id"],)).select()
        domain_list = [(i["name"], i["port"]) for i in domains]
        return self.set_apache_config(project_data, domain_list, use_ssl=True)

    def set_apache_config(self, project_data: dict, domains: List[Tuple[str, Union[str, int]]],
                          use_ssl: bool = False, force_ssl: bool = False):
        """Render one VirtualHost per distinct port (plus a 443 SSL host when
        use_ssl is set) from the panel template and write the project's conf.

        @param project_data  project row incl. name / path / project_config
        @param domains       (domain, port) pairs
        @param use_ssl       also emit the 443 SSL VirtualHost
        @param force_ssl     add the HTTP->HTTPS rewrite block
        @return None on success, error string on failure
        """
        name = project_data['name']
        port_set = set()
        domain_set = set()
        for d, p in domains:
            port_set.add(str(p))
            domain_set.add(d)

        domains_str = ' '.join(domain_set)
        project_config = project_data["project_config"]
        # springboot serves relative to the jar's directory; others use the project path.
        if project_config['java_type'] == "springboot":
            project_path = project_data["project_config"]["jar_path"]
        else:
            project_path = project_data["path"]
        if os.path.isfile(project_path):
            project_path = os.path.dirname(project_path)

        apache_template_file = "{}/template/apache/java_mod_http.conf".format(self._vhost_path)
        apache_conf_file = "{}/apache/java_{}.conf".format(self._vhost_path, name)

        apache_template = public.ReadFile(apache_template_file)
        if not isinstance(apache_template, str):
            return "读取模版文件失败"

        apache_conf_list = []
        proxy_conf = self._build_proxy_conf(project_config)
        # One plain-HTTP VirtualHost per distinct port.
        for p in port_set:
            apache_conf_list.append(apache_template.format(
                site_path=project_path,
                server_name='{}.{}'.format(p, project_path),
                domains=domains_str,
                log_path=get_log_path(),
                server_admin='admin@{}'.format(name),
                port=p,
                ssl_config='',
                project_name=name,
                proxy_conf=proxy_conf,
            ))

        if use_ssl:
            ssl_config = '''SSLEngine On
SSLCertificateFile {vhost_path}/cert/{project_name}/fullchain.pem
SSLCertificateKeyFile {vhost_path}/cert/{project_name}/privkey.pem
SSLCipherSuite EECDH+CHACHA20:EECDH+CHACHA20-draft:EECDH+AES128:RSA+AES128:EECDH+AES256:RSA+AES256:EECDH+3DES:RSA+3DES:!MD5
SSLProtocol All -SSLv2 -SSLv3 -TLSv1
SSLHonorCipherOrder On'''.format(project_name=name, vhost_path=public.get_vhost_path())
            if force_ssl:
                ssl_config += '''
#HTTP_TO_HTTPS_START
<IfModule mod_rewrite.c>
RewriteEngine on
RewriteCond %{SERVER_PORT} !^443$
RewriteRule (.*) https://%{SERVER_NAME}$1 [L,R=301]
</IfModule>
#HTTP_TO_HTTPS_END'''

            # Dedicated 443 VirtualHost carrying the SSL stanza.
            apache_conf_list.append(apache_template.format(
                site_path=project_path,
                server_name='{}.{}'.format("443", project_path),
                domains=domains_str,
                log_path=get_log_path(),
                server_admin='admin@{}'.format(name),
                port="443",
                ssl_config=ssl_config,
                project_name=name,
                proxy_conf=proxy_conf,
            ))

        apache_conf = '\n'.join(apache_conf_list)
        public.writeFile(apache_conf_file, apache_conf)
        # Make sure Apache actually listens on every port used above.
        ApacheDomainTool.apache_add_ports(*port_set)
        return None

    @staticmethod
    def _build_proxy_conf(project_config: dict) -> str:
        """Render the #PROXY-START/#PROXY-END mod_proxy blocks for each proxy entry."""
        if "proxy_info" not in project_config:
            return ""

        proxy_info = project_config["proxy_info"]
        proxy_conf_list = []
        if not proxy_info:
            return ""
        ap_proxy = ''' #PROXY-START{proxy_dir}
<IfModule mod_proxy.c>
ProxyRequests Off
SSLProxyEngine on
ProxyPass {proxy_dir} {proxy_url}/
ProxyPassReverse {proxy_dir} {proxy_url}/
RequestHeader set Host "%{Host}e"
RequestHeader set X-Real-IP "%{REMOTE_ADDR}e"
RequestHeader set X-Forwarded-For "%{X-Forwarded-For}e"
RequestHeader setifempty X-Forwarded-For "%{REMOTE_ADDR}e"
</IfModule>
#PROXY-END{proxy_dir}'''

        for i in proxy_info:
            # NOTE(review): entries with a truthy "status" are skipped --
            # presumably it flags a disabled/removed proxy; confirm with callers.
            if i.get("status", False):
                continue

            proxy_conf_list.append(ap_proxy.format(
                proxy_dir=i["proxy_dir"],
                proxy_url="http://127.0.0.1:{}".format(i["proxy_port"]),
            ))

        return ("\n".join(proxy_conf_list) + "\n").lstrip()

    def open_apache_config_file(self, project_data: dict, domains: List[Tuple[str, str]]) -> Optional[str]:
        """Re-enable the project's vhost: restore the backup if valid, else regenerate."""
        project_name = project_data["name"]
        back_path = "{}/{}".format(self._apache_bak_path, project_name)
        target_file = "{}/apache/java_{}.conf".format(self._vhost_path, project_name)
        if os.path.isfile(target_file):
            return

        if os.path.isfile(back_path):
            shutil.copyfile(back_path, target_file)
        if os.path.isfile(target_file):
            ApacheDomainTool("java_").apache_set_domain(project_name, *domains)
            error_msg = public.checkWebConfig()
            if not isinstance(error_msg, str):  # non-string means the config passed the check
                service_reload()
                return

        # Backup missing or invalid: rebuild the config from scratch.
        res = self.set_apache_config(
            project_data,
            domains=domains,
            use_ssl=False,
        )

        if not res:
            service_reload()
        return res

    def close_apache_config_file(self, project_data: dict) -> None:
        """Disable the project's vhost by moving it into the backup directory."""
        project_name = project_data["name"]
        back_path = "{}/{}".format(self._apache_bak_path, project_name)
        target_file = "{}/apache/java_{}.conf".format(self._vhost_path, project_name)
        if not os.path.isfile(target_file):
            return

        if os.path.isfile(back_path):
            os.remove(back_path)

        shutil.move(target_file, back_path)
        service_reload()

    def exists_apache_ssl(self, project_name) -> Tuple[bool, bool]:
        """
        Report whether the project's Apache conf has SSL / forced-SSL enabled.

        @return (is_ssl, is_force_ssl)
        """
        config_file = "{}/apache/java_{}.conf".format(self._vhost_path, project_name)
        if not os.path.exists(config_file):
            return False, False

        config_body = public.readFile(config_file)
        if not isinstance(config_body, str):
            return False, False

        is_ssl, is_force_ssl = False, False
        if config_body.find('SSLCertificateFile') != -1:
            is_ssl = True
        if config_body.find('HTTP_TO_HTTPS_START') != -1:
            is_force_ssl = True
        return is_ssl, is_force_ssl
|
||||
|
||||
|
||||
class JvavWebConfig:
    """Facade that applies a Java project's web config to both Nginx and
    Apache, but only reports errors from the currently active web server.

    NOTE(review): the class name looks like a typo for ``JavaWebConfig``;
    kept unchanged because external callers may reference it.
    """

    def __init__(self):
        # Fixed internal attribute typo (onj -> obj); these are private to
        # the class, all in-class uses updated.
        self._ng_conf_obj = JavaNginxTool()
        self._ap_conf_obj = JavaApacheTool()
        # Compared against "nginx" / "apache" below.
        self.ws_type = public.get_webserver()

    def create_config(self, project_data: dict, domains: List[Tuple[str, Union[str, int]]],
                      use_ssl: bool = False, force_ssl=False):
        """Write vhost configs for both servers.

        Returns the active server's error result (if any); otherwise
        reloads the web service and returns None.
        """
        ng_res = self._ng_conf_obj.set_nginx_config(project_data, domains, use_ssl, force_ssl=force_ssl)
        ap_res = self._ap_conf_obj.set_apache_config(project_data, domains, use_ssl, force_ssl=force_ssl)
        if self.ws_type == "nginx" and ng_res:
            return ng_res
        elif self.ws_type == "apache" and ap_res:
            return ap_res
        service_reload()

    def _open_config_file(self, project_data: dict):
        """Re-create both servers' config files from the project's bound
        domains; returns an error string on failure."""
        domain_list = public.M('domain').where('pid=?', (project_data["id"],)).field("name,port").select()
        domains = [(i["name"], str(i["port"])) for i in domain_list]
        if not domains:
            return "域名不能为空"
        ng_res = self._ng_conf_obj.open_nginx_config_file(project_data, domains)
        ap_res = self._ap_conf_obj.open_apache_config_file(project_data, domains)
        if self.ws_type == "nginx" and ng_res:
            return ng_res
        elif self.ws_type == "apache" and ap_res:
            return ap_res

    def _close_apache_config_file(self, project_data: dict) -> None:
        """Retire the vhost config files of BOTH web servers (despite the
        name, this also closes the Nginx config)."""
        self._ap_conf_obj.close_apache_config_file(project_data)
        self._ng_conf_obj.close_nginx_config_file(project_data)

    def _set_domain(self, project_data: dict, domains: List[Tuple[str, str]]) -> Optional[str]:
        """Apply the domain list to both servers; return the active server's
        error, if any."""
        ng_res = NginxDomainTool("java_").nginx_set_domain(project_data["name"], *domains)
        ap_res = ApacheDomainTool("java_").apache_set_domain(project_data["name"], *domains)
        if self.ws_type == "nginx" and ng_res:
            return ng_res
        elif self.ws_type == "apache" and ap_res:
            return ap_res

    def _get_ssl_status(self, project_name) -> Tuple[bool, bool]:
        """Return ``(is_ssl, is_force_ssl)`` for the active web server;
        ``(False, False)`` for any other server type."""
        if self.ws_type == "nginx":
            return self._ng_conf_obj.exists_nginx_ssl(project_name)
        elif self.ws_type == "apache":
            return self._ap_conf_obj.exists_apache_ssl(project_name)
        return False, False

    def _set_static_path(self, project_data: dict):
        """Configure the static-file path; only supported on Nginx.

        Returns None on success, an error string otherwise. When the
        underlying tool reports False, the web config is validated and the
        config files are re-created as a fallback.
        """
        if self.ws_type == "nginx":
            res = self._ng_conf_obj.set_static_path(project_data)
            if res is None:
                return None
            elif res is False:
                err_msg = public.checkWebConfig()
                if isinstance(err_msg, str):
                    return 'WEB服务器配置配置文件错误ERROR:<br><font style="color:red;">' + \
                        err_msg.replace("\n", '<br>') + '</font>'

                return self._open_config_file(project_data)
            else:
                return res
        return "只支持nginx设置静态路由"
||||
BIN
mod/project/java/jmxquery/JMXQuery-0.1.8.jar
Normal file
BIN
mod/project/java/jmxquery/JMXQuery-0.1.8.jar
Normal file
Binary file not shown.
215
mod/project/java/jmxquery/__init__.py
Normal file
215
mod/project/java/jmxquery/__init__.py
Normal file
@@ -0,0 +1,215 @@
|
||||
#!/usr/bin/env python3
|
||||
|
||||
"""
|
||||
Python interface to JMX. Uses local jar to pass commands to JMX and read JSON
|
||||
results returned.
|
||||
"""
|
||||
|
||||
import subprocess
|
||||
import os
|
||||
import json
|
||||
from typing import List
|
||||
from enum import Enum
|
||||
import logging
|
||||
|
||||
# Full path to the bundled JMXQuery jar (shipped next to this module).
JAR_PATH = os.path.dirname(os.path.realpath(__file__)) + '/JMXQuery-0.1.8.jar'
# Default Java executable; 'java' resolves to the machine's default JVM.
DEFAULT_JAVA_PATH = 'java'
# Default timeout for running the jar, in seconds.
DEFAULT_JAR_TIMEOUT = 10

logger = logging.getLogger(__name__)
|
||||
|
||||
class MetricType(Enum):
    """Classification of a JMX metric value.

    COUNTER: a monotonically meaningful cumulative value.
    GAUGE: a point-in-time value.
    """
    COUNTER = 'counter'
    GAUGE = 'gauge'
||||
class JMXQuery:
    """A JMX query describing one MBean attribute/value to fetch from the JVM.

    The ``mBeanName`` can contain wildcards (e.g. ``*:*``) to pull multiple
    metrics at once. ``metric_name``/``metric_labels`` optionally override
    the metric name generated from the MBean path. Instances are also used
    as result carriers: the jar fills in ``value``/``value_type``.
    """

    def __init__(self,
                 mBeanName: str,
                 attribute: str = None,
                 attributeKey: str = None,
                 value: object = None,
                 value_type: str = None,
                 metric_name: str = None,
                 metric_labels: dict = None):
        # Parameter names kept as-is (camelCase) for interface compatibility.
        self.mBeanName = mBeanName
        self.attribute = attribute
        self.attributeKey = attributeKey
        self.value = value
        self.value_type = value_type
        self.metric_name = metric_name
        self.metric_labels = metric_labels

    def _labels_csv(self) -> str:
        """Render metric_labels as ``k1=v1,k2=v2`` in insertion order."""
        return ",".join("{}={}".format(k, v) for k, v in self.metric_labels.items())

    def to_query_string(self) -> str:
        """Build the query string passed on the command line to the jar.

        Format: ``[metric_name[<k=v,...>]==]{mBeanName}[/{attribute}[/{attributeKey}]]``

        Example: ``java.lang:type=Memory/HeapMemoryUsage/init``
        """
        query = ""
        if self.metric_name:
            query += self.metric_name
            # Truthiness covers both the None and the empty-dict case.
            if self.metric_labels:
                query += "<" + self._labels_csv() + ">"
            query += "=="

        query += self.mBeanName
        if self.attribute:
            query += "/" + self.attribute
        if self.attributeKey:
            query += "/" + self.attributeKey

        return query

    def to_string(self):
        """Human-readable form: metric name (with labels) when set, else the
        MBean path, followed by ``= value (type)``."""
        if self.metric_name:
            string = self.metric_name
            if self.metric_labels:
                string += " {" + self._labels_csv() + "}"
        else:
            string = self.mBeanName
            if self.attribute:
                string += "/" + self.attribute
            if self.attributeKey:
                string += "/" + self.attributeKey

        return string + " = " + str(self.value) + " (" + self.value_type + ")"
|
||||
class JMXConnection(object):
    """Connects to a JMX endpoint by shelling out to the bundled jar and
    parsing the JSON it prints."""

    def __init__(self, connection_uri: str, jmx_username: str = None, jmx_password: str = None, java_path: str = DEFAULT_JAVA_PATH):
        """
        Creates an instance bound to a specific JMX endpoint.

        :param connection_uri: JMX connection URL,
            e.g. service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi
        :param jmx_username: (Optional) Username if the endpoint is secured
        :param jmx_password: (Optional) Password if the endpoint is secured
        :param java_path: (Optional) Alternative Java executable;
            default 'java' uses the machine's default JVM
        """
        self.connection_uri = connection_uri
        self.jmx_username = jmx_username
        self.jmx_password = jmx_password
        self.java_path = java_path

    def __run_jar(self, queries: List[JMXQuery], timeout) -> List[JMXQuery]:
        """Run the jar with the given queries and parse its JSON output.

        :param queries: queries to serialize onto the command line
        :param timeout: seconds before the subprocess is aborted
        :return: parsed JMXQuery results (empty on timeout)
        :raises subprocess.CalledProcessError: when the jar exits non-zero
        """
        command = [self.java_path, '-jar', JAR_PATH, '-url', self.connection_uri, "-json"]
        if self.jmx_username:
            command.extend(["-u", self.jmx_username, "-p", self.jmx_password])

        # Each query is terminated by ';' (including the last one).
        query_string = "".join(q.to_query_string() + ";" for q in queries)
        command.extend(["-q", query_string])
        logger.debug("Running command: " + str(command))

        json_output = "[]"
        try:
            output = subprocess.run(command,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.PIPE,
                                    timeout=timeout,
                                    check=True)
            json_output = output.stdout.decode('utf-8')
        except subprocess.TimeoutExpired as err:
            # Timeout is logged but not re-raised: an empty result is returned.
            logger.error("Error calling JMX, Timeout of " + str(err.timeout) + " Expired: " + err.output.decode('utf-8'))
        except subprocess.CalledProcessError as err:
            logger.error("Error calling JMX: " + err.output.decode('utf-8'))
            raise err

        logger.debug("JSON Output Received: " + json_output)
        return self.__load_from_json(json_output)

    def __load_from_json(self, jsonOutput: str) -> List[JMXQuery]:
        """Parse the jar's JSON array into JMXQuery result objects.

        :param jsonOutput: JSON array text printed by the jar
        :return: list of populated JMXQuery instances
        """
        # Strip pretty-printing whitespace; replace() is a no-op when the
        # characters are absent, so no containment pre-check is needed.
        jsonOutput = jsonOutput.replace("\n", "").replace("\t", "")
        metrics = []
        for jsonMetric in json.loads(jsonOutput):
            # mBeanName/attribute/attributeType are required (KeyError if
            # missing, as before); the rest default to None via .get().
            metrics.append(JMXQuery(
                jsonMetric['mBeanName'],
                jsonMetric['attribute'],
                jsonMetric.get('attributeKey'),
                jsonMetric.get('value'),
                jsonMetric['attributeType'],
                jsonMetric.get('metricName'),
                jsonMetric.get('metricLabels'),
            ))
        return metrics

    def query(self, queries: List[JMXQuery], timeout=DEFAULT_JAR_TIMEOUT) -> List[JMXQuery]:
        """
        Run a list of JMX queries against the JVM and return the results.

        :param queries: JMXQuerys to query the JVM for
        :param timeout: seconds to allow the jar to run
        :return: JMXQuerys found in the JVM with their current values
        """
        return self.__run_jar(queries, timeout)
||||
3252
mod/project/java/projectMod.py
Normal file
3252
mod/project/java/projectMod.py
Normal file
File diff suppressed because it is too large
Load Diff
600
mod/project/java/project_update.py
Normal file
600
mod/project/java/project_update.py
Normal file
@@ -0,0 +1,600 @@
|
||||
import copy
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import json
|
||||
import socket
|
||||
import time
|
||||
import traceback
|
||||
|
||||
import psutil
|
||||
import errno
|
||||
|
||||
from typing import Optional, List
|
||||
from threading import Thread
|
||||
from urllib3.util import parse_url, Url
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
|
||||
|
||||
from mod.base import RealServer
|
||||
from mod.base import json_response
|
||||
from mod.project.java.projectMod import main as java_mod
|
||||
from mod.project.java import utils
|
||||
|
||||
|
||||
class ProjectUpdate:
|
||||
|
||||
    def __init__(self, project_name: str, new_jar: str, new_port: int = None, run_time: int = None):
        """Prepare a Java (Spring Boot) project update session.

        :param project_name: site/project name as stored in the panel DB
        :param new_jar: absolute path of the replacement jar
        :param new_port: preferred port for the new instance (rolling update)
        :param run_time: seconds to keep both instances in rotation
        """
        self.project_name = project_name
        self.new_jar = new_jar
        self.j_project = java_mod()
        # Working directory for rolling-update pid/log/socket files.
        self.keep_path = self.j_project._java_project_path + "/keep"

        if not os.path.exists(self.keep_path):
            os.makedirs(self.keep_path, 0o755)

        self.keep_log = "{}/{}.log".format(self.keep_path, self.project_name)

        # Used only by the zero-downtime (rolling) update path.
        self.new_port = new_port
        self.run_time = run_time
        self.keep_status = []          # per-step {name, status, msg} records
        self.new_project_config = None  # deep copy of config, mutated for the new instance
        self.old_project_config = None  # config of the currently running instance

        self.old_pro: Optional[psutil.Process] = None  # running (old) process
        self.new_pro: Optional[psutil.Process] = None  # freshly started process

        self.end = False               # cooperative cancellation flag for run_task
        self.old_project_data = None
        # Proxy details discovered from the Nginx config; scheme defaults to http.
        self.proxy_data = {
            "scheme": "http"
        }
||||
@staticmethod
|
||||
def new_suffix() -> str:
|
||||
import uuid
|
||||
return "_" + uuid.uuid4().hex[::4]
|
||||
|
||||
    def start_spring_project(self, project_data: dict, write_systemd_file=True, need_wait=True, ) -> dict:
        """Thin wrapper over the Java module's Spring Boot starter.

        :param write_systemd_file: re-generate the daemon/systemd unit
        :param need_wait: block until the process is considered started
        """
        return self.j_project._start_spring_boot_project(project_data, write_systemd_file, need_wait)
||||
    def restart_update(self) -> dict:
        """Stop-the-world update: swap the jar in the project config (if it
        changed), stop the old process/daemon and start it again.

        Returns a json_response-style dict.
        """
        project_data = self.j_project.get_project_find(self.project_name)
        if not project_data:
            return json_response(False, msg="The project does not exist")

        project_config = project_data['project_config']
        old_jar = project_config['project_jar']
        if self.new_jar != old_jar:
            if not os.path.isfile(self.new_jar):
                return json_response(False, msg="项目jar包不存在")

            project_config['jar_path'] = os.path.dirname(self.new_jar)
            project_config['project_jar'] = self.new_jar
            old_jar_name = os.path.basename(old_jar)
            # Replace the first command token that mentions the old jar name.
            project_cmd_list = project_config['project_cmd'].split(" ")
            for i in range(len(project_cmd_list)):
                if old_jar_name in project_cmd_list[i]:
                    project_cmd_list[i] = self.new_jar
                    break

            new_project_cmd = " ".join(project_cmd_list)
            project_config['project_cmd'] = new_project_cmd
            # Marks that the daemon definition must be rewritten below.
            project_config["change_flag"] = True

        s_admin = RealServer()
        server_name = "spring_" + project_config["project_name"] + project_config.get("server_name_suffix", "")
        # No daemon registered yet: kill any stray process and start fresh.
        if s_admin.daemon_status(server_name)["msg"] == "服务不存在!":
            self.j_project.stop_by_kill_pid(project_data)
            if os.path.isfile(project_config["pids"]):
                os.remove(project_config["pids"])
            return self.start_spring_project(project_data, write_systemd_file=True, need_wait=False)

        if "change_flag" in project_config and project_config.get("change_flag", False):
            # Jar changed: tear down the daemon, persist the new config,
            # then restart with a regenerated unit file.
            del project_config["change_flag"]
            s_admin.daemon_admin(server_name, "stop")
            s_admin.del_daemon(server_name)
            self.j_project.stop_by_kill_pid(project_data)
            if os.path.isfile(project_config["pids"]):
                os.remove(project_config["pids"])

            public.M("sites").where("id=?", (project_data["id"],)).update(
                {"project_config": json.dumps(project_config)}
            )
            return self.start_spring_project(project_data, write_systemd_file=True)
        else:
            # Same jar: plain restart, unit file untouched.
            return self.start_spring_project(project_data, write_systemd_file=False)
||||
    # Worker thread that actually performs the rolling update, step by step.
    def run_task(self):
        """Run the four rolling-update steps in order, recording progress in
        self.keep_status (status: -1 running, 1 done; msg holds detail).
        Each step returns an error string or None; any error aborts."""
        print("___________开始________________")
        try:
            # Step 1: start the new instance on a fresh port.
            res = self.start_new()
            self.keep_status[0]["status"] = 1
            if res:
                self.keep_status[0]["msg"] = res
                return
            else:
                self.keep_status[0]["msg"] = "新实例已启动,新实例pid:{}".format(self.new_pro.pid)
            # Step 2: put old+new into an Nginx upstream rotation.
            res = self.set_nginx_upstream()
            self.keep_status[1]["status"] = 1
            if res:
                # Rotation failed: roll the new instance back.
                self.stop_new()
                self.keep_status[1]["msg"] = res
                return
            else:
                self.keep_status[1]["msg"] = "Nginx已配置完成轮询设置,您可以访问新实例了"
            # Step 3: observation window while both instances serve traffic.
            res = self.wait_time()
            self.keep_status[2]["status"] = 1
            if res:
                self.keep_status[2]["msg"] = res
                return
            else:
                self.keep_status[2]["msg"] = "等待时间结束,新实例已启动成功"
            # Step 4: retire the old instance and persist the new config.
            res = self.stop_old()
            self.keep_status[3]["status"] = 1
            self.keep_status[3]["msg"] = res if res else "停止旧实例成功,项目更新已结束"
            public.M("sites").where("id=?", (self.old_project_data["id"],)).update(
                {"project_config": json.dumps(self.new_project_config)}
            )
        except:
            # Thread must never die silently; trace is printed to the
            # supervising process's stdout.
            print(traceback.format_exc())
            pass
||||
    def stop_new(self):
        """Tear down the new instance: stop and remove its daemon, then kill
        the process if it is still alive."""
        new_server_name = "spring_" + self.project_name + self.new_project_config.get("server_name_suffix", "")
        RealServer().server_admin(new_server_name, "stop")
        RealServer().del_daemon(new_server_name)
        if self.new_pro and self.new_pro.is_running():
            self.new_pro.kill()
||||
    def start_new(self) -> Optional[str]:
        """Start the new jar as a second instance on a free port and wait
        (up to ~5 minutes) until it is listening.

        Returns None on success, an error string otherwise.
        """
        self.keep_status[0]["status"] = -1
        # Unique suffix so the new daemon/pid file do not clash with the old one.
        self.new_project_config['server_name_suffix'] = self.new_suffix()
        self.new_project_config['pids'] = "{}/pids/{}.pid".format(
            self.j_project._java_project_vhost, self.project_name + self.new_project_config['server_name_suffix']
        )

        # Pick a free port if none was given, it collides with the old
        # instance, or it is already in use on the host.
        if not self.new_port or self.new_port in self.old_listen_port() or \
                utils.check_port_with_net_connections(self.new_port):
            self.new_port = utils.create_a_not_used_port()

        old_jar = self.old_project_config['project_jar']
        if self.new_jar != old_jar:
            if not os.path.isfile(self.new_jar):
                return "项目jar包不存在"

            self.new_project_config['jar_path'] = os.path.dirname(self.new_jar)
            self.new_project_config['project_jar'] = self.new_jar
            old_jar_name = os.path.basename(old_jar)
            # Swap the first command token mentioning the old jar name.
            project_cmd_list = self.new_project_config['project_cmd'].split(" ")
            for i in range(len(project_cmd_list)):
                if old_jar_name in project_cmd_list[i]:
                    project_cmd_list[i] = self.new_jar
                    break

            new_project_cmd = " ".join(project_cmd_list)
            self.new_project_config['project_cmd'] = new_project_cmd

        # Force the Spring Boot port onto the chosen free port.
        if "--server.port=" in self.new_project_config['project_cmd']:
            self.new_project_config['project_cmd'] = re.sub(
                r"--server\.port=\d+",
                "--server.port={}".format(self.new_port),
                self.new_project_config['project_cmd']
            )
        else:
            self.new_project_config['project_cmd'] += " --server.port={}".format(self.new_port)

        self.old_project_data["project_config"] = self.new_project_config

        self.start_spring_project(self.old_project_data, write_systemd_file=True)
        time.sleep(1)
        new_pid = self.j_project.get_project_pid(self.old_project_data)
        if not new_pid:
            return "项目启动失败"
        self.new_pro = psutil.Process(new_pid)
        self.keep_status[0]["msg"] = "新实例pid为:{}".format(new_pid)
        # Wait for the process to come up and bind the expected port.
        server_name = "spring_" + self.project_name + self.new_project_config.get("server_name_suffix", "")
        wait_num = 1
        # Poll every 0.5s for just under 5 minutes.
        for i in range(5 * 60 * 2 - 2):
            if self.end:
                # Cancelled from the control socket: clean up the daemon.
                RealServer().server_admin(server_name, "stop")
                RealServer().del_daemon(server_name)
                return "退出操作"
            if not self.new_pro.is_running():
                RealServer().del_daemon(server_name)
                return "项目启动失败"

            conns = self.new_pro.connections()
            for c in conns:
                if c.status == "LISTEN" and c.laddr.port == self.new_port:
                    # Success: the new instance is accepting connections.
                    return
            self.keep_status[0]["msg"] = "新实例pid为:{}, 正在等待该进程监听端口:{}, 已等待{}s".format(new_pid, self.new_port, wait_num)
            wait_num += 0.5
            time.sleep(0.5)

        # Timed out: roll back the daemon registration.
        RealServer().server_admin(server_name, "stop")
        RealServer().del_daemon(server_name)
        return "启动超时"
||||
def old_listen_port(self) -> List[int]:
|
||||
connects = self.old_pro.connections()
|
||||
res = []
|
||||
for i in connects:
|
||||
if i.status == "LISTEN":
|
||||
res.append(i.laddr.port)
|
||||
return res
|
||||
|
||||
    def set_nginx_upstream(self) -> Optional[str]:
        """Rewrite the project's Nginx config so traffic is load-balanced
        between the old and new instance via an upstream block.

        Returns None on success, an error string otherwise. On a failed
        config check, the original file content is restored.
        """
        self.keep_status[1]["status"] = -1
        ng_file = "/www/server/panel/vhost/nginx/java_{}.conf".format(self.project_name)
        res = public.checkWebConfig()
        if res is not True:
            return "Nginx配置文件错误,无法开始轮询配置"
        ng_data = public.readFile(ng_file)
        if not isinstance(ng_data, str):
            return "Nginx配置文件读取错误,无法开始轮询配置"

        # Locate the proxy_pass that points at the old local instance; the
        # LAST matching directive wins.
        old_proxy_res = None
        for tmp_res in re.finditer(r"\s*proxy_pass\s+(?P<url>\S+)\s*;", ng_data, re.M):
            url: Url = parse_url(tmp_res.group("url"))
            if url.hostname in ("127.0.0.1", "localhost", "0.0.0.0") and url.port in self.old_listen_port():
                old_proxy_res = tmp_res
                self.proxy_data["scheme"] = url.scheme
                self.proxy_data["old_port"] = url.port
        if not old_proxy_res:
            return "未找到原实例的代理配置"

        # Upstream with both instances; the vhost config then proxies to it.
        upstream_file = "/www/server/panel/vhost/nginx/java_{}_upstream.conf".format(self.project_name)
        public.writeFile(upstream_file, """
upstream {}_backend {{
server 127.0.0.1:{};
server 127.0.0.1:{};
}}
""".format(self.project_name, self.proxy_data["old_port"], self.new_port))

        new_config = ng_data.replace(old_proxy_res.group(), "\n proxy_pass {}://{}_backend;".format(
            self.proxy_data["scheme"], self.project_name))

        public.writeFile(ng_file, new_config)

        res = public.checkWebConfig()
        if res is not True:
            # Roll back to the original vhost config.
            public.writeFile(ng_file, ng_data)
            return "Nginx配置文件错误,无法开始轮询配置"
        else:
            public.serviceReload()
||||
    def wait_time(self):
        """Observation window: sleep for run_time seconds (default 10 min)
        while both instances serve traffic, checking every second that the
        new instance is still alive and that no cancellation was requested.

        Returns None on success, an error string otherwise.
        """
        self.keep_status[2]["status"] = -1
        if not self.run_time:
            self.run_time = 10 * 60  # default: 10 minutes
        for i in range(self.run_time):
            if self.end:
                return "退出操作"
            self.keep_status[2]["msg"] = "已进入轮询测试等待"
            if i > 0:
                self.keep_status[2]["msg"] = "已进入轮询测试等待,已等待{}s, 共需等待{}s".format(i, self.run_time)
            time.sleep(1)
            if not self.new_pro.is_running():
                return "新示例已退出,无法继续执行操作"
        return None
||||
    def select_new_or_old(self, option: str):
        """Skip the remaining observation window and commit to one instance.

        :param option: "use_new" keeps the new instance (old one is stopped
            and the new config persisted); anything else reverts Nginx to
            the old instance and stops the new one.
        :return: {"status": bool, "msg": str}
        """
        if option == "use_new":
            self.keep_status[2]["status"] = 1
            self.keep_status[2]["msg"] = "已跳过等待时间,使用新实例运行"
            res = self.stop_old()
            public.M("sites").where("id=?", (self.old_project_data["id"],)).update(
                {"project_config": json.dumps(self.new_project_config)}
            )
            self.keep_status[3]["status"] = 1
            self.keep_status[3]["msg"] = res if res else "停止旧实例成功,项目更新已结束"
            return {"status": False if res else True, "msg": res if res else "停止旧实例成功,项目更新已结束"}

        # Keep the old instance: undo the upstream rotation in Nginx.
        self.keep_status[2]["status"] = 1
        self.keep_status[2]["msg"] = "已跳过等待时间,使用原实例运行"
        self.keep_status[3]["name"] = "停止新实例"
        self.keep_status[3]["status"] = 1
        ng_file = "/www/server/panel/vhost/nginx/java_{}.conf".format(self.project_name)
        ng_data = public.readFile(ng_file)
        if not isinstance(ng_data, str):
            return {"status": False, "msg": "Nginx配置文件读取错误,无法取消轮询并使用原实例"}
        res = public.checkWebConfig()
        if res is not True:
            return {"status": False, "msg": "Nginx配置文件错误,无法取消轮询并使用原实例"}

        upstream_file = "/www/server/panel/vhost/nginx/java_{}_upstream.conf".format(self.project_name)
        # Point the proxy back at the old instance's port.
        new_config = ng_data.replace(
            "{}_backend".format(self.project_name),
            "127.0.0.1:{}".format(self.proxy_data["old_port"])
        )
        public.writeFile(ng_file, new_config)
        res = public.checkWebConfig()
        if res is not True:
            # Roll back the vhost file on a failed config check.
            public.writeFile(ng_file, ng_data)
            return {"status": False, "msg": "Nginx配置文件设置错误,无法取消轮询并使用原实例"}
        else:
            # NOTE(review): raises if the upstream file is already gone —
            # stop_old() guards with isfile first; confirm intent.
            os.remove(upstream_file)
            public.serviceReload()
            self.stop_new()

        return {"status": True, "msg": "停止新实例成功,项目更新已结束"}
||||
def stop_old(self):
|
||||
self.keep_status[3]["status"] = -1
|
||||
ng_file = "/www/server/panel/vhost/nginx/java_{}.conf".format(self.project_name)
|
||||
ng_data = public.readFile(ng_file)
|
||||
if not isinstance(ng_data, str):
|
||||
return "Nginx配置文件读取错误,无法取消轮询,使用新实例"
|
||||
|
||||
res = public.checkWebConfig()
|
||||
if res is not True:
|
||||
return "Nginx配置文件错误,无法取消轮询,使用新实例"
|
||||
|
||||
old_proxy_res = None
|
||||
for tmp_res in re.finditer(r"\s*proxy_pass\s+(?P<url>\S+)\s*;", ng_data, re.M):
|
||||
if tmp_res.group("url").find("{}_backend".format(self.project_name)):
|
||||
old_proxy_res = tmp_res
|
||||
|
||||
if not old_proxy_res:
|
||||
return "未找到轮询的代理配置"
|
||||
|
||||
upstream_file = "/www/server/panel/vhost/nginx/java_{}_upstream.conf".format(self.project_name)
|
||||
if os.path.isfile(upstream_file):
|
||||
os.remove(upstream_file)
|
||||
|
||||
new_config = ng_data.replace(old_proxy_res.group(), "\n proxy_pass {}://127.0.0.1:{};".format(
|
||||
self.proxy_data["scheme"], self.new_port))
|
||||
|
||||
public.writeFile(ng_file, new_config)
|
||||
|
||||
res = public.checkWebConfig()
|
||||
if res is not True:
|
||||
public.writeFile(ng_file, ng_data)
|
||||
return "Nginx配置文件错误,无法结束轮询配置"
|
||||
else:
|
||||
public.serviceReload()
|
||||
|
||||
old_server_name = "spring_" + self.project_name + self.old_project_config.get("server_name_suffix", "")
|
||||
RealServer().server_admin(old_server_name, "stop")
|
||||
RealServer().del_daemon(old_server_name)
|
||||
if self.old_pro and self.old_pro.is_running():
|
||||
self.old_pro.kill()
|
||||
|
||||
return None
|
||||
|
||||
    def keep_update(self):
        """Entry point for a zero-downtime update.

        Writes a pid file, starts run_task() in a worker thread, then serves
        a non-blocking Unix-socket control loop accepting the commands
        "status", "stop_new", "use_new" and "use_old" until the worker
        finishes. The final step log is persisted to the keep log file.
        """
        pid_file = "{}/{}.pid".format(self.keep_path, self.project_name)
        log_file = "{}/{}.log".format(self.keep_path, self.project_name)
        pid = os.getpid()
        public.writeFile(pid_file, str(pid))
        if os.path.exists(log_file):
            os.remove(log_file)

        project_data = self.j_project.get_project_find(self.project_name)
        if not project_data:
            return json_response(False, msg="The project does not exist")

        project_config = project_data['project_config']
        self.old_project_data = project_data
        self.old_project_config = project_config
        # The new instance works on an independent copy of the config.
        self.new_project_config = copy.deepcopy(project_config)

        try:
            self.old_pro = psutil.Process(self.j_project.get_project_pid(project_data))
        except:
            pass
        if not self.old_pro:
            return json_response(False, msg="项目未启动")

        self.end = False
        # Step records mirrored to clients: status 0 pending, -1 running, 1 done.
        self.keep_status = [
            {
                "name": "启动新实例",
                "status": 0,
                "msg": "",
            },
            {
                "name": "设置Nginx轮询",
                "status": 0,
                "msg": "",
            },
            {
                "name": "等待并检查新实例",
                "status": 0,
                "msg": "",
            },
            {
                "name": "停止旧实例",
                "status": 0,
                "msg": "",
            }
        ]

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        socket_file = "{}/{}.socket".format(self.keep_path, self.project_name)
        # Remove a stale socket file from a previous run, if any.
        if os.path.exists(socket_file):
            os.remove(socket_file)

        # Non-blocking so the loop can also watch the worker thread.
        sock.bind(socket_file)
        sock.setblocking(False)  # False = non-blocking accept/recv
        sock.listen(2)

        update_run_task = Thread(target=self.run_task)
        update_run_task.start()

        while True:
            if not update_run_task.is_alive():
                # Worker done: persist the final step log and leave the loop.
                public.writeFile(log_file, json.dumps(self.keep_status))
                break

            try:
                # Accept a control client and read its command.
                conn, _ = sock.accept()
                data = conn.recv(1024)
            except socket.error as e:
                # EAGAIN/EWOULDBLOCK just mean "no client yet".
                if e.errno not in (errno.EAGAIN, errno.EWOULDBLOCK):
                    raise e
                time.sleep(0.1)
                continue

            if not data:
                time.sleep(0.1)
                continue

            # Log the received command.
            print("Received:", data.decode())
            data_str = data.decode()
            if data_str == "stop_new":
                # Only allowed while step 1 (start new instance) is running.
                if self.keep_status[0]["status"] == -1:
                    self.end = True
                    update_run_task.join()
                    public.writeFile(log_file, json.dumps(self.keep_status))
                    conn.sendall(json.dumps({
                        "status": True,
                        "msg": "已关闭更新任务,并停止新实例"
                    }).encode())
                    break
                else:
                    conn.sendall(json.dumps({
                        "status": False,
                        "msg": "新实例启动完成,已加入轮询,无法继续执行该操作"
                    }).encode())
            elif data_str == "status":
                conn.sendall(json.dumps(self.keep_status).encode())
            elif data_str in ("use_new", "use_old"):
                # Only allowed during the observation window (step 3 running).
                if self.keep_status[2]["status"] != -1:
                    conn.sendall(json.dumps({
                        "status": False,
                        "msg": "已超过轮询等待时间,无法执行该操作"
                    }).encode())
                else:
                    self.end = True
                    update_run_task.join()
                    public.writeFile(log_file, json.dumps(self.keep_status))
                    res = self.select_new_or_old(data_str)
                    conn.sendall(json.dumps(res).encode())

            time.sleep(0.1)

        # Close the server socket and remove its filesystem entry.
        sock.close()
        if os.path.exists(socket_file):
            os.remove(socket_file)
||||
    def get_keep_status(self):
        """Report the rolling update's progress.

        If a final step log exists, return it with running=False. Otherwise
        verify the updater process is alive and query its control socket
        with "status", returning the live step list with running=True.
        """
        try:
            log_file = "{}/{}.log".format(self.keep_path, self.project_name)
            pid_file = "{}/{}.pid".format(self.keep_path, self.project_name)
            log_data = public.readFile(log_file)
            data = None
            if isinstance(log_data, str):
                try:
                    data = json.loads(log_data)
                except:
                    pass

            if data:
                # A persisted log means the update already finished.
                return json_response(True, data={
                    "running": False,
                    "keep_msg": data
                })

            pid_data = public.readFile(pid_file)
            er_msg = "没有正在进行的更新任务"
            if not isinstance(pid_data, str):
                return json_response(False, msg=er_msg)
            try:
                pid = int(pid_data)
                if not psutil.pid_exists(pid):
                    return json_response(False, msg=er_msg)
            except:
                return json_response(False, msg=er_msg)

            # Ask the live updater for its current step list.
            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                sock.connect("{}/{}.socket".format(self.keep_path, self.project_name))
            except Exception:
                public.print_log(public.get_error_info())
                return json_response(False, msg="链接错误请尝试强制停止更新")
            data = b"status"
            sock.sendall(data)

            # Receive the JSON response (short timeout; it is local IPC).
            sock.settimeout(1)
            response = sock.recv(1024 * 2)
            sock.close()
            try:
                data = json.loads(response.decode())
            except:
                public.print_log(public.get_error_info())
                return json_response(False, msg="链接错误请尝试强制停止更新")
            return json_response(True, data={
                "running": True,
                "keep_msg": data
            })
        except:
            public.print_log(public.get_error_info())
            return json_response(False, msg="链接错误请尝试强制停止更新")
||||
    def keep_option(self, option: str) -> dict:
        """Send a control command to the running updater process.

        :param option: one of the commands keep_update() understands
            ("stop_new", "use_new", "use_old", "status")
        :return: json_response-style dict relayed from the updater
        """
        try:
            pid_file = "{}/{}.pid".format(self.keep_path, self.project_name)
            pid_data = public.readFile(pid_file)
            er_msg = "没有正在进行的更新任务, 无法执行操作"
            if not isinstance(pid_data, str) or pid_data == "0":
                return json_response(False, msg=er_msg)
            try:
                pid = int(pid_data)
                if not psutil.pid_exists(pid):
                    return json_response(False, msg=er_msg)
            except:
                return json_response(False, msg=er_msg)

            sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                sock.connect("{}/{}.socket".format(self.keep_path, self.project_name))
            except Exception:
                return json_response(False, msg="链接错误,无法执行操作,请尝试强制停止更新")

            sock.sendall(option.encode())

            # Receive the response; commands may take a while (join etc.).
            sock.settimeout(10)
            response = sock.recv(1024)
            sock.close()
            try:
                data = json.loads(response.decode())
            except:
                public.print_log(public.get_error_info())
                return json_response(False, msg="链接错误请尝试强制停止更新")
            if isinstance(data, dict):
                return json_response(data['status'], msg=data['msg'])
            else:
                return json_response(False, msg="链接错误请尝试强制停止更新")
        except:
            public.print_log(public.get_error_info())
            return json_response(False, msg="链接错误请尝试强制停止更新")
||||
|
||||
if __name__ == '__main__':
    # CLI entry: <project_name> <new_jar> <new_port> <run_time>
    if len(sys.argv) == 5:
        _name, _jar = sys.argv[1], sys.argv[2]
        _port, _wait = int(sys.argv[3]), int(sys.argv[4])
        updater = ProjectUpdate(_name, new_jar=_jar, run_time=_wait, new_port=_port)
        updater.keep_update()
|
||||
644
mod/project/java/server_proxy.py
Normal file
644
mod/project/java/server_proxy.py
Normal file
@@ -0,0 +1,644 @@
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
from typing import Optional, Union, List, Dict, Any
|
||||
from mod.base.web_conf.util import check_server_config, write_file, read_file, service_reload
|
||||
from mod.base import json_response
|
||||
from urllib3.util import Url, parse_url
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class RealServerProxy:
    """Manage reverse-proxy entries for a Java project's web site.

    Proxy definitions are persisted in the site row's
    ``project_config["proxy_info"]`` and mirrored into the panel's nginx and
    apache vhost files. Every file write is validated with
    ``check_server_config()`` and rolled back on failure.
    """

    # Panel installation root; vhost config files live beneath it.
    panel_path = "/www/server/panel"
    # Headers always set by the generated proxy block — user-supplied
    # add_headers may not override these.
    default_headers = (
        "Host", "X-Real-IP", "X-Forwarded-For", "REMOTE-HOST", "X-Host", "X-Scheme", "Upgrade", "Connection"
    )

    def __init__(self, project_data: dict):
        """:param project_data: sites-table row with "name", "id" and "project_config"."""
        # All java-project vhost files are prefixed with "java_".
        self.config_prefix: str = "java_"

        site_name = project_data["name"]
        self.project_id = project_data["id"]
        self.project_config = project_data["project_config"]

        # Lazily-loaded proxy list — see the ``config`` property.
        self._config: Optional[List[dict]] = None
        self._ng_file: str = "{}/vhost/nginx/{}{}.conf".format(self.panel_path, self.config_prefix, site_name)
        self._ap_file: str = "{}/vhost/apache/{}{}.conf".format(self.panel_path, self.config_prefix, site_name)
        self.site_name = site_name
        # Active web server type ("nginx" or "apache").
        self.ws_type = public.get_webserver()

    @staticmethod
    def new_id() -> str:
        """Generate a short random proxy id (every 3rd hex char of a uuid4)."""
        from uuid import uuid4
        return uuid4().hex[::3]

    @property
    def config(self) -> List[dict]:
        """Proxy list from project_config["proxy_info"], loaded on first access."""
        if self._config is None:
            if "proxy_info" in self.project_config:
                self._config = self.project_config["proxy_info"]
            else:
                self._config = []

        return self._config

    def save_config(self):
        """Persist the (possibly mutated) proxy list back into the sites table."""
        if self._config is not None:
            self.project_config["proxy_info"] = self._config
            public.M("sites").where("id=?", (self.project_id,)).update(
                {"project_config": json.dumps(self.project_config)}
            )

    # Check whether an equivalent proxy already exists (duplicate dir/port).
    def _check_even(self, proxy_conf: dict, is_modify) -> Optional[str]:
        """Return an error string on conflict, None when the entry is acceptable.

        When modifying, an entry may keep its own dir/port (same proxy_id).
        """
        if is_modify is True:
            for i in self.config:
                if i["proxy_dir"] == proxy_conf["proxy_dir"] and i["proxy_id"] != proxy_conf["proxy_id"]:
                    return '指定反向代理名称或代理文件夹已存在'
                if i["proxy_port"] == proxy_conf["proxy_port"] and i["proxy_id"] != proxy_conf["proxy_id"]:
                    return '指定反向代理端口已存在对应的代理'
        else:
            for i in self.config:
                if i["proxy_port"] == proxy_conf["proxy_port"]:
                    return '指定反向代理端口已存在对应的代理'

    def check_args(self, get, is_modify=False) -> Union[str, dict]:
        """Validate request arguments and normalize them into a proxy dict.

        :param get: panel request object (attribute access + ``in`` support).
        :param is_modify: relaxes duplicate checks for the entry's own id.
        :return: error message string on failure, normalized data dict on success.
        """
        err_msg = check_server_config()
        if isinstance(err_msg, str):
            return 'WEB服务器配置配置文件错误ERROR:<br><font style="color:red;">' + \
                err_msg.replace("\n", '<br>') + '</font>'
        # Defaults for optional fields.
        data = {
            "proxy_dir": "/",
            "status": 1,
            "proxy_id": self.new_id(),
            "rewrite": {
                "status": False,
                "src_path": "",
                "target_path": "",
            },
            "add_headers": [],
        }
        try:
            data["site_name"] = get.site_name.strip()
            if "proxy_dir" in get:
                data["proxy_dir"] = get.proxy_dir.strip()
            if "proxy_id" in get:
                data["proxy_id"] = get.proxy_id.strip()
            data["proxy_port"] = int(get.proxy_port)
            data["status"] = int(get.status.strip())
            # rewrite / add_headers may arrive either parsed or as JSON strings.
            if hasattr(get, "rewrite"):
                data["rewrite"] = get.rewrite
                if isinstance(get.rewrite, str):
                    data["rewrite"] = json.loads(get.rewrite)

            if hasattr(get, "add_headers"):
                data["add_headers"] = get.add_headers
                if isinstance(get.add_headers, str):
                    data["add_headers"] = json.loads(get.add_headers)
        except:
            public.print_log(public.get_error_info())
            return "Parameter error"

        if not 1 < data["proxy_port"] < 65536:
            return '代理端口范围错误'

        # Proxy paths are always directory-style (trailing slash).
        if not data["proxy_dir"].endswith("/"):
            data["proxy_dir"] += "/"

        evn_msg = self._check_even(data, is_modify)
        if isinstance(evn_msg, str):
            return evn_msg

        # Characters that would break the generated nginx/apache config.
        rep_re_key = re.compile(r'''[?=\[\])(*&^%$#@!~`{}><,'"\\]+''')
        special = r'''?,=,[,],),(,*,&,^,%,$,#,@,!,~,`,{,},>,<,\,',"'''
        # Validate the proxy path characters.
        if rep_re_key.search(data["proxy_dir"]):
            return "代理路由不能有以下特殊符号" + special

        if not isinstance(data["rewrite"], dict):
            return "路由重写配置错误"
        # Disabled rewrite is normalized back to the empty default.
        if "status" not in data["rewrite"] or not data["rewrite"]["status"]:
            data["rewrite"] = {
                "status": False,
                "src_path": "",
                "target_path": "",
            }
        else:
            if not ("src_path" in data["rewrite"] and "target_path" in data["rewrite"]):
                return "路由重写参数配置错误"
            if not isinstance(data["rewrite"]["src_path"], str) or not isinstance(data["rewrite"]["target_path"], str):
                return "路由重写参数配置错误"
            if rep_re_key.search(data["rewrite"]["src_path"]):
                return "路由重写匹配路由不能有以下特殊符号" + special
            if rep_re_key.search(data["rewrite"]["target_path"]):
                return "路由重写目标路由不能有以下特殊符号" + special

        if not isinstance(data["add_headers"], list):
            return "自定义代理头配置错误"
        else:
            rep_blank_space = re.compile(r"\s+")
            for h in data["add_headers"]:
                if "k" not in h or "v" not in h:
                    return "自定义代理头配置错误"
                if not isinstance(h["k"], str) or not isinstance(h["v"], str):
                    return "自定义代理头配置错误"
                if rep_blank_space.search(h["k"]) or rep_blank_space.search(h["v"]):
                    return "代理头配置中不能包含有空格"
                if h["k"] in self.default_headers:
                    return '代理头配置中不能包含有默认头【{}】'.format(h["k"])

        return data

    def check_location(self, proxy_dir: str) -> Optional[str]:
        """Return an error string when *proxy_dir* already has a location block
        in the rewrite (pseudo-static) file or the site's nginx vhost file."""
        # Rewrite (pseudo-static) config file path.
        rewrite_conf_path = "%s/vhost/rewrite/%s%s.conf" % (self.panel_path, self.config_prefix, self.site_name)

        # NOTE(review): pattern starts with "s*" — presumably "\s*" was intended;
        # as written it requires literal 's' chars before "location". Confirm.
        rep_location = re.compile(r"s*location\s+(\^~\s*)?%s\s*{" % proxy_dir)

        for i in [rewrite_conf_path, self._ng_file]:
            conf = read_file(i)
            if isinstance(conf, str) and rep_location.search(conf):
                return '伪静态/站点主配置文件已经存路径【{}】的配置'.format(proxy_dir)

    @staticmethod
    def _set_nginx_proxy_base():
        """Ensure the shared nginx proxy.conf and the websocket upgrade map exist."""
        file = "/www/server/nginx/conf/proxy.conf"
        setup_path = "/www/server"
        if not os.path.exists(file):
            conf = '''proxy_temp_path %s/nginx/proxy_temp_dir;
proxy_cache_path %s/nginx/proxy_cache_dir levels=1:2 keys_zone=cache_one:10m inactive=1d max_size=5g;
client_body_buffer_size 512k;
proxy_connect_timeout 60;
proxy_read_timeout 60;
proxy_send_timeout 60;
proxy_buffer_size 32k;
proxy_buffers 4 64k;
proxy_busy_buffers_size 128k;
proxy_temp_file_write_size 128k;
proxy_next_upstream error timeout invalid_header http_500 http_503 http_404;
proxy_cache cache_one;''' % (setup_path, setup_path)
            write_file(file, conf)

        # NOTE(review): this re-reads proxy.conf itself and patches
        # "include mime.types;" inside it — it looks like the main nginx.conf
        # was the intended target of this include injection. Confirm.
        conf = read_file(file)
        if conf and conf.find('include proxy.conf;') == -1:
            conf = re.sub(r"include\s+mime\.types;", "include mime.types;\n\tinclude proxy.conf;", conf)
            write_file(file, conf)

        # Websocket upgrade map — "0." prefix so it loads before the vhosts.
        map_file = "/www/server/panel/vhost/nginx/0.websocket.conf"
        if not os.path.exists(map_file):
            write_file(map_file, '''
map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}''')

    @staticmethod
    def build_proxy_conf(proxy_data: dict) -> str:
        """Render the nginx location block for one proxy entry.

        Fills in proxy_pass target, optional rewrite rule and optional extra
        proxy_set_header lines; the block is wrapped in #PROXY-START/#PROXY-END
        markers so it can be located and removed later.
        """
        ng_proxy = '''
#PROXY-START{proxy_dir}
location {proxy_dir} {{{rewrite}
    proxy_pass {proxy_url};
    proxy_set_header Host $host;
    proxy_set_header X-Real-IP $remote_addr;
    proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;{add_headers}
    proxy_set_header REMOTE-HOST $remote_addr;
    add_header X-Cache $upstream_cache_status;
    proxy_set_header X-Host $host:$server_port;
    proxy_set_header X-Scheme $scheme;
    proxy_connect_timeout 30s;
    proxy_read_timeout 86400s;
    proxy_send_timeout 30s;
    proxy_http_version 1.1;
    proxy_set_header Upgrade $http_upgrade;
    proxy_set_header Connection "upgrade";
}}
#PROXY-END{proxy_dir}
'''

        # Optional rewrite rule: src path gets a trailing "/", target loses its.
        rewrite = ""
        if "rewrite" in proxy_data and proxy_data["rewrite"].get("status", False):
            rewrite = proxy_data["rewrite"]
            src_path = rewrite["src_path"]
            if not src_path.endswith("/"):
                src_path += "/"
            target_path = rewrite["target_path"]
            if target_path.endswith("/"):
                target_path = target_path[:-1]

            rewrite = "\n    rewrite ^{}(.*)$ {}/$1 break;".format(src_path, target_path)

        # Optional user-supplied proxy_set_header lines.
        add_headers = ""
        if "add_headers" in proxy_data:
            header_tmp = "    proxy_set_header {} {};"
            add_headers_list = [header_tmp.format(h["k"], h["v"]) for h in proxy_data["add_headers"] if
                                "k" in h and "v" in h]
            add_headers = "\n".join(add_headers_list)
            if add_headers:
                add_headers = "\n" + add_headers

        # Build the substitution values.
        proxy_dir = proxy_data["proxy_dir"]
        proxy_site = "http://127.0.0.1:{}".format(proxy_data["proxy_port"])

        proxy = ng_proxy.format(
            proxy_dir=proxy_dir,
            proxy_url=proxy_site,
            rewrite=rewrite,
            add_headers=add_headers,
        )

        return proxy

    def add_nginx_proxy(self, proxy_data: dict) -> Optional[str]:
        """Insert the rendered proxy block into the nginx vhost file.

        Tries a list of insertion anchors in order; validates the result with
        check_server_config() when nginx is the active server and rolls back
        on failure. Returns an error string, or None on success.
        """
        ng_conf = read_file(self._ng_file)
        if not ng_conf:
            return "Nginx配置文件不存在"

        proxy_str = self.build_proxy_conf(proxy_data)

        # Insertion anchors, tried in order: (pattern, insert_before_match).
        rep_list = [
            (re.compile(r"\s*#PROXY-LOCAl-END.*", re.M), True),  # above the proxy-section terminator
            (re.compile(r"\s*#ROXY-END.*", re.M), False),  # below other reverse proxies
            (re.compile(r"\s*#\s*HTTP反向代理相关配置结束\s*<<<.*", re.M), False),  # below other reverse proxies
            (re.compile(r"\s*include\s*/www/server/panel/vhost/rewrite/.*(\s*#.*)?"), False),
            # below the rewrite include
            # (re.compile(r"(#.*)?\s*location\s+/\.well-known/\s*{"), True),  # above location /.well-known/
        ]

        # Locate the insertion point by regex; write + validate + rollback.
        def set_by_rep_idx(tmp_rep: re.Pattern, use_start: bool) -> bool:
            tmp_res = tmp_rep.search(ng_conf)
            if not tmp_res:
                return False
            if use_start:
                new_conf = ng_conf[:tmp_res.start()] + proxy_str + tmp_res.group() + ng_conf[tmp_res.end():]
            else:
                new_conf = ng_conf[:tmp_res.start()] + tmp_res.group() + proxy_str + ng_conf[tmp_res.end():]

            write_file(self._ng_file, new_conf)
            # Roll back when the new config fails nginx validation.
            if self.ws_type == "nginx" and check_server_config() is not None:
                write_file(self._ng_file, ng_conf)
                return False
            return True

        for r, s in rep_list:
            if set_by_rep_idx(r, s):
                break
        else:
            return "无法在配置文件中定位到需要添加的项目"

    def _unset_nginx_proxy(self, proxy_data) -> Optional[str]:
        """Remove a proxy block from the nginx vhost file.

        First tries the #PROXY-START/#PROXY-END marker pair; falls back to
        scanning for a matching location block containing proxy_pass.
        Returns an error string on validation failure, None otherwise.
        """
        ng_conf = read_file(self._ng_file)
        if not isinstance(ng_conf, str):
            return "配置文件不存在"

        proxy_dir = proxy_data["proxy_dir"]
        rep_start_end = re.compile(r"\s*#PROXY-START%s(.|\n)*?#PROXY-END%s[^\n]*" % (proxy_dir, proxy_dir))
        if rep_start_end.search(ng_conf):
            new_ng_conf = rep_start_end.sub("", ng_conf)
            write_file(self._ng_file, new_ng_conf)
            if self.ws_type == "nginx":
                err_msg = check_server_config()
                if isinstance(err_msg, str):
                    # Validation failed — restore the original file.
                    write_file(self._ng_file, ng_conf)
                    return err_msg
            else:
                return
            # NOTE(review): on successful nginx validation execution falls
            # through to the location-scan below, which operates on the
            # pre-removal ng_conf — looks like a missing ``return``. Confirm.

        # Fallback: find the location block itself (optionally preceded by a comment).
        rep_location = re.compile(r"(\s*#.*?)\s*location\s+(\^~\s*)?%s\s*{" % proxy_dir)
        res = rep_location.search(ng_conf)
        if res:
            end_idx = self.find_nginx_block_end(ng_conf, res.end() + 1)
            if not end_idx:
                return

            block = ng_conf[res.start(): end_idx]
            if block.find("proxy_pass") == -1:  # skip blocks that contain no proxy_pass
                return

            # Also strip the trailing #PROXY-END comment line.
            res_end = re.search(r"\s*#PROXY-END.*", ng_conf[end_idx + 1:])
            if res_end:
                end_idx += res_end.end()

            new_ng_conf = ng_conf[:res.start()] + ng_conf[end_idx + 1:]
            write_file(self._ng_file, new_ng_conf)
            if self.ws_type == "nginx":
                err_msg = check_server_config()
                if isinstance(err_msg, str):
                    write_file(self._ng_file, ng_conf)
                    return err_msg

    @staticmethod
    def find_nginx_block_end(data: str, start_idx: int) -> Optional[int]:
        """Return the index of the '}' closing the block opened before *start_idx*.

        Braces on lines containing '#' earlier in the line are treated as
        commented-out and ignored. Returns None when no balanced close is found.
        """
        if len(data) < start_idx + 1:
            return None

        level = 1
        line_start = 0
        for i in range(start_idx + 1, len(data)):
            if data[i] == '\n':
                line_start = i + 1
            if data[i] == '{' and line_start and data[line_start: i].find("#") == -1:  # next uncommented '{'
                level += 1
            elif data[i] == '}' and line_start and data[line_start: i].find("#") == -1:  # next uncommented '}'
                level -= 1
                if level == 0:
                    return i

        return None

    @staticmethod
    def build_apache_conf(proxy_data: dict) -> str:
        """Render the apache mod_proxy block for one proxy entry (marker-wrapped)."""
        return '''
#PROXY-START{proxy_dir}
<IfModule mod_proxy.c>
    ProxyRequests Off
    SSLProxyEngine on
    ProxyPass {proxy_dir} {url}/
    ProxyPassReverse {proxy_dir} {url}/
    RequestHeader set Host "%{{Host}}e"
    RequestHeader set X-Real-IP "%{{REMOTE_ADDR}}e"
    RequestHeader set X-Forwarded-For "%{{X-Forwarded-For}}e"
    RequestHeader setifempty X-Forwarded-For "%{{REMOTE_ADDR}}e"
</IfModule>
#PROXY-END{proxy_dir}
'''.format(proxy_dir=proxy_data["proxy_dir"], url="http://127.0.0.1:{}".format(proxy_data["proxy_port"]))

    def add_apache_proxy(self, proxy_data: dict) -> Optional[str]:
        """Insert the rendered proxy block into the apache vhost file.

        Mirrors add_nginx_proxy: anchor list, write, validate (when apache is
        active) and roll back. Returns an error string, or None on success.
        """
        ap_conf = read_file(self._ap_file)
        if not ap_conf:
            return "Apache配置文件不存在"

        proxy_str = self.build_apache_conf(proxy_data)

        # Insertion anchors, tried in order: (pattern, insert_before_match).
        rep_list = [
            (re.compile(r"#ROXY-END[^\n]*\n"), False),  # below other reverse proxies
            (re.compile(r"#\s*HTTP反向代理相关配置结束\s*<<<[^\n]*\n"), False),  # below other reverse proxies
            (
                re.compile(r"\s*(#SSL[^\n]*)?\s*<IfModule\s*alias_module>[^\n]*\s*.*/.well-known/[^\n]*\s*</IfModule>"),
                True  # above the /.well-known/ alias block
            ),
            (re.compile(r"\s*</VirtualHost>[^\n]*\n?"), True),
        ]

        # Locate the insertion point by regex; write + validate + rollback.
        def set_by_rep_idx(tmp_rep: re.Pattern, use_start: bool) -> bool:
            new_conf_list = []
            change_flag = False
            start_idx = 0
            # NOTE: inserts at EVERY match of the anchor (e.g. both vhosts).
            for tmp in tmp_rep.finditer(ap_conf):
                change_flag = True
                new_conf_list.append(ap_conf[start_idx:tmp.start()])
                start_idx = tmp.end()
                if use_start:
                    new_conf_list.append(proxy_str)
                    new_conf_list.append(tmp.group())
                else:
                    new_conf_list.append(tmp.group())
                    new_conf_list.append(proxy_str)

            if not change_flag:
                return False

            new_conf_list.append(ap_conf[start_idx:])
            write_file(self._ap_file, "".join(new_conf_list))
            # Roll back when the new config fails apache validation.
            if self.ws_type == "apache" and check_server_config() is not None:
                write_file(self._ap_file, ap_conf)
                return False
            return True

        for r, s in rep_list:
            if set_by_rep_idx(r, s):
                break
        else:
            return "无法在配置文件中定位到需要添加的项目"

    def remove_apache_proxy(self, proxy_data) -> Optional[str]:
        """Remove a proxy block from the apache vhost file.

        Tries the #PROXY-START/#PROXY-END markers first, then a bare
        <IfModule mod_proxy.c> block matching the ProxyPass path.
        Returns an error string on validation failure, None otherwise.
        """
        ap_conf = read_file(self._ap_file)
        if not isinstance(ap_conf, str):
            return "配置文件不存在"

        proxy_dir = proxy_data["proxy_dir"]
        rep_start_end = re.compile(r"\s*#PROXY-START%s(.|\n)*?#PROXY-END%s[^\n]*" % (proxy_dir, proxy_dir))
        if rep_start_end.search(ap_conf):
            new_ap_conf = rep_start_end.sub("", ap_conf)
            write_file(self._ap_file, new_ap_conf)
            if self.ws_type == "apache":
                err_msg = check_server_config()
                if isinstance(err_msg, str):
                    # Validation failed — restore the original file.
                    write_file(self._ap_file, ap_conf)
                    return err_msg
            else:
                return
            # NOTE(review): as in _unset_nginx_proxy, successful validation
            # falls through to the fallback scan below — likely a missing
            # ``return``. Confirm.

        # Fallback: unmarked <IfModule mod_proxy.c> block for this path.
        rep_if_mod = re.compile(
            r"(\s*#.*)?\s*<IfModule mod_proxy\.c>\s*(.*\n){3,5}\s*"
            r"ProxyPass\s+%s\s+\S+/\s*(.*\n){1,2}\s*</IfModule>(\s*#.*)?" % proxy_dir)

        res = rep_if_mod.search(ap_conf)
        if res:
            new_ap_conf = rep_if_mod.sub("", ap_conf)
            write_file(self._ap_file, new_ap_conf)
            if self.ws_type == "apache":
                err_msg = check_server_config()
                if isinstance(err_msg, str):
                    write_file(self._ap_file, ap_conf)
                    return err_msg

    def create_proxy(self, proxy_data: dict) -> Optional[str]:
        """Create a proxy entry in both vhost files and persist it.

        An entry with the same proxy_dir is treated as a modification.
        Returns an error string (only errors from the ACTIVE server type
        block creation), None on success.
        """
        # Same directory already proxied -> update that entry instead.
        for i in self.config:
            if i["proxy_dir"] == proxy_data["proxy_dir"]:
                proxy_data["proxy_id"] = i["proxy_id"]
                return self.modify_proxy(proxy_data)

        if self.ws_type == "nginx":
            err_msg = self.check_location(proxy_data["proxy_dir"])
            if err_msg:
                # NOTE(review): returns a json_response dict here while the
                # declared return type is Optional[str] — inconsistent with
                # the other error paths. Confirm intended contract.
                return json_response(False, err_msg)

        self._set_nginx_proxy_base()
        error_msg = self.add_nginx_proxy(proxy_data)
        if self.ws_type == "nginx" and error_msg:
            return error_msg
        error_msg = self.add_apache_proxy(proxy_data)
        if self.ws_type == "apache" and error_msg:
            return error_msg
        self.config.append(proxy_data)
        self.save_config()
        service_reload()

    def modify_proxy(self, proxy_data: dict) -> Optional[str]:
        """Replace an existing proxy entry (matched by proxy_id + site_name).

        Removes the old blocks from both vhost files, inserts the new ones,
        persists and reloads. Returns an error string, None on success.
        """
        idx = None
        for index, i in enumerate(self.config):
            if i["proxy_id"] == proxy_data["proxy_id"] and i["site_name"] == proxy_data["site_name"]:
                idx = index
                break

        if idx is None:
            return "未找到该id的反向代理配置"

        # Only re-check location conflicts when the directory actually changes.
        if proxy_data["proxy_dir"] != self.config[idx]["proxy_dir"] and self.ws_type == "nginx":
            err_msg = self.check_location(proxy_data["proxy_dir"])
            if err_msg:
                # NOTE(review): json_response dict vs Optional[str] — see create_proxy.
                return json_response(False, err_msg)

        self._set_nginx_proxy_base()
        error_msg = self._unset_nginx_proxy(self.config[idx])
        if self.ws_type == "nginx" and error_msg:
            return error_msg

        error_msg = self.remove_apache_proxy(self.config[idx])
        if self.ws_type == "apache" and error_msg:
            return error_msg

        error_msg = self.add_nginx_proxy(proxy_data)
        if self.ws_type == "nginx" and error_msg:
            return error_msg

        error_msg = self.add_apache_proxy(proxy_data)
        if self.ws_type == "apache" and error_msg:
            return error_msg

        self.config[idx] = proxy_data
        self.save_config()
        service_reload()

    def remove_proxy(self, site_name, proxy_id, multiple=False) -> Optional[str]:
        """Delete a proxy entry from both vhost files and the stored list.

        :param multiple: suppress the service reload (caller batches deletions).
        :return: error string, None on success.
        """
        idx = None
        # NOTE(review): no break — keeps the LAST match; equivalent to first
        # match when proxy_id is unique. Confirm.
        for index, i in enumerate(self.config):
            if i["proxy_id"] == proxy_id and i["site_name"] == site_name:
                idx = index

        if idx is None:
            return "未找到该名称的反向代理配置"

        err_msg = self._unset_nginx_proxy(self.config[idx])
        if err_msg and self.ws_type == "nginx":
            return err_msg

        error_msg = self.remove_apache_proxy(self.config[idx])
        if self.ws_type == "apache" and error_msg:
            return error_msg

        del self.config[idx]
        self.save_config()
        if not multiple:
            service_reload()

    def get_proxy_list_by_nginx(self) -> List[Dict[str, Any]]:
        """Reconcile the stored proxy list against the live nginx vhost file.

        Stored entries whose location/proxy_pass no longer match are dropped;
        a local proxy_pass found in the file but absent from the store is
        adopted as a new "/" entry. Persists when anything changed.
        """
        ng_conf = read_file(self._ng_file)
        if not isinstance(ng_conf, str):
            return []

        # Map of location path -> (block start, block end) for every location
        # block that contains a proxy_pass directive.
        rep_location = re.compile(r"\s*location\s+([=*~^]*\s+)?(?P<path>\S+)\s*{")
        proxy_location_path_info = {}
        for tmp in rep_location.finditer(ng_conf):
            end_idx = self.find_nginx_block_end(ng_conf, tmp.end() + 1)
            if end_idx and ng_conf[tmp.start(): end_idx].find("proxy_pass") != -1:
                p = tmp.group("path")
                if not p.endswith("/"):
                    p += "/"
                proxy_location_path_info[p] = (tmp.start(), end_idx)

        res_pass = re.compile(r"proxy_pass\s+(?P<pass>\S+)\s*;", re.M)
        remove_list = []
        local_host = ("127.0.0.1", "localhost", "0.0.0.0")
        for i in self.config:
            if i["proxy_dir"] in proxy_location_path_info:
                start_idx, end_idx = proxy_location_path_info[i["proxy_dir"]]
                block = ng_conf[start_idx: end_idx]
                res_pass_res = res_pass.search(block)
                if res_pass_res:
                    url = parse_url(res_pass_res.group("pass"))
                    # Entry still matches the file -> mark active and consume it.
                    if isinstance(url, Url) and url.hostname in local_host and url.port == i["proxy_port"]:
                        i["status"] = True
                        proxy_location_path_info.pop(i["proxy_dir"])
                        continue

            # Stored entry no longer backed by the config file.
            remove_list.append(i)

        need_save = False
        for i in remove_list:
            self.config.remove(i)
            need_save = True

        # Adopt local-upstream blocks present in the file but not in the store.
        for path, (start_idx, end_idx) in proxy_location_path_info.items():
            block = ng_conf[start_idx: end_idx]
            res_pass_res = res_pass.search(block)
            if res_pass_res:
                url = parse_url(res_pass_res.group("pass"))
                if isinstance(url, Url) and url.hostname in ("127.0.0.1", "localhost", "0.0.0.0"):
                    # NOTE(review): proxy_dir is hard-coded to "/" even though
                    # the matched location is ``path`` — confirm intended.
                    self.config.insert(0, {
                        "proxy_id": self.new_id(),
                        "site_name": self.site_name,
                        "proxy_dir": "/",
                        "proxy_port": url.port,
                        "status": 1,
                        "rewrite": {
                            "status": False,
                            "src_path": "",
                            "target_path": "",
                        },
                        "add_headers": [],
                    })
                    need_save = True
        if need_save:
            self.save_config()

        return self.config

    def get_proxy_list_by_apache(self) -> List[Dict[str, Any]]:
        """Reconcile the stored proxy list against the live apache vhost file.

        Sets each entry's status from the presence of its ProxyPass line; if
        no "/" entry exists but the file proxies "/" to a local upstream, that
        upstream is adopted as a new entry and persisted.
        """
        ap_conf = read_file(self._ap_file)
        if not isinstance(ap_conf, str):
            return []

        rep_proxy_pass = r"ProxyPass\s+%s\s+\S+/"
        mian_location_use = False
        for i in self.config:
            if i["proxy_dir"] == "/":
                mian_location_use = True
            rep_l = re.search(rep_proxy_pass % i["proxy_dir"], ap_conf, re.M)
            if rep_l:
                i["status"] = 1
            else:
                i["status"] = 0

        if not mian_location_use:
            # Look for an unmanaged root ProxyPass pointing at a local upstream.
            res_l = re.search(
                r"\s*<IfModule mod_proxy\.c>\s*(.*\n){3,5}\s*"
                r"ProxyPass\s+/\s+(?P<pass>\S+)/\s*(.*\n){1,2}\s*</IfModule>", ap_conf)

            if not res_l:
                return self.config

            url = parse_url(res_l.group("pass"))
            if isinstance(url, Url) and url.hostname in ("127.0.0.1", "localhost", "0.0.0.0"):
                self.config.insert(0, {
                    "proxy_id": self.new_id(),
                    "site_name": self.site_name,
                    "proxy_dir": "/",
                    "proxy_port": url.port,
                    "status": 1,
                    "rewrite": {
                        "status": False,
                        "src_path": "",
                        "target_path": "",
                    },
                    "add_headers": [],
                })
                self.save_config()

        return self.config

    def get_proxy_list(self) -> List[Dict[str, Any]]:
        """Return the reconciled proxy list for the active web server type."""
        if self.ws_type == "nginx":
            return self.get_proxy_list_by_nginx()
        else:
            return self.get_proxy_list_by_apache()
|
||||
|
||||
|
||||
1047
mod/project/java/springboot_parser.py
Normal file
1047
mod/project/java/springboot_parser.py
Normal file
File diff suppressed because it is too large
Load Diff
946
mod/project/java/utils.py
Normal file
946
mod/project/java/utils.py
Normal file
@@ -0,0 +1,946 @@
|
||||
import json
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import zipfile
|
||||
import os
|
||||
import yaml
|
||||
import psutil
|
||||
import platform
|
||||
from xml.etree.ElementTree import Element, ElementTree, parse, XMLParser
|
||||
from typing import Optional, Dict, Tuple, AnyStr, List, Any
|
||||
import threading
|
||||
import itertools
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
|
||||
|
||||
def get_jar_war_config(jar_war_file: str) -> Optional[List[Tuple[str, AnyStr]]]:
    """Extract Spring application config entries from a jar/war archive.

    Scans the archive for entries whose names end with ``application.yaml``
    or ``application.yml`` and returns them as ``(entry_name, raw_bytes)``
    pairs. Returns None when the path does not exist, is not a zip archive,
    or contains no matching entry.
    """
    if not (os.path.exists(jar_war_file) and zipfile.is_zipfile(jar_war_file)):
        return None

    configs: List[Tuple[str, AnyStr]] = []
    with zipfile.ZipFile(jar_war_file, 'r') as archive:
        for entry in archive.namelist():
            # Candidate Spring Boot configuration files.
            if entry.endswith(("application.yaml", "application.yml")):
                configs.append((entry, archive.read(entry)))

    return configs or None
|
||||
|
||||
|
||||
def to_utf8(file_data_list: List[Tuple[str, AnyStr]]) -> List[Tuple[str, str]]:
|
||||
res_list = []
|
||||
for i, data in file_data_list:
|
||||
if isinstance(data, bytes):
|
||||
try:
|
||||
new_data = data.decode("utf-8")
|
||||
except:
|
||||
continue
|
||||
else:
|
||||
res_list.append((i, new_data))
|
||||
return res_list
|
||||
|
||||
|
||||
def parse_application_yaml(conf_data_list: List[Tuple[str, AnyStr]]) -> List[Tuple[str, Dict]]:
    """Parse YAML config payloads, keeping only mapping-typed documents.

    :param conf_data_list: ``(entry_name, yaml_text)`` pairs.
    :return: ``(entry_name, parsed_dict)`` pairs; scalar/list documents are
             skipped.

    Robustness fix: a single malformed YAML entry previously raised out of
    the loop and aborted parsing of all remaining entries; it is now skipped.
    """
    res_list: List[Tuple[str, Dict]] = []
    for name, raw in conf_data_list:
        try:
            doc = yaml.safe_load(raw)
        except Exception:
            # Malformed YAML in one entry must not abort the others.
            continue
        if isinstance(doc, dict):
            res_list.append((name, doc))

    return res_list
|
||||
|
||||
|
||||
# Normalize a user-supplied JDK path to the JDK home directory.
def normalize_jdk_path(jdk_path: str) -> Optional[str]:
    """Canonicalize a JDK location to its home directory.

    Strips trailing ``/java``, ``/bin`` and ``/jre`` components in that
    order, then verifies the result is a directory containing ``bin/java``.
    Returns the JDK home path, or None when validation fails.
    """
    for suffix in ("/java", "/bin", "/jre"):
        if jdk_path.endswith(suffix):
            jdk_path = os.path.dirname(jdk_path)

    if os.path.isdir(jdk_path) and os.path.exists(os.path.join(jdk_path, "bin/java")):
        return jdk_path
    return None
|
||||
|
||||
|
||||
def test_jdk(jdk_path: str) -> bool:
    """Return True when *jdk_path* holds a runnable JDK.

    Runs ``bin/java -version`` (stderr folded into stdout) and checks the
    output mentions "version"; False when the binary is missing or the
    probe produced no version banner.
    """
    java_bin = os.path.join(jdk_path, "bin/java")
    if not os.path.exists(java_bin):
        return False
    out, err = public.ExecShell("{} -version 2>&1".format(java_bin))  # type: str, str
    return "version" in out.lower()
|
||||
|
||||
|
||||
class TomCat:
|
||||
|
||||
def __init__(self, tomcat_path: str):
|
||||
self.path = tomcat_path.rstrip("/") # 移除多余的右"/" 统一管理
|
||||
self._jdk_path: Optional[str] = None
|
||||
self._config_xml: Optional[ElementTree] = None
|
||||
self._bt_tomcat_conf: Optional[dict] = None
|
||||
self._log_file = None
|
||||
self._version = None
|
||||
|
||||
@property
|
||||
def jdk_path(self) -> Optional[str]:
|
||||
p = os.path.join(self.path, "bin/daemon.sh")
|
||||
if not os.path.exists(p):
|
||||
return None
|
||||
|
||||
tmp_data = public.readFile(p)
|
||||
if isinstance(tmp_data, str):
|
||||
rep_deemon_sh = re.compile(r"^JAVA_HOME=(?P<path>.*)\n", re.M)
|
||||
re_res_jdk_path = rep_deemon_sh.search(tmp_data)
|
||||
if re_res_jdk_path:
|
||||
self._jdk_path = re_res_jdk_path.group("path").strip()
|
||||
self._jdk_path = normalize_jdk_path(self._jdk_path)
|
||||
return self._jdk_path
|
||||
|
||||
return None
|
||||
|
||||
def version(self) -> Optional[int]:
|
||||
if isinstance(self._version, int):
|
||||
return self._version
|
||||
v_file = os.path.join(self.path, "version.pl")
|
||||
if os.path.isfile(v_file):
|
||||
ver = public.readFile(v_file)
|
||||
if isinstance(ver, str):
|
||||
try:
|
||||
ver_int = int(ver.split(".")[0])
|
||||
self._version = ver_int
|
||||
return self._version
|
||||
except:
|
||||
pass
|
||||
return None
|
||||
|
||||
@property
|
||||
def log_file(self) -> str:
|
||||
if self._log_file is not None:
|
||||
return self._log_file
|
||||
default_file = os.path.join(self.path, "logs/catalina-daemon.out")
|
||||
target_sh = os.path.join(self.path, "bin/daemon.sh")
|
||||
file_data = public.readFile(target_sh)
|
||||
conf_path = os.path.join(self.path, "conf/logpath.conf")
|
||||
if not isinstance(file_data, str):
|
||||
return default_file
|
||||
rep = re.compile(r'''\n\s?test ?"\.\$CATALINA_OUT" ?= ?\. +&& +CATALINA_OUT=['"](?P<path>\S+)['"]''')
|
||||
if rep.search(file_data):
|
||||
self._log_file = rep.search(file_data).group("path")
|
||||
public.writeFile(conf_path, os.path.dirname(self._log_file))
|
||||
return self._log_file
|
||||
|
||||
if os.path.isfile(conf_path):
|
||||
path = public.readFile(conf_path)
|
||||
else:
|
||||
return default_file
|
||||
log_file = os.path.join(path, "catalina-daemon.out")
|
||||
if os.path.exists(log_file):
|
||||
self._log_file = log_file
|
||||
return self._log_file
|
||||
|
||||
ver = self.version()
|
||||
if ver:
|
||||
log_file = os.path.join(path, "catalina-daemon-{}.out".format(ver))
|
||||
return log_file
|
||||
else:
|
||||
return os.path.join(path, "catalina-daemon.out")
|
||||
|
||||
@property
|
||||
def bt_tomcat_conf(self) -> Optional[dict]:
|
||||
if self._bt_tomcat_conf is None:
|
||||
p = os.path.join(self.path, "bt_tomcat.json")
|
||||
if not os.path.exists(p):
|
||||
self._bt_tomcat_conf = {}
|
||||
return self._bt_tomcat_conf
|
||||
try:
|
||||
self._bt_tomcat_conf = json.loads(public.readFile(p))
|
||||
except:
|
||||
self._bt_tomcat_conf = {}
|
||||
return self._bt_tomcat_conf
|
||||
|
||||
def save_bt_tomcat_conf(self):
|
||||
if self._bt_tomcat_conf is not None:
|
||||
p = os.path.join(self.path, "bt_tomcat.json")
|
||||
public.writeFile(p, json.dumps(self._bt_tomcat_conf))
|
||||
|
||||
def change_log_path(self, log_path: str, prefix: str = "") -> bool:
|
||||
log_path = log_path.rstrip("/")
|
||||
target_sh = os.path.join(self.path, "bin/daemon.sh")
|
||||
if not os.path.exists(target_sh):
|
||||
return False
|
||||
file_data = public.readFile(target_sh)
|
||||
if not isinstance(file_data, str):
|
||||
return False
|
||||
rep = re.compile(r'''\n ?test ?"\.\$CATALINA_OUT" ?= ?\. && {0,3}CATALINA_OUT="[^\n]*"[^\n]*\n''')
|
||||
if prefix and not prefix.startswith("-"):
|
||||
prefix = "-{}".format(prefix)
|
||||
repl = '\ntest ".$CATALINA_OUT" = . && CATALINA_OUT="{}/catalina-daemon{}.out"\n'.format(log_path, prefix)
|
||||
file_data = rep.sub(repl, file_data)
|
||||
public.writeFile(target_sh, file_data)
|
||||
conf_path = os.path.join(self.path, "conf/logpath.conf")
|
||||
public.WriteFile(conf_path, log_path)
|
||||
return True
|
||||
|
||||
@property
|
||||
def config_xml(self) -> Optional[ElementTree]:
|
||||
if self._config_xml is None:
|
||||
p = os.path.join(self.path, "conf/server.xml")
|
||||
if not os.path.exists(p):
|
||||
return None
|
||||
|
||||
self._config_xml = parse(p, parser=XMLParser(encoding="utf-8"))
|
||||
return self._config_xml
|
||||
|
||||
def set_port(self, port: int) -> bool:
|
||||
if self.config_xml is None:
|
||||
return False
|
||||
conf_elem = self.config_xml.findall("Service/Connector")
|
||||
if conf_elem is None:
|
||||
return False
|
||||
for i in conf_elem:
|
||||
if 'protocol' in i.attrib and 'port' in i.attrib:
|
||||
if i.attrib['protocol'] == 'HTTP/1.1':
|
||||
i.attrib['port'] = str(port)
|
||||
return True
|
||||
return False
|
||||
|
||||
def pid(self) -> Optional[int]:
|
||||
pid_file = os.path.join(self.path, 'logs/catalina-daemon.pid')
|
||||
if os.path.exists(pid_file):
|
||||
# 使用psutil判断进程是否在运行
|
||||
try:
|
||||
pid = public.readFile(pid_file)
|
||||
return int(pid)
|
||||
except:
|
||||
return None
|
||||
return None
|
||||
|
||||
def port(self) -> int:
|
||||
if self.config_xml is None:
|
||||
return 0
|
||||
for i in self.config_xml.findall("Service/Connector"):
|
||||
if i.attrib.get("protocol") == "HTTP/1.1" and 'port' in i.attrib:
|
||||
return int(i.attrib.get("port"))
|
||||
return 8080
|
||||
|
||||
@property
|
||||
def installed(self) -> bool:
|
||||
start_path = os.path.join(self.path, 'bin/daemon.sh')
|
||||
conf_path = os.path.join(self.path, 'conf/server.xml')
|
||||
if not os.path.exists(self.path):
|
||||
return False
|
||||
if not os.path.isfile(start_path):
|
||||
return False
|
||||
if not os.path.isfile(conf_path):
|
||||
return False
|
||||
return True
|
||||
|
||||
def running(self) -> bool:
|
||||
pid = self.pid()
|
||||
if pid:
|
||||
try:
|
||||
p = psutil.Process(pid)
|
||||
return p.is_running()
|
||||
except:
|
||||
return False
|
||||
return False
|
||||
|
||||
def status(self) -> dict:
|
||||
return {
|
||||
"status": os.path.exists(self.path) and os.path.exists(os.path.join(self.path, "bin/daemon.sh")),
|
||||
"jdk_path": self.jdk_path,
|
||||
"path": self.path,
|
||||
"running": self.running(),
|
||||
"port": self.port(),
|
||||
"stype": "built" if os.path.exists(os.path.join(self.path, "conf/server.xml")) else "uninstall"
|
||||
}
|
||||
|
||||
def save_config_xml(self) -> bool:
    """Pretty-print the in-memory server.xml tree back to conf/server.xml.

    Returns False when no configuration tree is loaded, True after writing.
    """
    if self.config_xml is None:
        return False
    p = os.path.join(self.path, "conf/server.xml")

    def _indent(elem: Element, level=0):
        # Classic "effbot" in-place indenter: rewrites each element's
        # text/tail so the serialized XML is human readable.
        i = "\n" + level * " "
        if len(elem):
            if not elem.text or not elem.text.strip():
                elem.text = i + " "
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
            for elem in elem:
                _indent(elem, level + 1)
            # NOTE: `elem` now names the LAST child (deliberate loop-variable
            # reuse in this idiom) — its tail closes the parent's indent.
            if not elem.tail or not elem.tail.strip():
                elem.tail = i
        else:
            if level and (not elem.tail or not elem.tail.strip()):
                elem.tail = i

    _indent(self.config_xml.getroot())
    self.config_xml.write(p, encoding="utf-8", xml_declaration=True)
    return True
def host_by_name(self, name: str) -> Optional[Element]:
    """Locate the <Host> with the given name attribute under the first Engine."""
    if self.config_xml is None:
        return None
    engines = self.config_xml.findall("Service/Engine")
    if not engines:
        return None
    matches = (child for child in engines[0]
               if child.tag == "Host" and child.attrib.get("name", None) == name)
    return next(matches, None)
def add_host(self, name: str, path: str) -> bool:
    """Append a <Host> (containing one <Context>) to the first Engine.

    *name* is the virtual host name; *path* is either a directory (used
    directly as appBase) or a file — for a ``.war`` its basename becomes
    the Context path.  Creates *path* on disk when missing.  Returns False
    when no config is loaded, the host already exists, or no Engine is
    present.  Only mutates the in-memory tree; call save_config_xml() to
    persist.
    """
    if self.config_xml is None:
        return False
    if not os.path.exists(path):
        os.makedirs(path)
    if self.host_by_name(name):
        return False
    engine = self.config_xml.findall("Service/Engine")
    if not engine:
        return False
    path_name = ""
    if os.path.isfile(path):
        app_base = os.path.dirname(path)
        if path.endswith(".war"):
            path_name = os.path.basename(path).rsplit(".", 1)[0]
        # NOTE(review): for an existing non-.war file, path_name stays ""
        # and the Context maps to the root path — confirm intended.
    else:
        app_base = path

    host = Element("Host", attrib={
        "appBase": app_base,
        "autoDeploy": "true",
        "name": name,
        "unpackWARs": "true",
        "xmlNamespaceAware": "false",
        "xmlValidation": "false",
    })

    context = Element("Context", attrib={
        "docBase": path,
        "path": path_name,
        "reloadable": "true",
        "crossContext": "true",
    })
    host.append(context)

    engine[0].append(host)
    return True
def set_host_path_by_name(self, name: str, path: str) -> bool:
    """Repoint the <Context docBase> of the named <Host>; True on success."""
    if self.config_xml is None:
        return False
    for host_elem in self.config_xml.findall("Service/Engine/Host"):
        if host_elem.attrib.get("name", None) != name:
            continue
        for child in host_elem:
            if child.tag == "Context":
                child.attrib["docBase"] = path
                return True
    # No matching host / no Context inside it.
    return False
def remove_host(self, name: str) -> bool:
    """Delete the named <Host> from the first Engine; True when removed."""
    if self.config_xml is None:
        return False
    target_host = self.host_by_name(name)
    if not target_host:
        return False
    engines = self.config_xml.findall("Service/Engine")
    if not engines:
        return False
    engines[0].remove(target_host)
    return True
def mutil_remove_host(self, name_list: List[str]) -> bool:
    """Remove several <Host> entries by name.

    Bug fix: the original returned False unconditionally, even after the
    hosts had been removed, so callers could never observe success.  It
    now returns True once every name has been processed (names that do
    not exist are silently skipped by remove_host) and False only when no
    configuration tree is loaded.
    """
    if self.config_xml is None:
        return False
    for name in name_list:
        self.remove_host(name)
    return True
def start(self, by_user: str = "root") -> bool:
    """Start tomcat via bin/daemon.sh as *by_user*; returns the running state."""
    if not self.running():
        daemon_script = os.path.join(self.path, "bin/daemon.sh")
        if not os.path.isfile(self.log_file):
            # Pre-create the log so chown can hand it to the service user.
            public.ExecShell("touch {}".format(self.log_file))
            public.ExecShell("chown {}:{} {}".format(by_user, by_user, self.log_file))
        public.ExecShell("bash {} start".format(daemon_script), user=by_user)

    return self.running()
def stop(self) -> bool:
    """Stop tomcat via bin/daemon.sh; True when it is no longer running."""
    if self.running():
        daemon_script = os.path.join(self.path, "bin/daemon.sh")
        public.ExecShell("bash {} stop".format(daemon_script))
    return not self.running()
def restart(self, by_user: str = "root") -> bool:
    """Stop (when running) then start tomcat via bin/daemon.sh as *by_user*.

    Returns the running state observed after the start attempt.
    """
    daemon_file = os.path.join(self.path, "bin/daemon.sh")
    if self.running():
        public.ExecShell("bash {} stop".format(daemon_file))
    if not os.path.isfile(self.log_file):
        # Pre-create the log so chown can hand it to the service user.
        public.ExecShell("touch {}".format(self.log_file))
        public.ExecShell("chown {}:{} {}".format(by_user, by_user, self.log_file))
    public.ExecShell("bash {} start".format(daemon_file), user=by_user)
    return self.running()
def replace_jdk(self, jdk_path: str) -> Optional[str]:
    """Switch this tomcat to another JDK: recompile jsvc against it and
    rewrite JAVA_HOME in bin/daemon.sh.

    Returns an error message string on failure, None on success.
    """
    jdk_path = normalize_jdk_path(jdk_path)
    if not jdk_path:
        return "jdk路径错误或无法识别"

    deemon_sh_path = "{}/bin/daemon.sh".format(self.path)
    if not os.path.isfile(deemon_sh_path):
        return 'Tomcat启动文件丢失!'

    deemon_sh_data = public.readFile(deemon_sh_path)
    if not isinstance(deemon_sh_data, str):
        return 'Tomcat启动文件读取失败!'

    # Locate the JAVA_HOME= line inside daemon.sh; bail out early, before
    # touching jsvc, if the script is not in the expected shape.
    rep_deemon_sh = re.compile(r"^JAVA_HOME=(?P<path>.*)\n", re.M)
    re_res_deemon_sh = rep_deemon_sh.search(deemon_sh_data)
    if not re_res_deemon_sh:
        return 'Tomcat启动文件解析失败!'

    # Find the bundled commons-daemon source tree that contains the
    # jsvc build system (bin/commons-daemon-*/unix).
    jsvc_make_path = None
    for i in os.listdir(self.path + "/bin"):
        tmp_dir = "{}/bin/{}".format(self.path, i)
        if i.startswith("commons-daemon") and os.path.isdir(tmp_dir):
            make_path = tmp_dir + "/unix"
            if os.path.isdir(make_path):
                jsvc_make_path = make_path
                break

    if jsvc_make_path is None:
        return 'Jsvc文件丢失!'

    # Rebuild jsvc: keep the old binary as jsvc_back so we can roll back
    # if the compile fails.
    if os.path.isfile(self.path + "/bin/jsvc"):
        os.rename(self.path + "/bin/jsvc", self.path + "/bin/jsvc_back")

    if os.path.isfile(jsvc_make_path + "/jsvc"):
        os.remove(jsvc_make_path + "/jsvc")

    shell_str = r'''
cd {}
make clean
./configure --with-java={}
make
'''.format(jsvc_make_path, jdk_path)
    public.ExecShell(shell_str)
    if os.path.isfile(jsvc_make_path + "/jsvc"):
        os.rename(jsvc_make_path + "/jsvc", self.path + "/bin/jsvc")
        public.ExecShell("chmod +x {}/bin/jsvc".format(self.path))
        os.remove(self.path + "/bin/jsvc_back")
    else:
        # NOTE(review): if there was no pre-existing jsvc, jsvc_back does
        # not exist and this rename raises — confirm that path is possible.
        os.rename(self.path + "/bin/jsvc_back", self.path + "/bin/jsvc")
        return 'Jsvc编译失败!'

    # Splice the new JAVA_HOME line over the matched region of daemon.sh.
    new_deemon_sh_data = deemon_sh_data[:re_res_deemon_sh.start()] + (
        'JAVA_HOME={}\n'.format(jdk_path)
    ) + deemon_sh_data[re_res_deemon_sh.end():]
    public.writeFile(deemon_sh_path, new_deemon_sh_data)
    return None
def reset_tomcat_server_config(self, port: int):
    """Overwrite conf/server.xml with a minimal default configuration.

    The <Server> shutdown port is a freshly picked unused random port
    (first format argument); *port* becomes the HTTP/1.1 Connector port
    (second format argument).
    """
    ret = '''<Server port="{}" shutdown="SHUTDOWN">
<Listener className="org.apache.catalina.startup.VersionLoggerListener" />
<Listener SSLEngine="on" className="org.apache.catalina.core.AprLifecycleListener" />
<Listener className="org.apache.catalina.core.JreMemoryLeakPreventionListener" />
<Listener className="org.apache.catalina.mbeans.GlobalResourcesLifecycleListener" />
<Listener className="org.apache.catalina.core.ThreadLocalLeakPreventionListener" />
<GlobalNamingResources>
<Resource auth="Container" description="User database that can be updated and saved" factory="org.apache.catalina.users.MemoryUserDatabaseFactory" name="UserDatabase" pathname="conf/tomcat-users.xml" type="org.apache.catalina.UserDatabase" />
</GlobalNamingResources>
<Service name="Catalina">
<Connector connectionTimeout="20000" port="{}" protocol="HTTP/1.1" redirectPort="8490" />
<Engine defaultHost="localhost" name="Catalina">
<Realm className="org.apache.catalina.realm.LockOutRealm">
<Realm className="org.apache.catalina.realm.UserDatabaseRealm" resourceName="UserDatabase" />
</Realm>
<Host appBase="webapps" autoDeploy="true" name="localhost" unpackWARs="true">
<Valve className="org.apache.catalina.valves.AccessLogValve" directory="logs" pattern="%h %l %u %t &quot;%r&quot; %s %b" prefix="localhost_access_log" suffix=".txt" />
</Host>
</Engine>
</Service>
</Server>'''.format(create_a_not_used_port(), port)
    public.WriteFile(self.path + '/conf/server.xml', ret)
@staticmethod
|
||||
def _get_os_version() -> str:
|
||||
# 获取Centos
|
||||
if os.path.exists('/usr/bin/yum') and os.path.exists('/etc/yum.conf'):
|
||||
return 'Centos'
|
||||
# 获取Ubuntu
|
||||
if os.path.exists('/usr/bin/apt-get') and os.path.exists('/usr/bin/dpkg'):
|
||||
return 'Ubuntu'
|
||||
return 'Unknown'
|
||||
|
||||
@classmethod
def async_install_tomcat_new(cls, version: str, jdk_path: Optional[str]) -> Optional[str]:
    """Queue a background task that installs Tomcat *version*, optionally
    against a user-supplied JDK.

    Returns an error message string when validation fails, None after the
    task has been queued.
    """
    os_ver = cls._get_os_version()
    if version == "7" and os_ver == 'Ubuntu':
        return '操作系统不支持!'

    if jdk_path:
        jdk_path = normalize_jdk_path(jdk_path)
        if not jdk_path:
            return 'jdk路径错误或无法识别'
        if not test_jdk(jdk_path):
            return '指定的jdk不可用'

    if not jdk_path:
        jdk_path = ''

    # Download the installer script from the panel mirror and run it.
    shell_str = (
        'rm -rf /tmp/1.sh && '
        '/usr/local/curl/bin/curl -o /tmp/1.sh %s/install/src/webserver/shell/new_jdk.sh && '
        'bash /tmp/1.sh install %s %s'
    ) % (public.get_url(), version, jdk_path)

    if not os.path.exists("/tmp/panelTask.pl"):  # task queue idle: clear the shared log
        public.writeFile('/tmp/panelExec.log', '')
    soft_name = "Java项目Tomcat-" + version
    task_id = public.M('tasks').add(
        'id,name,type,status,addtime,execstr',
        (None, '安装[{}]'.format(soft_name), 'execshell', '0', time.strftime('%Y-%m-%d %H:%M:%S'), shell_str))

    cls._create_install_wait_msg(task_id, version)
@staticmethod
def _create_install_wait_msg(task_id: int, version: str):
    """Register a 'waiting to install Tomcat' entry with the panel message box.

    Returns the message-manager result, or None when collection failed.
    """
    from panel_msg.msg_file import message_mgr

    # The message box tails this shared log to show install progress.
    file_path = "/tmp/panelExec.log"
    if not os.path.exists(file_path):
        public.writeFile(file_path, "")

    soft_name = "Java项目Tomcat-" + version
    data = {
        "soft_name": soft_name,
        "install_status": "等待安装" + soft_name,
        "file_name": file_path,
        "self_type": "soft_install",
        "status": 0,
        "task_id": task_id
    }
    title = "等待安装" + soft_name
    res = message_mgr.collect_message(title, ["Java环境管理", soft_name], data)
    if isinstance(res, str):
        # A string result is the message manager's error channel.
        public.WriteLog("消息盒子", "安装信息收集失败")
        return None
    return res
|
||||
|
||||
def bt_tomcat(ver: int) -> Optional["TomCat"]:
    """Built-in panel TomCat instance for versions 7-10 (int or str); else None."""
    supported = (7, 8, 9, 10)
    if ver not in supported and ver not in ("7", "8", "9", "10"):
        return None
    return TomCat(tomcat_path="/usr/local/bttomcat/tomcat%d" % int(ver))
||||
def site_tomcat(site_name: str) -> Optional["TomCat"]:
    """Per-site TomCat under /www/server/bt_tomcat_web, or None when absent."""
    base_dir = os.path.join("/www/server/bt_tomcat_web", site_name)
    if not os.path.exists(base_dir):
        return None
    return TomCat(tomcat_path=base_dir)
||||
class JDKManager:
    """Manage panel JDK installs under /www/server/java.

    Tracks three JDK sources: the downloadable version catalogue (per CPU
    architecture), panel-installed copies on disk, and user-registered
    custom paths persisted in get_local_jdk.json.
    """

    def __init__(self):
        # Lazily-populated caches backing the two list properties below.
        self._versions_list: Optional[List[str]] = None
        self._custom_jdk_list: Optional[List[str]] = None
        self._jdk_path = "/www/server/java"
        self._custom_file = "/www/server/panel/data/get_local_jdk.json"
        if not os.path.exists(self._jdk_path):
            os.makedirs(self._jdk_path, 0o755)

    @property
    def versions_list(self) -> List[str]:
        """Installable JDK version names for this CPU architecture.

        Refreshes the catalogue file from the panel mirror at most once an
        hour (in a background thread) and falls back to a built-in snapshot
        when the cached JSON is missing or unparsable.
        """
        if self._versions_list:
            return self._versions_list
        jdk_json_file = '/www/server/panel/data/jdk.json'
        tip_file = '/www/server/panel/data/jdk.json.pl'
        try:
            last_refresh = int(public.readFile(tip_file))
        except (ValueError, TypeError):
            # Fix: readFile() returns False/None when the file is missing,
            # and int(None) raises TypeError, which was previously uncaught.
            last_refresh = 0
        versions_data = public.readFile(jdk_json_file)
        if time.time() - last_refresh > 3600:
            public.run_thread(public.downloadFile, ('{}/src/jdk/jdk.json'.format(public.get_url()), jdk_json_file))
            public.writeFile(tip_file, str(int(time.time())))

        try:
            versions = json.loads(versions_data)
        except Exception:
            # Catalogue unreadable (also covers readFile returning False):
            # use the built-in snapshot.
            versions = {
                "x64": [
                    "jdk1.7.0_80", "jdk1.8.0_371", "jdk-9.0.4", "jdk-10.0.2",
                    "jdk-11.0.19", "jdk-12.0.2", "jdk-13.0.2", "jdk-14.0.2",
                    "jdk-15.0.2", "jdk-16.0.2", "jdk-17.0.8", "jdk-18.0.2.1",
                    "jdk-19.0.2", "jdk-20.0.2"
                ],
                "arm": [
                    "jdk1.8.0_371", "jdk-11.0.19", "jdk-15.0.2", "jdk-16.0.2",
                    "jdk-17.0.8", "jdk-18.0.2.1", "jdk-19.0.2", "jdk-20.0.2"
                ],
                "loongarch64": [
                    "jdk-8.1.18", "jdk-11.0.22", "jdk-17.0.10", "jdk-21.0.2"
                ]
            }
        arch = platform.machine()
        if arch == "aarch64" or 'arm' in arch:
            arch = "arm"
        elif arch == "x86_64":
            arch = "x64"
        # "loongarch64" already matches its catalogue key unchanged.
        self._versions_list = versions.get(arch, [])
        return self._versions_list

    def jdk_list_path(self) -> List[str]:
        """Absolute install directories for every catalogued JDK version."""
        return ["{}/{}".format(self._jdk_path, i) for i in self.versions_list]

    @property
    def custom_jdk_list(self) -> List[str]:
        """User-registered JDK paths loaded from get_local_jdk.json (cached)."""
        if self._custom_jdk_list:
            return self._custom_jdk_list

        try:
            self._custom_jdk_list = json.loads(public.readFile(self._custom_file))
        except:
            self._custom_jdk_list = []

        if not isinstance(self._custom_jdk_list, list):
            self._custom_jdk_list = []

        return self._custom_jdk_list

    def add_custom_jdk(self, jdk_path: str) -> Optional[str]:
        """Register a custom JDK path; returns an error message or None."""
        jdk_path = normalize_jdk_path(jdk_path)
        if not jdk_path:
            return "jdk路径错误或无法识别"

        # Fix: the original wrote `jdk_path in self.jdk_list_path`, testing
        # membership in the bound-method object itself, which raises
        # TypeError at runtime — the method must be called.
        if jdk_path in self.custom_jdk_list or jdk_path in self.jdk_list_path():
            return

        self.custom_jdk_list.append(jdk_path)
        public.writeFile(self._custom_file, json.dumps(self.custom_jdk_list))

    def remove_custom_jdk(self, jdk_path: str) -> None:
        """Unregister a custom JDK path (no-op when not registered)."""
        if jdk_path not in self.custom_jdk_list:
            return

        self.custom_jdk_list.remove(jdk_path)
        public.writeFile(self._custom_file, json.dumps(self.custom_jdk_list))

    def async_install_jdk(self, version: str) -> None:
        """Queue a JDK install as a panel background task."""
        sh_str = "cd /www/server/panel/install && /bin/bash install_soft.sh {} install {} {}".format(0, 'jdk', version)

        if not os.path.exists("/tmp/panelTask.pl"):  # task queue idle: clear the shared log
            public.writeFile('/tmp/panelExec.log', '')
        task_id = public.M('tasks').add(
            'id,name,type,status,addtime,execstr',
            (None, '安装[{}]'.format(version), 'execshell', '0', time.strftime('%Y-%m-%d %H:%M:%S'), sh_str))

        self._create_install_wait_msg(task_id, version)

    @staticmethod
    def _create_install_wait_msg(task_id: int, version: str):
        """Register a 'waiting to install' entry with the panel message box."""
        from panel_msg.msg_file import message_mgr

        file_path = "/tmp/panelExec.log"
        if not os.path.exists(file_path):
            public.writeFile(file_path, "")

        data = {
            "soft_name": version,
            "install_status": "等待安装" + version,
            "file_name": file_path,
            "self_type": "soft_install",
            "status": 0,
            "task_id": task_id
        }
        title = "等待安装" + version
        res = message_mgr.collect_message(title, ["Java环境管理", version], data)
        if isinstance(res, str):
            # A string result is the message manager's error channel.
            public.WriteLog("消息盒子", "安装信息收集失败")
            return None
        return res

    def install_jdk(self, version: str) -> Optional[str]:
        """Install a catalogued JDK in a background thread.

        Returns an error message string, or None when the worker started.
        """
        if version not in self.versions_list:
            return "Version does not exist and cannot be installed"

        if os.path.exists(self._jdk_path + "/" + version):
            return "已存在的版本, 无法再次安装,如需再次安装请先卸载"

        if os.path.exists("{}/{}.pl".format(self._jdk_path, version)):
            return "安装任务进行中,请勿再次添加"

        # Marker file acts as the "install in progress" lock; the worker
        # removes it when done.
        public.writeFile("{}/{}.pl".format(self._jdk_path, version), "installing")
        t = threading.Thread(target=self._install_jdk, args=(version,))
        t.start()
        return None

    def _install_jdk(self, version: str) -> None:
        """Worker: run install/jdk.sh for *version*, then clear the marker."""
        try:
            log_file = "{}/{}_install.log".format(self._jdk_path, version)
            if not os.path.exists('/www/server/panel/install/jdk.sh'):
                public.ExecShell('wget -O /www/server/panel/install/jdk.sh ' + public.get_url() + '/install/0/jdk.sh')
            # Fix: the original wrote `2>&1 > log`, which points stderr at
            # the OLD stdout so errors never reach the log; `> log 2>&1`
            # captures both streams in the log file.
            public.ExecShell('bash /www/server/panel/install/jdk.sh install {} > {} 2>&1'.format(version, log_file))
        except:
            pass
        public.ExecShell('rm -rf /www/server/java/{}.*'.format(version))

    def uninstall_jdk(self, version: str) -> Optional[str]:
        """Delete an installed JDK directory; error message or None."""
        if not os.path.exists(self._jdk_path + "/" + version):
            return "没有安装指定的版本,无法卸载"
        # NOTE(review): `version` is interpolated into a shell command —
        # callers must only pass catalogue names (no shell metacharacters).
        public.ExecShell('rm -rf /www/server/java/{}*'.format(version))
        return

    @staticmethod
    def set_jdk_env(jdk_path) -> Optional[str]:
        """Set (or clear, when jdk_path == "") JAVA_HOME/PATH in /etc/profile.

        Returns an error message string on failure, None on success.
        """
        if jdk_path != "":
            jdk_path = normalize_jdk_path(jdk_path)
            if not jdk_path:
                return "jdk路径错误或无法识别"

        # Write into the global shell profile.
        profile_path = '/etc/profile'
        java_home_line = "export JAVA_HOME={}".format(jdk_path) if jdk_path else ""
        path_line = "export PATH=$JAVA_HOME/bin:$PATH"
        profile_data = public.readFile(profile_path)
        if not isinstance(profile_data, str):
            return "无法读取环境变量文件"

        rep_java_home = re.compile(r"export\s+JAVA_HOME=.*\n")
        rep_path = re.compile(r"export\s+PATH=\$JAVA_HOME/bin:\$PATH\s*?\n")
        if rep_java_home.search(profile_data):
            # Fix: the pattern consumes the trailing newline, so a non-empty
            # replacement must end with "\n" too — otherwise the following
            # line gets glued onto the JAVA_HOME export.  The empty
            # replacement (clearing) correctly deletes the whole line.
            profile_data = rep_java_home.sub(java_home_line + "\n" if jdk_path else "", profile_data)
        elif jdk_path:
            profile_data = profile_data + "\n" + java_home_line

        if rep_path.search(profile_data):
            if not jdk_path:
                profile_data = rep_path.sub("", profile_data)
        elif jdk_path:
            profile_data = profile_data + "\n" + path_line

        try:
            with open(profile_path, "w") as f:
                f.write(profile_data)
        except PermissionError:
            return "无法修改环境变量,可能是系统加固插件拒绝了操作"
        except:
            return "修改失败"

        return

    @staticmethod
    def get_env_jdk() -> Optional[str]:
        """Return the JAVA_HOME currently exported in /etc/profile, if any."""
        profile_data = public.readFile('/etc/profile')
        if not isinstance(profile_data, str):
            return None
        current_java_home = None
        for line in profile_data.split("\n"):
            if 'export JAVA_HOME=' in line:
                # Last matching line wins; surrounding quotes are stripped.
                current_java_home = line.split('=')[1].strip().replace('"', '').replace("'", "")

        return current_java_home
|
||||
def jps() -> List[int]:
    """PIDs of running JVMs, read from /tmp/hsperfdata_* (what `jps` reads)."""
    jvm_pids = []
    for entry in os.listdir("/tmp"):
        if not entry.startswith("hsperfdata_"):
            continue
        for name in os.listdir("/tmp/" + entry):
            if name.isdecimal():
                jvm_pids.append(int(name))
    return jvm_pids
|
||||
def js_value_to_bool(value: Any) -> bool:
    """Coerce a JS-ish value to bool: strings 'true'/'yes'/'1' (any case) are True."""
    if isinstance(value, bool):
        return value
    if isinstance(value, str):
        return value.lower() in {"true", "yes", "1"}
    return bool(value)
|
||||
def check_port_with_net_connections(port: int) -> bool:
    """True when no socket is LISTENing on *port*; also True when psutil fails."""
    try:
        for conn in psutil.net_connections():
            if conn.status != 'LISTEN':
                continue
            if conn.laddr.port == port:
                return False
    except:
        # Best-effort, as in the original: any failure counts as "free".
        pass
    return True
|
||||
def check_port(port) -> bool:
    """
    Return False when the port is unusable: non-numeric, out of range,
    already assigned to a panel project, or currently LISTENed on.
    (返回false表示端口不可用)
    """
    try:
        port = int(port)
    except (TypeError, ValueError):
        # Robustness fix: a non-numeric value previously raised out of the
        # function; an unusable value should simply report False.
        return False
    # Fix: the original range test (`0 < port < 65535`) rejected 65535,
    # which is a valid TCP port.
    if not 0 < port <= 65535:
        return False
    project_list = public.M('sites').field('name,path,project_config').select()
    for project_find in project_list:
        try:
            project_config = json.loads(project_find['project_config'])
        except (json.JSONDecodeError, TypeError):
            # Fix: project_config may be NULL in the DB; json.loads(None)
            # raises TypeError, which previously escaped uncaught.
            continue
        if not isinstance(project_config, dict) or 'port' not in project_config:
            continue
        if int(project_config['port']) == port:
            return False

    try:
        for conn in psutil.net_connections():
            if conn.status == 'LISTEN' and conn.laddr.port == port:
                return False
    except:
        pass
    return True
|
||||
def pass_dir_for_user(path_dir: str, user: str):
    """
    Grant *user* traverse (execute/other +x) permission on *path_dir* and
    every ancestor directory the user does not own.
    (给某个用户,对应目录的执行权限)

    No-op when the path is not a directory, the user cannot be resolved,
    or the user is root.
    """
    import stat
    if not os.path.isdir(path_dir):
        return
    try:
        import pwd
        uid_data = pwd.getpwnam(user)
        uid = uid_data.pw_uid
        gid = uid_data.pw_gid
    except:
        return

    if uid == 0:
        # root can traverse anything already.
        return

    # Fix: the original tested `path_dir[:-1] == "/"` (everything BUT the
    # last character), so a trailing slash was never stripped and
    # os.path.dirname() could stall on paths like "/some/dir/".
    if path_dir.endswith("/") and path_dir != "/":
        path_dir = path_dir[:-1]

    while path_dir != "/":
        path_dir_stat = os.stat(path_dir)
        if path_dir_stat.st_uid != uid or path_dir_stat.st_gid != gid:
            old_mod = stat.S_IMODE(path_dir_stat.st_mode)
            if not old_mod & 1:
                # Add the "other execute" bit so the user can traverse.
                os.chmod(path_dir, old_mod + 1)
        path_dir = os.path.dirname(path_dir)
|
||||
def create_a_not_used_port() -> int:
    """
    Pick a random port in [2000, 65535] that nothing is currently
    LISTENing on. (生成一个可用的端口)
    """
    import random
    candidate = random.randint(2000, 65535)
    while not check_port_with_net_connections(candidate):
        candidate = random.randint(2000, 65535)
    return candidate
||||
# 记录项目是通过用户停止的
|
||||
def stop_by_user(project_id):
    """Record in project_stop.json that *project_id* was stopped by the user."""
    file_path = "{}/data/push/tips/project_stop.json".format(public.get_panel_path())
    if not os.path.exists(file_path):
        data = {}
    else:
        data_content = public.readFile(file_path)
        try:
            data = json.loads(data_content)
        except (json.JSONDecodeError, TypeError):
            # Fix: readFile() may return False/None on failure, and
            # json.loads(None) raises TypeError, previously uncaught.
            data = {}
        if not isinstance(data, dict):
            # Defend against a corrupted file holding a non-object.
            data = {}
    data[str(project_id)] = True
    public.writeFile(file_path, json.dumps(data))
|
||||
|
||||
# 记录项目是通过用户操作启动的
|
||||
def start_by_user(project_id):
    """Record in project_stop.json that *project_id* was started by the user."""
    file_path = "{}/data/push/tips/project_stop.json".format(public.get_panel_path())
    if not os.path.exists(file_path):
        data = {}
    else:
        data_content = public.readFile(file_path)
        try:
            data = json.loads(data_content)
        except (json.JSONDecodeError, TypeError):
            # Fix: readFile() may return False/None on failure, and
            # json.loads(None) raises TypeError, previously uncaught.
            data = {}
        if not isinstance(data, dict):
            # Defend against a corrupted file holding a non-object.
            data = {}
    data[str(project_id)] = False
    public.writeFile(file_path, json.dumps(data))
|
||||
|
||||
def is_stop_by_user(project_id):
    """True when the project's last stop was an explicit user action."""
    file_path = "{}/data/push/tips/project_stop.json".format(public.get_panel_path())
    if not os.path.exists(file_path):
        return False
    try:
        data = json.loads(public.readFile(file_path))
    except (json.JSONDecodeError, TypeError):
        # Fix: readFile() may return False/None on failure, and
        # json.loads(None) raises TypeError, previously uncaught.
        return False
    if not isinstance(data, dict):
        return False
    return data.get(str(project_id), False)
|
||||
# # 内置项目复制Tomcat
|
||||
# def check_and_copy_tomcat(version: int):
|
||||
# old_path = "/usr/local/bttomcat/tomcat_bak%d"
|
||||
# new_path = "/usr/local/bt_mod_tomcat/tomcat%d"
|
||||
# if not os.path.exists("/usr/local/bt_mod_tomcat"):
|
||||
# os.makedirs("/usr/local/bt_mod_tomcat", 0o755)
|
||||
#
|
||||
# src_path = old_path % version
|
||||
# if not os.path.exists(old_path % version) or not os.path.isfile(src_path + '/conf/server.xml'):
|
||||
# return
|
||||
# if os.path.exists(new_path % version):
|
||||
# return
|
||||
# else:
|
||||
# os.makedirs(new_path % version)
|
||||
#
|
||||
# public.ExecShell('cp -r %s/* %s ' % (src_path, new_path % version,))
|
||||
# t = bt_tomcat(version)
|
||||
# if t:
|
||||
# t.reset_tomcat_server_config(8330 + version - 6)
|
||||
|
||||
|
||||
# def tomcat_install_status() -> List[dict]:
|
||||
# res_list = []
|
||||
# install_path = "/usr/local/bttomcat/tomcat_bak%d"
|
||||
# for i in range(7, 11):
|
||||
# src_path = install_path % i
|
||||
# start_path = src_path + '/bin/daemon.sh'
|
||||
# conf_path = src_path + '/conf/server.xml'
|
||||
# if os.path.exists(src_path) and os.path.isfile(start_path) and os.path.isfile(conf_path):
|
||||
# res_list.append({"version": i, "installed": True})
|
||||
# else:
|
||||
# res_list.append({"version": i, "installed": False})
|
||||
# return res_list
|
||||
|
||||
0
mod/project/node/__init__.py
Normal file
0
mod/project/node/__init__.py
Normal file
17
mod/project/node/dbutil/__init__.py
Normal file
17
mod/project/node/dbutil/__init__.py
Normal file
@@ -0,0 +1,17 @@
|
||||
from .load_db import LoadSite, HttpNode, TcpNode, NodeDB
|
||||
from .node_db import Node, ServerNodeDB, ServerMonitorRepo, NodeAPPKey
|
||||
from .file_transfer_db import FileTransfer, FileTransferDB, FileTransferTask
|
||||
# from .executor import Script, ScriptGroup, ExecutorDB, ExecutorLog, ExecutorTask
|
||||
from .node_task_flow import Script, Flow, CommandTask, CommandLog, TransferFile, TransferLog, TaskFlowsDB, \
|
||||
TransferTask, FlowTemplates
|
||||
|
||||
# Initialize every sqlite schema this package owns at import time.
# Best-effort: a failure is logged but must not break importing the panel.
try:
    NodeDB().init_db()
    ServerNodeDB().init_db()
    FileTransferDB().init_db()
    # ExecutorDB().init_db()
    TaskFlowsDB().init_db()
except Exception as e:
    import public
    public.print_error()
||||
481
mod/project/node/dbutil/executor.py
Normal file
481
mod/project/node/dbutil/executor.py
Normal file
@@ -0,0 +1,481 @@
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Optional, List, Dict, Tuple, Any, Union, Type, Generic, TypeVar, TextIO
|
||||
import sqlite3
|
||||
import json
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
import db
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
|
||||
@dataclass
class Script:
    """Row model for the `scripts` table."""
    name: str
    script_type: str
    content: str
    id: Optional[int] = None
    description: Optional[str] = None
    group_id: int = 0
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None

    @staticmethod
    def check(data: Dict[str, Any]) -> str:
        """Validate raw request data; return an error message, or "" when valid."""
        if not data.get("script_type"):
            return "Script type cannot be empty"
        if data["script_type"] not in ("python", "shell"):
            return "Script type error, please choose Python or Shell"
        if not data.get("content"):
            return "Script content cannot be empty"
        if not data.get("name"):
            return "Script name cannot be empty"
        return ""

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'Script':
        """Build a Script from a DB row / request dict (fields coerced)."""
        def _parse_dt(key):
            raw = data.get(key, None)
            return datetime.fromisoformat(raw) if raw else None

        return cls(
            id=int(data['id']) if data.get('id', None) else None,
            name=str(data['name']),
            script_type=str(data['script_type']),
            content=str(data['content']),
            description=str(data['description']) if data.get('description', None) else None,
            group_id=int(data['group_id']) if data.get('group_id', None) else 0,
            created_at=_parse_dt('created_at'),
            updated_at=_parse_dt('updated_at'),
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (datetimes as ISO strings)."""
        row = {
            'id': self.id,
            'name': self.name,
            'script_type': self.script_type,
            'content': self.content,
            'description': self.description,
            'group_id': self.group_id,
        }
        row['created_at'] = self.created_at.isoformat() if self.created_at else None
        row['updated_at'] = self.updated_at.isoformat() if self.updated_at else None
        return row
|
||||
@dataclass
class ScriptGroup:
    """Row model for the `script_groups` table."""
    name: str
    id: Optional[int] = None
    description: Optional[str] = None
    created_at: Optional[datetime] = None

    @staticmethod
    def check(data: Dict[str, Any]) -> str:
        """Validate raw request data; return an error message, or "" when valid."""
        if not data.get("name"):
            return "Script group name cannot be empty"
        return ""

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ScriptGroup':
        """Build a ScriptGroup from a DB row / request dict."""
        raw_created = data.get('created_at', None)
        return cls(
            id=int(data['id']) if data.get('id', None) else None,
            name=str(data['name']),
            description=str(data['description']) if data.get('description', None) else None,
            created_at=datetime.fromisoformat(raw_created) if raw_created else None,
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (datetime as ISO string)."""
        return {
            'id': self.id,
            'name': self.name,
            'description': self.description,
            'created_at': self.created_at.isoformat() if self.created_at else None,
        }
||||
|
||||
@dataclass
class ExecutorTask:
    """Row model for the `executor_tasks` table."""
    script_id: int
    script_content: str
    script_type: str
    server_ids: str = ""
    id: Optional[int] = None
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
    _elogs: Optional[List["ExecutorLog"]] = None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ExecutorTask':
        """Build an ExecutorTask from a DB row dict.

        NOTE(review): `server_ids` is not read here even though to_dict()
        emits it, so round-trips lose it — confirm whether that is intended.
        """
        def _parse_dt(key):
            raw = data.get(key, None)
            return datetime.fromisoformat(raw) if raw else None

        return cls(
            id=int(data['id']) if data.get('id', None) else None,
            script_id=int(data['script_id']),
            script_content=str(data['script_content']),
            script_type=str(data['script_type']),
            created_at=_parse_dt('created_at'),
            updated_at=_parse_dt('updated_at'),
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (datetimes as ISO strings)."""
        return {
            'id': self.id,
            'script_id': self.script_id,
            'server_ids': self.server_ids,
            'script_content': self.script_content,
            'script_type': self.script_type,
            'created_at': self.created_at.isoformat() if self.created_at else None,
            'updated_at': self.updated_at.isoformat() if self.updated_at else None
        }

    @property
    def elogs(self) -> List["ExecutorLog"]:
        # Attached execution logs; empty list until explicitly set.
        return [] if self._elogs is None else self._elogs

    @elogs.setter
    def elogs(self, elogs: List["ExecutorLog"]):
        self._elogs = elogs
|
||||
# Directory holding the per-execution log files used by ExecutorLog below.
# Created eagerly at import time; best-effort — a failure (e.g. read-only
# filesystem) is ignored here and surfaces later when a log is written.
_EXECUTOR_LOG_DIR = public.get_panel_path() + "/logs/executor_log/"
try:
    if not os.path.exists(_EXECUTOR_LOG_DIR):
        os.makedirs(_EXECUTOR_LOG_DIR)
except:
    pass
|
||||
@dataclass
class ExecutorLog:
    """Row model for the `executor_logs` table plus its on-disk log file."""
    executor_task_id: int
    server_id: int
    ssh_host: str
    id: Optional[int] = None
    status: int = 0  # 0: running, 1: success, 2: failed, 3: error
    log_name: Optional[str] = None
    created_at: Optional[datetime] = None
    updated_at: Optional[datetime] = None
    _log_fp: Optional[TextIO] = None

    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'ExecutorLog':
        """Build an ExecutorLog from a DB row dict."""
        def _parse_dt(key):
            raw = data.get(key, None)
            return datetime.fromisoformat(raw) if raw else None

        return cls(
            id=int(data['id']) if data.get('id', None) else None,
            executor_task_id=int(data['executor_task_id']),
            server_id=int(data['server_id']),
            ssh_host=str(data['ssh_host']),
            status=int(data['status']) if data.get('status', 0) else 0,
            log_name=str(data['log_name']) if data.get('log_name', None) else None,
            created_at=_parse_dt('created_at'),
            updated_at=_parse_dt('updated_at'),
        )

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a JSON-friendly dict (datetimes as ISO strings)."""
        row = {
            'id': self.id,
            'executor_task_id': self.executor_task_id,
            'server_id': self.server_id,
            'ssh_host': self.ssh_host,
            'status': self.status,
            'log_name': self.log_name,
        }
        row['created_at'] = self.created_at.isoformat() if self.created_at else None
        row['updated_at'] = self.updated_at.isoformat() if self.updated_at else None
        return row

    @property
    def log_file(self):
        # Full path of this execution's log inside the shared log directory.
        return os.path.join(_EXECUTOR_LOG_DIR, self.log_name)

    @property
    def log_fp(self):
        # Lazily-opened handle; reset to None by write_log(is_end_log=True).
        if self._log_fp is None:
            self._log_fp = open(self.log_file, "w+")
        return self._log_fp

    def create_log(self):
        """Create (or truncate) the log file."""
        public.writeFile(self.log_file, "")

    def remove_log(self):
        """Delete the log file when present."""
        if os.path.exists(self.log_file):
            os.remove(self.log_file)

    def get_log(self):
        """Return the log file's contents."""
        return public.readFile(self.log_file)

    def write_log(self, log_data: str, is_end_log=False):
        """Write *log_data* and flush; when *is_end_log*, close the handle."""
        fp = self.log_fp
        fp.write(log_data)
        fp.flush()
        if is_end_log:
            fp.close()
            self._log_fp = None
|
||||
# Union of every row dataclass a _Table subclass may manage; makes _Table
# generic so its CRUD helpers return the concrete row type of the subclass.
_TableType = TypeVar("_TableType", bound=Union[Script, ScriptGroup, ExecutorTask, ExecutorLog])
|
||||
|
||||
|
||||
class _Table(Generic[_TableType]):
    """Typed CRUD wrapper around one sqlite table.

    Subclasses set ``table_name`` and ``data_cls`` (the row dataclass).
    """
    table_name: str = ""
    data_cls: Type[_TableType]

    def __init__(self, db_obj: db.Sql):
        self._db = db_obj

    # Single item: returns the new row id (or an error string).
    # List of items: returns the number of inserted rows (or an error string).
    def create(self,
               data: Union[_TableType, List[_TableType]]) -> Union[int, str]:
        """Insert one row or a batch of rows."""
        items = data if isinstance(data, list) else [data]

        if not items:
            raise ValueError("Data cannot be empty")
        if not isinstance(items[0], self.data_cls):
            raise ValueError("Data type error")

        now = datetime.now().isoformat()

        def _to_row(entry):
            # Strip the id (AUTOINCREMENT) and backfill missing timestamps.
            row = entry.to_dict()
            row.pop("id", None)
            for ts_key in ("created_at", "updated_at"):
                if ts_key in row and row[ts_key] is None:
                    row[ts_key] = now
            return row

        rows = [_to_row(entry) for entry in items]

        if len(rows) == 1:
            try:
                res = self._db.table(self.table_name).insert(rows[0])
            except Exception as e:
                return str(e)
            return res if isinstance(res, int) else str(res)

        try:
            res = self._db.table(self.table_name).batch_insert(rows)
        except Exception as e:
            return str(e)
        return len(items) if isinstance(res, (int, bool)) else str(res)

    def update(self, data: _TableType) -> str:
        """Update one row by id; returns "" on success or an error string."""
        if not isinstance(data, self.data_cls):
            raise ValueError("Data type error")
        row = data.to_dict()
        row.pop('created_at', None)  # creation time is immutable
        if "updated_at" in row:
            row["updated_at"] = datetime.now().isoformat()
        if "id" not in row:
            raise ValueError("The data ID cannot be empty")
        try:
            self._db.table(self.table_name).where("id=?", (row["id"],)).update(row)
            return ""
        except Exception as e:
            return str(e)

    def get_byid(self, data_id: int) -> Optional[_TableType]:
        """Fetch one row by id; None when missing or on DB error."""
        try:
            row = self._db.table(self.table_name).where("id=?", (data_id,)).find()
        except Exception:
            return None
        return self.data_cls.from_dict(row) if row else None

    def delete(self, data_id: Union[int, List[int]]):
        """Delete row(s) by id; returns "" on success or an error string."""
        if isinstance(data_id, list):
            ids = [int(v) for v in data_id]
        elif isinstance(data_id, int):
            ids = [int(data_id)]
        else:
            return "数据id类型错误"
        placeholders = ",".join(["?"] * len(ids))
        try:
            self._db.table(self.table_name).where(
                "id in ({})".format(placeholders), tuple(ids)
            ).delete()
            return ""
        except Exception as e:
            return str(e)

    def query(self, *args) -> List[_TableType]:
        """Select every row matching the where-condition in *args*."""
        try:
            rows = self._db.table(self.table_name).where(*args).select()
        except Exception:
            return []
        if not rows:
            return []
        return [self.data_cls.from_dict(row) for row in rows]

    def query_page(self, *args, page_num: int = 1, limit: int = 10) -> List[_TableType]:
        """Select one page of matching rows, newest (highest id) first."""
        try:
            skip = limit * (page_num - 1)
            rows = (self._db.table(self.table_name)
                    .where(*args)
                    .limit(limit, skip)
                    .order("id DESC")
                    .select())
        except Exception:
            public.print_error()
            return []
        if not rows:
            return []
        return [self.data_cls.from_dict(row) for row in rows]

    def count(self, *args) -> int:
        """Count matching rows; 0 on DB error."""
        try:
            return self._db.table(self.table_name).where(*args).count()
        except Exception:
            return 0

    def find(self, *args) -> Optional[_TableType]:
        """Select a single matching row, or None."""
        try:
            row = self._db.table(self.table_name).where(*args).find()
        except Exception:
            return None
        return self.data_cls.from_dict(row) if row else None
|
||||
|
||||
|
||||
class _ScriptTable(_Table[Script]):
    """Table accessor for the ``scripts`` table."""
    table_name = "scripts"
    data_cls = Script

    def set_group_id(self, group_id: int, *where_args) -> str:
        """Move the scripts matched by *where_args* into group *group_id*.

        Returns "" on success or the error message on failure.
        """
        try:
            # BUGFIX: where() takes the condition string and its parameters as
            # separate positional arguments (see every other call site), so the
            # collected tuple must be unpacked — passing `where_args` as a
            # single argument made the tuple itself the (invalid) condition.
            self._db.table(self.table_name).where(*where_args).update({"group_id": group_id})
        except Exception as e:
            return str(e)
        return ""
|
||||
|
||||
|
||||
class _ScriptGroupTable(_Table[ScriptGroup]):
    """Table accessor for the ``script_groups`` table."""
    table_name = "script_groups"
    data_cls = ScriptGroup
    # Virtual group (id 0) that is never stored in the table; listings prepend
    # it so scripts without an explicit group still belong somewhere.
    default_group = ScriptGroup(
        id=0,
        name="default",
        description="Default grouping, use this grouping when not set",
        created_at=datetime.now(),
    )

    def all_group(self) -> List[ScriptGroup]:
        """Return every script group, with the virtual default group first.

        NOTE(review): when the table is empty (or the query fails) this returns
        [] without the default group — confirm that is intended.
        """
        try:
            rows = self._db.table(self.table_name).select()
        except Exception:
            return []
        if not rows:
            return []
        groups = [self.data_cls.from_dict(row) for row in rows]
        return [self.default_group] + groups
|
||||
|
||||
|
||||
class _ExecutorTaskTable(_Table[ExecutorTask]):
    """Table accessor for the ``executor_tasks`` table."""
    table_name = "executor_tasks"
    data_cls = ExecutorTask

    def query_tasks(self,
                    page=1, size=10, node_id: int = None, script_type: str = None, search: str = None
                    ) -> Tuple[int, List[ExecutorTask]]:
        """Query tasks with optional filters.

        :param page: 1-based page number
        :param size: page size
        :param node_id: restrict to tasks whose server_ids contain this node
        :param script_type: restrict to one script type ("all"/None = no filter)
        :param search: substring matched against the task's script content and
            against script names/descriptions in the scripts table
        :return: (total matching count, one page of tasks, newest first)
        """
        where_args, parms = [], []
        if script_type and script_type != "all":
            where_args.append("script_type=?")
            parms.append(script_type)
        if search:
            search_str = "script_content like ?"
            parms.append("%{}%".format(search))

            # Also match tasks whose referenced script matches by name/description.
            stable = _ScriptTable(self._db)
            data = stable.query("name like ? or description like ?",
                                ("%{}%".format(search), "%{}%".format(search)))
            if data:
                search_str += " or script_id in ({})".format(",".join(["?"] * len(data)))
                where_args.append("(" + search_str + ")")
                # BUGFIX: bind one parameter per "?" placeholder. The original
                # appended the ids as a single nested tuple, leaving the
                # placeholder count and parameter count out of sync.
                parms.extend(item.id for item in data)
            else:
                where_args.append(search_str)

        if node_id:
            where_args.append("server_ids like ?")
            parms.append("%|{}%".format(node_id))

        condition = " AND ".join(where_args)
        params = tuple(parms)

        count = self.count(condition, params)
        return count, self.query_page(condition, params, page_num=page, limit=size)
|
||||
|
||||
|
||||
class _ExecutorLogTable(_Table[ExecutorLog]):
    """Table accessor for the ``executor_logs`` table (per-server run logs)."""
    table_name = "executor_logs"
    data_cls = ExecutorLog
|
||||
|
||||
|
||||
class ExecutorDB:
    """Facade bundling the executor sqlite database and its table accessors.

    Usable as a context manager; the connection is closed on exit.
    """
    _DB_FILE = public.get_panel_path() + "/data/db/executor.db"
    _DB_INIT_FILE = os.path.dirname(__file__) + "/executor.sql"

    def __init__(self):
        sql = db.Sql()
        # db.Sql exposes no public setter for its database file, so the
        # name-mangled attribute is overridden to point at the executor DB.
        sql._Sql__DB_FILE = self._DB_FILE
        self.db = sql
        self.Script = _ScriptTable(self.db)
        self.ScriptGroup = _ScriptGroupTable(self.db)
        self.ExecutorTask = _ExecutorTaskTable(self.db)
        self.ExecutorLog = _ExecutorLogTable(self.db)

    def init_db(self):
        """Create the schema from executor.sql (CREATE IF NOT EXISTS => idempotent)."""
        sql_data = public.readFile(self._DB_INIT_FILE)
        if not os.path.exists(self._DB_FILE) or os.path.getsize(self._DB_FILE) == 0:
            public.writeFile(self._DB_FILE, "")
        import sqlite3
        conn = sqlite3.connect(self._DB_FILE)
        try:
            conn.cursor().executescript(sql_data)
            conn.commit()
        finally:
            # BUGFIX: always release the connection, even if executescript fails.
            conn.close()

    def close(self):
        """Close the underlying db.Sql connection."""
        self.db.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_trackback):
        self.close()
|
||||
60
mod/project/node/dbutil/executor.sql
Normal file
60
mod/project/node/dbutil/executor.sql
Normal file
@@ -0,0 +1,60 @@
|
||||
|
||||
-- Scripts table: reusable script definitions
CREATE TABLE IF NOT EXISTS scripts (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL CHECK(length(name) <= 255),
    script_type TEXT NOT NULL CHECK(length(script_type) <= 255),
    content TEXT NOT NULL,
    description TEXT CHECK(length(description) <= 255),
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    group_id INTEGER NOT NULL DEFAULT 0
);

-- Script groups table
CREATE TABLE IF NOT EXISTS script_groups (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    name TEXT NOT NULL CHECK(length(name) <= 255),
    description TEXT CHECK(length(description) <= 255),
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Executor tasks table
CREATE TABLE IF NOT EXISTS executor_tasks (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    server_ids TEXT NOT NULL,
    script_id INTEGER NOT NULL,
    script_content TEXT NOT NULL,
    script_type TEXT NOT NULL CHECK(length(script_type) <= 255),
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);

-- Executor logs table
CREATE TABLE IF NOT EXISTS executor_logs (
    id INTEGER PRIMARY KEY AUTOINCREMENT,
    executor_task_id INTEGER NOT NULL,
    server_id INTEGER NOT NULL,
    ssh_host TEXT NOT NULL,
    -- status: 0 running, 1 success, 2 failed, 3 error
    status INTEGER NOT NULL DEFAULT 0 CHECK(status IN (0,1,2,3)),
    log_name TEXT CHECK(length(log_name) <= 255),
    created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
    updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP
);


-- Create indexes (each in its own statement to avoid SQLite syntax errors)
-- scripts table indexes
CREATE INDEX IF NOT EXISTS idx_scripts_name ON scripts(name);
CREATE INDEX IF NOT EXISTS idx_scripts_script_type ON scripts(script_type);
CREATE INDEX IF NOT EXISTS idx_scripts_group_id ON scripts(group_id);

-- script_groups table indexes
CREATE INDEX IF NOT EXISTS idx_script_groups_name ON script_groups(name);

-- executor_tasks table indexes
CREATE INDEX IF NOT EXISTS idx_executor_tasks_script_id ON executor_tasks(script_id);

-- executor_logs table indexes
CREATE INDEX IF NOT EXISTS idx_executor_logs_task_server ON executor_logs(executor_task_id, server_id);
CREATE INDEX IF NOT EXISTS idx_executor_logs_status ON executor_logs(status);
|
||||
36
mod/project/node/dbutil/file_transfer.sql
Normal file
36
mod/project/node/dbutil/file_transfer.sql
Normal file
@@ -0,0 +1,36 @@
|
||||
-- Transfer tasks table
CREATE TABLE IF NOT EXISTS transfer_tasks
(
    task_id          INTEGER PRIMARY KEY AUTOINCREMENT,
    source_node      TEXT NOT NULL DEFAULT '{}',  -- {"address":"https:/xxxx", "api_key":"xxxxx", "name":"xxxx"}
    target_node      TEXT NOT NULL DEFAULT '{}',  -- {"address":"https:/xxxx", "api_key":"xxxxx", "name":"xxxx"}
    source_path_list TEXT NOT NULL DEFAULT '[]',  -- paths on the source node, e.g. [{"path":"/www/wwwroot/aaaa", "is_dir":true}]
    target_path      TEXT NOT NULL,               -- path on the target node
    task_action      TEXT NOT NULL,               -- upload/download
    status           TEXT NOT NULL,               -- pending/running/completed/failed
    default_mode     TEXT NOT NULL,               -- default conflict mode: cover = overwrite, ignore = skip, rename = rename
    created_at       TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    started_at       TIMESTAMP,
    completed_at     TIMESTAMP,
    created_by       TEXT NOT NULL,               -- name of the node that created the task
    target_task_id   INTEGER NOT NULL,
    is_source_node   BOOLEAN NOT NULL,            -- whether this node is the sender
    is_target_node   BOOLEAN NOT NULL             -- whether this node is the receiver
);

-- File transfer detail table
CREATE TABLE IF NOT EXISTS file_transfers
(
    transfer_id  INTEGER PRIMARY KEY AUTOINCREMENT,
    task_id      INTEGER NOT NULL,
    src_file     TEXT NOT NULL,       -- source file
    dst_file     TEXT NOT NULL,       -- destination file
    file_size    INTEGER NOT NULL,    -- file size
    is_dir       INTEGER NOT NULL DEFAULT 0,
    status       TEXT NOT NULL,       -- pending/running/completed/failed
    progress     INTEGER DEFAULT 0,   -- 0-100
    message      TEXT NOT NULL DEFAULT '',
    created_at   TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
    started_at   TIMESTAMP,
    completed_at TIMESTAMP
);
|
||||
328
mod/project/node/dbutil/file_transfer_db.py
Normal file
328
mod/project/node/dbutil/file_transfer_db.py
Normal file
@@ -0,0 +1,328 @@
|
||||
import os
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from datetime import datetime
|
||||
from typing import Optional, List, Dict, Tuple
|
||||
import sqlite3
|
||||
import json
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
import db
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
|
||||
@dataclass
class FileTransferTask:
    """One row of ``transfer_tasks``: a file-transfer job between two nodes."""
    task_id: Optional[int] = None
    source_node: dict = field(default_factory=dict)       # {"address": ..., "api_key": ..., "name": ...}
    target_node: dict = field(default_factory=dict)       # same shape as source_node
    source_path_list: list = field(default_factory=list)  # [{"path": "/www/wwwroot/aaaa", "is_dir": True}]
    target_path: str = ""
    task_action: str = ""        # upload/download
    status: str = "pending"      # pending/running/completed/failed
    default_mode: str = "cover"  # conflict handling: cover = overwrite, ignore = skip, rename = rename
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
    created_by: str = ""
    target_task_id: int = 0
    is_source_node: bool = False
    is_target_node: bool = False

    @classmethod
    def from_dict(cls, row: dict) -> 'FileTransferTask':
        """Build a task from a DB row; JSON columns may be raw strings or parsed."""
        def _json_col(key, container):
            # Accept a JSON string, an already-parsed container, or fall back
            # to an empty container of the expected type.
            value = row.get(key, None)
            if isinstance(value, str):
                return json.loads(value)
            if isinstance(value, container):
                return value
            return container()

        def _ts(key):
            value = row.get(key, "")
            return datetime.fromisoformat(value) if value else None

        return cls(
            task_id=row.get("task_id", None),
            source_node=_json_col("source_node", dict),
            target_node=_json_col("target_node", dict),
            source_path_list=_json_col("source_path_list", list),
            target_path=row.get("target_path", ""),
            task_action=row.get("task_action", ""),
            status=row.get("status", ""),
            default_mode=row.get("default_mode", "cover"),
            created_at=_ts("created_at"),
            started_at=_ts("started_at"),
            completed_at=_ts("completed_at"),
            created_by=row.get("created_by", ""),
            target_task_id=row.get("target_task_id", 0),
            is_source_node=row.get("is_source_node", False),
            is_target_node=row.get("is_target_node", False),
        )

    def to_dict(self) -> dict:
        """Serialize to plain types (datetimes become ISO strings or None)."""
        def _iso(value):
            return value.isoformat() if value else None

        return dict(
            task_id=self.task_id,
            source_node=self.source_node,
            target_node=self.target_node,
            source_path_list=self.source_path_list,
            target_path=self.target_path,
            task_action=self.task_action,
            status=self.status,
            default_mode=self.default_mode,
            created_at=_iso(self.created_at),
            started_at=_iso(self.started_at),
            completed_at=_iso(self.completed_at),
            created_by=self.created_by,
            target_task_id=self.target_task_id,
            is_source_node=self.is_source_node,
            is_target_node=self.is_target_node,
        )
|
||||
|
||||
|
||||
@dataclass
class FileTransfer:
    """One row of ``file_transfers``: a single file within a transfer task."""
    transfer_id: Optional[int] = None
    task_id: int = 0
    src_file: str = ""
    dst_file: str = ""
    file_size: int = 0
    is_dir: int = 0
    status: str = ""   # pending/running/completed/failed
    progress: int = 0  # 0-100
    message: str = ""
    created_at: Optional[datetime] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None

    @classmethod
    def from_dict(cls, row: dict) -> 'FileTransfer':
        """Build an instance from a DB row dict."""
        def _ts(key):
            value = row.get(key, "")
            return datetime.fromisoformat(value) if value else None

        return cls(
            transfer_id=row.get("transfer_id", None),
            task_id=row.get("task_id", 0),
            src_file=row.get("src_file", ""),
            dst_file=row.get("dst_file", ""),
            file_size=row.get("file_size", 0),
            is_dir=row.get("is_dir", 0),
            status=row.get("status", ""),
            progress=row.get("progress", 0),
            message=row.get("message", ""),
            created_at=_ts("created_at"),
            started_at=_ts("started_at"),
            completed_at=_ts("completed_at"),
        )

    def to_dict(self) -> dict:
        """Serialize to plain types (datetimes become ISO strings or None)."""
        def _iso(value):
            return value.isoformat() if value else None

        return dict(
            transfer_id=self.transfer_id,
            task_id=self.task_id,
            src_file=self.src_file,
            dst_file=self.dst_file,
            file_size=self.file_size,
            is_dir=self.is_dir,
            status=self.status,
            progress=self.progress,
            message=self.message,
            created_at=_iso(self.created_at),
            started_at=_iso(self.started_at),
            completed_at=_iso(self.completed_at),
        )
|
||||
|
||||
|
||||
# SQLite 操作类
|
||||
class FileTransferDB:
    """CRUD facade over ``transfer_tasks`` and ``file_transfers``.

    Usable as a context manager; the underlying connection is closed on exit.
    """
    _DB_FILE = public.get_panel_path() + "/data/db/node_file_transfer.db"
    _DB_INIT_FILE = os.path.dirname(__file__) + "/file_transfer.sql"

    def __init__(self):
        sql = db.Sql()
        # db.Sql exposes no public setter for its database file, so the
        # name-mangled attribute is overridden to point at the transfer DB.
        sql._Sql__DB_FILE = self._DB_FILE
        self.db = sql

    def init_db(self):
        """Create the schema from file_transfer.sql (CREATE IF NOT EXISTS => idempotent)."""
        sql_data = public.readFile(self._DB_INIT_FILE)
        if not os.path.exists(self._DB_FILE) or os.path.getsize(self._DB_FILE) == 0:
            public.writeFile(self._DB_FILE, "")
        conn = sqlite3.connect(self._DB_FILE)
        try:
            conn.cursor().executescript(sql_data)
            conn.commit()
        finally:
            # BUGFIX: always release the connection, even if executescript fails.
            conn.close()

    def close(self):
        """Close the underlying db.Sql connection."""
        self.db.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_trackback):
        self.close()

    def __del__(self):
        # BUGFIX: __init__ may have failed before self.db existed; guard so
        # garbage collection never raises AttributeError.
        if getattr(self, "db", None) is not None:
            self.close()

    def create_task(self, task: FileTransferTask) -> str:
        """Insert *task*; sets task.task_id on success.

        Returns "" on success or an error message.
        """
        task_data = task.to_dict()
        task_data.pop('task_id', None)     # assigned by AUTOINCREMENT
        task_data.pop('created_at', None)  # assigned by the column default
        # JSON columns are stored as text.
        task_data["source_node"] = json.dumps(task_data["source_node"])
        task_data["target_node"] = json.dumps(task_data["target_node"])
        task_data["source_path_list"] = json.dumps(task_data["source_path_list"])
        try:
            err = self.db.table("transfer_tasks").insert(task_data)
            if isinstance(err, str):
                return err
            elif isinstance(err, int):
                task.task_id = err
            return ""
        except Exception as e:
            return f"Database operation error: {str(e)}"

    def update_task(self, task: FileTransferTask) -> str:
        """Update one transfer_tasks row by task_id; "" on success."""
        task_data = task.to_dict()
        task_data.pop('created_at', None)  # creation time is immutable
        task_data["source_node"] = json.dumps(task_data["source_node"])
        task_data["target_node"] = json.dumps(task_data["target_node"])
        task_data["source_path_list"] = json.dumps(task_data["source_path_list"])
        if not task.task_id:
            return "task_id is required"
        try:
            err = self.db.table("transfer_tasks").where("task_id = ?", task.task_id).update(task_data)
            if isinstance(err, str):
                return err
            return ""
        except Exception as e:
            return f"Database operation error: {str(e)}"

    def get_task(self, task_id: int) -> Tuple[Optional[dict], str]:
        """Fetch one task row by id; returns (row_or_None, error_message)."""
        result = self.db.table("transfer_tasks").where("task_id = ?", task_id).find()
        if isinstance(result, str):
            return None, result
        if self.db.ERR_INFO:
            return None, self.db.ERR_INFO
        return result, ""

    def get_last_task(self) -> Tuple[Optional[dict], str]:
        """Fetch the most recently created task row; (row_or_None, error)."""
        result = self.db.table("transfer_tasks").order("task_id DESC").limit(1).find()
        if isinstance(result, str):
            return None, result
        if self.db.ERR_INFO:
            return None, self.db.ERR_INFO
        return result, ""

    def delete_task(self, task_id: int) -> str:
        """Delete one task row; "" on success or an error message."""
        result = self.db.table("transfer_tasks").where("task_id = ?", task_id).delete()
        if isinstance(result, str):
            return result
        return ""

    def get_all_tasks(self, offset: int = 0, limit: int = 100) -> List[dict]:
        """List task rows with offset/limit paging; [] on error."""
        results = self.db.table("transfer_tasks").limit(limit, offset).select()
        if isinstance(results, list):
            return results
        return []

    def count_tasks(self) -> int:
        """Total number of transfer tasks."""
        return self.db.table("transfer_tasks").count()

    def create_file_transfer(self, transfer: FileTransfer) -> str:
        """Insert one file_transfers row; "" on success or an error message."""
        transfer_data = transfer.to_dict()
        transfer_data.pop('transfer_id', None)  # assigned by AUTOINCREMENT
        transfer_data.pop('created_at', None)   # assigned by the column default
        try:
            err = self.db.table("file_transfers").insert(transfer_data)
            if isinstance(err, str):
                return err
            return ""
        except Exception as e:
            return f"Database operation error: {str(e)}"

    def update_file_transfer(self, transfer: FileTransfer) -> str:
        """Update one file_transfers row by transfer_id; "" on success."""
        transfer_data = transfer.to_dict()
        # BUGFIX: never overwrite created_at — when the object was built
        # without it, the update would clobber the stored timestamp with NULL
        # (update_task() already pops it; keep the two paths consistent).
        transfer_data.pop('created_at', None)
        if not transfer.transfer_id:
            return "transfer_id is required"
        try:
            err = self.db.table("file_transfers").where("transfer_id = ?", transfer.transfer_id).update(transfer_data)
            if isinstance(err, str):
                return err
            return ""
        except Exception as e:
            return f"Database operation error: {str(e)}"

    def get_file_transfer(self, transfer_id: int) -> Optional[dict]:
        """Fetch one file_transfers row; None when missing or on error."""
        result = self.db.table("file_transfers").where("transfer_id = ?", transfer_id).find()
        if isinstance(result, str):
            return None
        if self.db.ERR_INFO:
            return None
        return result

    def get_task_file_transfers(self, task_id: int) -> List[dict]:
        """All file_transfers rows belonging to one task; [] on error."""
        results = self.db.table("file_transfers").where("task_id = ?", task_id).select()
        if isinstance(results, list):
            return results
        return []

    def batch_create_file_transfers(self, transfers: List[FileTransfer]) -> str:
        """Batch-insert file transfer records.

        Args:
            transfers: list of FileTransfer objects

        Returns:
            str: error message, or "" on success
        """
        if not transfers:
            return ""

        try:
            # Prepare the rows for batch insertion; created_at is stamped here
            # because batch_insert bypasses the column default.
            transfer_data_list = []
            now = datetime.now().isoformat()
            for transfer in transfers:
                transfer_data = transfer.to_dict()
                transfer_data.pop('transfer_id', None)
                transfer_data['created_at'] = now
                transfer_data_list.append(transfer_data)

            err = self.db.table("file_transfers").batch_insert(transfer_data_list)
            if isinstance(err, str):
                return err
            return ""
        except Exception as e:
            return f"Batch creation of file transfer records failed: {str(e)}"

    # Status of the most recent task and all of its file transfers.
    def last_task_all_status(self) -> Tuple[Dict, str]:
        """Return {"task": ..., "file_list": ...} for the newest task, or ({}, error)."""
        last_task, err = self.get_last_task()
        if err:
            return {}, err
        if not last_task:
            return {}, ""

        task = FileTransferTask.from_dict(last_task)
        file_list = self.get_task_file_transfers(task.task_id)
        return {
            "task": task.to_dict(),
            "file_list": file_list,
        }, ""
|
||||
54
mod/project/node/dbutil/load_balancer.sql
Normal file
54
mod/project/node/dbutil/load_balancer.sql
Normal file
@@ -0,0 +1,54 @@
|
||||
-- load_sites: load-balanced sites
CREATE TABLE IF NOT EXISTS `load_sites`
(
    `load_id`     INTEGER PRIMARY KEY AUTOINCREMENT,  -- load balancer id
    `name`        TEXT NOT NULL UNIQUE,               -- load balancer name
    `site_id`     INTEGER NOT NULL DEFAULT 0,         -- site id
    `site_name`   TEXT NOT NULL ,                     -- site name: the site's primary domain
    `site_type`   TEXT NOT NULL DEFAULT 'http',       -- http, tcp (http: HTTP load balancing, tcp: TCP/UDP load balancing)
    `ps`          TEXT NOT NULL DEFAULT '',
    `http_config` TEXT NOT NULL DEFAULT '{"proxy_next_upstream":"error timeout http_500 http_502 http_503 http_504","http_alg":"sticky_cookie"}',
    `tcp_config`  TEXT NOT NULL DEFAULT '{"proxy_connect_timeout":8,"proxy_timeout":86400,"host":"127.0.0.1","port":80,"type":"tcp"}',
    `created_at`  TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- http_nodes
CREATE TABLE IF NOT EXISTS `http_nodes`
(
    `id`             INTEGER PRIMARY KEY AUTOINCREMENT,
    `load_id`        INTEGER NOT NULL DEFAULT 0,        -- load balancer id
    `node_id`        INTEGER NOT NULL DEFAULT 0,        -- node id
    `node_site_id`   INTEGER NOT NULL DEFAULT 0,        -- site id on the node
    `node_site_name` TEXT NOT NULL DEFAULT '',          -- site name on the node
    `port`           INTEGER NOT NULL DEFAULT 0,        -- port
    `location`       TEXT NOT NULL DEFAULT '/',         -- proxied route; defaults to root '/', the only route supported in this version
    `path`           TEXT NOT NULL DEFAULT '/',         -- access-check path
    `node_status`    TEXT NOT NULL DEFAULT 'online',    -- node status: online, backup, down
    `weight`         INTEGER NOT NULL DEFAULT 1,        -- weight
    `max_fail`       INTEGER NOT NULL DEFAULT 0,        -- maximum failure count
    `fail_timeout`   INTEGER NOT NULL DEFAULT 0,        -- failure recovery time
    `max_conns`      INTEGER NOT NULL DEFAULT 0,        -- maximum connection count
    `ps`             TEXT NOT NULL DEFAULT '',
    `created_at`     TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

-- tcp_nodes
CREATE TABLE IF NOT EXISTS `tcp_nodes`
(
    `id`           INTEGER PRIMARY KEY AUTOINCREMENT,
    `load_id`      INTEGER NOT NULL DEFAULT 0,        -- load balancer id
    `node_id`      INTEGER NOT NULL DEFAULT 0,        -- node id
    `host`         TEXT NOT NULL,
    `port`         INTEGER NOT NULL DEFAULT 0,
    `node_status`  TEXT NOT NULL DEFAULT 'online',    -- node status: online, backup, down
    `weight`       INTEGER NOT NULL DEFAULT 1,
    `max_fail`     INTEGER NOT NULL DEFAULT 0,
    `fail_timeout` INTEGER NOT NULL DEFAULT 0,
    `ps`           TEXT NOT NULL DEFAULT '',
    `created_at`   TIMESTAMP DEFAULT CURRENT_TIMESTAMP
);

CREATE INDEX IF NOT EXISTS `load_sites_name` ON `load_sites` (`name`);
CREATE INDEX IF NOT EXISTS `load_sites_site_type` ON `load_sites` (`site_type`);
CREATE INDEX IF NOT EXISTS `http_nodes_load_id` ON `http_nodes` (`load_id`);
CREATE INDEX IF NOT EXISTS `tcp_nodes_load_id` ON `tcp_nodes` (`load_id`);
|
||||
449
mod/project/node/dbutil/load_db.py
Normal file
449
mod/project/node/dbutil/load_db.py
Normal file
@@ -0,0 +1,449 @@
|
||||
import json
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Tuple, Optional, List, Union
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
import db
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
|
||||
@dataclass
|
||||
class LoadSite:
|
||||
name: str
|
||||
site_name: str
|
||||
site_type: str
|
||||
ps: str = ''
|
||||
http_config: dict = field(default_factory=lambda: {
|
||||
"proxy_next_upstream": "error timeout http_500 http_502 http_503 http_504",
|
||||
"http_alg": "sticky_cookie",
|
||||
"proxy_cache_status": False,
|
||||
"cache_time": "1d",
|
||||
"cache_suffix": "css,js,jpg,jpeg,gif,png,webp,woff,eot,ttf,svg,ico,css.map,js.map",
|
||||
})
|
||||
tcp_config: dict = field(default_factory=lambda: {
|
||||
"proxy_connect_timeout": 8,
|
||||
"proxy_timeout": 86400,
|
||||
"host": "127.0.0.1",
|
||||
"port": 80,
|
||||
"type": "tcp"
|
||||
})
|
||||
created_at: int = 0
|
||||
load_id: int = 0
|
||||
site_id: int = 0
|
||||
|
||||
@classmethod
|
||||
def bind_http_load(cls, data: dict) -> Tuple[Optional["LoadSite"], str]:
|
||||
check_msg = cls.base_check(data)
|
||||
if check_msg:
|
||||
return None, check_msg
|
||||
if not data.get('site_name', None):
|
||||
return None, 'site_name is required'
|
||||
if not public.is_domain(data['site_name']):
|
||||
return None, 'site_name is invalid'
|
||||
if not isinstance(data.get('http_config', None), dict):
|
||||
return None, 'http_config is required'
|
||||
else:
|
||||
if "proxy_cache_status" not in dict.keys(data['http_config']): #兼容旧版本数据
|
||||
data['http_config']["proxy_cache_status"] = False
|
||||
data['http_config']["cache_time"] = "1d"
|
||||
data['http_config']["cache_suffix"] = "css,js,jpg,jpeg,gif,png,webp,woff,eot,ttf,svg,ico,css.map,js.map"
|
||||
for k in ['proxy_next_upstream', 'http_alg', "proxy_cache_status", "cache_time", "cache_suffix"]:
|
||||
if k not in dict.keys(data['http_config']):
|
||||
return None, 'http_config.{} is required'.format(k)
|
||||
for i in data['http_config']['proxy_next_upstream'].split():
|
||||
if i not in ('error', 'timeout') and not re.match(r'^http_\d{3}$', i):
|
||||
return None, 'http_config.proxy_next_upstream is invalid'
|
||||
if data['http_config']['http_alg'] not in ('sticky_cookie', 'round_robin', 'least_conn', 'ip_hash'):
|
||||
return None, 'http_config.http_alg is invalid'
|
||||
if not isinstance(data['http_config']['proxy_cache_status'], bool):
|
||||
return None, 'http_config.proxy_cache_status is invalid'
|
||||
if not isinstance(data['http_config']['cache_time'], str):
|
||||
return None, 'http_config.cache_time is invalid'
|
||||
if not re.match(r"^[0-9]+([smhd])$", data['http_config']['cache_time']):
|
||||
return None, 'http_config.cache_time is invalid'
|
||||
cache_suffix = data['http_config']['cache_suffix']
|
||||
cache_suffix_list = []
|
||||
for suffix in cache_suffix.split(","):
|
||||
tmp_suffix = re.sub(r"\s", "", suffix)
|
||||
if not tmp_suffix:
|
||||
continue
|
||||
cache_suffix_list.append(tmp_suffix)
|
||||
real_cache_suffix = ",".join(cache_suffix_list)
|
||||
if not real_cache_suffix:
|
||||
real_cache_suffix = "css,js,jpg,jpeg,gif,png,webp,woff,eot,ttf,svg,ico,css.map,js.map"
|
||||
data['http_config']['cache_suffix'] = real_cache_suffix
|
||||
|
||||
l = LoadSite(data.get('name'), data.get('site_name'), 'http', data.get('ps', ''),
|
||||
http_config=data.get('http_config'),
|
||||
created_at=data.get('created_at', 0), load_id=data.get('load_id', 0),
|
||||
site_id=data.get('site_id', 0))
|
||||
return l, ""
|
||||
|
||||
@classmethod
def base_check(cls, data) -> str:
    """Validate the common 'name' field shared by all load-site payloads.

    Returns "" when valid, otherwise an English error message.
    """
    if not data.get('name', None):
        return 'name is required'
    # BUG FIX: the original pattern ^[a-zA-Z0-9][a-zA-Z0-9_]+$ accepted names
    # starting with a digit, contradicting the error message below; the first
    # character is now restricted to a letter so code and message agree.
    if not re.match(r'^[a-zA-Z][a-zA-Z0-9_]+$', data['name']):
        return 'The name can only contain letters, numbers, underscores, and cannot start with numbers or underscores'
    if not len(data['name']) >= 3:
        return 'The length of the name cannot be less than 3 characters'
    return ""
|
||||
|
||||
@classmethod
def bind_tcp_load(cls, data: dict) -> Tuple[Optional["LoadSite"], str]:
    """Validate *data* and build a TCP/UDP load-balancing site.

    Returns (LoadSite, "") on success, or (None, error_message) on any
    validation failure.
    """
    check_msg = cls.base_check(data)
    if check_msg:
        return None, check_msg
    if not isinstance(data.get('tcp_config', None), dict):
        return None, 'tcp_config is required'
    for k in ['proxy_connect_timeout', 'proxy_timeout', 'host', 'port', 'type']:
        # falsy values (missing key, "", 0, None) are all treated as absent
        if not data['tcp_config'].get(k):
            return None, 'tcp_config.{} is required'.format(k)
    if data['tcp_config']['type'] not in ('tcp', 'udp'):
        return None, 'tcp_config.type is invalid'
    # BUG FIX: the original combined the two port checks with `and`, which
    # (a) let out-of-range int ports pass and (b) raised TypeError on
    # non-int ports when evaluating the range comparison. `or` with
    # short-circuiting rejects both cases cleanly.
    if not isinstance(data['tcp_config']['port'], int) or not 1 <= data['tcp_config']['port'] <= 65535:
        return None, 'tcp_config.port is invalid'
    if not public.check_ip(data['tcp_config']['host']):
        return None, 'tcp_config.host is invalid'

    l = LoadSite(data.get('name'), data.get('site_name'), 'tcp', ps=data.get('ps', ''),
                 tcp_config=data.get('tcp_config'),
                 created_at=data.get('created_at', 0), load_id=data.get('load_id', 0),
                 site_id=data.get('site_id', 0))
    return l, ""
|
||||
|
||||
def to_dict(self) -> dict:
    """Serialize this load-balancing site to a plain dict.

    Key order mirrors the attribute declaration order so the output is
    stable for JSON dumps and DB inserts.
    """
    keys = ("name", "site_name", "site_type", "ps", "http_config",
            "tcp_config", "created_at", "load_id", "site_id")
    return {key: getattr(self, key) for key in keys}
|
||||
|
||||
|
||||
@dataclass
class HttpNode:
    """One upstream member of an HTTP load-balancing site."""
    node_id: int
    node_site_name: str          # upstream host: a domain name or an IP
    port: int
    location: str = "/"          # location prefix; bind() always uses "/"
    path: str = "/"
    node_status: str = "online"  # online, backup, down
    weight: int = 1
    max_fail: int = 3
    fail_timeout: int = 600
    ps: str = ""
    created_at: int = 0
    node_site_id: int = 0
    id: int = 0
    load_id: int = 0

    @classmethod
    def bind(cls, data: dict) -> Tuple[Optional["HttpNode"], str]:
        """Validate *data* and build an HttpNode.

        Returns (node, "") on success, or (None, error_message) otherwise.
        """
        site_name = data.get('node_site_name', None)
        if not isinstance(site_name, str):
            return None, 'node_site_name is required'
        if not (public.is_domain(site_name) or public.check_ip(site_name)):
            return None, 'node_site_name is invalid'
        port = data.get('port', None)
        if not isinstance(port, int):
            return None, 'port is required'
        if port < 1 or port > 65535:
            return None, 'port is invalid'
        if not isinstance(data.get('node_id', None), int):
            return None, 'node_id is required'
        status = data.get('node_status', None)
        if not isinstance(status, str):
            return None, 'node_status is required'
        if status not in ('online', 'backup', 'down'):
            return None, 'node_status is invalid'

        # location is always "/" regardless of any incoming value
        node = cls(
            data.get('node_id'), site_name, port, "/",
            data.get('path', "/"), data.get('node_status', "online"), data.get('weight', 1),
            data.get('max_fail', 3), data.get('fail_timeout', 600), data.get('ps', ''),
            data.get('created_at', 0), data.get('node_site_id', 0), data.get('id', 0),
            data.get('load_id', 0)
        )
        return node, ""

    def to_dict(self) -> dict:
        """Serialize to a plain dict; key order matches the field order."""
        keys = ("node_id", "node_site_name", "port", "location", "path",
                "node_status", "weight", "max_fail", "fail_timeout", "ps",
                "created_at", "node_site_id", "id", "load_id")
        return {k: getattr(self, k) for k in keys}
|
||||
|
||||
|
||||
@dataclass
class TcpNode:
    """One upstream member of a TCP/UDP load-balancing site."""
    node_id: int
    host: str
    port: int
    id: int = 0
    load_id: int = 0
    node_status: str = "online"  # online, backup, down
    weight: int = 1
    max_fail: int = 3
    fail_timeout: int = 600
    ps: str = ""
    created_at: int = 0

    @classmethod
    def bind(cls, data: dict) -> Tuple[Optional["TcpNode"], str]:
        """Validate *data* and build a TcpNode.

        Returns (node, "") on success, or (None, error_message) otherwise.
        """
        status = data.get('node_status', None)
        if not isinstance(status, str):
            return None, 'node_status is required'
        if status not in ('online', 'backup', 'down'):
            return None, 'node_status is invalid'
        if not isinstance(data.get('host', None), str):
            return None, 'host is required'
        if not isinstance(data.get('node_id', None), int):
            return None, 'node_id is required'
        port = data.get('port', None)
        if not isinstance(port, int):
            return None, 'port is required'
        if port < 1 or port > 65535:
            return None, 'port is invalid'
        node = cls(data.get('node_id'), data.get('host'), port, data.get('id', 0), data.get('load_id', 0),
                   data.get('node_status', "online"), data.get('weight', 1), data.get('max_fail', 3),
                   data.get('fail_timeout', 600), data.get('ps', ''), data.get('created_at', 0))
        return node, ""

    def to_dict(self) -> dict:
        """Serialize to a plain dict; key order matches the field order."""
        keys = ("node_id", "host", "port", "id", "load_id", "node_status",
                "weight", "max_fail", "fail_timeout", "ps", "created_at")
        return {k: getattr(self, k) for k in keys}
|
||||
|
||||
|
||||
class NodeDB:
    """SQLite-backed store for load-balancing sites and their upstream nodes.

    Wraps the panel's db.Sql helper pointed at a dedicated database file.
    Mutating methods return "" on success or an error message string;
    query methods return (data, "") / (data, error) pairs.
    """
    # dedicated sqlite file for load-balancer data (separate from the main panel db)
    _DB_FILE = public.get_panel_path() + "/data/db/node_load_balance.db"
    # schema bootstrap script shipped next to this module
    _DB_INIT_FILE = os.path.dirname(__file__) + "/load_balancer.sql"

    def __init__(self):
        sql = db.Sql()
        # db.Sql has no public way to point at another file, so the
        # name-mangled private attribute is overwritten directly
        sql._Sql__DB_FILE = self._DB_FILE
        self.db = sql

    def init_db(self):
        """Create the database file and run the schema script on first use."""
        sql_data = public.readFile(self._DB_INIT_FILE)
        # only bootstrap when the file is missing or empty, so an existing
        # database is never re-initialized
        if not os.path.exists(self._DB_FILE) or os.path.getsize(self._DB_FILE) == 0:
            public.writeFile(self._DB_FILE, "")
            import sqlite3
            conn = sqlite3.connect(self._DB_FILE)
            c = conn.cursor()
            c.executescript(sql_data)
            conn.commit()
            conn.close()

    def close(self):
        """Close the underlying database connection."""
        self.db.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_trackback):
        self.close()

    def __del__(self):
        self.close()

    def update_load_key(self, load_id: int, load_data: dict) -> str:
        """Update arbitrary columns of one load_sites row; "" on success."""
        if not isinstance(load_id, int):
            return "load_id is required"
        if not isinstance(load_data, dict):
            return "load_data is required"
        err = self.db.table("load_sites").where("load_id = ?", load_id).update(load_data)
        # db.Sql reports failures by returning a string
        if isinstance(err, str):
            return err
        return ""

    def name_exist(self, name: str) -> bool:
        """True if a load site with this internal name exists."""
        return self.db.table("load_sites").where("name = ?", name).count() > 0

    def load_site_name_exist(self, name: str) -> bool:
        """True if a load site bound to this panel site name exists."""
        return self.db.table("load_sites").where("site_name = ?", name).count() > 0

    def load_id_exist(self, load_id: int) -> bool:
        """True if a load site with this id exists."""
        return self.db.table("load_sites").where("load_id = ?", load_id).count() > 0

    def loads_count(self, site_type: str, query: str = "") -> int:
        """Count load sites of *site_type*, optionally filtered by a ps substring.

        Any site_type other than "http" is treated as "tcp".
        """
        if site_type == "http":
            if not query:
                return self.db.table("load_sites").where("site_type = ?", "http").count()
            return self.db.table("load_sites").where(
                "site_type = ? AND ps like ?", ("http", "%" + query + "%")).count()
        else:
            if not query:
                return self.db.table("load_sites").where("site_type = ?", "tcp").count()
            return self.db.table("load_sites").where(
                "site_type = ? AND ps like ?", ("tcp", "%" + query + "%")).count()

    def loads_list(self, site_type: str, offset: int, limit: int, query: str = ""):
        """Page through load sites; site_type is "all", "http" or anything-else=tcp."""
        if site_type == "all":
            if query:
                return self.db.table("load_sites").where("ps like ?", "%" + query + "%").limit(limit, offset).select()
            return self.db.table("load_sites").limit(limit, offset).select()
        if site_type == "http":
            if not query:
                return self.db.table("load_sites").where("site_type = ?", "http").limit(limit, offset).select()
            return self.db.table("load_sites").where(
                "site_type = ? AND ps like ?", ("http", "%" + query + "%")).limit(limit, offset).select()
        else:
            if not query:
                return self.db.table("load_sites").where("site_type = ?", "tcp").limit(limit, offset).select()
            return self.db.table("load_sites").where(
                "site_type = ? AND ps like ?", ("tcp", "%" + query + "%")).limit(limit, offset).select()

    def create_load(self, site_type: str, load: LoadSite, nodes: List[Union[HttpNode, TcpNode]]) -> str:
        """Insert a load site plus its nodes; assigns load.load_id on success.

        Returns "" on success, or an error message. Note: a node insert
        failure leaves the already-inserted load_sites row in place.
        """
        load_data = load.to_dict()
        # load_id is auto-assigned by sqlite; created_at by the schema default
        load_data.pop('load_id')
        load_data.pop('created_at')
        # configs are stored as JSON text columns
        load_data["http_config"] = json.dumps(load.http_config)
        load_data["tcp_config"] = json.dumps(load.tcp_config)
        try:
            err = self.db.table("load_sites").insert(load_data)
            if isinstance(err, str):
                return err
            # on success insert() returns the new rowid
            load.load_id = err

            for node in nodes:
                node_data = node.to_dict()
                node_data.pop('id')
                node_data.pop('created_at')
                node_data['load_id'] = load.load_id
                if site_type == "http" and isinstance(node, HttpNode):
                    err = self.db.table("http_nodes").insert(node_data)
                else:
                    err = self.db.table("tcp_nodes").insert(node_data)
                if isinstance(err, str):
                    return err
        except Exception as e:
            return "数据库操作错误:" + str(e)

        return ""

    def update_load(self, site_type: str, load: LoadSite, nodes: List[Union[HttpNode, TcpNode]]) -> str:
        """Update a load site and reconcile its node rows.

        Existing nodes (matched by id) are updated, new ones inserted, and
        rows no longer present in *nodes* are deleted. Returns "" on success.
        """
        load_data = load.to_dict()
        if not load.load_id:
            return "load_id is required"
        load_data.pop('created_at')
        load_data.pop('load_id')
        load_data["http_config"] = json.dumps(load.http_config)
        load_data["tcp_config"] = json.dumps(load.tcp_config)

        try:
            err = self.db.table("load_sites").where("load_id = ?", load.load_id).update(load_data)
            if isinstance(err, str):
                return err
        except Exception as e:
            return "数据库操作错误:" + str(e)

        old_nodes, err = self.get_nodes(load.load_id, site_type)
        if err:
            return err
        # index existing rows by id; entries remaining after the loop below
        # are the rows to delete
        old_nodes_map = {}
        for old_node in old_nodes:
            old_nodes_map[old_node['id']] = old_node

        try:
            for node in nodes:
                node_data = node.to_dict()
                node_data.pop('id')
                node_data.pop('created_at')
                node_data['load_id'] = load.load_id
                if node.id in old_nodes_map:
                    if site_type == "http" and isinstance(node, HttpNode):
                        err = self.db.table("http_nodes").where("id = ?", node.id).update(node_data)
                    else:
                        err = self.db.table("tcp_nodes").where("id = ?", node.id).update(node_data)
                    if isinstance(err, str):
                        return err
                    old_nodes_map.pop(node.id)
                else:
                    if site_type == "http" and isinstance(node, HttpNode):
                        err = self.db.table("http_nodes").insert(node_data)
                    else:
                        err = self.db.table("tcp_nodes").insert(node_data)
                    if isinstance(err, str):
                        return err
            # whatever is left was not re-submitted: remove it
            for node_id in old_nodes_map:
                if site_type == "http":
                    err = self.db.table("http_nodes").where("id = ?", node_id).delete()
                else:
                    err = self.db.table("tcp_nodes").where("id = ?", node_id).delete()
                if isinstance(err, str):
                    return err
        except Exception as e:
            return "数据库操作错误:" + str(e)
        return ""

    def get_nodes(self, load_id: int, site_type: str) -> Tuple[List[dict], str]:
        """Return (node_rows, "") for one load site, or ([], error)."""
        if site_type == "http":
            nodes: List[dict] = self.db.table("http_nodes").where("load_id = ?", load_id).select()
        else:
            nodes: List[dict] = self.db.table("tcp_nodes").where("load_id = ?", load_id).select()
        if isinstance(nodes, str):
            return [], nodes
        # db.Sql may return an empty list while recording the error separately
        if not nodes and self.db.ERR_INFO:
            return [], self.db.ERR_INFO
        return nodes, ""

    def get_load(self, load_id: int) -> Tuple[Optional[dict], str]:
        """Return (row, "") for one load site, or (None, error)."""
        load_data = self.db.table("load_sites").where("load_id = ?", load_id).find()
        if isinstance(load_data, str):
            return None, load_data
        if self.db.ERR_INFO:
            return None, self.db.ERR_INFO
        if len(load_data) == 0:
            return None, "未查询到该负载配置"
        return load_data, ""

    def delete(self, load_id: int) -> str:
        """Delete a load site and all of its node rows; "" on success.

        A missing load_id is treated as already deleted (returns "").
        """
        load_data = self.db.table("load_sites").where("load_id = ?", load_id).find()
        if isinstance(load_data, str):
            return load_data
        if self.db.ERR_INFO:
            return self.db.ERR_INFO
        if len(load_data) == 0:
            return ""

        # remove child node rows first, then the parent row
        if load_data["site_type"] == "http":
            err = self.db.table("http_nodes").where("load_id = ?", load_id).delete()
        else:
            err = self.db.table("tcp_nodes").where("load_id = ?", load_id).delete()
        if isinstance(err, str):
            return err
        err = self.db.table("load_sites").where("load_id = ?", load_id).delete()
        if isinstance(err, str):
            return err
        return ""
|
||||
28
mod/project/node/dbutil/node.sql
Normal file
28
mod/project/node/dbutil/node.sql
Normal file
@@ -0,0 +1,28 @@
|
||||
CREATE TABLE IF NOT EXISTS `node`
(
    `id`          INTEGER PRIMARY KEY AUTOINCREMENT,
    `address`     VARCHAR,                 -- node address, e.g. https://xxx:xx/
    `category_id` INTEGER,                 -- category this node belongs to
    `remarks`     VARCHAR,                 -- node display name
    `api_key`     VARCHAR,                 -- api key
    `create_time` INTEGER DEFAULT (0),     -- creation time
    `server_ip`   TEXT,                    -- server ip
    `status`      INTEGER,                 -- 0: offline 1: online
    `error`       TEXT    DEFAULT '{}',
    `error_num`   INTEGER DEFAULT 0,
    `app_key`     TEXT,                    -- app key
    `ssh_conf`    TEXT NOT NULL DEFAULT '{}',
    `ssh_test`    INTEGER DEFAULT 0,       -- whether an ssh key test has run: 0 not tested, 1 tested
    `lpver`       TEXT DEFAULT ''          -- 1panel version; records v1 or v2 when the target panel is 1panel
);

CREATE TABLE IF NOT EXISTS `category`
(
    `id`          INTEGER PRIMARY KEY AUTOINCREMENT,
    `name`        VARCHAR,
    `create_time` INTEGER DEFAULT (0)
);

-- seed the built-in local node exactly once (idempotent insert)
INSERT INTO `node` (app_key, api_key, remarks, server_ip)
SELECT 'local', 'local', 'Local node', '127.0.0.1'
WHERE NOT EXISTS (SELECT 1 FROM `node` WHERE app_key = 'local' AND api_key = 'local');
|
||||
462
mod/project/node/dbutil/node_db.py
Normal file
462
mod/project/node/dbutil/node_db.py
Normal file
@@ -0,0 +1,462 @@
|
||||
import base64
|
||||
import json
|
||||
import os.path
|
||||
import re
|
||||
import time
|
||||
import sys
|
||||
from urllib.parse import urlparse
|
||||
from dataclasses import dataclass, field
|
||||
from typing import Tuple, Optional, List, Union, Dict
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
import public
|
||||
import db
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
|
||||
@dataclass
class NodeAPPKey:
    """Decoded components of a panel app_key credential."""
    origin: str         # base URL of the remote panel, e.g. https://host:port
    request_token: str
    app_key: str
    app_token: str

    def to_string(self) -> str:
        """Re-encode the four components as a base64-encoded '|'-joined token."""
        raw = f"{self.origin}|{self.request_token}|{self.app_key}|{self.app_token}"
        return base64.b64encode(raw.encode()).decode("utf-8")
|
||||
|
||||
|
||||
@dataclass
class Node:
    """A managed panel node (remote server) and its connection credentials."""
    remarks: str                                  # node display name (required)
    id: int = 0
    address: str = ""                             # panel URL, or the ssh host for ssh-only nodes
    category_id: int = 0
    api_key: str = ""
    create_time: int = 0
    server_ip: str = ""
    status: int = 1                               # 1: online, 0: offline
    error: dict = field(default_factory=dict)
    error_num: int = 0
    app_key: str = ""
    ssh_conf: dict = field(default_factory=dict)  # host/port/username/password/pkey/pkey_passwd
    lpver: str = ""                               # 1panel version (v1/v2) when target is 1panel

    @classmethod
    def from_dict(cls, data: dict) -> Tuple[Optional["Node"], str]:
        """Validate *data* and build a Node; returns (node, "") or (None, error).

        At least one of api_key / app_key / ssh_conf must be supplied.
        NOTE: mutates *data* in place (strips remarks, may set data["address"])
        and fills missing ssh_conf keys with defaults.
        """
        if not isinstance(data.get('remarks', None), str):
            return None, 'remarks is required'
        if not data["remarks"].strip():
            return None, 'remarks is required'
        data["remarks"] = data["remarks"].strip()

        api_key = data.get('api_key', '')
        app_key = data.get('app_key', '')
        ssh_conf: dict = data.get('ssh_conf', {})
        if not api_key and not app_key and not ssh_conf:
            return None, 'api_key or app_key or ssh_conf is required'

        if app_key:
            # an app_key embeds the panel URL; it overrides any given address
            app = cls.parse_app_key(app_key)
            if not app:
                return None, 'App_key format error'
            data["address"] = app.origin
            url = urlparse(data["address"], allow_fragments=False)
            if not url.scheme or not url.netloc:
                return None, 'address is invalid'

        if api_key:
            # api_key auth requires an explicit, well-formed panel URL
            if not isinstance(data.get('address', None), str):
                return None, 'address is required'
            url = urlparse(data["address"], allow_fragments=False)
            if not url.scheme or not url.netloc:
                return None, 'address is invalid'

        if ssh_conf:
            for key in ("host", "port"):
                if key not in ssh_conf:
                    return None, 'ssh_conf is invalid'
            # fill optional ssh fields with safe defaults
            if "username" not in ssh_conf:
                ssh_conf["username"] = "root"
            if "password" not in ssh_conf:
                ssh_conf["password"] = ""
            if "pkey" not in ssh_conf:
                ssh_conf["pkey"] = ""
            if "pkey_passwd" not in ssh_conf:
                ssh_conf["pkey_passwd"] = ""

        # ssh-only nodes: use the ssh host as the node address
        if ssh_conf and not data.get("address", None):
            data["address"] = ssh_conf["host"]

        n = Node(
            data["remarks"], id=data.get('id', 0), address=data.get("address"), category_id=int(data.get('category_id', 0)),
            api_key=api_key, create_time=data.get('create_time', 0), server_ip=data.get('server_ip', ''),
            status=data.get('status', 1), error=data.get('error', {}), error_num=data.get('error_num', 0),
            app_key=app_key, ssh_conf=ssh_conf, lpver=data.get('lpver', '')
        )
        return n, ''

    def to_dict(self) -> dict:
        """Serialize this node to a plain dict."""
        return {
            "remarks": self.remarks,
            "id": self.id,
            "address": self.address,
            "category_id": self.category_id,
            "api_key": self.api_key,
            "create_time": self.create_time,
            "server_ip": self.server_ip,
            "status": self.status,
            "error": self.error,
            "error_num": self.error_num,
            "app_key": self.app_key,
            "ssh_conf": self.ssh_conf,
            "lpver": self.lpver
        }

    def parse_server_ip(self):
        """Resolve this node's address to an IP string; "" on DNS failure."""
        import socket
        from urllib.parse import urlparse
        if not self.address.startswith("http"):
            host = self.address  # ssh-only nodes: address itself is the host
        else:
            host = urlparse(self.address).hostname
        if isinstance(host, str) and public.check_ip(host):
            return host
        try:
            ip_address = socket.gethostbyname(host)
            return ip_address
        except socket.gaierror as e:
            public.print_log(f"Error: {e}")
            return ""

    @staticmethod
    def parse_app_key(app_key: str) -> Optional[NodeAPPKey]:
        """Decode a base64 '|'-joined app_key; None if it is malformed."""
        try:
            data = base64.b64decode(app_key).decode("utf-8")
            origin, request_token, app_key, app_token = data.split("|")
            # origin may contain extra ':'-separated parts; keep only
            # scheme:host:port (the first three segments)
            origin_arr = origin.split(":")
            if len(origin_arr) > 3:
                origin = ":".join(origin_arr[:3])
            return NodeAPPKey(origin, request_token, app_key, app_token)
        except:
            return None
|
||||
|
||||
|
||||
class ServerNodeDB:
    """SQLite-backed store for managed nodes and their categories.

    Wraps the panel's db.Sql helper pointed at a dedicated database file.
    Mutating methods return "" on success or an error message string.
    """
    _DB_FILE = public.get_panel_path() + "/data/db/node.db"
    _DB_INIT_FILE = os.path.dirname(__file__) + "/node.sql"

    def __init__(self):
        sql = db.Sql()
        # db.Sql has no public way to point at another file, so the
        # name-mangled private attribute is overwritten directly
        sql._Sql__DB_FILE = self._DB_FILE
        self.db = sql

    def init_db(self):
        """Run the schema script and apply the ssh_test column migration."""
        sql_data = public.readFile(self._DB_INIT_FILE)
        import sqlite3
        conn = sqlite3.connect(self._DB_FILE)
        try:
            cur = conn.cursor()
            cur.executescript(sql_data)
            # migration: older databases lack the ssh_test column
            cur.execute("PRAGMA table_info(node)")
            existing_cols = [row[1] for row in cur.fetchall()]
            if "ssh_test" not in existing_cols:
                cur.execute("ALTER TABLE node ADD COLUMN ssh_test INTEGER DEFAULT (0)")
            conn.commit()
        finally:
            conn.close()

    def close(self):
        """Close the underlying database connection."""
        self.db.close()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, exc_trackback):
        self.close()

    def __del__(self):
        # guard: __init__ may have failed before self.db was assigned
        if hasattr(self, "db"):
            self.close()

    def is_local_node(self, node_id: int):
        """True if node_id is the built-in local node."""
        return self.db.table('node').where("id=? AND app_key = 'local' AND api_key = 'local'", (node_id,)).count() > 0

    def get_local_node(self):
        """Return the local node row, or a synthetic placeholder if missing."""
        data = self.db.table('node').where("app_key = 'local' AND api_key = 'local'", ()).find()
        if isinstance(data, dict):
            return data
        return {
            "id": 0,
            "address": "",
            "category_id": 0,
            "remarks": "Local node",
            "api_key": "local",
            "create_time": time.strftime('%Y-%m-%d %H:%M:%S'),
            "server_ip": "127.0.0.1",
            "status": 0,
            "error": 0,
            "error_num": 0,
            "app_key": "local",
            "ssh_conf": "{}",
            "lpver": "",
        }

    def create_node(self, node: Node) -> str:
        """Insert a new node; assigns node.id on success. "" on success."""
        node_data = node.to_dict()
        node_data.pop("id")  # id is auto-assigned by sqlite
        node_data["create_time"] = time.strftime('%Y-%m-%d %H:%M:%S')
        node_data.pop("error")  # schema default '{}' applies
        node_data["status"] = 1  # new nodes start as online
        node_data["ssh_conf"] = json.dumps(node_data["ssh_conf"])

        if node.category_id > 0 and not self.category_exites(node.category_id):
            return "Classification does not exist"

        if self.db.table('node').where('remarks=?', (node.remarks,)).count() > 0:
            return "The node with this name already exists"
        try:
            node_id = self.db.table('node').insert(node_data)
            if isinstance(node_id, int):
                node.id = node_id
                return ""
            elif isinstance(node_id, str):
                return node_id
            else:
                return str(node_id)
        except Exception as e:
            return str(e)

    # BUG FIX: the default for with_out_fields was the class `Node` (a typo);
    # None keeps the identical no-fields-excluded behavior with a sane default.
    def update_node(self, node: Node, with_out_fields: Optional[List[str]] = None) -> str:
        """Update an existing node; *with_out_fields* lists columns to skip."""
        if self.is_local_node(node.id):
            return "Cannot modify local nodes"
        if not self.node_id_exites(node.id):
            return "Node does not exist"
        node_data = node.to_dict()
        node_data.pop("create_time")
        node_data.pop("id")
        node_data["ssh_conf"] = json.dumps(node_data["ssh_conf"])
        node_data["error"] = json.dumps(node_data["error"])
        if with_out_fields and isinstance(with_out_fields, list):
            for f in with_out_fields:
                if f in node_data:
                    node_data.pop(f)

        # dangling category references are reset to the default category
        if node.category_id > 0 and not self.category_exites(node.category_id):
            node.category_id = 0
            node_data["category_id"] = 0
        try:
            res = self.db.table('node').where('id=?', (node.id,)).update(node_data)
            if isinstance(res, str):
                return res
        except Exception as e:
            return str(e)

        return ""

    def set_node_ssh_conf(self, node_id: int, ssh_conf: dict, ssh_test: int = 0):
        """Persist a node's ssh configuration; optionally mark ssh as tested."""
        pdata = {"ssh_conf": json.dumps(ssh_conf)}
        if ssh_test:
            pdata["ssh_test"] = 1
        self.db.table('node').where('id=?', (node_id,)).update(pdata)
        return

    def remove_node_ssh_conf(self, node_id: int):
        """Clear a node's ssh configuration."""
        self.db.table('node').where('id=?', (node_id,)).update({"ssh_conf": "{}"})
        return

    def delete_node(self, node_id: int) -> str:
        """Delete a node (the local node is protected); "" on success."""
        if self.is_local_node(node_id):
            return "Cannot delete local node"
        if not self.node_id_exites(node_id):
            return "Node does not exist"
        try:
            res = self.db.table('node').where('id=?', (node_id,)).delete()
            if isinstance(res, str):
                return res
        except Exception as e:
            return str(e)
        return ""

    def find_node(self, api_key: str = "", app_key: str = "") -> Optional[dict]:
        """Find a node by api_key and/or app_key; None when not found.

        BUG FIX: the original passed two bind values to a one-placeholder
        WHERE clause ('api_key=?'), so app_key was never matched and the
        query failed. The clause is now built from the supplied keys.
        NOTE(review): when both keys are given either one matching is
        accepted — confirm against callers.
        """
        if api_key and app_key:
            res = self.db.table('node').where('api_key=? OR app_key=?', (api_key, app_key)).find()
        elif app_key:
            res = self.db.table('node').where('app_key=?', (app_key,)).find()
        else:
            res = self.db.table('node').where('api_key=?', (api_key,)).find()
        if isinstance(res, dict):
            return res
        else:
            return None

    def get_node_list(self,
                      search: str = "",
                      category_id: int = -1,
                      offset: int = 0,
                      limit: int = 10) -> Tuple[List[Dict], str]:
        """Page through nodes, optionally filtered by remarks substring and category."""
        try:
            args = []
            query_str = ""
            if search:
                query_str += "remarks like ?"
                args.append('%{}%'.format(search))
            if category_id >= 0:
                if query_str:
                    query_str += " and category_id=?"
                else:
                    query_str += "category_id=?"
                args.append(category_id)
            if query_str:
                data_list = self.db.table('node').where(query_str, args).order('id desc').limit(limit, offset).select()
            else:
                data_list = self.db.table('node').order('id desc').limit(limit, offset).select()
            if self.db.ERR_INFO:
                return [], self.db.ERR_INFO
            if not isinstance(data_list, list):
                return [], str(data_list)
            return data_list, ""
        except Exception as e:
            return [], str(e)

    def query_node_list(self, *args) -> List[Dict]:
        """Raw where-clause query over the node table."""
        return self.db.table('node').where(*args).select()

    def category_exites(self, category_id: int) -> bool:
        """True if the category id exists. (Name kept for API compatibility.)"""
        return self.db.table('category').where('id=?', (category_id,)).count() > 0

    def node_id_exites(self, node_id: int) -> bool:
        """True if the node id exists. (Name kept for API compatibility.)"""
        return self.db.table('node').where('id=?', (node_id,)).count() > 0

    def category_map(self) -> Dict:
        """Return {category_id: name}, always including the default category 0."""
        default_data = {0: "Default classification"}
        data_list = self.db.table('category').field('id,name').select()
        if isinstance(data_list, list):
            for data in data_list:
                default_data[data["id"]] = data["name"]
        return default_data

    def node_map(self) -> Dict:
        """Return {node_id: remarks} for all nodes."""
        default_data = {}
        data_list = self.db.table('node').field('id,remarks').select()
        if isinstance(data_list, list):
            for data in data_list:
                default_data[data["id"]] = data["remarks"]
        return default_data

    def create_category(self, name: str) -> str:
        """Create a category with a unique name; "" on success."""
        if self.db.table('category').where('name=?', (name,)).count() > 0:
            return "The classification for this name already exists"
        try:
            res = self.db.table('category').insert({"name": name, "create_time": time.strftime('%Y-%m-%d %H:%M:%S')})
            if isinstance(res, str):
                return res
        except Exception as e:
            return str(e)
        return ""

    def delete_category(self, category_id: int):
        """Delete a category; its nodes fall back to the default category 0."""
        self.db.table('node').where('category_id=?', (category_id,)).update({"category_id": 0})
        self.db.table('category').where('id=?', (category_id,)).delete()

    def bind_category_to_node(self, node_id: List[int], category_id: int) -> str:
        """Assign *category_id* to every node in the *node_id* list; "" on success."""
        if not node_id:
            return "Node ID cannot be empty"
        if category_id > 0 and not self.category_exites(category_id):
            return "Classification does not exist"

        try:
            err = self.db.table('node').where(
                'id in ({})'.format(",".join(["?"] * len(node_id))), (*node_id,)
            ).update({"category_id": category_id})
            if isinstance(err, str):
                return err

        except Exception as e:
            return str(e)
        return ""

    def node_count(self, search, category_id) -> int:
        """Count nodes matching the same filters as get_node_list; 0 on error."""
        try:
            args = []
            query_str = ""
            if search:
                query_str += "remarks like ?"
                args.append('%{}%'.format(search))
            if category_id >= 0:
                if query_str:
                    query_str += " and category_id=?"
                else:
                    query_str += "category_id=?"
                args.append(category_id)
            if query_str:
                count = self.db.table('node').where(query_str, args).order('id desc').count()
            else:
                count = self.db.table('node').order('id desc').count()
            return count
        except Exception:  # was a bare except; narrowed so Ctrl-C propagates
            return 0

    def get_node_by_id(self, node_id: int) -> Optional[Dict]:
        """Return one node row by id, or None on any failure."""
        try:
            data = self.db.table('node').where('id=?', (node_id,)).find()
            if self.db.ERR_INFO:
                return None
            if not isinstance(data, dict):
                return None
            return data
        except Exception:  # was a bare except; narrowed so Ctrl-C propagates
            return None
|
||||
|
||||
class ServerMonitorRepo:
    """File-based cache of per-node status snapshots and reboot-wait flags."""
    # one JSON file per server id plus marker files for pending reboots
    _REPO_DIR = public.get_panel_path() + "/data/mod_node_status_cache/"

    def __init__(self):
        if not os.path.exists(self._REPO_DIR):
            os.makedirs(self._REPO_DIR)

    def set_wait_reboot(self, server_ip: str, start: bool):
        """Mark (start=True) or clear (start=False) a pending-reboot flag."""
        wait_file = os.path.join(self._REPO_DIR, "wait_reboot_{}".format(server_ip))
        if start:
            return public.writeFile(wait_file, "wait_reboot")
        else:
            if os.path.exists(wait_file):
                os.remove(wait_file)

    def is_reboot_wait(self, server_ip: str):
        """True while a reboot flag exists and is less than ~10 minutes old."""
        wait_file = os.path.join(self._REPO_DIR, "wait_reboot_{}".format(server_ip))
        # a reboot wait older than 10 minutes is considered timed out
        return os.path.exists(wait_file) and os.path.getmtime(wait_file) > time.time() - 610

    @staticmethod
    def get_local_server_status():
        """Return the local panel host's status via the system module."""
        from system import system
        return system().GetNetWork(None)

    def get_server_status(self, server_id: int) -> Optional[Dict]:
        """Return the cached status for a server, or None if absent/stale.

        Cache entries older than 5 minutes are deleted on read.
        """
        cache_file = os.path.join(self._REPO_DIR, "server_{}.json".format(server_id))
        if not os.path.exists(cache_file):
            return None

        mtime = os.path.getmtime(cache_file)
        if time.time() - mtime > 60 * 5:
            os.remove(cache_file)
            return None
        try:
            data = public.readFile(cache_file)
            if isinstance(data, str):
                return json.loads(data)
        except:
            return None

    def save_server_status(self, server_id: int, data: Dict) -> str:
        """Persist a status snapshot for a server; "" on success."""
        cache_file = os.path.join(self._REPO_DIR, "server_{}.json".format(server_id))
        try:
            public.writeFile(cache_file, json.dumps(data))
            return ""
        except Exception as e:
            return str(e)

    def remove_cache(self, server_id: int):
        """Drop the cached status for a server, if any."""
        cache_file = os.path.join(self._REPO_DIR, "server_{}.json".format(server_id))
        if os.path.exists(cache_file):
            os.remove(cache_file)
|
||||
1149
mod/project/node/dbutil/node_task_flow.py
Normal file
1149
mod/project/node/dbutil/node_task_flow.py
Normal file
File diff suppressed because it is too large
Load Diff
144
mod/project/node/dbutil/node_task_flow.sql
Normal file
144
mod/project/node/dbutil/node_task_flow.sql
Normal file
@@ -0,0 +1,144 @@
|
||||
-- Script library table: reusable script definitions.
CREATE TABLE IF NOT EXISTS scripts
(
    id          INTEGER PRIMARY KEY AUTOINCREMENT,
    name        TEXT    NOT NULL CHECK (length(name) <= 255),
    script_type TEXT    NOT NULL CHECK (length(script_type) <= 255),
    content     TEXT    NOT NULL,
    description TEXT,
    created_at  INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at  INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Task flow table: one row per multi-step flow run.
CREATE TABLE IF NOT EXISTS flows
(
    id         INTEGER PRIMARY KEY AUTOINCREMENT,
    server_ids TEXT    NOT NULL, -- stores the list of server ids
    step_count INTEGER NOT NULL,
    strategy   TEXT    NOT NULL, -- per-task handling strategy, JSON field
    status     TEXT    NOT NULL, -- overall status: waiting, running, complete, error
    created_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);
-- Command task table: one row per command dispatched to a node set.
CREATE TABLE IF NOT EXISTS command_tasks
(
    id             INTEGER PRIMARY KEY AUTOINCREMENT,
    flow_id        INTEGER NOT NULL,
    name           TEXT    NOT NULL CHECK (length(name) <= 255),
    step_index     INTEGER NOT NULL,
    script_id      INTEGER NOT NULL,
    script_content TEXT    NOT NULL,
    script_type    TEXT    NOT NULL CHECK (length(script_type) <= 255),
    status         INTEGER NOT NULL DEFAULT 0 CHECK (status IN (0, 1, 2, 3)), -- 0: waiting, 1: running, 2: success, 3: failed
    created_at     INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at     INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Command task log table: one row per (task, server) execution.
CREATE TABLE IF NOT EXISTS command_logs
(
    id              INTEGER PRIMARY KEY AUTOINCREMENT,
    command_task_id INTEGER NOT NULL,
    server_id       INTEGER NOT NULL,
    ssh_host        TEXT    NOT NULL,
    status          INTEGER NOT NULL DEFAULT 0 CHECK (status IN (0, 1, 2, 3, 4)), -- 0: waiting, 1: running, 2: success, 3: failed, 4: exception
    log_name        TEXT    NOT NULL CHECK (length(log_name) <= 255)
);
-- Transfer task table.
CREATE TABLE IF NOT EXISTS transfer_tasks
(
    id               INTEGER PRIMARY KEY AUTOINCREMENT,
    name             TEXT    NOT NULL CHECK (length(name) <= 255),
    flow_id          INTEGER NOT NULL, -- 0 when this machine is not the data-source node
    step_index       INTEGER NOT NULL,
    src_node         TEXT    NOT NULL, -- data-source node, JSON field
    src_node_task_id INTEGER NOT NULL, -- 0 when this machine is the data-source node, otherwise the transfer_tasks.id on the target machine
    dst_nodes        TEXT    NOT NULL, -- target nodes (multiple), JSON field
    message          TEXT    NOT NULL DEFAULT '', -- connection error message for the target nodes
    path_list        TEXT    NOT NULL DEFAULT '[]', -- paths on the source node, e.g. [{"path":"/www/wwwroots", "is_dir":true}]
    status           INTEGER NOT NULL DEFAULT 0 CHECK (status IN (0, 1, 2, 3)), -- 0: waiting, 1: running, 2: success, 3: failed
    created_at       INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at       INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);

-- Transferred file list table.
CREATE TABLE IF NOT EXISTS transfer_files
(
    id               INTEGER PRIMARY KEY AUTOINCREMENT,
    flow_id          INTEGER NOT NULL,
    transfer_task_id INTEGER NOT NULL,
    src_file         TEXT    NOT NULL, -- source file
    dst_file         TEXT    NOT NULL, -- destination file
    file_size        INTEGER NOT NULL, -- file size
    is_dir           INTEGER NOT NULL DEFAULT 0
);


-- Transfer log table (original comment duplicated "transferred file list").
CREATE TABLE IF NOT EXISTS transfer_logs
(
    id               INTEGER PRIMARY KEY AUTOINCREMENT,
    flow_id          INTEGER NOT NULL,
    transfer_task_id INTEGER NOT NULL,
    transfer_file_id INTEGER NOT NULL,
    dst_node_idx     INTEGER NOT NULL, -- target node index, based on transfer_tasks.dst_nodes
    status           INTEGER NOT NULL DEFAULT 0 CHECK (status IN (0, 1, 2, 3, 4)), -- 0: waiting, 1: running, 2: success, 3: failed, 4: skipped
    progress         INTEGER DEFAULT 0, -- 0-100
    message          TEXT    NOT NULL DEFAULT '',
    created_at       INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    started_at       INTEGER,
    completed_at     INTEGER
);
-- Flow template table.
CREATE TABLE IF NOT EXISTS flow_templates
(
    id          INTEGER PRIMARY KEY AUTOINCREMENT,
    name        TEXT    NOT NULL CHECK (length(name) <= 255),
    key_words   TEXT    NOT NULL DEFAULT '', -- keywords for searching; the content is the sub-task names
    description TEXT    NOT NULL DEFAULT '', -- template description
    content     TEXT    NOT NULL, -- JSON field built by the frontend: the actual flow content
    created_at  INTEGER NOT NULL DEFAULT (strftime('%s', 'now')),
    updated_at  INTEGER NOT NULL DEFAULT (strftime('%s', 'now'))
);
-- scripts table
CREATE INDEX IF NOT EXISTS idx_scripts_name ON scripts (name);
CREATE INDEX IF NOT EXISTS idx_scripts_description ON scripts (description);

-- flows table
CREATE INDEX IF NOT EXISTS idx_flow_server_ids ON flows (server_ids);

-- command_tasks table
CREATE INDEX IF NOT EXISTS idx_command_tasks_flow_id ON command_tasks (flow_id);
CREATE INDEX IF NOT EXISTS idx_command_tasks_script_id ON command_tasks (script_id);

-- command_logs table
CREATE INDEX IF NOT EXISTS idx_command_logs_task_id ON command_logs (command_task_id);
CREATE INDEX IF NOT EXISTS idx_command_logs_server_id ON command_logs (server_id);
-- command_logs status queries
CREATE INDEX IF NOT EXISTS idx_command_logs_status ON command_logs (command_task_id, status);

-- transfer_tasks table
CREATE INDEX IF NOT EXISTS idx_transfer_tasks_flow_id ON transfer_tasks (flow_id);
CREATE INDEX IF NOT EXISTS idx_transfer_tasks_src_node_task_id ON transfer_tasks (src_node_task_id);

-- transfer_files table
CREATE INDEX IF NOT EXISTS idx_transfer_files_task_id ON transfer_files (transfer_task_id);

-- transfer_logs table
CREATE INDEX IF NOT EXISTS idx_transfer_logs_flow_id ON transfer_logs (flow_id);
CREATE INDEX IF NOT EXISTS idx_transfer_logs_task_id ON transfer_logs (transfer_task_id);
CREATE INDEX IF NOT EXISTS idx_transfer_logs_file_id ON transfer_logs (transfer_file_id);
-- transfer_logs status queries
CREATE INDEX IF NOT EXISTS idx_transfer_logs_status ON transfer_logs (transfer_file_id, status);

-- flow_templates table
CREATE INDEX IF NOT EXISTS idx_flow_templates_name ON flow_templates (name);
CREATE INDEX IF NOT EXISTS idx_flow_templates_key_words ON flow_templates (key_words);
115
mod/project/node/executor/__init__.py
Normal file
115
mod/project/node/executor/__init__.py
Normal file
@@ -0,0 +1,115 @@
|
||||
import json
|
||||
import threading
|
||||
import queue
|
||||
import time
|
||||
import traceback
|
||||
|
||||
from mod.base.ssh_executor import SSHExecutor
|
||||
from mod.project.node.dbutil import ServerNodeDB, ExecutorDB, ExecutorLog
|
||||
|
||||
|
||||
class Task(object):
    """Runs one executor task: executes its script over SSH on each node.

    Each ExecutorLog entry describes one target server; every entry runs
    in its own worker thread while a collector thread persists finished
    entries back to the database.
    """

    def __init__(self, task_id: int, log_id: int):
        """Load the task and the log entries to execute.

        :param task_id: id of the ExecutorTask row
        :param log_id: a single ExecutorLog id, or 0 to run all entries
        :raises RuntimeError: when the task or its entries are missing
        """
        self._edb = ExecutorDB()
        self.task = self._edb.ExecutorTask.find("id = ?", (task_id,))
        if not self.task:
            raise RuntimeError("The specified task does not exist")
        if log_id == 0:
            self.task.elogs = self._edb.ExecutorLog.query("executor_task_id = ?", (self.task.id,))
        else:
            one = self._edb.ExecutorLog.find("executor_task_id = ? AND id = ?", (self.task.id, log_id))
            # BUG FIX: only wrap a real row. The original built `[find(...)]`,
            # which is a truthy list even when find() returned None, so the
            # emptiness check below never fired for a missing entry.
            self.task.elogs = [one] if one else []
        if not self.task.elogs:
            raise RuntimeError("Task has no execution entry")

        # Worker threads push finished ExecutorLog objects here; the
        # collector thread (end_func) persists them to the database.
        self.end_queue = queue.Queue()
        self.end_status = False

    def end_func(self):
        """Collector loop: persist finished log entries until all workers end."""
        self._edb = ExecutorDB()
        while not self.end_queue.empty() or not self.end_status:
            # BUG FIX: the original slept and then called the blocking get(),
            # which hangs forever when the last worker finished while the
            # queue was empty. Poll with a timeout so end_status is re-checked.
            try:
                elog: ExecutorLog = self.end_queue.get(timeout=0.2)
            except queue.Empty:
                continue
            self._edb.ExecutorLog.update(elog)

    def start(self):
        """Launch one worker thread per log entry and wait for completion."""
        thread_list = []
        s_db = ServerNodeDB()
        for log in self.task.elogs:
            node = s_db.get_node_by_id(log.server_id)
            if not node:
                log.status = 2
                log.update_log("Node data loss, unable to execute\n")
                self._edb.ExecutorLog.update(log)
                # BUG FIX: skip this entry; the original fell through and
                # crashed on json.loads(None) below.
                continue

            ssh_conf = json.loads(node["ssh_conf"])
            if not ssh_conf:
                log.status = 2
                log.update_log("Node SSH configuration data lost, unable to execute\n")
                self._edb.ExecutorLog.update(log)
                # BUG FIX: likewise, do not start a worker without SSH config.
                continue

            thread = threading.Thread(target=self.run_one, args=(ssh_conf, log))
            thread.start()
            thread_list.append(thread)

        self._edb.close()
        end_th = threading.Thread(target=self.end_func)
        end_th.start()

        for i in thread_list:
            i.join()
        # All workers finished; let the collector drain the queue and stop.
        self.end_status = True
        end_th.join()

    def run_one(self, ssh_conf: dict, elog: ExecutorLog):
        """Execute the task script on one server over SSH (worker thread).

        Status codes written to elog: 1 success, 2 exception, 3 non-zero exit.
        """
        ssh = SSHExecutor(
            host=ssh_conf["host"],
            port=ssh_conf["port"],
            username=ssh_conf["username"],
            password=ssh_conf["password"],
            key_data=ssh_conf["pkey"],
            passphrase=ssh_conf["pkey_passwd"])
        elog.write_log("Start executing the task\nStart establishing SSH connection...\n")
        try:
            ssh.open()

            def on_stdout(data):
                # Stream both stdout and stderr straight into the log file.
                if isinstance(data, bytes):
                    data = data.decode()
                print(data)
                elog.write_log(data)

            elog.write_log("Start executing script...\n\n")
            t = time.time()
            res_code = ssh.execute_script_streaming(
                script_content=self.task.script_content,
                script_type=self.task.script_type,
                timeout=60 * 60,
                on_stdout=on_stdout,
                on_stderr=on_stdout
            )
            take_time = round((time.time() - t) * 1000, 2)
            elog.write_log("\n\nExecution completed, time-consuming [{}ms]\n".format(take_time))
            if res_code == 0:
                elog.status = 1
                elog.write_log("Mission accomplished\n", is_end_log=True)
            else:
                elog.status = 3
                elog.write_log("Task exception, return status code is:{}\n".format(res_code), is_end_log=True)
            self.end_queue.put(elog)
        except Exception as e:
            traceback.print_exc()
            elog.status = 2
            elog.write_log("\nTask failed, error:" + str(e), is_end_log=True)
            self.end_queue.put(elog)
            return
# log_id selects a single sub-task; the default 0 runs every sub-task.
def run_executor_task(task_id: int, log_id: int = 0):
    """Load the executor task and run it to completion (blocking)."""
    Task(task_id, log_id).start()
|
||||
|
||||
|
||||
913
mod/project/node/executorMod.py
Normal file
913
mod/project/node/executorMod.py
Normal file
@@ -0,0 +1,913 @@
|
||||
import json
|
||||
import os.path
|
||||
import threading
|
||||
import time
|
||||
import psutil
|
||||
import traceback
|
||||
from datetime import datetime
|
||||
from typing import List, Dict, Optional, Tuple, MutableMapping, Union
|
||||
|
||||
import simple_websocket
|
||||
from mod.base import json_response, list_args
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, LPanelNode, SSHApi
|
||||
from mod.project.node.dbutil import Script, CommandLog, TaskFlowsDB, CommandTask, ServerNodeDB, TransferTask, \
|
||||
ServerMonitorRepo, Flow, FlowTemplates
|
||||
from mod.project.node.task_flow import self_file_running_log, flow_running_log, flow_useful_version, file_task_run_sync, \
|
||||
command_task_run_sync
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class main:
|
||||
next_flow_tip_name = "user_next_flow_tip"
|
||||
|
||||
@staticmethod
def create_script(get):
    """Create a script record after validation and a duplicate-name check.

    :param get: request args carrying the script fields
    :return: public.return_message payload; the script dict on success
    """
    e_db = TaskFlowsDB()
    err = Script.check(get)
    if err:
        # BUG FIX: the original read `public.return_message(-1, 0,=err)`,
        # which is a SyntaxError and made this module unimportable.
        return public.return_message(-1, 0, err)
    s = Script.from_dict(get)
    # Reject duplicate script names.
    if e_db.Script.find("name = ?", (s.name,)):
        return public.return_message(-1, 0, "Script name already exists")
    err = e_db.Script.create(s)
    if isinstance(err, str):
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, s.to_dict())
@staticmethod
def modify_script(get):
    """Update an existing script record; the script id must already exist."""
    db = TaskFlowsDB()
    check_err = Script.check(get)
    if check_err:
        return public.return_message(-1, 0, check_err)
    script = Script.from_dict(get)
    if not script.id:
        return public.return_message(-1, 0, "Script ID cannot be empty")
    if not db.Script.find("id = ?", (script.id,)):
        return public.return_message(-1, 0, "Script does not exist")
    update_err = db.Script.update(script)
    if update_err:
        return public.return_message(-1, 0, update_err)
    return public.return_message(0, 0, script.to_dict())
@staticmethod
def delete_script(get):
    """Delete a single script by id.

    :param get: request args; ``get.id`` is the script id
    """
    e_db = TaskFlowsDB()
    if not get.id:
        return public.return_message(-1, 0, "Script ID cannot be empty")
    try:
        del_id = int(get.id)
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so unrelated errors still propagate.
        return public.return_message(-1, 0, "Script ID format error")

    e_db.Script.delete(del_id)
    return public.return_message(0, 0, "Deleted successfully")
@staticmethod
def get_script_list(get):
    """Return one page of scripts filtered by keyword and script type.

    :param get: request args: p (page), limit, search, script_type
    :return: page dict from public.get_page with a "data" list attached
    """
    page_num = max(int(get.get('p/d', 1)), 1)
    limit = max(int(get.get('limit/d', 16)), 1)
    search = get.get('search', "").strip()
    script_type = get.get('script_type/s', "all")
    # `x not in (...)` instead of the original `not x in [...]`.
    if script_type not in ("all", "python", "shell"):
        script_type = "all"

    where_list, params = [], []
    if search:
        # One LIKE pattern reused for name/content/description.
        like = "%{}%".format(search)
        where_list.append("(name like ? or content like ? or description like ?)")
        params.extend([like, like, like])

    if script_type != "all":
        where_list.append("script_type = ?")
        params.append(script_type)

    where = " and ".join(where_list)
    e_db = TaskFlowsDB()
    data_list = e_db.Script.query_page(where, tuple(params), page_num=page_num, limit=limit)
    count = e_db.Script.count(where, params)
    page = public.get_page(count, page_num, limit)
    page["data"] = [i.to_dict() for i in data_list]
    return page
@staticmethod
def bath_delete_script(get):
    """Batch-delete scripts by id list.

    NOTE(review): the public name keeps the historical "bath" typo
    (should be "batch") because callers reference it by name.
    """
    script_ids = list_args(get, 'script_ids')
    try:
        script_ids = [int(i) for i in script_ids]
    except (TypeError, ValueError):
        # Narrowed from a bare `except:` so unrelated errors still propagate.
        return public.return_message(-1, 0, "Script ID format error")
    if not script_ids:
        return public.return_message(-1, 0, "Script ID cannot be empty")

    e_db = TaskFlowsDB()
    err = e_db.Script.delete(script_ids)
    if err:
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, "Deleted successfully")
@staticmethod
def create_task(get):
    """Create a command task for a set of nodes and launch the executor.

    Accepts either an existing ``script_id`` or an inline
    ``script_content``/``script_type`` pair, builds one CommandLog per
    node, then starts the background executor process.

    :param get: request args: node_ids, script_id or script_content+script_type
    :return: public.return_message payload with the task dict on success
    """
    node_ids = list_args(get, 'node_ids')
    if not node_ids:
        return public.return_message(-1, 0, "Node ID cannot be empty")
    try:
        node_ids = [int(i) for i in node_ids]
    except (TypeError, ValueError):
        # Narrowed from a bare `except:`.
        return public.return_message(-1, 0, "Node ID format error")

    e_db = TaskFlowsDB()
    script_id = get.get('script_id/d', 0)
    if script_id:
        s = e_db.Script.find("id = ?", (script_id,))
        if not s:
            return public.return_message(-1, 0, "Script does not exist")

    elif get.get("script_content/s", "").strip():
        if not (get.get("script_type", "").strip() in ("python", "shell")):
            return public.return_message(-1, 0, "Script type error")
        # Ad-hoc inline script: not persisted, so it carries id 0.
        s = Script("", get.get("script_type", "").strip(), content=get.get("script_content", "").strip())
        s.id = 0
    else:
        return public.return_message(-1, 0, "Please select a script")

    nodes_db = ServerNodeDB()
    nodes = []
    timestamp = int(datetime.now().timestamp())
    for i in node_ids:
        n = nodes_db.get_node_by_id(i)
        if not n:
            return public.return_message(-1, 0, "The node with node ID [{}] does not exist".format(i))
        n["ssh_conf"] = json.loads(n["ssh_conf"])
        if not n["ssh_conf"]:
            return public.return_message(-1, 0, "The node with node ID [{}] has not configured SSH information and cannot distribute instructions".format(i))
        # Log file name: half of the content md5 + timestamp + node remark.
        n["log_name"] = "{}_{}_{}.log".format(public.md5(s.content)[::2], timestamp, n['remarks'])
        nodes.append(n)

    e_task = CommandTask(
        script_id=s.id,
        script_content=s.content,
        script_type=s.script_type,
        flow_id=0,
        step_index=0,
    )
    command_task_id = e_db.CommandTask.create(e_task)
    e_task.id = command_task_id
    if not isinstance(command_task_id, int) or command_task_id <= 0:
        # BUG FIX: create() may return an int (<= 0) here, so coerce before
        # concatenating to avoid a TypeError on str + int.
        return public.return_message(-1, 0, "Task creation failed:" + str(command_task_id))

    log_list = []
    for i in nodes:
        elog = CommandLog(
            command_task_id=command_task_id,
            server_id=i["id"],
            ssh_host=i["ssh_conf"]["host"],
            status=0,
            log_name=i["log_name"],
        )
        elog.create_log()
        log_list.append(elog)

    last_id = e_db.CommandLog.create(log_list)
    if not isinstance(last_id, int) or last_id <= 0:
        # Roll back the log files created above before reporting the failure.
        for i in log_list:
            i.remove_log()
        # BUG FIX: same str + int coercion as above.
        return public.return_message(-1, 0, "Failed to create log:" + str(last_id))

    # Launch the background executor; its output is discarded on purpose.
    script_py = "{}/script/node_command_executor.py command".format(public.get_panel_path())
    public.ExecShell("nohup {} {} {} > /dev/null 2>&1 &".format(
        public.get_python_bin(), script_py, command_task_id)
    )

    data_dict = e_task.to_dict()
    data_dict["log_list"] = [i.to_dict() for i in log_list]
    data_dict["task_id"] = command_task_id
    return public.return_message(0, 0, data_dict)
@staticmethod
def get_server_info(server_id: int, server_cache) -> dict:
    """Return a node's info dict, memoised in ``server_cache``.

    A node that cannot be found is cached as ``{}`` so repeated lookups
    do not hit the database again.

    :param server_id: node id to look up
    :param server_cache: dict used as a per-request memo (mutated in place)
    :return: the node row dict, or {} when the node does not exist
    """
    # BUG FIX: the original tested truthiness, so a cached miss ({}) was
    # re-queried from the DB on every call; test key presence instead.
    if server_id not in server_cache:
        server = ServerNodeDB().get_node_by_id(server_id)
        server_cache[server_id] = server if server else {}
    return server_cache[server_id]
@classmethod
def get_task_list(cls, get):
    """Return one page of command tasks with their per-node log entries.

    :param get: request args: p (page), limit, script_type, search
    :return: public.return_message payload wrapping the page dict
    """
    page_num = max(int(get.get('p/d', 1)), 1)
    limit = max(int(get.get('limit/d', 16)), 1)
    script_type = get.get('script_type/s', "all")
    # `x not in (...)` instead of the original `not x in [...]`.
    if script_type not in ("all", "python", "shell"):
        script_type = "all"
    search = get.get('search', "").strip()

    e_db = TaskFlowsDB()
    count, tasks = e_db.CommandTask.query_tasks(
        page=page_num, size=limit, script_type=script_type, search=search
    )

    res = []
    # Cache node rows so repeated server ids only hit the DB once per page.
    server_cache: Dict[int, Dict] = {}
    for i in tasks:
        task_dict = i.to_dict()
        log_list = e_db.CommandLog.query("command_task_id = ?", (i.id,))
        task_dict["log_list"] = []
        # script_id 0 marks an ad-hoc (inline) script with no stored name.
        # Robustness fix: "script_name" is now always present, "-" also
        # covers a referenced script that was deleted meanwhile.
        if i.script_id > 0:
            s = e_db.Script.find("id = ?", (i.script_id,))
            task_dict["script_name"] = s.name if s else "-"
        else:
            task_dict["script_name"] = "-"

        for j in log_list:
            tmp = j.to_dict()
            tmp["server_name"] = cls.get_server_info(j.server_id, server_cache).get("remarks")
            task_dict["log_list"].append(tmp)

        res.append(task_dict)

    page = public.get_page(count, page_num, limit)
    page["data"] = res
    return public.return_message(0, 0, page)
@classmethod
def get_task_info(cls, get):
    """Return one command task plus its log entries (content once started)."""
    task_id = get.get('task_id/d', 0)
    if not task_id:
        return public.return_message(-1, 0, "Task ID cannot be empty")

    db = TaskFlowsDB()
    task = db.CommandTask.find("id = ?", (task_id,))
    if not task:
        return public.return_message(-1, 0, "Task does not exist")

    info = task.to_dict()
    info["log_list"] = []
    node_cache = {}
    for entry in db.CommandLog.query("command_task_id = ?", (task_id,)):
        row = entry.to_dict()
        # Waiting entries (status 0) have no log file content yet.
        if entry.status != 0:
            row["log"] = entry.get_log()
        row["server_name"] = cls.get_server_info(entry.server_id, node_cache).get("remarks", "")
        info["log_list"].append(row)

    return public.return_message(0, 0, info)
@staticmethod
def delete_task(get):
    """Delete one command task: kill its executor, purge logs and DB rows."""
    db = TaskFlowsDB()
    task_id = get.get('task_id/d', 0)
    if not task_id:
        return public.return_message(-1, 0, "Task ID cannot be empty")

    if not db.CommandTask.find("id = ?", (task_id,)):
        return public.return_message(-1, 0, "Task does not exist")

    # Stop the background executor for this task if it is still running.
    pid_file = "{}/logs/executor_log/{}.pid".format(public.get_panel_path(), task_id)
    if os.path.exists(pid_file):
        pid: str = public.readFile(pid_file)
        if pid and pid.isdigit():
            public.ExecShell("kill -9 {}".format(pid))
        os.remove(pid_file)

    # Remove each per-node log file before dropping the DB rows.
    for entry in db.CommandLog.query("command_task_id = ?", (task_id,)):
        entry.remove_log()
        db.CommandLog.delete(entry.id)

    db.CommandTask.delete(task_id)
    return public.return_message(0, 0, "Deleted successfully")
@staticmethod
def batch_delete_task(get):
    """Delete several command tasks: kill executors, purge logs and rows."""
    ids: List[int] = list_args(get, "task_ids")
    if not ids:
        return public.return_message(-1, 0, "Please select the task to delete")
    ids = [int(i) for i in ids]
    db = TaskFlowsDB()
    placeholders = ",".join(["?"] * len(ids))
    tasks = db.CommandTask.query("id IN ({})".format(placeholders), tuple(ids))
    if not tasks:
        return public.return_message(-1, 0, "Task does not exist")
    for task in tasks:
        # Kill the task's executor process if its pid file is present.
        pid_file = "{}/logs/executor_log/{}.pid".format(public.get_panel_path(), task.id)
        if os.path.exists(pid_file):
            pid: str = public.readFile(pid_file)
            if pid and pid.isdigit():
                public.ExecShell("kill -9 {}".format(pid))
            os.remove(pid_file)

        # Drop every per-node log (file + row) before the task row itself.
        for entry in db.CommandLog.query("command_task_id = ?", (task.id,)):
            entry.remove_log()
            db.CommandLog.delete(entry.id)
        db.CommandTask.delete(task.id)

    return public.return_message(0, 0, "Deleted successfully")
@staticmethod
def retry_task(get):
    """Re-run one log entry of a command task via the background executor."""
    task_id = get.get('task_id/d', 0)
    if not task_id:
        return public.return_message(-1, 0, "Task ID cannot be empty")

    log_id = get.get('log_id/d', 0)
    if not log_id:
        return public.return_message(-1, 0, "The log ID cannot be empty")

    db = TaskFlowsDB()
    entry = db.CommandLog.find("id = ? AND command_task_id = ?", (log_id, task_id))
    if not entry:
        return public.return_message(-1, 0, "log does not exist")

    # Reset the log file and mark the entry as waiting before relaunch.
    entry.create_log()
    entry.status = 0
    db.CommandLog.update(entry)
    runner = "{}/script/node_command_executor.py command".format(public.get_panel_path())
    public.ExecShell("nohup {} {} {} {} > /dev/null 2>&1 &".format(
        public.get_python_bin(), runner, task_id, log_id))
    return public.return_message(0, 0, "Retry started")
@staticmethod
def node_create_transfer_task(get):
    """Register a transfer task pushed to this node by the master panel."""
    try:
        payload = json.loads(get.get('transfer_task_data', "{}"))
    except Exception:
        return public.return_message(-1, 0, "Parameter error")
    if not payload:
        return public.return_message(-1, 0, "Parameter error")

    # This node acts as the data source, so the task is not bound to a flow.
    payload["flow_id"] = 0
    payload["step_index"] = 0
    payload["src_node"] = {"name": "local"}
    payload["src_node_task_id"] = 0
    if not isinstance(payload["dst_nodes"], dict):
        return public.return_message(-1, 0, "Please upgrade the version of the main node panel you are currently using")

    fdb = TaskFlowsDB()
    task_id = fdb.TransferTask.create(TransferTask.from_dict(payload))
    if not task_id:
        return public.return_message(-1, 0, "Task creation failed")
    return public.return_message(0, 0, {"task_id": task_id})
@classmethod
def node_transferfile_status_history(cls, get):
    """Return the stored transfer-file history for one task."""
    task_id = get.get('task_id/d', 0)
    only_error = get.get('only_error/d', 1)
    if not task_id:
        return public.return_message(-1, 0, "Task ID cannot be empty")
    fdb = TaskFlowsDB()
    history = fdb.history_transferfile_task(task_id, only_error=(only_error == 1))
    fdb.close()
    return public.return_message(0, 0, history)
@classmethod
def node_proxy_transferfile_status(cls, get):
    """Stream a transfer-file task's status to the client over WebSocket.

    Flow: validate args → optional single-entry retry → (re)start the
    executor for new/failed tasks → stream live status via
    self_file_running_log → always finish with an "end" frame followed
    by the ``"{}"`` terminator message.

    :param get: request args; ``get._ws`` must be a simple_websocket server
    """
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if not ws:
        return public.return_message(-1, 0, "Please use WebSocket connection")

    task_id = get.get('task_id/d', 0)
    exclude_nodes = list_args(get, "exclude_nodes")
    the_log_id = get.get('the_log_id/d', 0)
    if not task_id:
        ws.send(json.dumps({"type": "end", "msg": "Task ID cannot be empty"}))
        ws.send("{}")
        return

    try:
        exclude_nodes = [int(i) for i in exclude_nodes]
    except:
        # Unparseable ids are ignored rather than failing the whole request.
        exclude_nodes = []

    fdb = TaskFlowsDB()
    task = fdb.TransferTask.get_byid(task_id)
    if not task:
        ws.send(json.dumps({"type": "end", "msg": "Task does not exist"}))
        ws.send("{}")
        return
    if the_log_id:  # retry of a single sub-task entry, run synchronously
        res_data = file_task_run_sync(task_id, the_log_id)
        if isinstance(res_data, str):
            ws.send(json.dumps({"type": "error", "msg": res_data}))
            ws.send("{}")
        else:
            ws.send(json.dumps({"type": "end", "data": res_data}))
            ws.send("{}")
        fdb.close()
        return

    if task.status in (0, 3):  # first run, or retry after an error
        pid = cls._start_task("file", task_id, exclude_nodes=exclude_nodes)
    elif task.status == 2:  # already succeeded: return the stored history
        ret = fdb.history_transferfile_task(task_id)
        ws.send(json.dumps({"type": "end", "data": ret}))
        ws.send("{}")
        fdb.close()
        return
    else:  # still running: locate the live executor via its pid file
        pid_file = "{}/logs/executor_log/file_{}_0.pid".format(public.get_panel_path(), task_id)
        if os.path.exists(pid_file):
            pid = int(public.readFile(pid_file))
        else:
            pid = None

    if not pid:  # executor not running: fall back to the database state
        ret = fdb.history_transferfile_task(task_id)
        ws.send(json.dumps({"type": "end", "data": ret}))
        fdb.close()
        ws.send("{}")
        return

    def send_status(soc_data: dict):
        # Callback used by self_file_running_log to stream progress frames.
        ws.send(json.dumps({"type": "status", "data": soc_data}))

    err = self_file_running_log(task_id, send_status)
    if err:
        ws.send(json.dumps({"type": "error", "msg": err}))

    ret = fdb.history_transferfile_task(task_id)
    ws.send(json.dumps({"type": "end", "data": ret}))
    fdb.close()
    ws.send("{}")  # tell the receiving side that the data stream has ended
    return
def run_flow_task(self, get):
    """Create and run a multi-step task flow, streaming status over WebSocket.

    Validates node ids and the flow definition, checks every involved
    node (SSH availability for command steps, panel type and version for
    file-transfer sources), persists the flow, launches the background
    executor, then relays its progress to the client until completion.

    :param get: request args; ``get._ws`` must be a simple_websocket server
    """
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if not ws:
        return public.return_message(-1, 0,"Please use WebSocket connection")

    public.set_module_logs("nodes_flow_task", "run_flow_task")
    node_ids = list_args(get, 'node_ids')
    if not node_ids:
        ws.send(json.dumps({"type": "error", "msg": "Node ID cannot be empty"}))
        return
    try:
        node_ids = [int(i) for i in node_ids]
    except:
        ws.send(json.dumps({"type": "error", "msg": "Node ID format error"}))
        return

    try:
        # flow_data may arrive as a JSON string or already-decoded list.
        flow_data = get.get('flow_data', '[]')
        if isinstance(flow_data, str):
            flow_data = json.loads(flow_data)
        elif isinstance(flow_data, (list, tuple)):
            pass
        else:
            raise
    except:
        ws.send(json.dumps({"type": "error", "msg": "Process data format error"}))
        return

    strategy = {"run_when_error": True}
    if "exclude_when_error" in get and get.exclude_when_error not in ("1", "true", 1, True):
        strategy["exclude_when_error"] = False

    # Scan the steps: command steps need SSH; file steps add source nodes.
    has_cmd_task = False
    data_src_node = []
    for i in flow_data:
        if i["task_type"] == "command":
            has_cmd_task = True
        elif i["task_type"] == "file":
            data_src_node.append(i["src_node_id"])

    nodes_db = ServerNodeDB()
    used_nodes, target_nodes = [], []
    srv_cache = ServerMonitorRepo()
    for i in set(node_ids + data_src_node):
        n = nodes_db.get_node_by_id(i)
        if not n:
            ws.send(json.dumps({"type": "error", "msg": "The node with node ID [{}] does not exist".format(i)}))
            return
        n["ssh_conf"] = json.loads(n["ssh_conf"])
        if has_cmd_task and n["id"] in node_ids and not n["ssh_conf"]:
            ws.send(json.dumps({"type": "error", "msg": "The node of node [{}] has not enabled SSH".format(n["remarks"])}))
            return
        if n["id"] in data_src_node:
            is_local = n["app_key"] == n["api_key"] == "local"
            # 1Panel panels, or nodes with only SSH configured, cannot act
            # as a data source.
            if (not n["app_key"] and not n["api_key"]) or n["lpver"]:
                ws.send(json.dumps(
                    {"type": "error", "msg": "Node [{}] is not a pagoda node and cannot be used as a data source".format(n["remarks"])}))
                return
            if not is_local:
                # Check the remote node's panel version supports flows.
                tmp = srv_cache.get_server_status(n["id"])
                if not tmp or not flow_useful_version(tmp["version"]):
                    ws.send(
                        json.dumps({"type": "error", "msg": "Node [{}] version is too low, please upgrade the node".format(n["remarks"])}))
                    return

        used_nodes.append(n)
        if n["id"] in node_ids:
            target_nodes.append(n)

    fdb = TaskFlowsDB()
    flow, err = fdb.create_flow(used_nodes, target_nodes, strategy, flow_data)
    if not flow:
        ws.send(json.dumps({"type": "error", "msg": err}))
        return
    fdb.close()

    pid = self._start_task("flow", flow.id)
    if not pid:
        ws.send(json.dumps({"type": "error", "msg": "Task startup failed"}))
        return

    def update_status(data: dict):
        # Relay each executor progress frame straight to the client.
        ws.send(json.dumps({"type": "status", "data": data}))

    err = flow_running_log(flow.id, update_status)
    if err:
        ws.send(json.dumps({"type": "error", "msg": err}))
    ws.send(json.dumps({"type": "end", "msg": "Mission complete"}))
    return
@classmethod
def _start_task(cls, task_type: str, task_id: int, exclude_nodes: List[int] = None) -> Optional[int]:
    """Start (or reuse) the background executor process for a task.

    :param task_type: executor mode selector, e.g. "flow" or "file"
    :param task_id: id of the flow/transfer task to run
    :param exclude_nodes: node ids the executor should skip
    :return: pid of the running executor, or None if startup failed
    """
    pid_file = "{}/logs/executor_log/{}_{}_0.pid".format(public.get_panel_path(), task_type, task_id)

    def _live_pid() -> Optional[int]:
        # Read the pid file defensively: it may not exist yet, or may be
        # mid-write by the freshly spawned executor (ROBUSTNESS FIX: the
        # original int(readFile(...)) could raise on a partial/empty file).
        if not os.path.exists(pid_file):
            return None
        try:
            pid = int(public.readFile(pid_file))
        except (TypeError, ValueError):
            return None
        return pid if psutil.pid_exists(pid) else None

    pid = _live_pid()
    if pid:
        return pid

    script_py = "{}/script/node_command_executor.py".format(public.get_panel_path())
    cmd = [
        public.get_python_bin(), script_py,
        "--task_type={}".format(task_type),
        "--task_id={}".format(task_id),
    ]

    exclude_nodes = exclude_nodes or []
    if exclude_nodes:
        exclude_nodes = [str(i) for i in exclude_nodes if i]
        exclude_nodes_str = "'{}'".format(",".join(exclude_nodes))
        cmd.append("--exclude_nodes={}".format(exclude_nodes_str))

    public.ExecShell("nohup {} > /dev/null 2>&1 &".format(" ".join(cmd)))
    # Poll up to ~3s for the new process to write its pid file.
    for _ in range(60):
        pid = _live_pid()
        if pid:
            return pid
        time.sleep(0.05)
    return None
@classmethod
def flow_task_status(cls, get):
    """Report the latest flow's status over WebSocket.

    A running flow streams its current per-step state and then live
    progress until completion; otherwise the stored history of the last
    flow (or a "no_flow" frame) is sent once.

    :param get: request args; ``get._ws`` must be a simple_websocket server
    """
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if not ws:
        return public.return_message(-1, 0, "Please use WebSocket connection")

    fdb = TaskFlowsDB()
    flow = fdb.Flow.last(order_by="id DESC")
    if flow and flow.status == "running":
        flow_data = fdb.history_flow_task(flow)
        ws.send(json.dumps({"type": "status", "data": flow_data}))
        # Send the current state of every step before streaming live updates.
        for t in flow.steps:
            t: Union[CommandTask, TransferTask]
            src_node = getattr(t, "src_node", {})
            # No "address" key means the data source is this machine.
            is_local_src = src_node.get("address", None) is None
            if not src_node:
                # No source node at all: this is a command step.
                task_data = fdb.history_command_task(t.id)
            elif is_local_src:
                task_data = fdb.history_transferfile_task(t.id)
            else:
                # Remote source: proxy the history request to that node.
                srv = ServerNode(src_node["address"], src_node["api_key"], src_node["app_key"], src_node["remarks"])
                srv_data = srv.node_transferfile_status_history(t.src_node_task_id)
                if srv_data["status"]:
                    task_data = srv_data["data"]
                else:
                    # Remote query failed: report an empty placeholder.
                    task_data = {
                        "task_id": t.id, "task_type": "file",
                        "count": 0, "complete": 0, "error": 0, "data": []
                    }
            ws.send(json.dumps({"type": "status", "data": task_data}))

        err = flow_running_log(flow.id, lambda x: ws.send(json.dumps({"type": "status", "data": x})))
        if err:
            ws.send(json.dumps({"type": "error", "msg": err}))

        ws.send(json.dumps({"type": "end", "msg": "Mission complete"}))
        return
    else:
        if not flow:
            ws.send(json.dumps({"type": "no_flow", "msg": "No tasks"}))  # nothing has ever run
            return
        flow_data = fdb.history_flow_task(flow.id)
        ws.send(json.dumps({"type": "end", "last_flow": flow_data}))
        return
@classmethod
def next_flow_tip(cls, get):
    """Acknowledge the next-flow tip; this endpoint always reports success."""
    response = public.return_message(0, 0, "Setup successful")
    return response
|
||||
|
||||
@staticmethod
def get_flow_info(get):
    """Look up one flow by its id and return its execution history."""
    wanted_id = get.get("flow_id/d", 0)
    db = TaskFlowsDB()
    record = db.Flow.get_byid(wanted_id)
    if not record:
        return public.return_message(-1, 0, "Task does not exist")
    history = db.history_flow_task(record.id)
    return public.return_message(0, 0, history)
|
||||
|
||||
@staticmethod
def get_command_task_info(get):
    """Return the complete (not error-only) history of one command task."""
    wanted_id = get.get("task_id/d", 0)
    db = TaskFlowsDB()
    record = db.CommandTask.get_byid(wanted_id)
    if not record:
        return public.return_message(-1, 0, "Task does not exist")
    history = db.history_command_task(record.id, only_error=False)
    return public.return_message(0, 0, history)
|
||||
|
||||
@staticmethod
def get_transferfile_task_info(get):
    """Return the full history of one file-transfer task.

    Local source: read from the local DB. Remote source: query the source
    node; if that fails, return an empty result skeleton.
    """
    task_id = get.get("task_id/d", 0)
    fdb = TaskFlowsDB()
    task = fdb.TransferTask.get_byid(task_id)
    if not task:
        return public.return_message(-1, 0,"Task does not exist")

    src_node = task.src_node
    # A missing "address" key means the source is this panel itself.
    is_local_src = task.src_node.get("address", None) is None
    if is_local_src:
        return public.return_message(0, 0,fdb.history_transferfile_task(task.id, only_error=False))
    else:
        # NOTE(review): uses src_node["name"], while flow_task_status builds
        # ServerNode with src_node["remarks"] — confirm which key the stored
        # node dict actually carries; a mismatch raises KeyError here.
        srv = ServerNode(src_node["address"], src_node["api_key"], src_node["app_key"], src_node["name"])
        srv_data = srv.node_transferfile_status_history(task.src_node_task_id, only_error=False)
        if srv_data["status"]:
            task_data = srv_data["data"]
        else:
            # Remote query failed — return an empty skeleton.
            task_data = {
                "task_id": task.id, "task_type": "file",
                "count": 0, "complete": 0, "error": 0, "data": []
            }
        return public.return_message(0, 0,task_data)
|
||||
|
||||
def flow_task_list(self, get):
    """Paged list of flows, each enriched with the servers it ran on.

    Improvement: the original called self.get_server_info() twice per server
    id (once for "remarks", once for "server_ip"); the lookup is now done
    once per id and reused.
    """
    page_num = max(int(get.get('p/d', 1)), 1)
    limit = max(int(get.get('limit/d', 16)), 1)

    fdb = TaskFlowsDB()
    flow_list = fdb.Flow.query_page(page_num=page_num, limit=limit)
    count = fdb.Flow.count()
    res = []
    # Shared cache so repeated server ids across flows hit the DB only once.
    server_cache: Dict[int, Dict] = {}
    for flow in flow_list:
        tmp_data = fdb.history_flow_task(flow.id)
        server_list = []
        # server_ids is stored as a "|id|id|..." delimited string.
        for raw_id in tmp_data["server_ids"].strip("|").split("|"):
            node_id = int(raw_id)
            info = self.get_server_info(node_id, server_cache)  # single lookup per id
            server_list.append({
                "id": node_id,
                "name": info.get("remarks", ""),
                "server_ip": info.get("server_ip", ""),
            })
        tmp_data["server_list"] = server_list
        res.append(tmp_data)

    page = public.get_page(count, page_num, limit)
    page["data"] = res
    return public.return_message(0, 0, page)
|
||||
|
||||
@staticmethod
def remove_flow(get):
    """Delete finished flows together with their command tasks, logs,
    log files and transfer records.

    Fixes two defects in the original:
    1. CommandLog rows were queried by ``command_task_id IN (flow_ids)`` —
       flow ids were matched against command-task ids, so the wrong logs
       (or none) were deleted.
    2. Child tasks/logs/transfer records were deleted for the raw flow_ids,
       which also wiped the children of flows that are still waiting/running
       and therefore excluded from deletion.
    """
    flow_ids = list_args(get, "flow_ids")
    if not flow_ids:
        return public.return_message(-1, 0, "Please select the task to delete")
    fdb = TaskFlowsDB()
    # Only flows that are not waiting/running may be removed.
    flows = fdb.Flow.query(
        "id IN (%s) AND status NOT IN (?, ?)" % (",".join(["?"] * len(flow_ids))),
        (*flow_ids, "waiting", "running")
    )
    # Restrict all child-record cleanup to the flows that are actually deletable.
    del_flow_ids = [flow.id for flow in flows]
    if not del_flow_ids:
        # Nothing removable (all selected flows are waiting/running); keep the
        # original's observable success response.
        return public.return_message(0, 0, "Deleted successfully")

    command_tasks = fdb.CommandTask.query(
        "flow_id IN (%s)" % (",".join(["?"] * len(del_flow_ids))),
        (*del_flow_ids,)
    )

    # Look up logs with command-task ids — not flow ids as before.
    task_ids = [task.id for task in command_tasks]
    command_logs = []
    if task_ids:
        command_logs = fdb.CommandLog.query(
            "command_task_id IN (%s)" % (",".join(["?"] * len(task_ids))),
            (*task_ids,)
        )

    # Best-effort removal of on-disk log files; a missing/unreadable file
    # must not abort the DB cleanup.
    for log in command_logs:
        try:
            if os.path.exists(log.log_file):
                os.remove(log.log_file)
        except:
            pass

    fdb.CommandLog.delete([log.id for log in command_logs])
    fdb.CommandTask.delete([task.id for task in command_tasks])
    fdb.Flow.delete([flow.id for flow in flows])

    # Transfer task/log/file rows all key on flow_id.
    w, p = "flow_id IN (%s)" % (",".join(["?"] * len(del_flow_ids))), (*del_flow_ids,)
    fdb.TransferTask.delete_where(w, p)
    fdb.TransferLog.delete_where(w, p)
    fdb.TransferFile.delete_where(w, p)

    return public.return_message(0, 0, "Deleted successfully")
|
||||
|
||||
def retry_flow(self, get):
    """Restart an unfinished flow and stream its progress over WebSocket."""
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if not ws:
        return public.return_message(-1, 0, "Please use WebSocket connection")

    flow = TaskFlowsDB().Flow.get_byid(get.get("flow_id/d", 0))
    if not flow:
        ws.send(json.dumps({"type": "error", "msg": "Task does not exist"}))
        return

    if flow.status == "complete":
        ws.send(json.dumps({"type": "error", "msg": "Task completed, cannot retry"}))
        return

    if not self._start_task("flow", flow.id):
        ws.send(json.dumps({"type": "error", "msg": "Task startup failed"}))
        return

    # Tail the executor log, forwarding every update frame to the client.
    err = flow_running_log(
        flow.id,
        lambda payload: ws.send(json.dumps({"type": "status", "data": payload})),
    )
    if err:
        ws.send(json.dumps({"type": "error", "msg": err}))

    ws.send(json.dumps({"type": "end", "msg": "Mission complete"}))
    return
|
||||
|
||||
# Retry a single sub-task, e.g. one machine's file upload or command run.
@staticmethod
def retry_flow_task(get):
    """Re-run one file-transfer or command sub-task identified by its log entry."""
    task_type = get.get("task_type/s", "")
    task_id = get.get("task_id/d", 0)
    log_id = get.get("log_id/d", 0)
    if not (task_type and task_id and log_id):
        return public.return_message(-1, 0, "Parameter error")
    if task_type not in ("file", "command"):
        return public.return_message(-1, 0, "Parameter error")
    # Dispatch to the synchronous runner for the given task kind.
    runner = file_task_run_sync if task_type == "file" else command_task_run_sync
    outcome = runner(task_id, log_id)
    if isinstance(outcome, str):
        # The runners signal failure by returning an error string.
        return public.return_message(-1, 0, outcome)
    return public.return_message(0, 0, outcome)
|
||||
|
||||
|
||||
@staticmethod
def stop_flow(get):
    """Kill a running flow executor and clean up its pid and socket files."""
    flow_id = get.get("flow_id/d", 0)
    if not flow_id:
        return public.return_message(-1, 0, "Please select the task to stop")

    pid_path = "{}/logs/executor_log/flow_{}_0.pid".format(public.get_panel_path(), flow_id)
    if os.path.exists(pid_path):
        running_pid = int(public.readFile(pid_path))
        if psutil.pid_exists(running_pid):
            psutil.Process(running_pid).kill()

    # Remove the stale pid file regardless of whether a process was killed.
    if os.path.exists(pid_path):
        os.remove(pid_path)

    # The executor's unix socket must be removed too.
    sock_path = "/tmp/flow_task/flow_task_{}".format(flow_id)
    if os.path.exists(sock_path):
        os.remove(sock_path)

    return public.return_message(0, 0, "Task stopped")
|
||||
|
||||
@staticmethod
def file_dstpath_check(get):
    """Concurrently verify that `path` is a valid upload target on each node.

    Fixes a crash in the original: when a node id did not resolve, the loop
    appended an error entry but then fell through to ``n["ssh_conf"]`` with
    ``n`` being None, raising TypeError. A ``continue`` now skips to the
    next node.
    """
    path = get.get("path/s", "")
    node_ids = list_args(get, "node_ids")
    if not path or not node_ids:
        return public.return_message(-1, 0, "Parameter error")

    if path == "/":
        return public.return_message(-1, 0, "Cannot upload to root directory")

    nodes_db = ServerNodeDB()
    ret = []

    def check_node(n_data: dict, t_srv: Union[ServerNode, LPanelNode, SSHApi]):
        # Runs in a worker thread; list.append is safe under the GIL.
        res = {"id": n_data["id"], "err": "", "remarks": n_data["remarks"]}
        err = t_srv.upload_dir_check(path)
        if err:
            res["err"] = err
        ret.append(res)

    th_list = []
    for i in node_ids:
        n = nodes_db.get_node_by_id(i)
        if not n:
            ret.append({"id": i, "err": "Node does not exist"})
            # BUGFIX: the original fell through and crashed on n["ssh_conf"].
            continue
        n["ssh_conf"] = json.loads(n["ssh_conf"])
        # Prefer the panel API when keys exist; fall back to SSH config.
        if n["app_key"] or n["api_key"]:
            srv = ServerNode.new_by_data(n)
        elif n["ssh_conf"]:
            srv = SSHApi(**n["ssh_conf"])
        else:
            ret.append({"id": i, "err": "Node configuration error"})
            continue

        th = threading.Thread(target=check_node, args=(n, srv))
        th.start()
        th_list.append(th)

    # Wait for all probes before responding.
    for th in th_list:
        th.join()

    return public.return_message(0, 0, ret)
|
||||
|
||||
@staticmethod
def create_flow_template(get):
    """Validate and persist a new flow template; names must be unique."""
    problem = FlowTemplates.check(get)
    if problem:
        return public.return_message(-1, 0, problem)
    template = FlowTemplates.from_dict(get)
    # Reject duplicate template names before inserting.
    db = TaskFlowsDB()
    if db.FlowTemplate.find("name = ?", (template.name,)):
        return public.return_message(-1, 0, "Script name already exists")
    problem = db.FlowTemplate.create(template)
    if isinstance(problem, str):
        return public.return_message(-1, 0, problem)
    db.close()
    return public.return_message(0, 0, template.to_dict())
|
||||
|
||||
|
||||
@staticmethod
def modify_flow_template(get):
    """Validate and update an existing flow template by id."""
    problem = FlowTemplates.check(get)
    if problem:
        return public.return_message(-1, 0, problem)
    db = TaskFlowsDB()
    template = FlowTemplates.from_dict(get)
    if not template.id:
        return public.return_message(-1, 0, "Please select the template to modify")
    if not db.FlowTemplate.get_byid(template.id):
        return public.return_message(-1, 0, "Template does not exist")
    problem = db.FlowTemplate.update(template)
    # update() returns an error string on failure.
    if isinstance(problem, str) and problem:
        return public.return_message(-1, 0, problem)
    db.close()
    return public.return_message(0, 0, template.to_dict())
|
||||
|
||||
@staticmethod
def delete_flow_template(get):
    """Delete one flow template by id."""
    db = TaskFlowsDB()
    if not get.get("id/d", 0):
        return public.return_message(-1, 0, "Script ID cannot be empty")
    try:
        target_id = int(get.id)
    except:
        return public.return_message(-1, 0, "Script ID format error")

    db.FlowTemplate.delete(target_id)
    return public.return_message(0, 0, "Deleted successfully")
|
||||
|
||||
@staticmethod
def get_flow_template_list(get):
    """Paged listing of flow templates with optional fuzzy search."""
    page_num = max(int(get.get('p/d', 1)), 1)
    limit = max(int(get.get('limit/d', 16)), 1)
    search = get.get('search', "").strip()

    conditions, params = [], []
    if search:
        # Match name, keywords or description against the same pattern.
        conditions.append("(name like ? or key_words like ? or description like ?)")
        like_pattern = "%{}%".format(search)
        params.extend([like_pattern, like_pattern, like_pattern])

    where = " and ".join(conditions)
    db = TaskFlowsDB()
    rows = db.FlowTemplate.query_page(where, (*params,), page_num=page_num, limit=limit)
    total = db.FlowTemplate.count(where, params)
    page = public.get_page(total, page_num, limit)
    page["data"] = [row.to_dict() for row in rows]
    return public.return_message(0, 0, page)
|
||||
|
||||
|
||||
659
mod/project/node/file_transferMod.py
Normal file
659
mod/project/node/file_transferMod.py
Normal file
@@ -0,0 +1,659 @@
|
||||
import json
|
||||
import os.path
|
||||
import traceback
|
||||
from typing import List, Dict, Optional
|
||||
|
||||
import simple_websocket
|
||||
from mod.base import json_response
|
||||
from mod.project.node.dbutil import FileTransfer, FileTransferTask, FileTransferDB, ServerNodeDB
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, LPanelNode
|
||||
from mod.project.node.filetransfer import task_running_log, wait_running
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class main():
|
||||
log_dir = "{}/logs/node_file_transfers".format(public.get_panel_path())
|
||||
if not os.path.exists(log_dir):
|
||||
os.makedirs(log_dir)
|
||||
|
||||
@staticmethod
def file_upload(args):
    """Proxy an incoming file upload to the node identified by node_id."""
    node_id = args.get('node_id', -1)
    if node_id == -1:
        # Fall back to the raw form data when the args wrapper lacks it.
        from YakPanel import request
        node_id = request.form.get('node_id', 0)

    if not node_id:
        return public.return_message(-1, 0, "node_id is null")

    if isinstance(node_id, str):
        try:
            node_id = int(node_id)
        except:
            return public.return_message(-1, 0, "node_id is null")

    target = ServerNode.new_by_id(node_id)
    if not target:
        return public.return_message(-1, 0, "node not exists")

    return target.upload_proxy()
|
||||
|
||||
@staticmethod
def file_download(args):
    """Proxy a file download from the node identified by node_id.

    Fixes the original's ``jpublic.return_message`` typo on the empty-filename
    path, which raised NameError at runtime instead of returning an error
    response.
    """
    node_id = args.get('node_id', 0)
    if not node_id:
        return public.return_message(-1, 0,"node_id is null")

    filename = args.get('filename/s', "")
    if not filename:
        # BUGFIX: was "jpublic.return_message" (NameError).
        return public.return_message(-1, 0,"The filename parameter cannot be empty")
    if isinstance(node_id, str):
        try:
            node_id = int(node_id)
        except:
            return public.return_message(-1, 0,"node_id is null")

    node = ServerNode.new_by_id(node_id)
    if not node:
        return public.return_message(-1, 0,"node not exists")

    return node.download_proxy(filename)
|
||||
|
||||
@staticmethod
def dir_walk(get):
    """Recursively list files below `path` on the local node."""
    path = get.get('path/s', "")
    if not path:
        return public.return_message(-1, 0, "The path parameter cannot be empty")

    listing, walk_err = LocalNode().dir_walk(path)
    if walk_err:
        return public.return_message(-1, 0, walk_err)
    # Success: the raw listing is the response body.
    return listing
|
||||
|
||||
@classmethod
def create_filetransfer_task(cls, get):
    """Create a file-transfer task between two configured nodes.

    Validates the request, then dispatches on which side is this panel
    ("local") and whether either side is a 1panel (lpver) node. The task is
    either created directly via _create_filetransfer_task, or created
    remotely on the peer node and mirrored locally.
    """
    # Refuse a new transfer while another one is still active on this node.
    ft_db = FileTransferDB()
    task_data, err = ft_db.get_last_task()
    if err:
        return public.return_message(-1, 0,err)
    if task_data and task_data["status"] not in ("complete", "failed"):
        return public.return_message(-1, 0,"There are ongoing tasks on the current node, please wait for them to complete before submitting")

    public.set_module_logs("nodes_create_filetransfer_9", "create_filetransfer")
    source_node_id = get.get('source_node_id/d', -1)
    target_node_id = get.get('target_node_id/d', -1)
    source_path_list = get.get('source_path_list/s', "")
    target_path = get.get('target_path/s', "")
    default_mode = get.get('default_mode/s', "cover")
    if default_mode not in ("cover", "ignore", "rename"):
        return public.return_message(-1, 0,"Default mode parameter error")
    if source_node_id == target_node_id:
        return public.return_message(-1, 0,"The source node and target node cannot be the same")
    if source_node_id == -1 or target_node_id == -1:
        return public.return_message(-1, 0,"The source or destination node cannot be empty")

    try:
        source_path_list = json.loads(source_path_list)
    except:
        return public.return_message(-1, 0,"Error in the parameter 'sourcew_path_ist'")
    # Each entry must carry exactly these keys with the expected types.
    keys = ("path", "size", "is_dir")
    for items in source_path_list:
        if not all(item in keys for item in items.keys()):
            return public.return_message(-1, 0,"Error in the parameter 'sourcew_path_ist'")
        if not (isinstance(items["path"], str) and isinstance(items["is_dir"], bool) and
                isinstance(items["size"], int)):
            return public.return_message(-1, 0,"Error in the parameter 'sourcew_path_ist'")

    if not target_path:
        return public.return_message(-1, 0,"The target_cath parameter cannot be empty")
    node_db = ServerNodeDB()
    # node id 0 means "this panel".
    if source_node_id == 0:
        src_node = node_db.get_local_node()
    else:
        src_node = node_db.get_node_by_id(source_node_id)

    if not src_node:
        return public.return_message(-1, 0,"The source node does not exist")
    if target_node_id == 0:
        target_node = node_db.get_local_node()
    else:
        target_node = node_db.get_node_by_id(target_node_id)
    if not target_node:
        return public.return_message(-1, 0,"The target node does not exist")
    if src_node["id"] == target_node["id"]:
        return public.return_message(-1, 0,"The source node and target node cannot be the same")

    # Result of the task actually created on a remote node (when not local).
    real_create_res: Optional[dict] = None
    if src_node["api_key"] == src_node["app_key"] == "local":
        # Upload: this panel is the source; create the task locally.
        return cls._create_filetransfer_task(
            source_node={
                "name": "local",
            },
            target_node={
                "name": "{}({})".format(target_node["remarks"], target_node["server_ip"]),
                "address": target_node["address"],
                "api_key": target_node["api_key"],
                "app_key": target_node["app_key"],
                "node_id": target_node_id,
                "lpver": target_node["lpver"]
            },
            source_path_list=source_path_list,
            target_path=target_path,
            created_by="local",
            default_mode=default_mode,
        )
    elif target_node["api_key"] == target_node["app_key"] == "local":
        # Download: this panel is the target; create the task locally.
        return cls._create_filetransfer_task(
            source_node={
                "name": "{}({})".format(src_node["remarks"], src_node["server_ip"]),
                "address": src_node["address"],
                "api_key": src_node["api_key"],
                "app_key": src_node["app_key"],
                "node_id": source_node_id,
                "lpver": src_node["lpver"]
            },
            target_node={
                "name": "local",
            },
            source_path_list=source_path_list,
            target_path=target_path,
            created_by="local",
            default_mode=default_mode,
        )
    elif src_node["lpver"]:
        if target_node["lpver"]:
            return public.return_message(-1, 0,"Cannot support file transfer between 1panel nodes")
        # When the source is a 1panel node, the transfer can only run as a
        # download driven from the target node.
        # NOTE(review): this inner "target is local" check is unreachable —
        # a local target already matched the elif above. Confirm intent.
        if target_node["api_key"] == target_node["app_key"] == "local":
            return cls._create_filetransfer_task(
                source_node={
                    "name": "{}".format(target_node["remarks"]) + ("({})".format(target_node["server_ip"]) if target_node["server_ip"] else ""),
                    "address": src_node["address"],
                    "api_key": src_node["api_key"],
                    "app_key": "",
                    "node_id": source_node_id,
                    "lpver": src_node["lpver"]
                },
                target_node={
                    "name": "local",
                },
                source_path_list=source_path_list,
                target_path=target_path,
                created_by="local",
                default_mode=default_mode,
            )
        else:
            # Ask the (YakPanel) target node to create the download task.
            srv = ServerNode(target_node["address"], target_node["api_key"], target_node["app_key"])
            real_create_res = srv.node_create_filetransfer_task(
                source_node={
                    "name": "{}".format(target_node["remarks"]) + ("({})".format(target_node["server_ip"]) if target_node["server_ip"] else ""),
                    "address": src_node["address"],
                    "api_key": src_node["api_key"],
                    "app_key": "",
                    "node_id": source_node_id,
                    "lpver": src_node["lpver"]
                },
                target_node={
                    "name": "local",
                },
                source_path_list=source_path_list,
                target_path=target_path,
                created_by="{}({})".format(public.GetConfigValue("title"), public.get_server_ip()),
                default_mode=default_mode
            )
    else:  # both ends are YakPanel nodes
        srv = ServerNode(src_node["address"], src_node["api_key"], src_node["app_key"])
        if srv.filetransfer_version_check():
            # Source failed the version check: fall back to creating the
            # task on the target node as a download from the source.
            srv = ServerNode(target_node["address"], target_node["api_key"], target_node["app_key"])
            res = srv.filetransfer_version_check()
            if res:
                return public.return_message(-1, 0,"{} Node check error:".format(target_node["remarks"]) + res)
            real_create_res = srv.node_create_filetransfer_task(
                source_node={
                    "name": "{}".format(target_node["remarks"]) + ("({})".format(target_node["server_ip"]) if target_node["server_ip"] else ""),
                    "address": src_node["address"],
                    "api_key": src_node["api_key"],
                    "app_key": src_node["app_key"],
                    "node_id": source_node_id,
                    "lpver": src_node["lpver"]
                },
                target_node={
                    "name": "local",
                },
                source_path_list=source_path_list,
                target_path=target_path,
                created_by="{}({})".format(public.GetConfigValue("title"), public.get_server_ip()),
                default_mode=default_mode,
            )
        else:
            # Source passed the version check: create an upload task on it.
            real_create_res = srv.node_create_filetransfer_task(
                source_node={
                    "name": "local",
                },
                target_node={
                    "name": "{}".format(target_node["remarks"]) + ("({})".format(target_node["server_ip"]) if target_node["server_ip"] else ""),
                    "address": target_node["address"],
                    "api_key": target_node["api_key"],
                    "app_key": target_node["app_key"],
                    "node_id": target_node_id,
                    "lpver": target_node["lpver"]
                },
                source_path_list=source_path_list,
                target_path=target_path,
                created_by="{}({})".format(public.GetConfigValue("title"), public.get_server_ip()),
                default_mode=default_mode,
            )

    if not real_create_res["status"]:
        return public.return_message(-1, 0,real_create_res["msg"])

    # Mirror the remotely created task in the local DB for status tracking.
    tt_task_id = real_create_res["data"]["task_id"]
    db = FileTransferDB()
    tt = FileTransferTask(
        source_node={"node_id": source_node_id},
        target_node={"node_id": target_node_id},
        source_path_list=source_path_list,
        target_path=target_path,
        task_action=real_create_res["data"]["task_action"],
        status="running",
        created_by="local",
        default_mode=default_mode,
        target_task_id=tt_task_id,
        is_source_node=node_db.is_local_node(source_node_id),
        is_target_node=node_db.is_local_node(target_node_id),
    )
    db.create_task(tt)
    # NOTE(review): this success path returns code -1, while the analogous
    # success return in _create_filetransfer_task uses 0 — confirm intended.
    return public.return_message(-1, 0,tt.to_dict())
|
||||
|
||||
@classmethod
def node_create_filetransfer_task(cls, get):
    """Peer-facing endpoint: create a transfer task requested by another node.

    Only reachable through authenticated API requests; validates the JSON
    payload, refuses to start while another task is active, then delegates
    to _create_filetransfer_task.
    """
    from YakPanel import g
    if not g.api_request:
        return public.return_message(-1, 0, "Unable to activate")

    source_node = get.get("source_node/s", "")
    target_node = get.get("target_node/s", "")
    source_path_list = get.get("source_path_list/s", "")
    target_path = get.get("target_path/s", "")
    created_by = get.get("created_by/s", "")
    default_mode = get.get("default_mode/s", "")

    try:
        source_node = json.loads(source_node)
        target_node = json.loads(target_node)
        source_path_list = json.loads(source_path_list)
    except Exception:
        return public.return_message(-1, 0, "Parameter error")
    required = (target_path, created_by, default_mode, source_node, target_node, source_path_list)
    if not all(required):
        return public.return_message(-1, 0, "Parameter loss")

    # One transfer at a time per node.
    ft_db = FileTransferDB()
    task_data, err = ft_db.get_last_task()
    if err:
        return public.return_message(-1, 0, err)
    if task_data and task_data["status"] not in ("complete", "failed"):
        return public.return_message(-1, 0, "There is an ongoing task on the node, please wait for it to complete before submitting")
    return cls._create_filetransfer_task(
        source_node=source_node,
        target_node=target_node,
        source_path_list=source_path_list,
        target_path=target_path,
        created_by=created_by,
        default_mode=default_mode
    )
|
||||
|
||||
# Actually creates the task.
# Possible situations:
# 1. source_node is the current node and target_node is another node -> upload
# 2. target_node is the current node -> download
|
||||
@classmethod
def _create_filetransfer_task(cls, source_node: dict,
                              target_node: dict,
                              source_path_list: List[dict],
                              target_path: str,
                              created_by: str,
                              default_mode: str = "cover") -> Dict:
    """Create and start a transfer task on this node.

    One side must be named "local": local source => upload, local target =>
    download. Verifies connectivity to the remote side, expands directories
    into individual files, persists the task + file rows, then spawns the
    background transfer worker and waits for it to report running.
    """
    if source_node["name"] == "local":
        task_action = "upload"
        check_node = LocalNode()
        # Pick the client type by whether the peer is a 1panel node.
        if target_node["lpver"]:
            t_node = LPanelNode(target_node["address"], target_node["api_key"], target_node["lpver"])
            err = t_node.test_conn()
        else:
            t_node = ServerNode(target_node["address"], target_node["api_key"], target_node["app_key"])
            err = t_node.test_conn()
        if err:
            return public.return_message(-1, 0,"{} Node cannot connect, error message: {}".format(target_node["name"], err))
    elif target_node["name"] == "local":
        task_action = "download"
        if source_node["lpver"]:
            check_node = LPanelNode(source_node["address"], source_node["api_key"], source_node["lpver"])
            err = check_node.test_conn()
        else:
            check_node = ServerNode(source_node["address"], source_node["api_key"], source_node["app_key"])
            err = check_node.test_conn()
        if err:
            return public.return_message(-1, 0,"{} Node cannot connect, error message: {}".format(source_node["name"], err))
    else:
        # Neither side is this node — nothing we can execute here.
        return public.return_message(-1, 0,"Node information that cannot be processed")

    # Remote YakPanel sources must run a compatible transfer version.
    if check_node.__class__ is ServerNode:
        ver_check = check_node.filetransfer_version_check()
        if ver_check:
            return public.return_message(-1, 0,"{} Node check error::".format(source_node["name"]) + ver_check)

    target_path = target_path.rstrip("/")
    # Expand the requested paths into a flat per-file transfer list.
    file_list = []
    for src_item in source_path_list:
        if src_item["is_dir"]:
            f_list, err = check_node.dir_walk(src_item["path"])
            if err:
                return public.return_message(-1, 0,err)
            if not f_list:
                # Empty directory: transfer the directory entry itself.
                src_item["dst_file"] = os.path.join(target_path, os.path.basename(src_item["path"]))
                file_list.append(src_item)
            else:
                for f_item in f_list:
                    # Re-root each file's path under target_path.
                    f_item["dst_file"] = f_item["path"].replace(os.path.dirname(src_item["path"]), target_path)
                    file_list.append(f_item)
        else:
            src_item["dst_file"] = os.path.join(target_path, os.path.basename(src_item["path"]))
            file_list.append(src_item)

    if len(file_list) > 1000:
        return public.return_message(-1, 0,"More than 1000 files, please compress before transferring")

    db = FileTransferDB()
    tt = FileTransferTask(
        source_node=source_node,
        target_node=target_node,
        source_path_list=source_path_list,
        target_path=target_path,
        task_action=task_action,
        status="pending",
        created_by=created_by,
        default_mode=default_mode,
        is_source_node=source_node["name"] == "local",
        is_target_node=target_node["name"] == "local",
    )
    err = db.create_task(tt)
    if err:
        return public.return_message(-1, 0,err)
    ft_list = []
    for f_item in file_list:
        ft = FileTransfer(
            task_id=tt.task_id,
            src_file=f_item["path"],
            dst_file=f_item["dst_file"],
            file_size=f_item["size"],
            is_dir=f_item.get("is_dir", 0),
            status="pending",
            progress=0,
        )
        ft_list.append(ft)
    if not ft_list:
        return public.return_message(-1, 0,"There are no files available for transfer")
    err = db.batch_create_file_transfers(ft_list)
    if err:
        # Roll back the parent task if the file rows could not be written.
        db.delete_task(tt.task_id)
        return public.return_message(-1, 0,err)

    # Spawn the detached transfer worker, logging to a per-task file.
    py_bin = public.get_python_bin()
    log_file = "{}/task_{}.log".format(cls.log_dir, tt.task_id)
    start_task = "nohup {} {}/script/node_file_transfers.py {} > {} 2>&1 &".format(
        py_bin,
        public.get_panel_path(),
        tt.task_id,
        log_file,
    )
    res = public.ExecShell(start_task)
    # Block (up to 10s) until the worker reports it is running.
    wait_timeout = wait_running(tt.task_id, timeout=10.0)
    if wait_timeout:
        return public.return_message(-1, 0,wait_timeout)
    return public.return_message(0, 0,tt.to_dict())
|
||||
|
||||
@staticmethod
def file_list(get):
    """List a directory on the local node (node_id == 0) or a remote node."""
    node_id = get.get("node_id/d", -1)
    p = get.get("p/d", 1)
    row = get.get("showRow/d", 50)
    path = get.get("path/s", "")
    search = get.get("search/s", "")

    if node_id == -1:
        return public.return_message(-1, 0, "Node parameter error")
    node = LocalNode() if node_id == 0 else ServerNode.new_by_id(node_id)

    if not node:
        return public.return_message(-1, 0, "Node does not exist")

    if not path:
        return public.return_message(-1, 0, "Path parameter error")

    data, err = node.file_list(path, p, row, search)
    if err:
        return public.return_message(-1, 0, err)
    # Raw listings get wrapped; already-wrapped responses pass through as-is.
    if "status" not in data and "message" not in data:
        return public.return_message(0, 0, data)
    return data
|
||||
|
||||
@staticmethod
def delete_file(get):
    """Delete a file or directory on the selected node."""
    node_id = get.get("node_id/d", -1)
    if node_id == -1:
        return public.return_message(-1, 0, "Node parameter error")
    node = LocalNode() if node_id == 0 else ServerNode.new_by_id(node_id)

    if not node:
        return public.return_message(-1, 0, "Node does not exist")

    path = get.get("path/s", "")
    is_dir = get.get("is_dir/d", 0)
    if not path:
        return public.return_message(-1, 0, "Path parameter error")
    outcome = node.remove_file(path, is_dir=is_dir == 1)
    if outcome.get('status', -1) == 0:
        detail = outcome.get('message', {}).get('result', "File/Directory deleted successfully or moved to recycle bin!")
        return public.return_message(0, 0, detail)
    return public.return_message(-1, 0, outcome.get('msg', "File delete failed"))
|
||||
|
||||
def transfer_status(self, get):
    """Stream the latest transfer task's status over WebSocket.

    Tasks mirrored from a remote node (target_task_id set) are queried on the
    node that actually runs them and proxied; purely local tasks stream from
    the local log.

    Fixes the original's ``jpublic.return_message`` typo on the non-WebSocket
    path, which raised NameError instead of returning an error response.
    """
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if not ws:
        # BUGFIX: was "jpublic.return_message" (NameError).
        return public.return_message(-1, 0,"Please use WebSocket connection")

    ft_db = FileTransferDB()
    task_data, err = ft_db.get_last_task()
    if err:
        ws.send(json.dumps({"type": "error", "msg": err}))
        return
    if not task_data:
        ws.send(json.dumps({"type": "end", "msg": "No tasks"}))
        return
    task = FileTransferTask.from_dict(task_data)
    if task.target_task_id:
        # The task runs on a remote node: uploads on the source, downloads on
        # the target.
        if task.task_action == "upload":
            run_node_id = task.source_node["node_id"]
        else:
            run_node_id = task.target_node["node_id"]
        run_node = ServerNode.new_by_id(run_node_id)
        res = run_node.get_transfer_status(task.target_task_id)
        if not res["status"]:
            ws.send(json.dumps({"type": "error", "msg": res["msg"]}))
            return
        if res["data"]["task"]["status"] in ("complete", "failed"):
            # Sync the final state into the local mirror record.
            task.status = res["data"]["task"]["status"]
            task.completed_at = res["data"]["task"]["completed_at"]
            ft_db.update_task(task)
            res_data = res["data"]
            res_data["type"] = "end"
            res_data["msg"] = "Mission completed"
            ws.send(json.dumps(res_data))
            return

        # Still running: proxy the remote node's live status stream.
        run_node.proxy_transfer_status(task.target_task_id, ws)
    else:
        if task.status in ("complete", "failed"):
            data, _ = ft_db.last_task_all_status()
            data.update({"type": "end", "msg": "Mission completed"})
            ws.send(json.dumps(data))
            return
        self._proxy_transfer_status(task, ws)
|
||||
|
||||
def node_proxy_transfer_status(self, get):
    """Stream the status of a specific transfer task over WebSocket.

    Used when another panel proxies a status request to this node.
    """
    ws: simple_websocket.Server = getattr(get, '_ws', None)
    if ws is None:
        return public.return_message(-1, 0, "Please use WebSocket connection")
    task_id = get.get("task_id/d", 0)
    if not task_id:
        ws.send(json.dumps({"type": "error", "msg": "Task ID parameter error"}))
        return
    ft_db = FileTransferDB()
    task_data, err = ft_db.get_task(task_id)
    if err:
        ws.send(json.dumps({"type": "error", "msg": err}))
        return

    task = FileTransferTask.from_dict(task_data)
    if task.status not in ("complete", "failed"):
        # Still running: follow the live status socket.
        self._proxy_transfer_status(task, ws)
        return
    # Finished: send a single final snapshot and end the stream.
    data, _ = ft_db.last_task_all_status()
    data["type"] = "end"
    data["msg"] = "Mission completed"
    ws.send(json.dumps(data))
||||
@staticmethod
def _proxy_transfer_status(task: FileTransferTask, ws: simple_websocket.Server):
    """Relay status frames from the task's status socket to *ws*."""

    def forward(payload):
        # A plain string means the status socket is gone: end the stream.
        if isinstance(payload, str):
            ws.send(json.dumps({"type": "end", "msg": payload}))
            return
        if payload["task"]["status"] in ("complete", "failed"):
            payload["msg"] = "Mission completed"
            payload["type"] = "end"
        else:
            payload["type"] = "status"
            payload["msg"] = "Task in progress"
        ws.send(json.dumps(payload))

    task_running_log(task.task_id, forward)
||||
@staticmethod
def get_transfer_status(get):
    """Return a one-shot snapshot of a transfer task and its file list."""
    task_id = get.get("task_id/d", 0)
    if not task_id:
        return public.return_message(-1, 0, "Task ID parameter error")

    ft_db = FileTransferDB()
    task_data, err = ft_db.get_task(task_id)
    if err:
        return public.return_message(-1, 0, err)
    payload = {
        "task": FileTransferTask.from_dict(task_data).to_dict(),
        "file_list": ft_db.get_task_file_transfers(task_id),
    }
    return public.return_message(0, 0, payload)
||||
@staticmethod
def upload_check(get):
    """Pre-check whether the given files can be uploaded to a node."""
    node_id = get.get("node_id/d", -1)
    if node_id == -1:
        return public.return_message(-1, 0, "Node parameter error")
    node = LocalNode() if node_id == 0 else ServerNode.new_by_id(node_id)
    if not node:
        return public.return_message(-1, 0, "Node does not exist")
    filename = get.get("files/s", "")
    # One path per line; a single path arrives without a newline.
    f_list = filename.split("\n") if "\n" in filename else [filename]
    res, err = node.upload_check(f_list)
    if err:
        return public.return_message(-1, 0, err)
    # Raw payloads (no message/status envelope) get wrapped; envelopes pass through.
    if 'message' not in res and 'status' not in res:
        return public.return_message(0, 0, res)
    return res
||||
@staticmethod
def dir_size(get):
    """Return the size of a directory on the chosen node (0 = local)."""
    node_id = get.get("node_id/d", -1)
    if node_id < 0:
        return public.return_message(-1, 0, "Node parameter error")
    node = LocalNode() if node_id == 0 else ServerNode.new_by_id(node_id)
    if not node:
        return public.return_message(-1, 0, "Node does not exist")
    size, err = node.dir_size(get.get("path/s", ""))
    if err:
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, {
        "size": public.to_size(size),   # human-readable
        "size_b": size,                 # raw bytes
    })
||||
@staticmethod
def create_dir(get):
    """Create a directory on the chosen node (0 = local)."""
    node_id = get.get("node_id/d", -1)
    if node_id < 0:
        return public.return_message(-1, 0, "Node parameter error")
    node = LocalNode() if node_id == 0 else ServerNode.new_by_id(node_id)
    if not node:
        return public.return_message(-1, 0, "Node does not exist")

    res, err = node.create_dir(get.get("path/s", ""))
    if err:
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, "Successfully created directory")
||||
@staticmethod
def delete_dir(get):
    """Delete a directory on the chosen node.

    Bug fix: the original referenced ``res`` without ever resolving the node
    or performing any delete, so every call raised NameError.  The body is
    reconstructed to follow the same pattern as create_dir/dir_size.
    """
    node_id = get.get("node_id/d", -1)
    if node_id < 0:
        return public.return_message(-1, 0, "Node parameter error")
    if node_id == 0:
        node = LocalNode()
    else:
        node = ServerNode.new_by_id(node_id)
    if not node:
        return public.return_message(-1, 0, "Node does not exist")
    path = get.get("path/s", "")
    # NOTE(review): assumes the node object exposes delete_dir() mirroring
    # create_dir() -- confirm against the node implementation.
    res, err = node.delete_dir(path)
    if err:
        return public.return_message(-1, 0, err)
    if res["status"]:
        return public.return_message(0, 0, res["msg"])
    return public.return_message(-1, 0, res["msg"])
||||
@staticmethod
def node_get_dir(get):
    """List a directory on the chosen node (supports search and disk switch)."""
    node_id = get.get("node_id/d", -1)
    if node_id < 0:
        return public.return_message(-1, 0, "Node parameter error")
    node = LocalNode() if node_id == 0 else ServerNode.new_by_id(node_id)
    if not node:
        return public.return_message(-1, 0, "Node does not exist")

    search = get.get("search/s", "")
    disk = get.get("disk/s", "")
    path = get.get("path/s", "")
    return node.get_dir(path, search, disk)
198
mod/project/node/filetransfer/__init__.py
Normal file
198
mod/project/node/filetransfer/__init__.py
Normal file
@@ -0,0 +1,198 @@
|
||||
import os
|
||||
import time
|
||||
import threading
|
||||
from datetime import datetime
|
||||
|
||||
from .socket_server import StatusServer, StatusClient, register_cleanup
|
||||
from mod.project.node.dbutil import FileTransferDB, FileTransfer, FileTransferTask
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, LPanelNode
|
||||
from typing import Optional, Callable, Union
|
||||
|
||||
|
||||
class Filetransfer:
    """Executes one file-transfer task (upload or download) between nodes.

    Loads the task and its file list from the DB, exposes live status over a
    per-task Unix-domain StatusServer socket, and transfers each pending file.
    """
    # Directory holding the per-task status sockets.
    SOCKET_FILE_DIR = "/tmp/filetransfer"
    if not os.path.exists(SOCKET_FILE_DIR):
        os.mkdir(SOCKET_FILE_DIR)

    def __init__(self, task_id: int):
        """Load task *task_id* and resolve its source/destination nodes.

        :raises ValueError: if the task cannot be loaded or has no files.
        """
        self.ft_db = FileTransferDB()
        task_data, err = self.ft_db.get_task(task_id)
        # Bug fix: the original tested `if err is None: raise ValueError(err)`,
        # which raised on SUCCESS (with a useless None message) and silently
        # ignored real errors.
        if err:
            raise ValueError(err)

        self.task = FileTransferTask.from_dict(task_data)

        file_list = self.ft_db.get_task_file_transfers(task_id)
        if not file_list:
            raise ValueError("task_id:{} file_list is empty".format(task_id))

        self.file_map = {f["transfer_id"]: FileTransfer.from_dict(f) for f in file_list}
        self.file_count = len(self.file_map)
        self.file_complete = sum(1 for f in self.file_map.values() if f.status == "complete")
        self.file_error = sum(1 for f in self.file_map.values() if f.status == "error")
        self.count_size = sum(f.file_size for f in self.file_map.values())
        self.complete_size = sum(f.file_size for f in self.file_map.values() if f.status == "complete")
        self.current_file_size = 0  # bytes completed of the file currently transferring
        self._srv = StatusServer(self.get_task_status, self.SOCKET_FILE_DIR + "/task_" + str(task_id))

        # Resolve endpoints; the local panel is always one end of the transfer.
        if self.task.task_action == "upload":
            self.sn = LocalNode()
            if self.task.target_node["lpver"]:
                self.dn = LPanelNode(self.task.target_node["address"], self.task.target_node["api_key"],
                                     self.task.target_node["lpver"])
            else:
                self.dn = ServerNode(self.task.target_node["address"], self.task.target_node["api_key"],
                                     self.task.target_node["app_key"])
        else:
            if self.task.source_node["lpver"]:
                self.sn = LPanelNode(self.task.source_node["address"], self.task.source_node["api_key"],
                                     self.task.source_node["lpver"])
            else:
                self.sn = ServerNode(self.task.source_node["address"], self.task.source_node["api_key"],
                                     self.task.source_node["app_key"])
            self.dn = LocalNode()

        self._close_func: Optional[Callable] = None

    def get_task_status(self, init: bool = False) -> dict:
        """Build the status payload pushed to status-socket clients."""
        task_dict = self.task.to_dict()
        task_dict.update({
            "file_count": self.file_count,
            "file_complete": self.file_complete,
            "file_error": self.file_error,
            "count_size": self.count_size,
            "complete_size": self.complete_size,
            "progress": (self.complete_size + self.current_file_size) * 100 / self.count_size if self.count_size > 0 else 0,
        })
        return {
            "task": task_dict,
            "file_status_list": [{
                "source_path": f.src_file,
                "target_path": f.dst_file,
                "status": f.status,
                "progress": f.progress,
                "log": f.message,
            } for f in self.file_map.values()],
        }

    def start_server(self):
        """Start the status socket server in a daemon thread."""
        t = threading.Thread(target=self._srv.start_server, args=(), daemon=True)
        t.start()
        register_cleanup(self._srv)

        def close():
            self._srv.stop()

        self._close_func = close

    def close(self):
        """Stop the status server if it was started."""
        if self._close_func is None:
            return
        self._close_func()

    def update_status(self):
        """Push the current status snapshot to all connected clients."""
        self._srv.update_status()

    def _transfer_dir(self, file):
        """Handle an (empty) directory entry: ensure it exists on the target."""
        exits, _ = self.dn.target_file_exits(file.dst_file)
        if exits:
            file.progress = 100
            file.status = "complete"
            self.ft_db.update_file_transfer(file)
            return
        res, err = self.dn.create_dir(path=file.dst_file)
        if err:
            file.progress = 0
            file.status = "failed"
            file.message = err
        # Remote nodes may answer with either a boolean "status" or a
        # v2-style integer status where 0 means success.
        elif res.get("status", False) or res.get("status", -1) == 0:
            file.progress = 100
            file.status = "complete"
        else:
            file.progress = 0
            file.status = "failed"
            file.message = res["msg"]
        self.ft_db.update_file_transfer(file)

    def run(self):
        """Run all pending transfers, persisting progress and live status."""
        self.task.status = "running"
        self.ft_db.update_task(self.task)
        self.start_server()

        pending_list = [f for f in self.file_map.values() if f.status == "pending"]
        for file in pending_list:
            if file.is_dir > 0:
                self._transfer_dir(file)
                continue

            file.status = "running"
            file.started_at = datetime.now()
            self.ft_db.update_file_transfer(file)

            def call_log(progress, log, _file=file):
                # Progress callback from the node API: persist and broadcast.
                _file.progress = progress
                self.current_file_size = _file.file_size * progress // 100
                self.ft_db.update_file_transfer(_file)
                self.update_status()

            if self.task.task_action == "upload":
                self.ft_db.update_file_transfer(file)
                res = self.dn.upload_file(
                    filename=file.src_file,
                    target_path=os.path.dirname(file.dst_file),
                    mode=self.task.default_mode,
                    call_log=call_log,
                )
            else:
                self.ft_db.update_file_transfer(file)
                res = self.sn.download_file(
                    filename=file.src_file,
                    target_path=os.path.dirname(file.dst_file),
                    mode=self.task.default_mode,
                    call_log=call_log,
                )

            self.current_file_size = 0

            # A truthy result is an error message; falsy means success.
            if res:
                file.status = "failed"
                file.message = res
                self.file_error += 1
            else:
                file.status = "complete"
                file.progress = 100
                self.file_complete += 1
                self.complete_size += file.file_size

            self.ft_db.update_file_transfer(file)
            self.update_status()

        self.task.status = "complete" if self.file_error == 0 else "failed"
        self.ft_db.update_task(self.task)
        self.update_status()
        time.sleep(10)  # give status clients time to read the final snapshot
        self.close()
|
||||
def run_file_transfer_task(task_id: int):
    """Entry point: build and run the transfer task identified by *task_id*."""
    Filetransfer(task_id).run()
||||
def task_running_log(task_id: int, call_log: Callable[[Union[str, dict]], None]):
    """Attach *call_log* to a running task's status socket.

    A missing socket file means the task is not running; report that once.
    """
    socket_file = "{}/task_{}".format(Filetransfer.SOCKET_FILE_DIR, task_id)
    if not os.path.exists(socket_file):
        call_log("The task status link does not exist")
        return
    client = StatusClient(socket_file, callback=call_log)
    client.connect()
||||
def wait_running(task_id: int, timeout: float = 3.0) -> str:
    """Poll until the task's status socket appears.

    :return: "" on success, or an error message on timeout.
    """
    socket_file = "{}/task_{}".format(Filetransfer.SOCKET_FILE_DIR, task_id)
    step = 0.05
    while not os.path.exists(socket_file):
        if timeout <= 0:
            return "Task startup timeout"
        timeout -= step
        time.sleep(step)
    return ""
||||
300
mod/project/node/filetransfer/socket_server.py
Normal file
300
mod/project/node/filetransfer/socket_server.py
Normal file
@@ -0,0 +1,300 @@
|
||||
import json
|
||||
import socket
|
||||
import struct
|
||||
import sys
|
||||
import threading
|
||||
import os
|
||||
import atexit
|
||||
from typing import Callable, Any, Union, Tuple, Optional, List
|
||||
|
||||
|
||||
class StatusServer:
    """Pushes JSON status snapshots to connected clients.

    Works over a Unix-domain socket (str address) or TCP ((host, port)).
    Wire format: 4-byte little-endian length header followed by a UTF-8
    JSON payload.
    """

    def __init__(self, get_status_func: Callable[[bool], Any], server_address: Union[str, Tuple[str, int]]):
        """
        :param get_status_func: returns the current status; called with
            init=True for the first snapshot sent to a new client and
            False for subsequent updates.
        :param server_address: Unix socket path (str) or (host, port) tuple.
        """
        self.get_status_func = get_status_func
        self.server_address = server_address
        self.clients: List[socket.socket] = []
        self.lock = threading.Lock()  # guards self.clients
        self.running = False
        self.server_socket = None

    @staticmethod
    def _pack(status) -> bytes:
        """Serialize *status* as length-prefixed JSON."""
        payload = json.dumps(status).encode()
        return len(payload).to_bytes(4, 'little') + payload

    def handle_client(self, client_socket):
        """Serve one client: send the initial snapshot, then keep the
        connection open so update_status() can push further snapshots."""
        try:
            try:
                client_socket.sendall(self._pack(self.get_status_func(True)))
            except Exception as e:
                print(f"Failed to send update to client: {e}")
                client_socket.close()
                return

            with self.lock:
                self.clients.append(client_socket)

            # Block until the client disconnects (or the server stops);
            # incoming data is ignored apart from acting as a liveness check.
            while self.running:
                try:
                    if not client_socket.recv(1024):
                        break
                except Exception:
                    break
        finally:
            # Close and deregister; double-close after the early return is harmless.
            client_socket.close()
            with self.lock:
                if client_socket in self.clients:
                    self.clients.remove(client_socket)

    def start_server(self):
        """Bind, listen, and accept clients until stop() is called."""
        self.running = True

        if isinstance(self.server_address, str):
            # Unix domain socket: remove a stale socket file first.
            self.server_socket = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            try:
                os.unlink(self.server_address)
            except OSError:
                if os.path.exists(self.server_address):
                    raise
            self.server_socket.bind(self.server_address)
        else:
            self.server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self.server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
            self.server_socket.bind(self.server_address)

        self.server_socket.listen(5)
        print(f"Server is listening on {self.server_address}...")

        try:
            # (the original set self.running = True a second time here)
            while self.running:
                client_socket, _ = self.server_socket.accept()
                print("Client connected")
                # One thread per client.
                threading.Thread(target=self.handle_client, args=(client_socket,)).start()
        except KeyboardInterrupt:
            print("Shutting down server...")
        finally:
            self.stop()

    def stop(self):
        """Stop the server, close all sockets, and remove the socket file."""
        if not self.running:
            return
        self.running = False

        with self.lock:
            for client in self.clients:
                client.close()
            self.clients.clear()

        if self.server_socket:
            self.server_socket.close()
            self.server_socket = None

        if isinstance(self.server_address, str) and os.path.exists(self.server_address):
            os.remove(self.server_address)
            print(f"Socket file removed: {self.server_address}")

    def update_status(self, update_data: Optional[dict] = None):
        """Push *update_data* (or a fresh snapshot) to every client.

        Bug fix: the original removed dead clients from ``self.clients``
        while iterating over that same list, which skips the element after
        each removal; iterate over a copy instead.  Debug prints of the raw
        payload bytes were removed.
        """
        new_status = update_data if update_data else self.get_status_func(False)
        packed_data = self._pack(new_status)

        with self.lock:
            for client in list(self.clients):
                try:
                    client.sendall(packed_data)
                except Exception as e:
                    print(f"Failed to send update to client: {e}")
                    client.close()
                    if client in self.clients:
                        self.clients.remove(client)
||||
class StatusClient:
    """Receives length-prefixed JSON status frames from a StatusServer."""

    def __init__(self, server_address, callback=None):
        """
        :param server_address: Unix socket path (str) or (host, port) tuple.
        :param callback: called with each decoded status dict.
        """
        self.server_address = server_address
        self.callback = callback
        self.sock: Optional[socket.socket] = None
        self.running = False
        self.receive_thread = None

    def connect(self):
        """Connect to the server and start the background receive thread."""
        if isinstance(self.server_address, str):
            print("Connecting to Unix socket...", self.server_address)
            self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        else:
            self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.connect(self.server_address)

        print("Connected to server.")

        self.running = True
        self.receive_thread = threading.Thread(target=self._receive_loop, daemon=True)
        self.receive_thread.start()

    def _read_until(self, buffer: bytes, needed: int) -> bytes:
        """Grow *buffer* from the socket until it holds >= *needed* bytes."""
        while len(buffer) < needed:
            chunk = self.sock.recv(needed - len(buffer))
            if not chunk:
                raise ConnectionResetError("Server closed the connection")
            buffer += chunk
        return buffer

    def _receive_loop(self):
        """Read framed JSON messages and dispatch them to the callback."""
        buffer = b''
        while self.running:
            try:
                # 4-byte little-endian length header, then the JSON body.
                buffer = self._read_until(buffer, 4)
                length = int.from_bytes(buffer[:4], 'little')
                buffer = self._read_until(buffer[4:], length)
                message, buffer = buffer[:length], buffer[length:]

                status = json.loads(message.decode())
                if self.callback:
                    self.callback(status)
            except ConnectionResetError as e:
                print("Connection interrupted:", e)
                self.disconnect()
                break
            except json.JSONDecodeError as e:
                print("JSON parsing failed:", e)
                continue
            except Exception as e:
                print("reception error:", e)
                self.disconnect()
                break

    def disconnect(self):
        """Close the socket and flag the receive loop to exit."""
        self.running = False
        if self.sock:
            self.sock.close()
            self.sock = None
            print("Disconnected from server.")

    def stop(self):
        """Disconnect and wait for the receive thread to finish."""
        self.disconnect()
        if self.receive_thread and self.receive_thread.is_alive():
            self.receive_thread.join()
        print("Client stopped.")

    def wait_receive(self):
        """Block until the receive thread exits (if it is running)."""
        if self.receive_thread and self.receive_thread.is_alive():
            self.receive_thread.join()
|
||||
# 注册退出清理钩子
|
||||
def register_cleanup(server_instance):
    """Ensure *server_instance* is stopped when the interpreter exits."""
    atexit.register(server_instance.stop)
|
||||
# # 示例使用
|
||||
# if __name__ == '__main__' and "server" in sys.argv:
|
||||
#
|
||||
# import time
|
||||
#
|
||||
# # 模拟的状态存储
|
||||
# process_status = {
|
||||
# 'process1': 'running',
|
||||
# 'process2': 'stopped',
|
||||
# "big_data": "<AAAAAAAAAAAAAAAAAFFFFFFFFFFFFFFFFFFAAAAFFFFFFFFFFFFFFFAAAAAAAAAAAAAAAAAAAAAAAAA>"
|
||||
# }
|
||||
#
|
||||
# def get_status():
|
||||
# return process_status
|
||||
#
|
||||
# # 设置 Unix 域套接字地址
|
||||
# server_address = './socket_filetransfer.sock'
|
||||
#
|
||||
# # 创建服务端实例
|
||||
# server = StatusServer(get_status, server_address)
|
||||
# register_cleanup(server) # 注册退出时清理
|
||||
#
|
||||
# # 启动服务端线程
|
||||
# server_thread = threading.Thread(target=server.start_server)
|
||||
# server_thread.daemon = True
|
||||
# server_thread.start()
|
||||
#
|
||||
# # 模拟状态更新
|
||||
# try:
|
||||
# while True:
|
||||
# print(">>>>>>>change<<<<<<<<<<<<<<<")
|
||||
# time.sleep(5)
|
||||
# process_status['process1'] = 'stopped' if process_status['process1'] == 'running' else 'running'
|
||||
# server.update_status()
|
||||
#
|
||||
# time.sleep(5)
|
||||
# process_status['process2'] = 'running' if process_status['process2'] == 'stopped' else 'stopped'
|
||||
# server.update_status()
|
||||
# except KeyboardInterrupt:
|
||||
# pass
|
||||
#
|
||||
# # 示例使用
|
||||
# if __name__ == '__main__' and "client" in sys.argv:
|
||||
# # Unix 域套接字示例:
|
||||
# server_address = './socket_filetransfer.sock'
|
||||
#
|
||||
# # 示例回调函数
|
||||
# def on_status_update(status_dict):
|
||||
# print("[Callback] New status received:")
|
||||
# for k, v in status_dict.items():
|
||||
# print(f" - {k}: {v}")
|
||||
#
|
||||
#
|
||||
# client = StatusClient(server_address, callback=on_status_update)
|
||||
# try:
|
||||
# client.connect()
|
||||
#
|
||||
# # 主线程保持运行,防止程序退出
|
||||
# while client.running:
|
||||
# pass
|
||||
# except KeyboardInterrupt:
|
||||
# print("Client shutting down...")
|
||||
# client.stop()
|
||||
561
mod/project/node/nodeMod.py
Normal file
561
mod/project/node/nodeMod.py
Normal file
@@ -0,0 +1,561 @@
|
||||
import json
|
||||
import os.path
|
||||
import threading
|
||||
import traceback
|
||||
import public
|
||||
from mod.base import json_response
|
||||
from mod.base.ssh_executor import test_ssh_config
|
||||
try:
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, monitor_node_once_with_timeout
|
||||
except:
|
||||
# 定义处理h11的命令变量
|
||||
cmd_h11 = "cd /www/server/panel/pyenv/bin && source activate && H11_VERSION=$(./pip3 show h11 | grep -i Version | awk '{print $2}') && if [ \"$H11_VERSION\" != \"0.14.0\" ]; then ./pip3 uninstall h11 -y; fi; ./pip3 install h11==0.14.0"
|
||||
|
||||
# 定义处理wsproto的命令变量
|
||||
cmd_wsproto = "cd /www/server/panel/pyenv/bin && source activate && WSPROTO_VERSION=$(./pip3 show wsproto | grep -i Version | awk '{print $2}') && if [ \"$WSPROTO_VERSION\" != \"1.2.0\" ]; then ./pip3 uninstall wsproto -y; fi; ./pip3 install wsproto==1.2.0"
|
||||
public.ExecShell(cmd_h11)
|
||||
public.ExecShell(cmd_wsproto)
|
||||
from mod.project.node.nodeutil import ServerNode, LocalNode, monitor_node_once_with_timeout
|
||||
from mod.project.node.dbutil import Node, ServerNodeDB, ServerMonitorRepo
|
||||
from mod.project.node.task_flow import flow_useful_version
|
||||
|
||||
|
||||
class main():
|
||||
node_db_obj = ServerNodeDB()
|
||||
node_db_file =node_db_obj._DB_FILE
|
||||
def __init__(self):
    # Marker files: whether the node module has been used, and the
    # preferred node-list display mode ("list" / "block").
    panel_path = public.get_panel_path()
    self.tip_file = panel_path + "/data/mod_node_used.pl"
    self.show_mode_file = panel_path + "/data/mod_node_show_mode.pl"
||||
def add_node(self, get):
    """
    Add a node.
    :param get: address, api_key, remarks, category_id, optional ssh_conf (JSON string)
    :return: panel message dict
    """
    try:
        get.ssh_conf = json.loads(get.get('ssh_conf', "{}"))
    except Exception:
        return public.return_message(-1, 0, "SSH_conf data format error")
    n, err = Node.from_dict(get)
    if not n:
        return public.return_message(-1, 0, err)
    public.set_module_logs("nodes_node_adds_9", "add_node")
    # SSH-only nodes carry no API/app key and need no key validation.
    if n.app_key or n.api_key:
        err = ServerNode.check_api_key(n)
        if err:
            return public.return_message(-1, 0, err)

    n.server_ip = n.parse_server_ip()
    err = ServerNodeDB().create_node(n)
    if err:
        return public.return_message(-1, 0, err)
    node = ServerNodeDB().get_node_by_id(n.id)
    if node:
        # Probe the new node once so its status shows up immediately.
        monitor_node_once_with_timeout(node)
    return public.return_message(0, 0, "Node added successfully")
||||
@staticmethod
def bind_app(get):
    """Send an app-binding request for the node's app_key."""
    n, err = Node.from_dict(get)
    if not n:
        return public.return_message(-1, 0, err)
    if not n.app_key:
        return public.return_message(-1, 0, "Please specify the app key to bind to")
    res = ServerNode("", "", n.app_key).app_bind()
    # A truthy result is an error message; falsy means the request went out.
    if res:
        return public.return_message(-1, 0, res)
    return public.return_message(0, 0, "Binding request has been sent out")
||||
@staticmethod
def bind_app_status(get):
    """Check whether the app-binding for the node's app_key succeeded."""
    n, err = Node.from_dict(get)
    if not n:
        return public.return_message(-1, 0, err)
    if not n.app_key:
        return public.return_message(-1, 0, "Please specify the app key to bind to")
    res = ServerNode("", "", n.app_key).app_bind_status()
    # A truthy result is an error message; falsy means the binding is done.
    if res:
        return public.return_message(-1, 0, res)
    return public.return_message(0, 0, "Binding successful")
||||
def del_node(self, get):
    """
    Delete nodes.
    :param get: ids -- JSON list of node IDs (a bare int is accepted too)
    """
    node_ids = get.get('ids', "")
    if not node_ids:
        return public.return_message(-1, 0, "Node ID cannot be empty, at least one")
    try:
        node_ids = json.loads(node_ids)
        if isinstance(node_ids, int):
            node_ids = [node_ids]
    except Exception:
        return public.return_message(-1, 0, "The format of the node ID data passed in is incorrect")

    srv_db = ServerNodeDB()
    for node_id in node_ids:
        # The local node itself can never be removed.
        if srv_db.is_local_node(node_id):
            continue
        err = srv_db.delete_node(node_id)
        if err:
            return public.return_message(-1, 0, err)
    return public.return_message(0, 0, "Node deleted successfully")
||||
def update_node(self, get):
    """
    Update a node.
    :param get: id, address, api_key, remarks, category_id, optional ssh_conf (JSON string)
    """
    node_id = get.get('id/d', 0)
    try:
        get.ssh_conf = json.loads(get.get('ssh_conf', "{}"))
    except Exception:
        return public.return_message(-1, 0, "SSH_conf data format error")
    if not node_id:
        return public.return_message(-1, 0, "Node ID cannot be empty")
    n, err = Node.from_dict(get)
    if not n:
        return public.return_message(-1, 0, err)
    if n.app_key or n.api_key:
        err = ServerNode.check_api_key(n)
        if err:
            return public.return_message(-1, 0, err)

    n.server_ip = n.parse_server_ip()
    # Status/error fields are maintained by the monitor, not by the caller.
    err = ServerNodeDB().update_node(n, with_out_fields=["id", "status", "error", "error_num"])
    if err:
        return public.return_message(-1, 0, err)
    node = ServerNodeDB().get_node_by_id(n.id)
    if node:
        monitor_node_once_with_timeout(node)
    return public.return_message(0, 0, "Node update successful")
|
||||
def default_show_mode(self) -> str:
    """Return the saved node-list display mode, defaulting to "list"."""
    if os.path.exists(self.show_mode_file):
        mode = public.readFile(self.show_mode_file)
        if mode in ("list", "block"):
            return mode
    return "list"
|
||||
def set_show_mode(self, mode_name: str):
    """Persist the display mode; returns False for unknown modes."""
    if mode_name not in ("list", "block"):
        return False
    if mode_name == "block":
        # Usage statistics for the block view.
        public.set_module_logs("node_show_block", "node_show_block")
    public.writeFile(self.show_mode_file, mode_name)
    return True
|
||||
def get_node_list(self, get):
    """
    List nodes.
    :param get: p page, limit per page, search keyword, category_id,
                refresh ("1" = re-probe nodes), show_mode ("list"/"block")
    """
    page_num = max(int(get.get('p/d', 1)), 1)
    limit = max(int(get.get('limit/d', 10)), 10)
    search = get.get('search', "").strip()
    category_id = get.get('category_id/d', -1)
    refresh = get.get('refresh/s', "")

    show_mode = get.get('show_mode/s', "")
    if show_mode not in ("list", "block"):
        show_mode = self.default_show_mode()
    elif not self.set_show_mode(show_mode):
        show_mode = self.default_show_mode()

    if show_mode == "block":
        # The block view renders everything on one page.
        page_num, limit = 1, 9999999

    srv_db = ServerNodeDB()
    data, err = srv_db.get_node_list(search, category_id, (page_num - 1) * limit, limit)
    if err:
        return public.return_message(-1, 0, err)

    if refresh == "1":
        # Re-probe every node in parallel (5s timeout each) before answering.
        probes = [threading.Thread(target=monitor_node_once_with_timeout, args=(node, 5)) for node in data]
        for th in probes:
            th.start()
        for th in probes:
            th.join()

    for node in data:
        if isinstance(node["ssh_conf"], str):
            node["ssh_conf"] = json.loads(node["ssh_conf"])
        if isinstance(node["error"], str):
            node["error"] = json.loads(node["error"])
        if node["app_key"] == "local" and node["api_key"] == "local":
            # The local node always reports the panel's own address.
            node["address"] = public.getPanelAddr()
        if node["lpver"] and not node["remarks"].endswith(" | 1Panel"):
            node["remarks"] = node["remarks"] + " | 1Panel"
        node['data'] = self.get_node_data(node)

    count = srv_db.node_count(search, category_id)
    page = public.get_page(count, page_num, limit)
    page["data"] = data
    page["show_mode"] = show_mode
    return public.return_message(0, 0, page)
|
||||
@staticmethod
def get_node_data(node: dict):
    """Return a compact cpu/memory status summary for one node."""
    if node["app_key"] == "local" and node["api_key"] == "local":
        data = ServerMonitorRepo.get_local_server_status()
    else:
        srv_m = ServerMonitorRepo()
        if srv_m.is_reboot_wait(node["server_ip"]):
            return {'status': 4, 'msg': "Server restart in progress..."}
        data = srv_m.get_server_status(node['id'])
    if data:
        cpu_data = data.get('cpu', {})
        memory_data = data.get('mem', {})
        if cpu_data and memory_data:
            return {
                'status': 0,
                'cpu': cpu_data[0],
                'cpu_usage': cpu_data[1],
                'memory': round(float(memory_data['memRealUsed']) / float(memory_data['memTotal']) * 100, 2),
                'mem_usage': memory_data['memRealUsed'],
                'memNewTotal': memory_data.get('memNewTotal', "") or public.to_size(
                    memory_data['memTotal'] * 1024 * 1024)
            }
    return {'status': 2, 'msg': "Failed to obtain node data"}
|
||||
def add_category(self, get):
    """Create a node category; names must be unique and non-empty."""
    name = get.get('name', "").strip()
    srv_db = ServerNodeDB()
    if not name:
        return public.return_message(-1, 0, "Classification name cannot be empty")
    if srv_db.category_exites(name):
        return public.return_message(-1, 0, "The category name already exists")
    err = srv_db.create_category(name)
    if err:
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, "Category added successfully")
|
||||
def del_category(self, get):
    """Delete a node category by ID."""
    category_id = get.get('id/d', 0)
    if not category_id:
        return public.return_message(-1, 0, "Classification ID cannot be empty")
    srv_db = ServerNodeDB()
    # NOTE(review): category_exites() is called with an ID here but with a
    # name in add_category -- confirm it accepts both.
    if srv_db.category_exites(category_id):
        srv_db.delete_category(category_id)

    return public.return_message(0, 0, "Category deleted successfully")
|
||||
def bind_node_to_category(self, get):
    """Bind one or more nodes to a category (batch supported).

    :param get: 'ids' is a JSON integer or JSON list of node ids;
                'category_id/d' is the target category id.
    :return: public.return_message result
    """
    node_ids = get.get('ids', "")
    category_id = get.get('category_id/d', 0)
    try:
        node_ids = json.loads(node_ids)
        # Normalize a single JSON integer to a one-element list.
        # (The original's `not isinstance(x, list) and isinstance(x, int)`
        # is equivalent: an int is never a list, so the first test is
        # implied by the second.)
        if isinstance(node_ids, int):
            node_ids = [node_ids]
    except Exception:
        return public.return_message(-1, 0, "Node ID format error")

    if not node_ids:
        return public.return_message(-1, 0, "Node ID cannot be empty, at least one")

    if category_id < 0:
        return public.return_message(-1, 0, "Classification ID cannot be empty")

    err = ServerNodeDB().bind_category_to_node(node_ids, category_id)
    if err:
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, "Node grouping modification successful")
|
||||
|
||||
def get_category_list(self, get):
    """Return all node categories from the node database.

    :param get: request args (unused)
    :return: public.return_message with the category rows, or an error
    """
    try:
        categorys = public.S('category', self.node_db_obj._DB_FILE).select()
        return public.return_message(0, 0, categorys)
    except Exception:
        # BUG FIX: traceback.print_exc() prints to stderr and returns
        # None, so the old code logged the string "None"; format_exc()
        # returns the traceback text for the panel log.
        public.print_log(traceback.format_exc())
        return public.return_message(-1, 0, "Data query failed")
|
||||
|
||||
@staticmethod
def get_panel_url(get):
    """Build a temporary auto-login URL for the target panel node.

    :param get: request args; 'node_id/d' selects the node.
    :return: public.return_message with {'target_panel_url': url}
    """
    node_id = get.get('node_id/d', 0)
    if not node_id:
        return public.return_message(-1, 0, "node_id cannot be empty")
    srv = ServerNode.new_by_id(node_id)
    if srv is None:
        return public.return_message(-1, 0, "node_id does not exist")
    token, err = srv.get_tmp_token()
    if err:
        return public.return_message(-1, 0, err)
    url = "{}/login?tmp_token={}".format(srv.origin, token)
    return public.return_message(0, 0, {'target_panel_url': url})
|
||||
|
||||
@classmethod
def get_all_node(cls, get):
    """
    @route /mod/node/node/get_all_node
    List known nodes filtered by node_type ("api" | "ssh" | "file_src" |
    anything else = all non-local), enriched with cached status.
    @param query: str
    @return: list of node dicts (ssh_conf/app_key/api_key stripped,
             has_ssh/is_local/version/useful_version added)
    """
    query_type = get.get('node_type/s', "api")
    field_str = "id,remarks,server_ip,address,app_key,api_key,lpver,category_id,error_num,ssh_conf"
    # BUG FIX: the ssh/file_src/else branches referenced `self` inside a
    # @classmethod (a guaranteed NameError); use cls.node_db_file exactly
    # like the "api" branch already did.
    if query_type == "api":
        data = public.S('node', cls.node_db_file).where("app_key != '' or api_key != ''", ()).field(field_str).select()
    elif query_type == "ssh":
        data = public.S('node', cls.node_db_file).field(field_str).where("ssh_conf != '{}'", ()).select()
    elif query_type == "file_src":
        data = public.S('node', cls.node_db_file).field(field_str).where(
            "(app_key != '' or api_key != '') and lpver = ''", ()).select()
    else:  # all: every node except the local machine
        data = public.S('node', cls.node_db_file).where("api_key != 'local'", ()).field(field_str).select()

    srv_cache = ServerMonitorRepo()
    for i in data:
        i["has_ssh"] = bool(json.loads(i["ssh_conf"]))
        i.pop("ssh_conf")
        i["is_local"] = (i["app_key"] == "local" and i["api_key"] == "local")
        i.pop("app_key")
        i.pop("api_key")
        if i["server_ip"] == "":
            # Fill a missing IP from the node table.
            server_ip = ServerNode.get_node_ip(i['id'])
            if server_ip:
                i["server_ip"] = server_ip
        if i["lpver"] and not i["remarks"].endswith(" | 1Panel"):
            # Tag 1Panel nodes in the remark exactly once.
            i["remarks"] = i["remarks"] + " | 1Panel"

        tmp_data = srv_cache.get_server_status(i['id'])
        tmp_data = tmp_data or {}
        if query_type == "file_src":
            if not tmp_data and not i["is_local"]:
                i["version"] = ""
                i["useful_version"] = True
                continue
            # NOTE(review): this skips every non-local node -- confirm
            # file_src is intended to report versions for local only.
            if not i["is_local"] or not flow_useful_version(tmp_data.get('version', "")):
                continue
        else:
            if not tmp_data:
                i["version"] = ""
                i["useful_version"] = True
                continue
        i['version'] = tmp_data.get('version', "")
        i['useful_version'] = cls._useful_version(i['version'])

    return public.return_message(0, 0, data)
|
||||
|
||||
@staticmethod
|
||||
def _useful_version(ver: str):
|
||||
try:
|
||||
if ver == "1Panel":
|
||||
return True
|
||||
ver_list = [int(i) for i in ver.split(".")]
|
||||
if ver_list[0] >= 10:
|
||||
return True
|
||||
elif ver_list[0] == 9 and ver_list[1] >= 7:
|
||||
return True
|
||||
except:
|
||||
pass
|
||||
return False
|
||||
|
||||
@staticmethod
def get_node_sites(get):
    """
    @route /mod/node/node/get_node_sites
    @param node_id: int
    @param query: str
    @return: list of {"node_id", "site_id", "site_name", "site_port"}
    """
    node_id = get.get('node_id/d', 0)
    if not node_id:
        return public.return_message(-1, 0, "node_id cannot be empty")
    srv = ServerNode.new_by_id(node_id)
    if srv is None:
        return public.return_message(-1, 0, "node_id does not exist")
    sites, err = srv.php_site_list()
    if err:
        return public.return_message(-1, 0, err)
    return public.return_message(0, 0, sites)
|
||||
|
||||
@staticmethod
def php_site_list(get):
    """
    @route /mod/node/node/php_site_list
    Return the local machine's PHP site list: the data half of the
    (data, err) pair produced by LocalNode.php_site_list().
    @return: list of {"site_id", "site_name", "ports", "domains", "ssl"}
    """
    data, _err = LocalNode().php_site_list()
    return data
|
||||
|
||||
def node_used_status(self, get):
    """Report whether the node-manager feature was activated (marker file)."""
    used = os.path.exists(self.tip_file)
    if used:
        return public.return_message(0, 0, "Used")
    return public.return_message(-1, 0, "Unused")
|
||||
|
||||
def set_used_status(self, get):
    """Toggle the 'used' marker file, repairing the h11/wsproto pins first."""
    # Pin h11 to 0.14.0 inside the panel's virtualenv.
    fix_h11 = "cd /www/server/panel/pyenv/bin && source activate && H11_VERSION=$(./pip3 show h11 | grep -i Version | awk '{print $2}') && if [ \"$H11_VERSION\" != \"0.14.0\" ]; then ./pip3 uninstall h11 -y; fi; ./pip3 install h11==0.14.0"
    # Pin wsproto to 1.2.0 the same way.
    fix_wsproto = "cd /www/server/panel/pyenv/bin && source activate && WSPROTO_VERSION=$(./pip3 show wsproto | grep -i Version | awk '{print $2}') && if [ \"$WSPROTO_VERSION\" != \"1.2.0\" ]; then ./pip3 uninstall wsproto -y; fi; ./pip3 install wsproto==1.2.0"
    for fix_cmd in (fix_h11, fix_wsproto):
        public.ExecShell(fix_cmd)
    if not os.path.exists(self.tip_file):
        public.set_module_logs("nodes_installed_9", "set_used_status")
        public.writeFile(self.tip_file, "True")
    else:
        os.remove(self.tip_file)
    return public.return_message(0, 0, "Setup successful")
|
||||
|
||||
|
||||
@staticmethod
def remove_ssh_conf(get):
    """Drop the stored SSH credentials for a node."""
    ServerNodeDB().remove_node_ssh_conf(get.get("node_id/d", 0))
    return public.return_message(0, 0, "Deleted successfully")
|
||||
|
||||
@staticmethod
def set_ssh_conf(get):
    """Validate and store SSH connection settings for a node.

    With test_case=1 the connection is only verified, nothing is saved.
    """
    conf = {
        "host": get.get("host/s", ""),
        "port": get.get("port/d", 22),
        "username": get.get("username/s", "root") or "root",
        "password": get.get("password/s", ""),
        "pkey": get.get("pkey/s", ""),
        "pkey_passwd": get.get("pkey_passwd/s", ""),
    }
    node_id = get.get("node_id/d", 0)
    test_case = get.get("test_case/d", 0)

    if not node_id and not test_case:
        return public.return_message(-1, 0, "Node does not exist")

    if not conf["host"] and node_id:
        # Fall back to the node's recorded IP when no host was supplied.
        conf["host"] = ServerNode.get_node_ip(node_id)
    if not conf["host"] or not conf["username"] or not conf["port"]:
        return public.return_message(-1, 0, "Host IP, host port, and user name cannot be empty")
    if not conf["password"] and not conf["pkey"]:
        return public.return_message(-1, 0, "Password or key cannot be empty")

    res = test_ssh_config(conf["host"], conf["port"], conf["username"],
                          conf["password"], conf["pkey"], conf["pkey_passwd"])
    if res:
        return public.return_message(-1, 0, res)
    if test_case:
        return public.return_message(0, 0, "Test successful")
    ServerNodeDB().set_node_ssh_conf(node_id, conf)
    return public.return_message(0, 0, "Setup successful")
|
||||
|
||||
@staticmethod
def get_sshd_port(get):
    """Return the SSH daemon port of a node, defaulting to 22."""
    srv = ServerNode.new_by_id(get.get("node_id/d", 0))
    if srv is None:
        return public.return_message(-1, 0, "Node does not exist")
    port = srv.get_sshd_port() or 22
    return public.return_message(0, 0, {"port": port})
|
||||
|
||||
|
||||
@staticmethod
def restart_bt_panel(get):
    """Ask a remote node to restart its panel service (local refused)."""
    srv = ServerNode.new_by_id(get.get("node_id/d", 0))
    if srv is None:
        return public.return_message(-1, 0, "Node does not exist")
    if srv.is_local:
        return public.return_message(-1, 0, "The local node does not support this operation")
    ret = srv.restart_bt_panel()
    code = 0 if ret.get("status") else -1
    return public.return_message(code, 0, ret.get("msg"))
|
||||
|
||||
|
||||
@staticmethod
def server_reboot(get):
    """Reboot a remote node's server, refusing the local node and
    nodes already waiting on a reboot."""
    srv = ServerNode.new_by_id(get.get("node_id/d", 0))
    if srv is None:
        return public.return_message(-1, 0, "Node does not exist")
    if srv.is_local:
        return public.return_message(-1, 0, "The local node does not support this operation")
    if ServerMonitorRepo().is_reboot_wait(srv.node_server_ip):
        return public.return_message(-1, 0, "Node is restarting, please try again later")
    ret = srv.server_reboot()
    code = 0 if ret.get("status") else -1
    return public.return_message(code, 0, ret.get("msg"))
|
||||
11
mod/project/node/nodeutil/__init__.py
Normal file
11
mod/project/node/nodeutil/__init__.py
Normal file
@@ -0,0 +1,11 @@
|
||||
# Public facade for the nodeutil package: re-export the node classes,
# the monitor helpers and the SSH wrapper under one namespace.
from .base import *
from .ssh_wrap import SSHApi

__all__ = [
    "ServerNode",
    "LocalNode",
    "LPanelNode",
    "monitor_node_once_with_timeout",
    "monitor_node_once",
    "SSHApi"
]
|
||||
1961
mod/project/node/nodeutil/base.py
Normal file
1961
mod/project/node/nodeutil/base.py
Normal file
File diff suppressed because it is too large
Load Diff
573
mod/project/node/nodeutil/one_panel_api.py
Normal file
573
mod/project/node/nodeutil/one_panel_api.py
Normal file
@@ -0,0 +1,573 @@
|
||||
import os.path
|
||||
import shutil
|
||||
import traceback
|
||||
from uuid import uuid4
|
||||
|
||||
import requests
|
||||
import time
|
||||
import hashlib
|
||||
import json
|
||||
from typing import Optional, List, Any, Tuple, Dict
|
||||
|
||||
|
||||
class OnePanelApiClient:
    # Thin HTTP client for the 1Panel management API (v1/v2).

    def __init__(self, panel_address, api_key, ver: str = "v2", timeout: int = 20):
        """
        Initialize the 1Panel API client.

        Args:
            panel_address (str): 1Panel base URL (e.g. "http://your_server_ip:4004")
            api_key (str): your 1Panel API key
            ver (str): API version path segment, "v1" or "v2"
            timeout (int): per-request timeout in seconds
        """
        self.panel_address = panel_address
        self.api_key = api_key
        self.ver = ver
        self.timeout = timeout
        # Last request exception; inspected by test_ver() to decide on a
        # v1 fallback.
        self._call_err: Optional[Exception] = None
|
||||
|
||||
def _generate_token(self):
|
||||
"""生成 1Panel API token 和时间戳"""
|
||||
timestamp = str(int(time.time()))
|
||||
sign_string = f"1panel{self.api_key}{timestamp}"
|
||||
md5_hash = hashlib.md5(sign_string.encode()).hexdigest()
|
||||
return md5_hash, timestamp
|
||||
|
||||
def _call_api(self, method, endpoint, json_data=None):
    """Send an authenticated request to the 1Panel API.

    :param method: HTTP verb, e.g. "GET" / "POST"
    :param endpoint: path starting with /api/...
    :param json_data: optional JSON body
    :return: decoded JSON dict, or None on any failure (the exception is
             stashed in self._call_err for callers such as test_ver()).
    """
    token, timestamp = self._generate_token()
    headers = {
        "1Panel-Token": token,
        "1Panel-Timestamp": timestamp,
        "Content-Type": "application/json"
    }
    url = "{}{}".format(self.panel_address, endpoint)

    try:
        response = requests.request(method, url, headers=headers, json=json_data, timeout=self.timeout)
        response.raise_for_status()  # raise on HTTP 4xx / 5xx
        print(f"API Response Status: {response.status_code}")
        return response.json()
    except Exception as e:
        # BUG FIX: the original had two byte-identical handlers for
        # RequestException and Exception; RequestException is a subclass
        # of Exception, so one handler is equivalent.
        self._call_err = e
        print(f"API call failed: {e}")
        return None
|
||||
|
||||
def add_website(self, site_name: str, port: int, **kwargs):
    """
    Create a static website on the node.

    :param site_name: used as both primaryDomain and alias.
    :param port: port of the single "domains" entry below.
    :param kwargs: optional "ps" remark text.
    :return: decoded API response dict, or None on failure.
    """
    endpoint = "/api/{}/websites".format(self.ver)
    return self._call_api("POST", endpoint, json_data={
        "primaryDomain": site_name,
        "type": "static",
        "alias": site_name,
        "remark": kwargs.get("ps") if kwargs.get("ps", None) else "Pagoda yakpanel load balancing station",
        "appType": "installed",
        "webSiteGroupId": 1,  # default website group
        "otherDomains": "",
        "proxy": "",
        # Placeholder app-install block (static site: everything empty/zero).
        "appinstall": {
            "appId": 0,
            "name": "",
            "appDetailId": 0,
            "params": {},
            "version": "",
            "appkey": "",
            "advanced": False,
            "cpuQuota": 0,
            "memoryLimit": 0,
            "memoryUnit": "MB",
            "containerName": "",
            "allowPort": False
        },
        "IPV6": False,
        "enableFtp": False,
        "ftpUser": "",
        "ftpPassword": "",
        "proxyType": "tcp",
        # NOTE(review): hardcoded 9000 -- the `port` parameter is only
        # applied to the "domains" entry below; confirm this is intended.
        "port": 9000,
        "proxyProtocol": "http://",
        "proxyAddress": "",
        "runtimeType": "php",
        "taskID": str(uuid4()),  # unique id for the panel's task queue
        "createDb": False,
        "dbName": "",
        "dbPassword": "",
        "dbFormat": "utf8mb4",
        "dbUser": "",
        "dbType": "mysql",
        "dbHost": "",
        "enableSSL": False,
        "domains": [
            {
                "domain": site_name,
                "port": port,
                "ssl": False
            }
        ],
        "siteDir": ""
    })
|
||||
|
||||
def check_site_create(self, site_name: str) -> Optional[int]:
    """Return the id of the website whose alias equals site_name, or None."""
    res_data = self._call_api("POST", "/api/{}/websites/search".format(self.ver), json_data={
        "name": site_name,
        "page": 1,
        "pageSize": 10,
        "orderBy": "favorite",
        "order": "descending",
        "websiteGroupId": 0,
        "type": "static"
    })
    if res_data is None:
        return None
    payload = res_data.get("data")
    if not isinstance(payload, dict):
        return None
    for item in payload.get("items", {}):
        if item["alias"] == site_name:
            return item["id"]
    return None
|
||||
|
||||
def get_websites(self):
    """Fetch the node's full website list.

    Returns:
        dict: decoded API response, or None on failure.
    """
    return self._call_api("GET", "/api/{}/websites/list".format(self.ver))
|
||||
|
||||
def add_website_domain(self, website_id: int, new_domain: str, port: int):
    """Attach one extra (non-SSL) domain to an existing website."""
    payload = {
        "websiteID": website_id,
        "domains": [
            {
                "domain": new_domain,
                "port": port,
                "ssl": False
            }
        ],
        "domainStr": ""
    }
    return self._call_api("POST", "/api/{}/websites/domains".format(self.ver), json_data=payload)
|
||||
|
||||
def website_domains(self, website_id: int):
    """List the domains bound to a website."""
    endpoint = "/api/{}/websites/domains/{}".format(self.ver, website_id)
    return self._call_api("GET", endpoint)
|
||||
|
||||
def list_file_test(self):
    """
    Minimal /files/search probe used by test_ver() to detect which API
    version the panel speaks; not meant for real listings (page=0).
    """
    endpoint = "/api/{}/files/search".format(self.ver)
    return self._call_api("POST", endpoint, json_data={
        "containSub": False,
        "dir": True,
        "expand": True,
        "isDetail": True,
        "page": 0,
        "pageSize": 0,
        "path": "/",
        "search": "",
        "showHidden": True,
        "sortBy": "",
        "sortOrder": ""
    })
|
||||
|
||||
def list_file(self, path: str) -> Tuple[List[Dict], str]:
    """List up to 1000 entries of a remote directory.

    Returns (items, error_message); error_message is "" on success.
    """
    res = self._call_api("POST", "/api/{}/files/search".format(self.ver), json_data={
        "containSub": False,
        "expand": True,
        "isDetail": True,
        "page": 1,
        "pageSize": 1000,
        "path": path,
        "search": "",
        "showHidden": True,
        "sortBy": "name",
        "sortOrder": "ascending"
    })
    if res is None:
        return [], "Failed to retrieve file list"
    if res["code"] != 200:
        return [], res["message"]
    total = res["data"]["itemTotal"]
    if total > 1000:
        return [], "The number of directory files exceeds 1000, please compress before operating"
    if total == 0:
        return [], ""
    items = res["data"]["items"]
    return ([] if items is None else items), ""
|
||||
|
||||
def files_search(self, path: str, page: int, page_size: int, search: str):
    """Paged remote file search; returns (data dict, error message)."""
    payload = {
        "containSub": False,
        "expand": True,
        "isDetail": True,
        "page": page,
        "pageSize": page_size,
        "path": path,
        "search": search,
        "showHidden": True,
        "sortBy": "name",
        "sortOrder": "ascending"
    }
    res = self._call_api("POST", "/api/{}/files/search".format(self.ver), json_data=payload)
    if res is None:
        return {}, "Failed to retrieve file list"
    if res["code"] != 200:
        return {}, res["message"]
    return res["data"], ""
|
||||
|
||||
def test_ver(self) -> bool:
    """Probe which API version the panel speaks.

    Tries v2 first; on a JSON decode error (older panels answer v2 paths
    with non-JSON) retries once against v1. Leaves self.ver set to the
    working version and returns True when a dict response was obtained.
    """
    self.ver = "v2"
    self._call_err = None
    res_data = self.list_file_test()
    if res_data is None and isinstance(self._call_err, json.JSONDecodeError):
        self.ver = "v1"
        res_data = self.list_file_test()
    # BUG FIX: the original had two identical `isinstance(res_data, dict)`
    # branches; the elif was unreachable dead code.
    return isinstance(res_data, dict)
|
||||
|
||||
def system_status(self):
    """Fetch the dashboard "current" metrics; v1 uses a POST body,
    v2 encodes the io/net options in the path."""
    endpoint = "/api/{}/dashboard/current".format(self.ver)
    if self.ver != "v1":
        return self._call_api("GET", endpoint + "/all/all")
    return self._call_api("POST", endpoint, json_data={
        "scope": "basic",
        "ioOption": "all",
        "netOption": "all"
    })
|
||||
|
||||
def open_port(self, port: int, protocol: str):
    """
    Add a firewall accept rule for `port`/`protocol` from any source.
    NOTE(review): "description": "aaaa" is a placeholder label sent to
    the firewall API -- confirm whether a meaningful text is wanted.
    """
    endpoint = "/api/{}/hosts/firewall/port".format(self.ver)
    return self._call_api("POST", endpoint, json_data={
        "protocol": protocol,
        "source": "anyWhere",
        "strategy": "accept",
        "port": str(port),
        "description": "aaaa",
        "operation": "add",
        "address": ""
    })
|
||||
|
||||
def ws_shell(self, work_dir: str, cmd: str) -> Optional[str]:
    """Run a shell command on the node through the v2 websocket terminal
    and return its captured output.

    :param work_dir: directory to cd into before running cmd.
    :param cmd: shell command line to execute.
    :return: captured stdout text, or None on failure / non-v2 panels.
    """
    import websocket
    import base64
    import threading
    from urllib.parse import urlencode, urlparse
    if self.ver != "v2":
        return None
    try:
        # Disable the prompt and echo so only command output is captured.
        # (Note the adjacent string literals: the command actually sent
        # starts with `PS1= && stty -echo ...`.)
        pre_command = "PS1="" && stty -echo && clear && cd {}".format(work_dir, cmd)
        p = {
            "cols": 80,
            "rows": 24,
            "command": pre_command,
            "operateNode": "local"
        }
        token, timestamp = self._generate_token()
        u = urlparse(self.panel_address)
        url = ("{}://{}/api/{}/hosts/terminal?{}".format
               ("ws" if u.scheme == "http" else "wss", u.netloc, self.ver, urlencode(p)))
        ws = websocket.WebSocket()
        ws.connect(url, header={"1Panel-Token": token, "1Panel-Timestamp": timestamp, })
        if not cmd.endswith("\n"):
            cmd += "\n"
        ws.send(json.dumps({"type": "cmd", "data": base64.b64encode(cmd.encode("utf-8")).decode("utf-8")}))
        res_str = ""

        wait = False

        def close_timeout():
            # Watchdog: force-close the socket if recv() blocks > 5s so
            # the read loop below always terminates.
            time.sleep(5)
            if wait:
                ws.close()

        threading.Thread(target=close_timeout).start()

        while True:
            wait = True
            result = ws.recv()
            wait = False
            if result == "":
                break
            res_data = json.loads(result)
            if res_data["type"] == "cmd":
                res_str += base64.b64decode(res_data["data"]).decode("utf-8")

        # Drop everything echoed up to and including the setup command.
        if pre_command in res_str:
            res_str = res_str[res_str.index(pre_command) + len(pre_command):]

        res_str = res_str.strip()
        real_data = []
        for line in res_str.split("\r\n"):
            # BUG FIX: use line[:1] -- the original line[0] raised
            # IndexError on blank lines. Skip terminal escape sequences.
            if line[:1] == '\x1b':
                continue
            real_data.append(line)

        # BUG FIX: removed a leftover debug dump to ./test.txt.
        return "\n".join(real_data)
    except Exception as e:
        print("error:{}".format(str(e)))
        traceback.print_exc()
        return None
|
||||
|
||||
def chunkupload(self,
                upload_name: str,
                target_path: str,
                chunk: Any, chunk_index: int, chunk_count: int) -> Tuple[str, Optional[dict]]:
    """
    Upload one chunk of a file to the node.

    :param upload_name: final filename on the node.
    :param target_path: remote directory to write into.
    :param chunk: bytes-like chunk body.
    :param chunk_index: index of this chunk -- TODO confirm 0- or 1-based
        against the 1Panel API.
    :param chunk_count: total number of chunks.
    :return: (error_message, decoded response dict or None); "" on success.
    """
    token, timestamp = self._generate_token()
    header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
    files = {'chunk': ("chunk", chunk, 'application/octet-stream')}
    data = {
        'path': target_path,
        'filename': upload_name,
        'chunkIndex': chunk_index,
        'chunkCount': chunk_count,
    }
    url = "{}/api/{}/files/chunkupload".format(self.panel_address, self.ver)
    try:
        resp = requests.post(url, data=data, files=files, headers=header, verify=False, timeout=self.timeout)
        if not resp.status_code == 200:
            return "The response status code for uploading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the return message is:{}".format(
                resp.status_code, resp.text), None
        # Bodies shorter than 3 chars cannot be a JSON object; treat as empty.
        return "", None if len(resp.text) < 3 else json.loads(resp.text)
    except Exception as e:
        return "Upload file: {} failed with error message:{}".format(upload_name, str(e)), None
|
||||
|
||||
def upload(self, filename: str, target_path: str, upload_name: str) -> str:
    """
    Upload a local file to the node in one request (non-chunked).

    :param filename: local file to read.
    :param target_path: remote directory.
    :param upload_name: filename to store on the node.
    :return: "" on success, otherwise an error message.
    """
    token, timestamp = self._generate_token()
    header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
    try:
        with open(filename, 'rb') as f:
            file_data = f.read()
    except Exception as e:
        return "File {} failed to open, please check file permissions, error message is:{}".format(filename, str(e))

    files = {'file': (upload_name, file_data, 'application/octet-stream')}
    data = {
        'path': target_path,
        'overwrite': True
    }
    url = "{}/api/{}/files/upload".format(self.panel_address, self.ver)
    try:
        resp = requests.post(url, data=data, files=files, headers=header, verify=False, timeout=self.timeout)
        # Both transport status and the API's own "code" must be 200.
        if not resp.status_code == 200:
            return "The response status code for uploading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the return message is:{}".format(
                resp.status_code, resp.text)
        if not resp.json()["code"] == 200:
            return "Upload file failed with error message:{}".format(resp.json()["message"])
        return ""
    except Exception as e:
        return "Upload file: {} failed with error message:{}".format(filename, str(e))
|
||||
|
||||
def files_exits(self, paths: List[str]) -> Optional[dict]:
    """Batch-check which of the given remote paths exist."""
    payload = {
        "paths": paths,
    }
    return self._call_api("POST", "/api/{}/files/batch/check".format(self.ver), json_data=payload)
|
||||
|
||||
def download_file(self, filename: str, target_path: str, download_name: str, **kwargs) -> str:
    """
    Download a remote file in 5 MiB ranged chunks to
    target_path/download_name.

    :param filename: remote path on the node.
    :param target_path: local directory (created if missing).
    :param download_name: local filename to write.
    :param kwargs: optional call_log(progress, message) callback.
    :return: "" on success, otherwise an error message.
    """
    data = self.files_exits([filename])
    file_size: Optional[int] = None
    if not isinstance(data, dict):
        return "Request file: {} status failed".format(filename)
    # Find the size of the requested file in the batch-check response.
    for i in data["data"]:
        if i["path"] == filename:
            file_size = i["size"]
            break
    if file_size is None:
        return "File {} does not exist, skip download".format(filename)
    try:
        if not os.path.isdir(target_path):
            os.makedirs(target_path)
    except Exception as e:
        return "Failed to create folder {}, please check folder permissions, error message is:{}".format(target_path, str(e))

    # Zero-byte file: just create an empty local file and finish.
    if file_size == 0:
        fp = open(os.path.join(target_path, download_name), "w")
        fp.close()
        return ""

    # Download into a unique temp file, then move it into place.
    tmp_file = os.path.join(target_path, "{}.{}".format(download_name, uuid4().hex))
    try:
        if not os.path.exists(target_path):
            os.makedirs(target_path)
        fb = open(tmp_file, 'wb')
    except Exception as e:
        return "Failed to create temporary file {}, please check folder permissions, error message is:{}".format(tmp_file, str(e))

    # Progress callback defaults to a no-op.
    call_log = lambda *args, **keyword_args: None
    if "call_log" in kwargs and callable(kwargs["call_log"]):
        call_log = kwargs["call_log"]
    try:
        for i in range(0, file_size, 1024 * 1024 * 5):
            start = i
            end = min(i + 1024 * 1024 * 5 - 1, file_size - 1)
            url = "{}/api/{}/files/chunkdownload".format(self.panel_address, self.ver)
            data = {
                'path': filename,
                'name': os.path.basename(filename),
            }
            # Token is regenerated per chunk (it embeds a timestamp).
            token, timestamp = self._generate_token()
            header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
            header.update({"Range": "bytes={}-{}".format(start, end)})
            resp = requests.post(url, json=data, headers=header, verify=False, stream=True, timeout=self.timeout)
            if resp.status_code != 206:
                return "The response status code for downloading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the response header for the returned message is:{}".format(
                    resp.status_code, resp.headers)
            fb.write(resp.content)
            # NOTE(review): end // file_size is integer division, so this
            # progress argument is 0 for every chunk but the last --
            # confirm whether a float fraction was intended.
            call_log(end // file_size, "File Download:{} -> {}, Downloaded size:{}".format(filename, target_path, end))
        fb.flush()
        if fb.tell() != file_size:
            print(fb.tell(), file_size)
            return "Download file {} failed with error message:{}".format(filename, "files are different sizes")
        else:
            fb.close()
            shutil.move(tmp_file, os.path.join(target_path, download_name))
            return ""
    except Exception as e:
        return "Download file {} failed with error message:{}".format(filename, str(e))
    finally:
        # Close and delete the temp file on error paths; after a
        # successful move the temp path no longer exists.
        if not fb.closed:
            fb.close()
        if os.path.exists(tmp_file):
            os.remove(tmp_file)
|
||||
|
||||
def dir_walk(self, path: str) -> Tuple[List[dict], str]:
    """
    Breadth-first walk of a remote directory tree.

    :param path: remote root directory.
    :return: (entries, error): empty directories first (is_dir=1, size 0)
        followed by files (is_dir=0); aborts once more than 1000 files
        have been collected.
    """
    dirs = [path]
    res = []
    count = 0
    empty_dir = []
    while dirs:
        dir_path = dirs.pop(0)
        try:
            files, err = self.list_file(dir_path)
        except Exception as e:
            return [], str(e)
        if err:
            return [], err
        if not files:
            # Remember empty directories so they can be recreated later.
            empty_dir.append(dir_path)
        for i in files:
            if i["isDir"]:
                dirs.append(i["path"])
            else:
                res.append({
                    "path": i["path"],
                    "size": i["size"],
                    "is_dir": 0
                })
                count += 1
                if count > 1000:
                    return [], "The number of directory files exceeds 1000, please compress before operating"

    return [{"path": i, "size": 0, "is_dir": 1} for i in empty_dir] + res, ""
|
||||
|
||||
def remove_file(self, path: str, is_dir: bool) -> Optional[dict]:
    """
    Delete a remote path (forceDelete disabled).
    NOTE(review): return annotation corrected -- _call_api returns the
    decoded JSON dict or None, not a str as originally annotated.
    """
    return self._call_api("POST", "/api/{}/files/del".format(self.ver), json_data={
        "isDir": is_dir,
        "path": path,
        "forceDelete": False
    })
|
||||
|
||||
def download_proxy(self, filename: str):
    """
    Stream a remote file through this panel to the browser.

    :param filename: remote path on the node.
    :return: a Flask streaming Response on success, or an error string.
    """
    try:
        url = "{}/api/{}/files/download".format(self.panel_address, self.ver)
        token, timestamp = self._generate_token()
        header = {"User-Agent": "Yak-Panel/Node Manager", "1Panel-Token": token, "1Panel-Timestamp": timestamp}
        resp = requests.get(url, params={
            "operateNode": "local",
            "path": filename
        }, headers=header, stream=True, verify=False, timeout=self.timeout)
        if not resp.status_code == 200:
            return "The response status code for downloading the file is incorrect. Please check if the node address and API are correct. The current status code is {}, and the return message is:{}".format(
                resp.status_code, resp.text)

        # Imported lazily: this module is also used outside a Flask app.
        from flask import send_file, stream_with_context, Response
        filename = os.path.basename(filename)
        # Prefer the filename advertised by the upstream panel, if any.
        if resp.headers.get("Content-Disposition", "").find("filename=") != -1:
            filename = resp.headers.get("Content-Disposition", "").split("filename=")[1]

        def generate():
            # Relay the upstream body in 5 MiB chunks.
            for chunk in resp.iter_content(chunk_size=1024 * 1024 * 5):
                if chunk:
                    yield chunk

        # Response headers forwarded to the browser.
        headers = {
            'Content-Type': resp.headers.get('Content-Type', 'application/octet-stream'),
            'Content-Disposition': 'attachment; filename="{}"'.format(filename),
            'Content-Length': resp.headers.get('Content-Length', ''),
            'Accept-Ranges': 'bytes'
        }

        # stream_with_context keeps the request context alive while the
        # generator is consumed.
        return Response(
            stream_with_context(generate()),
            headers=headers,
            direct_passthrough=True
        )
    except Exception as e:
        return "Download file: {} failed with error message:{}".format(filename, traceback.format_exc())
|
||||
|
||||
def dir_size(self, path: str):
    """Ask the panel for the size of a remote directory."""
    payload = {
        "path": path
    }
    return self._call_api("POST", "/api/{}/files/size".format(self.ver), json_data=payload)
|
||||
|
||||
def get_sshd_config(self) -> Optional[dict]:
    """Return the node's sshd settings dict, or None on any failure."""
    res = self._call_api("POST", "/api/{}/hosts/ssh/search".format(self.ver))
    if res is not None and res["code"] == 200:
        return res.get("data", {})
    return None
|
||||
|
||||
def create_dir(self, path: str):
    """Create a directory on the node (non-recursive, default mode)."""
    payload = {
        "content": "",
        "isDir": True,
        "isLink": False,
        "isSymlink": False,
        "linkPath": "",
        "mode": 0,
        "path": path,
        "sub": False
    }
    return self._call_api("POST", "/api/{}/files".format(self.ver), payload)
|
||||
|
||||
def restart_panel(self):
    """Restart the 1Panel service itself."""
    endpoint = "/api/{}/dashboard/system/restart/{}".format(self.ver, "1panel")
    return self._call_api("POST", endpoint)
|
||||
|
||||
def server_reboot(self):
    """Reboot the node's operating system."""
    endpoint = "/api/{}/dashboard/system/restart/{}".format(self.ver, "system")
    return self._call_api("POST", endpoint)
|
||||
|
||||
def get_file_body(self, path: str) -> Tuple[Optional[dict], str]:
    """Read a remote file's content.

    Returns (data dict, error message); the message is "" on success.
    """
    res = self._call_api("POST", "/api/{}/files/content".format(self.ver), json_data={
        "path": path,
        "expand": True,
        "isDetail": False,
        "page": 1,
        "pageSize": 100
    })
    if res is None:
        return None, "Failed to retrieve file content"
    if res["code"] != 200:
        return None, res.get("message")
    return res.get("data", {}), ""
|
||||
129
mod/project/node/nodeutil/rsync_api.py
Normal file
129
mod/project/node/nodeutil/rsync_api.py
Normal file
@@ -0,0 +1,129 @@
|
||||
import json
|
||||
from typing import Optional, Union, Tuple, List, Any, Dict
|
||||
|
||||
from .base import ServerNode, LocalNode
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class _RsyncAPIBase:
    # Abstract interface shared by the local and remote rsync backends.

    def has_rsync_perm(self) -> bool:
        # Whether the current plugin/permission state allows rsync usage.
        raise NotImplementedError()

    def is_setup_rsync(self) -> bool:
        # Whether the rsync plugin is installed.
        raise NotImplementedError()

    def add_module(self, path: str, name: str, password: str, add_white_ips: List[str]) -> Tuple[Optional[dict], str]:
        # Register an rsync module; returns (plugin result, error message).
        raise NotImplementedError()

    def add_send_task(self, sou):
        # NOTE(review): unlike the methods above, this silently returns
        # None when not overridden -- confirm whether it should raise.
        pass

    def get_secretkey(self, ip_type: str = "local_ip") -> Tuple[str, str]:
        # NOTE(review): default no-op; subclasses are expected to override.
        pass

    def check_receiver_conn(self, secret_key: str, work_type: int) -> Tuple[Dict, str]:
        # NOTE(review): default no-op; subclasses are expected to override.
        pass
|
||||
|
||||
|
||||
|
||||
class BtLocalRsyncAPI(LocalNode, _RsyncAPIBase):
    """Rsync API implementation for the local node, backed by panelPlugin."""

    @classmethod
    def new_by_id(cls, node_id: int) -> Optional['BtLocalRsyncAPI']:
        """Return a local rsync API instance if node_id refers to the local node.

        :param node_id: primary key of the node record.
        :return: BtLocalRsyncAPI when the node record uses the "local"
                 sentinel keys, otherwise None.
        """
        # BUGFIX: the original referenced ``self`` inside a classmethod,
        # which raised NameError at runtime; use ``cls`` instead
        # (node_db_obj is expected to be a class attribute provided by
        # LocalNode -- confirm).
        node_data = public.S('node', cls.node_db_obj._DB_FILE).where('id=?', (node_id,)).find()
        if not node_data:
            return None

        # The local node is stored with the sentinel key values "local".
        if node_data["api_key"] == "local" and node_data["app_key"] == "local":
            return cls()

        return None

    @staticmethod
    def _plugin_func(func_name: str, **kwargs) -> Any:
        """Invoke a method of the local rsync plugin through panelPlugin."""
        from panelPlugin import panelPlugin
        return panelPlugin().a(public.to_dict_obj({
            "name": "rsync",
            "s": func_name,
            **kwargs,
        }))

    def has_rsync_perm(self) -> bool:
        """Return True if the panel has permission to use the rsync plugin."""
        from panelPlugin import panelPlugin
        res = panelPlugin().a(public.to_dict_obj({"name": "rsync"}))
        return bool(res["status"])

    def is_setup_rsync(self) -> bool:
        """Return True if the rsync plugin is installed locally."""
        from panelPlugin import panelPlugin
        res = panelPlugin().get_soft_find(public.to_dict_obj({"sName": "rsync"}))
        try:
            return res["setup"]
        except (KeyError, TypeError):
            # Missing/odd plugin metadata simply means "not installed"
            # (narrowed from a bare except).
            return False

    def add_module(self, path: str, name: str, password: str, add_white_ips: List[str]) -> Tuple[Optional[dict], str]:
        """Create an rsync module (shared directory) on the local node.

        :param add_white_ips: IPs allowed to access the module; serialized
                              to JSON for the plugin call.
        :return: (plugin result, "") -- errors are left inside the result.
        """
        res = self._plugin_func("add_module", **{
            "path": path,
            "mName": name,
            "password": password,
            "add_white_ips": json.dumps(add_white_ips)
        })
        return res, ""
|
||||
|
||||
|
||||
class BtRsyncAPI(ServerNode, _RsyncAPIBase):
    """Rsync API implementation for a remote panel node over its HTTP API."""

    def _plugin_api_func(self, func_name: str, **kwargs) -> Tuple[Any, str]:
        """Call a method of the rsync plugin on the remote node."""
        return self._request("/plugin", "a", pdata={
            "name": "rsync",
            "s": func_name,
            **kwargs
        })

    @classmethod
    def new_by_id(cls, node_id: int) -> Optional['BtRsyncAPI']:
        """Return a remote rsync API for node_id.

        :return: BtRsyncAPI, or None when the node is missing, is the
                 local node, or is an lpver node.
        """
        # BUGFIX: the original referenced ``self`` inside a classmethod,
        # which raised NameError at runtime; use ``cls`` instead
        # (node_db_obj is expected to be a class attribute provided by
        # ServerNode -- confirm).
        node_data = public.S('node', cls.node_db_obj._DB_FILE).where('id=?', (node_id,)).find()
        if not node_data:
            return None

        # The local node is handled by BtLocalRsyncAPI instead.
        if node_data["api_key"] == "local" and node_data["app_key"] == "local":
            return None

        # lpver nodes are not supported by this API.
        if node_data['lpver']:
            return None

        return cls(node_data["address"], node_data["api_key"], "")

    def has_rsync_perm(self) -> bool:
        """Return True if the remote node may use the rsync plugin."""
        data, err = self._request("/plugin", "a", pdata={"name": "rsync"})
        if err:
            return False
        return data["status"]

    def is_setup_rsync(self) -> bool:
        """Return True if the rsync plugin is installed on the remote node."""
        data, err = self._request("/plugin", "get_soft_find", pdata={"sName": "rsync"})
        if err:
            return False
        try:
            return data["setup"]
        except (KeyError, TypeError):
            # Missing/odd plugin metadata simply means "not installed"
            # (narrowed from a bare except).
            return False

    def add_module(self, path: str, name: str, password: str, add_white_ips: List[str]) -> Tuple[Optional[dict], str]:
        """Create an rsync module (shared directory) on the remote node.

        :param add_white_ips: IPs allowed to access the module; serialized
                              to JSON for the plugin call.
        """
        return self._plugin_api_func("add_module", **{
            "path": path,
            "mName": name,
            "password": password,
            "add_white_ips": json.dumps(add_white_ips)
        })
|
||||
|
||||
|
||||
|
||||
def get_rsync_api_node(node_id: int) -> Optional[Union['BtRsyncAPI', 'BtLocalRsyncAPI']]:
    """Resolve a node id to the matching rsync API wrapper.

    The local implementation is preferred; when the id does not belong to
    the local node, fall back to the remote HTTP-API implementation.
    """
    node_api = BtLocalRsyncAPI.new_by_id(node_id)
    return node_api if node_api else BtRsyncAPI.new_by_id(node_id)
|
||||
783
mod/project/node/nodeutil/ssh_warp_scripts/system_info.sh
Normal file
783
mod/project/node/nodeutil/ssh_warp_scripts/system_info.sh
Normal file
@@ -0,0 +1,783 @@
|
||||
#!/bin/bash

# Force a C locale so the output is pure JSON with no locale-dependent noise
export LANG=C
export LC_ALL=C

# Per-user temp file that persists the previous network counters between
# runs, so rates can be derived from two consecutive samples
NETWORK_DATA_FILE="/tmp/system_network_data_$(id -u).json"
|
||||
|
||||
# Collect per-interface network counters and derive up/down rates from the
# previous sample stored in $NETWORK_DATA_FILE. Emits a JSON object keyed
# by interface name.
collect_network() {
    result="{"
    first=true
    current_time=$(date +%s)

    # Read the previous sample (if any)
    prev_data=""
    prev_time=0
    if [ -f "$NETWORK_DATA_FILE" ]; then
        prev_data=$(cat "$NETWORK_DATA_FILE")
        prev_time=$(echo "$prev_data" | grep -o '"time": [0-9]*' | head -1 | awk '{print $2}')
        [ -z "$prev_time" ] && prev_time=0
    fi

    # Scratch file that accumulates the current sample
    temp_current_data="/tmp/system_network_current_$(id -u).json"
    echo "{\"time\": $current_time," > "$temp_current_data"

    # Seconds elapsed since the previous sample
    time_diff=1
    if [ $prev_time -ne 0 ]; then
        time_diff=$((current_time - prev_time))
        [ $time_diff -le 0 ] && time_diff=1 # guard against division by zero
    fi

    # Walk every non-loopback interface
    for iface in $(ls /sys/class/net/ | grep -v "lo"); do
        if [ "$first" = true ]; then
            first=false
            echo "\"interfaces\": {" >> "$temp_current_data"
        else
            result+=","
            echo "," >> "$temp_current_data"
        fi

        # Current kernel counters for this interface
        rx_bytes=$(cat /sys/class/net/$iface/statistics/rx_bytes 2>/dev/null || echo 0)
        tx_bytes=$(cat /sys/class/net/$iface/statistics/tx_bytes 2>/dev/null || echo 0)
        rx_packets=$(cat /sys/class/net/$iface/statistics/rx_packets 2>/dev/null || echo 0)
        tx_packets=$(cat /sys/class/net/$iface/statistics/tx_packets 2>/dev/null || echo 0)

        # Persist the current counters for the next run
        echo "\"$iface\": {\"rx_bytes\": $rx_bytes, \"tx_bytes\": $tx_bytes, \"rx_packets\": $rx_packets, \"tx_packets\": $tx_packets}" >> "$temp_current_data"

        # Derive rates when a previous sample exists
        down_speed=0
        up_speed=0

        if [ -n "$prev_data" ]; then
            # Pull this interface's previous counters out of the stored JSON
            prev_rx_bytes=$(echo "$prev_data" | grep -o "\"$iface\".*rx_bytes.*tx_bytes" | grep -o "rx_bytes\": [0-9]*" | awk '{print $2}')
            prev_tx_bytes=$(echo "$prev_data" | grep -o "\"$iface\".*tx_bytes.*rx_packets" | grep -o "tx_bytes\": [0-9]*" | awk '{print $2}')

            # Only compute rates if the previous counters were found
            if [ -n "$prev_rx_bytes" ] && [ -n "$prev_tx_bytes" ]; then
                # Deltas since the previous sample
                rx_diff=$((rx_bytes - prev_rx_bytes))
                tx_diff=$((tx_bytes - prev_tx_bytes))

                # Counters reset on reboot; clamp negative deltas to zero
                [ $rx_diff -lt 0 ] && rx_diff=0
                [ $tx_diff -lt 0 ] && tx_diff=0

                # KB/s over the sampling interval
                down_speed=$(awk "BEGIN {printf \"%.2f\", $rx_diff / $time_diff / 1024}")
                up_speed=$(awk "BEGIN {printf \"%.2f\", $tx_diff / $time_diff / 1024}")
            fi
        fi

        # Append this interface's JSON fragment to the result
        result+=$(cat << EOF
"$iface": {
"down": $down_speed,
"up": $up_speed,
"downTotal": $rx_bytes,
"upTotal": $tx_bytes,
"downPackets": $rx_packets,
"upPackets": $tx_packets
}
EOF
)
    done

    # Close the current-sample file
    if [ "$first" = false ]; then
        echo "}" >> "$temp_current_data"
    else
        echo "\"interfaces\": {}" >> "$temp_current_data"
    fi
    echo "}" >> "$temp_current_data"

    # Atomically replace the persisted sample
    mv "$temp_current_data" "$NETWORK_DATA_FILE"

    result+="}"
    echo "$result"
}
|
||||
|
||||
# Aggregate counters across all non-loopback interfaces and derive overall
# up/down rates from the previous sample in $NETWORK_DATA_FILE.
collect_total_network() {
    current_time=$(date +%s)

    # Current totals
    down_total=0
    up_total=0
    down_packets=0
    up_packets=0
    down_speed=0
    up_speed=0
    # BUGFIX: the previous totals were never initialised -- the arithmetic
    # below only worked because bash treats unset names as 0 and would
    # break under `set -u`. Start them explicitly at zero.
    prev_down_total=0
    prev_up_total=0

    # Read the previous sample (if any)
    prev_data=""
    prev_time=0

    if [ -f "$NETWORK_DATA_FILE" ]; then
        prev_data=$(cat "$NETWORK_DATA_FILE")
        prev_time=$(echo "$prev_data" | grep -o '"time": [0-9]*' | head -1 | awk '{print $2}')
        [ -z "$prev_time" ] && prev_time=0
    fi

    # Seconds elapsed since the previous sample
    time_diff=1
    if [ $prev_time -ne 0 ]; then
        time_diff=$((current_time - prev_time))
        [ $time_diff -le 0 ] && time_diff=1 # guard against division by zero
    fi

    # Sum the current counters over all non-loopback interfaces
    for iface in $(ls /sys/class/net/ | grep -v "lo"); do
        rx_bytes=$(cat /sys/class/net/$iface/statistics/rx_bytes 2>/dev/null || echo 0)
        tx_bytes=$(cat /sys/class/net/$iface/statistics/tx_bytes 2>/dev/null || echo 0)
        rx_packets=$(cat /sys/class/net/$iface/statistics/rx_packets 2>/dev/null || echo 0)
        tx_packets=$(cat /sys/class/net/$iface/statistics/tx_packets 2>/dev/null || echo 0)

        down_total=$((down_total + rx_bytes))
        up_total=$((up_total + tx_bytes))
        down_packets=$((down_packets + rx_packets))
        up_packets=$((up_packets + tx_packets))
    done

    # Sum the previous totals from the stored sample
    if [ -f "$NETWORK_DATA_FILE" ]; then
        for iface in $(ls /sys/class/net/ | grep -v "lo"); do
            iface_prev_rx=$(echo "$prev_data" | grep -o "\"$iface\".*rx_bytes.*tx_bytes" | grep -o "rx_bytes\": [0-9]*" | awk '{print $2}')
            iface_prev_tx=$(echo "$prev_data" | grep -o "\"$iface\".*tx_bytes.*rx_packets" | grep -o "tx_bytes\": [0-9]*" | awk '{print $2}')

            if [ -n "$iface_prev_rx" ]; then
                prev_down_total=$((prev_down_total + iface_prev_rx))
            fi
            if [ -n "$iface_prev_tx" ]; then
                prev_up_total=$((prev_up_total + iface_prev_tx))
            fi
        done
    fi

    # Overall rates (KB/s)
    if [ $prev_time -ne 0 ]; then
        rx_diff=$((down_total - prev_down_total))
        tx_diff=$((up_total - prev_up_total))

        # Counters reset on reboot; clamp negative deltas to zero
        [ $rx_diff -lt 0 ] && rx_diff=0
        [ $tx_diff -lt 0 ] && tx_diff=0

        down_speed=$(awk "BEGIN {printf \"%.2f\", $rx_diff / $time_diff/ 1024}")
        up_speed=$(awk "BEGIN {printf \"%.2f\", $tx_diff / $time_diff/ 1024}")
    fi

    # Emit the aggregate JSON
    cat << EOF
{
"down": $down_speed,
"up": $up_speed,
"downPackets": $down_packets,
"upPackets": $up_packets,
"downTotal": $down_total,
"upTotal": $up_total
}
EOF
}
|
||||
|
||||
# Collect CPU information: overall usage %, per-core usage %, model name and
# core/socket counts. Persists the current jiffy counters to
# $CPU_DATA_FILE so usage can be computed as a delta on the next run.
collect_cpu() {
    # Temp file paths (mktemp for the scratch file, for safety)
    CPU_DATA_FILE="/tmp/system_cpu_data_$(id -u).json"
    TEMP_CURRENT_DATA=$(mktemp "/tmp/system_cpu_current_XXXXXXX.json")

    # Timestamp of this sample
    local current_time
    current_time=$(date +%s)

    # Aggregate CPU jiffies from /proc/stat.
    # NOTE(review): the aggregate "user" bucket here is $2+$3+$4 only,
    # while the per-core bucket below also adds $6+$7+$8 -- confirm
    # whether this asymmetry is intended.
    local current_cpu_stat
    if ! current_cpu_stat=$(cat /proc/stat | grep '^cpu ' | awk '{
        user_nice_system = ($2 + $3 + $4) + 0
        idle = $5 + 0
        total = (user_nice_system + idle + ($6 + 0) + ($7 + 0) + ($8 + 0))
        printf "%d,%d,%d", user_nice_system, idle, total
    }'); then
        echo "Unable to read CPU statistics information" >&2
        return 1
    fi
    local current_user_time=$(echo "$current_cpu_stat" | cut -d',' -f1)
    local current_idle_time=$(echo "$current_cpu_stat" | cut -d',' -f2)
    local current_total_time=$(echo "$current_cpu_stat" | cut -d',' -f3)

    # Per-core current jiffies as "user,idle,total" strings
    local core_stats=()
    while read -r line; do
        if [[ $line =~ ^cpu[0-9]+ ]]; then
            local core_stat=$(echo "$line" | awk '{printf "%d,%d,%d", $2+$3+$4+$6+$7+$8, $5, $2+$3+$4+$5+$6+$7+$8}')
            core_stats+=("$core_stat")
        fi
    done < /proc/stat

    # Previous sample (if present)
    local prev_data=""
    local prev_time=0
    local prev_user_time=0
    local prev_idle_time=0
    local prev_total_time=0
    local prev_core_stats=()

    if [[ -f "$CPU_DATA_FILE" ]]; then
        if ! prev_data=$(cat "$CPU_DATA_FILE"); then
            echo "Unable to read historical CPU data" >&2
            return 1
        fi

        prev_time=$(echo "$prev_data" | grep -o '"time": [0-9]*' | head -1 | awk '{print $2}')
        prev_user_time=$(echo "$prev_data" | grep -o '"user_time": [0-9]*' | head -1 | awk '{print $2}')
        prev_idle_time=$(echo "$prev_data" | grep -o '"idle_time": [0-9]*' | head -1 | awk '{print $2}')
        prev_total_time=$(echo "$prev_data" | grep -o '"total_time": [0-9]*' | head -1 | awk '{print $2}')

        # Use awk to match each core_<i> object across multiple lines
        local i=0
        while true; do
            local core_data
            core_data=$(echo "$prev_data" | awk -v core="core_$i" '
                $0 ~ "\"" core "\": {" {flag=1; print; next}
                flag && /}/ {print; flag=0; exit}
                flag {print}
            ')

            # No more stored cores
            if [[ -z "$core_data" ]]; then
                break
            fi

            local core_user_time=$(echo "$core_data" | grep -o '"user_time": [0-9]*' | awk '{print $2}')
            local core_idle_time=$(echo "$core_data" | grep -o '"idle_time": [0-9]*' | awk '{print $2}')
            local core_total_time=$(echo "$core_data" | grep -o '"total_time": [0-9]*' | awk '{print $2}')

            prev_core_stats+=("$core_user_time,$core_idle_time,$core_total_time")
            ((i++))
        done
    fi

    # Seconds between samples
    local time_diff=$((current_time - prev_time))
    ((time_diff <= 0)) && time_diff=1 # guard against division by zero

    # Overall CPU usage percentage (delta-based)
    local cpu_usage=0
    if ((prev_total_time > 0)); then
        local user_diff=$((current_user_time - prev_user_time))
        local total_diff=$((current_total_time - prev_total_time))

        # Counters can reset after a reboot; clamp negative deltas
        ((user_diff < 0)) && user_diff=0
        ((total_diff < 0)) && total_diff=0

        if ((total_diff > 0)); then
            cpu_usage=$(awk "BEGIN {printf \"%.2f\", ($user_diff / $total_diff) * 100}")
        fi
    fi

    # Logical core count
    local logical_cores
    logical_cores=$(nproc 2>/dev/null || getconf _NPROCESSORS_ONLN 2>/dev/null || echo 1)

    # Per-core usage percentages, built as a JSON array string
    local cpu_cores_usage="["
    local first=true
    local i=0

    for core_stat in "${core_stats[@]}"; do
        local core_user_time=$(echo "$core_stat" | cut -d',' -f1)
        local core_idle_time=$(echo "$core_stat" | cut -d',' -f2)
        local core_total_time=$(echo "$core_stat" | cut -d',' -f3)

        local core_usage=0
        if ((i < ${#prev_core_stats[@]})); then
            local prev_core_stat=${prev_core_stats[$i]}
            local prev_core_user_time=$(echo "$prev_core_stat" | cut -d',' -f1)
            local prev_core_idle_time=$(echo "$prev_core_stat" | cut -d',' -f2)
            local prev_core_total_time=$(echo "$prev_core_stat" | cut -d',' -f3)

            local core_user_diff=$((core_user_time - prev_core_user_time))
            local core_total_diff=$((core_total_time - prev_core_total_time))

            # Clamp negative deltas
            ((core_user_diff < 0)) && core_user_diff=0
            ((core_total_diff < 0)) && core_total_diff=0

            if ((core_total_diff > 0)); then
                core_usage=$(awk "BEGIN {printf \"%.2f\", ($core_user_diff / $core_total_diff) * 100}")
            fi
        fi

        if [[ "$first" == true ]]; then
            first=false
        else
            cpu_cores_usage+=","
        fi

        cpu_cores_usage+="$core_usage"
        ((i++))
    done

    cpu_cores_usage+="]"

    # CPU model name (prefer lscpu, fall back to /proc/cpuinfo)
    local cpu_name
    if command -v lscpu >/dev/null 2>&1; then
        cpu_name=$(lscpu | grep "Model name" | head -n 1 | cut -d':' -f2 | sed 's/^[[:space:]]*//')
    else
        cpu_name=$(grep "model name" /proc/cpuinfo | head -n 1 | cut -d':' -f2 | sed 's/^[[:space:]]*//')
    fi

    # Physical core/socket counts (prefer lscpu)
    local physical_cores=1
    local physical_cpus=1
    if command -v lscpu >/dev/null 2>&1; then
        physical_cores=$(lscpu | grep "Core(s) per socket" | awk '{print $4}')
        physical_cpus=$(lscpu | grep "Socket(s)" | awk '{print $2}')
    else
        # Fallback: parse /proc/cpuinfo
        physical_cpus=$(grep "physical id" /proc/cpuinfo | sort -u | wc -l)
        physical_cores=$(grep "cpu cores" /proc/cpuinfo | head -1 | awk '{print $4}')

        # Conservative estimate when the core count is unavailable
        if [[ -z "$physical_cores" ]]; then
            physical_cores=$(( logical_cores / physical_cpus ))
        fi
    fi

    # Make sure the counts are non-empty
    [[ -z "$physical_cores" ]] && physical_cores=1
    [[ -z "$physical_cpus" ]] && physical_cpus=1

    # Persist the current sample for the next comparison
    {
        echo "{"
        echo " \"time\": $current_time,"
        echo " \"user_time\": $current_user_time,"
        echo " \"idle_time\": $current_idle_time,"
        echo " \"total_time\": $current_total_time,"

        # Per-core stats
        local i=0
        for core_stat in "${core_stats[@]}"; do
            local core_user_time=$(echo "$core_stat" | cut -d',' -f1)
            local core_idle_time=$(echo "$core_stat" | cut -d',' -f2)
            local core_total_time=$(echo "$core_stat" | cut -d',' -f3)

            echo " \"core_$i\": {"
            echo " \"user_time\": $core_user_time,"
            echo " \"idle_time\": $core_idle_time,"
            echo " \"total_time\": $core_total_time"

            # No trailing comma after the last core object
            if ((i < ${#core_stats[@]} - 1)); then
                echo " },"
            else
                echo " }"
            fi

            ((i++))
        done

        echo "}"
    } > "$TEMP_CURRENT_DATA"

    # Atomically replace the persisted sample
    if ! mv "$TEMP_CURRENT_DATA" "$CPU_DATA_FILE"; then
        echo "Unable to save CPU data to $CPU_DATA_FILE" >&2
        rm -f "$TEMP_CURRENT_DATA"
        return 1
    fi

    # [usage%, logical_cores, [per-core %...], "model", cores/socket, sockets]
    echo "[$cpu_usage, $logical_cores, $cpu_cores_usage, \"$cpu_name\", $physical_cores, $physical_cpus]"
}
|
||||
|
||||
# Emit the raw aggregate /proc/stat jiffy buckets plus process counts as JSON.
collect_cpu_times() {
    # Aggregate CPU jiffies, fields 2..11 of the "cpu " line
    cpu_line=$(cat /proc/stat | grep '^cpu ' | awk '{print $2,$3,$4,$5,$6,$7,$8,$9,$10,$11}')
    read -r user nice system idle iowait irq softirq steal guest guest_nice <<< "$cpu_line"

    # Process counts: total and currently-running ("R" state)
    total_processes=$(ps -e | wc -l)
    active_processes=$(ps -eo stat | grep -c "R")

    cat << EOF
{
"user": $user,
"nice": $nice,
"system": $system,
"idle": $idle,
"iowait": $iowait,
"irq": $irq,
"softirq": $softirq,
"steal": $steal,
"guest": $guest,
"guest_nice": $guest_nice,
"Total number of processes": $total_processes,
"Number of activity processes": $active_processes
}
EOF
}
|
||||
|
||||
# Collect per-filesystem disk usage (raw bytes plus human-readable sizes)
# and inode statistics; emits a JSON array.
collect_disk() {
    # Skip pseudo filesystems, /boot and docker overlay mounts
    df_output=$(df -TPB1 -x tmpfs -x devtmpfs | tail -n +2 | grep -vE "/boot\$" | grep -vE "docker/overlay2")

    result="["
    first=true

    while read -r filesystem type total used avail pcent mountpoint; do
        if [ "$first" = true ]; then
            first=false
        else
            result+=","
        fi

        size_bytes=$total
        size_used=$used
        size_avail=$avail

        # Format as human-readable sizes (a separate awk pass per value)
        size_human=$(echo "$size_bytes" | awk '{
            suffix="BKMGT"; value=$1;
            for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
            printf("%.2f%s", value, substr(suffix,i,1));
        }')

        size_used_human=$(echo "$size_used" | awk '{
            suffix="BKMGT"; value=$1;
            for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
            printf("%.2f%s", value, substr(suffix,i,1));
        }')

        size_avail_human=$(echo "$size_avail" | awk '{
            suffix="BKMGT"; value=$1;
            for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
            printf("%.2f%s", value, substr(suffix,i,1));
        }')

        # Inode statistics for the same mountpoint
        inode_info=$(df -i | grep -E "$mountpoint\$" | awk '{print $2,$3,$4,$5}')
        read -r itotal iused iavail ipcent <<< "$inode_info"

        # Default any missing inode value
        [ -z "$itotal" ] && itotal=0
        [ -z "$iused" ] && iused=0
        [ -z "$iavail" ] && iavail=0
        [ -z "$ipcent" ] && ipcent="0%"

        # Append this filesystem's JSON fragment
        result+=$(cat << EOF
{
"filesystem": "$filesystem",
"types": "$type",
"path": "$mountpoint",
"rname": "$(basename "$mountpoint")",
"byte_size": [$size_bytes, $size_used, $size_avail],
"size": ["$size_human", "$size_used_human", "$size_avail_human"],
"d_size": "$pcent",
"inodes": [$itotal, $iused, $iavail, "$ipcent"]
}
EOF
)
    done <<< "$df_output"

    result+="]"
    echo "$result"
}
|
||||
|
||||
# Collect per-disk I/O counters from /sys/block/<dev>/stat; emits a JSON
# object keyed by device name.
collect_iostat() {
    result="{"
    first=true

    # Only conventional disk device names (sd*, hd*, vd*, nvme*)
    disks=$(ls /sys/block/ 2>/dev/null | grep -E '^(sd|hd|vd|nvme)' 2>/dev/null || echo "")

    for disk in $disks; do
        if [ -r "/sys/block/$disk/stat" ]; then
            if [ "$first" = true ]; then
                first=false
            else
                result+=","
            fi

            # Read the kernel's disk statistics line
            disk_stats=$(cat /sys/block/$disk/stat 2>/dev/null)
            if [ -n "$disk_stats" ]; then
                # Pre-set defaults in case the read below fails
                read_comp=0 read_merged=0 read_sectors=0 read_ms=0 write_comp=0 write_merged=0 write_sectors=0 write_ms=0 io_in_progress=0 io_ms_weighted=0

                # Parse the stat fields
                read read_comp read_merged read_sectors read_ms write_comp write_merged write_sectors write_ms io_in_progress io_ms_weighted <<< "$disk_stats"

                # Convert sectors to bytes (512 bytes per sector)
                read_bytes=$((read_sectors * 512))
                write_bytes=$((write_sectors * 512))

                result+=$(cat << EOF
"$disk": {
"read_count": $read_comp,
"read_merged_count": $read_merged,
"read_bytes": $read_bytes,
"read_time": $read_ms,
"write_count": $write_comp,
"write_merged_count": $write_merged,
"write_bytes": $write_bytes,
"write_time": $write_ms
}
EOF
)
            fi
        fi
    done

    result+="}"
    echo "$result"
}
|
||||
|
||||
# Emit load averages from /proc/loadavg plus derived max/safe thresholds.
collect_load() {
    load_avg=$(cat /proc/loadavg)
    # "others" soaks up the remaining loadavg fields (runnable/total, last pid)
    read -r one five fifteen others <<< "$load_avg"

    cpu_count=$(nproc)
    # "max" is defined here as twice the CPU count
    max_load=$((cpu_count * 2))

    # "safe" load = 70% of max
    safe_load=$(awk "BEGIN {printf \"%.2f\", $max_load * 0.7}")

    cat << EOF
{
"one": $one,
"five": $five,
"fifteen": $fifteen,
"max": $max_load,
"limit": $cpu_count,
"safe": $safe_load
}
EOF
}
|
||||
|
||||
# Collect memory usage from /proc/meminfo and emit a JSON object.
# All "*_bytes" outputs are in bytes; /proc/meminfo values are in KB.
collect_mem() {
    mem_info=$(cat /proc/meminfo)

    # Extract the fields we need (KB). BUGFIX: the original extracted
    # Buffers twice (with /^Buffers/ and /^Buffers:/ -- same value); keep
    # a single, anchored extraction.
    mem_total=$(awk '/^MemTotal/ {print $2; exit}' <<< "$mem_info" || echo 0)
    mem_free=$(awk '/^MemFree/ {print $2; exit}' <<< "$mem_info" || echo 0)
    mem_available=$(awk '/^MemAvailable/ {print $2; exit}' <<< "$mem_info" || echo "$mem_free")
    mem_buffers=$(awk '/^Buffers:/ {print $2; exit}' <<< "$mem_info" || echo 0)
    mem_cached=$(awk '/^Cached:/ {print $2; exit}' <<< "$mem_info" || echo 0)
    mem_sreclaimable=$(awk '/^SReclaimable:/ {print $2; exit}' <<< "$mem_info" || echo 0)
    mem_shared=$(awk '/^Shmem/ {print $2; exit}' <<< "$mem_info" || echo 0)

    # Default any missing value to 0
    [ -z "$mem_total" ] && mem_total=0
    [ -z "$mem_free" ] && mem_free=0
    [ -z "$mem_available" ] && mem_available=0
    [ -z "$mem_buffers" ] && mem_buffers=0
    [ -z "$mem_cached" ] && mem_cached=0
    [ -z "$mem_shared" ] && mem_shared=0
    [ -z "$mem_sreclaimable" ] && mem_sreclaimable=0

    # Really-used memory, as free(1) computes it:
    # used = total - free - buffers - cached - sreclaimable.
    # BUGFIX: the original subtracted mem_buffers twice, understating usage.
    mem_real_used=$((mem_total - mem_free - mem_buffers - mem_cached - mem_sreclaimable))
    [ $mem_real_used -lt 0 ] && mem_real_used=0

    # Human-readable totals (one awk pass per value)
    mem_new_total=$(awk -v bytes="$((mem_total * 1024))" 'BEGIN {
        suffix="BKMGT"; value=bytes;
        for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
        printf("%.2f%s", value, substr(suffix,i,1));
    }')

    mem_new_real_used=$(awk -v bytes="$((mem_real_used * 1024))" 'BEGIN {
        suffix="BKMGT"; value=bytes;
        for(i=1; value>=1024 && i<length(suffix); i++) value/=1024;
        printf("%.2f%s", value, substr(suffix,i,1));
    }')

    # Convert KB to bytes for the JSON payload.
    # NOTE(review): "memCached" deliberately bundles Cached + SReclaimable +
    # Buffers (buff/cache style) even though memBuffers is also reported --
    # confirm the consumer expects that.
    mem_total_bytes=$((mem_total * 1024))
    mem_free_bytes=$((mem_free * 1024))
    mem_available_bytes=$((mem_available * 1024))
    mem_buffers_bytes=$((mem_buffers * 1024))
    mem_cached_bytes=$((mem_cached * 1024 + mem_sreclaimable * 1024 + mem_buffers* 1024))
    mem_real_used_bytes=$((mem_real_used * 1024))
    mem_shared_bytes=$((mem_shared * 1024))

    cat << EOF
{
"memTotal": $mem_total_bytes,
"memFree": $mem_free_bytes,
"memAvailable": $mem_available_bytes,
"memBuffers": $mem_buffers_bytes,
"memCached": $mem_cached_bytes,
"memRealUsed": $mem_real_used_bytes,
"memShared": $mem_shared_bytes,
"memNewTotal": "$mem_new_total",
"memNewRealUsed": "$mem_new_real_used"
}
EOF
}
|
||||
|
||||
# Report the machine's physical memory size in bytes via dmidecode.
# Tries "Maximum Capacity" first, then sums the installed module sizes.
# Prints "0" and returns 1 when nothing usable is found.
# NOTE(review): this function will try to INSTALL dmidecode through the
# system package manager as a side effect -- confirm that is acceptable
# for a metrics collector.
collect_physical_memory() {
    # Use sudo when available
    if command -v sudo >/dev/null 2>&1; then
        SUDO_CMD="sudo"
    else
        SUDO_CMD=""
    fi

    # Install dmidecode if it is missing
    if ! command -v dmidecode >/dev/null 2>&1; then
        # Try each known package manager in turn
        if command -v apt-get >/dev/null 2>&1; then
            $SUDO_CMD apt-get update >/dev/null 2>&1 && $SUDO_CMD apt-get install -y dmidecode >/dev/null 2>&1
        elif command -v yum >/dev/null 2>&1; then
            $SUDO_CMD yum install -y dmidecode >/dev/null 2>&1
        elif command -v dnf >/dev/null 2>&1; then
            $SUDO_CMD dnf install -y dmidecode >/dev/null 2>&1
        elif command -v zypper >/dev/null 2>&1; then
            $SUDO_CMD zypper install -y dmidecode >/dev/null 2>&1
        elif command -v pacman >/dev/null 2>&1; then
            $SUDO_CMD pacman -S --noconfirm dmidecode >/dev/null 2>&1
        fi
    fi

    # Re-check availability after the install attempt
    if command -v dmidecode >/dev/null 2>&1; then
        # First try the board's "Maximum Capacity" figure
        max_capacity=$($SUDO_CMD dmidecode -t memory 2>/dev/null | grep -i "Maximum Capacity:" | head -n1 | awk '
        {
            value = $3
            unit = $4
            # Convert to bytes
            if (unit == "GB" || unit == "gb") {
                bytes = value * 1024 * 1024 * 1024
            } else if (unit == "MB" || unit == "mb") {
                bytes = value * 1024 * 1024
            } else if (unit == "TB" || unit == "tb") {
                bytes = value * 1024 * 1024 * 1024 * 1024
            } else {
                bytes = 0
            }
            printf "%.0f", bytes
        }
        ')

        if [ -n "$max_capacity" ] && [ "$max_capacity" -gt 0 ] 2>/dev/null; then
            echo "$max_capacity"
            return 0
        fi

        # Fall back to summing the installed module sizes
        total_memory=$($SUDO_CMD dmidecode -t memory 2>/dev/null | grep -i "Size:" | grep -i "[0-9]* GB\|[0-9]* MB" | awk '
        BEGIN { total = 0 }
        {
            value = $2
            unit = $3
            # Convert to bytes
            if (unit == "GB" || unit == "gb") {
                bytes = value * 1024 * 1024 * 1024
            } else if (unit == "MB" || unit == "mb") {
                bytes = value * 1024 * 1024
            }
            total += bytes
        }
        END {
            printf "%.0f", total
        }
        ')

        if [ -n "$total_memory" ] && [ "$total_memory" -gt 0 ] 2>/dev/null; then
            echo "$total_memory"
            return 0
        fi
    fi

    # Every approach failed: report 0
    echo "0"
    return 1
}
|
||||
|
||||
# Main: gather every metric and print a single JSON document on stdout.
main() {
    # OS identification strings
    os_name=$(cat /etc/os-release 2>/dev/null | grep "PRETTY_NAME" | cut -d "=" -f 2 | tr -d '"' || echo "Unknown")
    simple_system=$(awk -F= '
    /^ID=/ {id=$2}
    /^VERSION_ID=/ {gsub(/"/,"",$2); version=$2}
    END {
        gsub(/"/,"",id);
        print toupper(substr(id,1,1)) substr(id,2) " " version
    }' /etc/os-release 2>/dev/null || echo "Unknown")

    hostname=$(hostname)
    current_time=$(date "+%Y-%m-%d %H:%M:%S")
    version="1.0.0" # custom script version

    # Placeholder site/database/ftp counts (to be collected per environment)
    site_total=0
    database_total=0
    ftp_total=0
    installed=true

    # Aggregate network stats, then pick the individual fields out of the JSON
    network_stats=$(collect_total_network)
    down=$(echo "$network_stats" | grep -o '"down": [0-9.]*' | cut -d ":" -f 2 | tr -d " " || echo "0.00")
    up=$(echo "$network_stats" | grep -o '"up": [0-9.]*' | cut -d ":" -f 2 | tr -d " " || echo "0.00")
    down_packets=$(echo "$network_stats" | grep -o '"downPackets": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
    up_packets=$(echo "$network_stats" | grep -o '"upPackets": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
    down_total=$(echo "$network_stats" | grep -o '"downTotal": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
    up_total=$(echo "$network_stats" | grep -o '"upTotal": [0-9]*' | cut -d ":" -f 2 | tr -d " " || echo "0")
    physical_memory=$(collect_physical_memory)

    # Assemble the final JSON document
    cat << EOF
{
"cpu": $(collect_cpu),
"cpu_times": $(collect_cpu_times),
"disk": $(collect_disk),
"iostat": $(collect_iostat),
"load": $(collect_load),
"mem": $(collect_mem),
"network": $(collect_network),
"system": "$os_name",
"simple_system": "$simple_system",
"title": "$hostname",
"time": "$current_time",
"version": "$version",
"site_total": $site_total,
"database_total": $database_total,
"ftp_total": $ftp_total,
"installed": $installed,
"down": $down,
"up": $up,
"downPackets": $down_packets,
"upPackets": $up_packets,
"downTotal": $down_total,
"upTotal": $up_total,
"physical_memory": $physical_memory
}
EOF
}
|
||||
|
||||
# Entry point: run the collector and print the JSON document
main
|
||||
199
mod/project/node/nodeutil/ssh_wrap.py
Normal file
199
mod/project/node/nodeutil/ssh_wrap.py
Normal file
@@ -0,0 +1,199 @@
|
||||
import json
|
||||
import os.path
|
||||
import traceback
|
||||
from typing import Optional, Tuple, Callable, Union, Dict
|
||||
from mod.base.ssh_executor import SSHExecutor, CommandResult
|
||||
from mod.project.node.dbutil import ServerNodeDB, Node
|
||||
|
||||
import public
|
||||
|
||||
def is_much_difference(a: float, b: float) -> bool:
    """Return True when a and b differ by at least a factor of ten.

    A zero on either side counts as "very different" (and also avoids a
    division by zero).
    """
    if a == 0 or b == 0:
        return True
    quotient = a / b
    return not (0.1 < quotient < 10)
|
||||
|
||||
class SSHApi:
|
||||
is_local = False
|
||||
_local_scripts_dir = os.path.join(os.path.dirname(__file__), "ssh_warp_scripts")
|
||||
|
||||
def __init__(self, host, port: int=22, username: str="root", password=None, pkey=None,
             pkey_passwd=None, threading_mod=False, timeout=20):
    """Store the SSH connection settings; no connection is opened here.

    :param host: SSH host address.
    :param port: SSH port.
    :param username: login user.
    :param password: login password (optional when a private key is given).
    :param pkey: private key data (mapped to ``key_data`` below, so this
                 is key material, not a file path).
    :param pkey_passwd: passphrase for the private key.
    :param threading_mod: executor threading flag, passed through as-is.
    :param timeout: connection timeout in seconds.
    """
    # Normalised config consumed by SSHExecutor; host-key checking and
    # agent / key-file lookups are deliberately disabled.
    self._real_ssh_conf = {
        "host": host,
        "username": username,
        "port": port,
        "password": password,
        "key_file": "",
        "passphrase": pkey_passwd,
        "key_data": pkey,
        "strict_host_key_checking": False,
        "allow_agent": False,
        "look_for_keys": False,
        "threading_mod": threading_mod,
        "timeout": timeout,
    }
    # Lazily created by _get_ssh_executor()
    self._ssh_executor: Optional[SSHExecutor] = None
|
||||
|
||||
|
||||
@classmethod
def new_by_id(cls, node_id: int, threading_mod=False) -> Optional["SSHApi"]:
    """Build an SSHApi from the node's stored SSH configuration.

    :param node_id: primary key of the node record.
    :param threading_mod: forwarded into the SSH executor configuration.
    :return: an SSHApi instance, or None when the node or its SSH config
             is missing or invalid.
    """
    data = ServerNodeDB().get_node_by_id(node_id)
    if not data or not isinstance(data, dict):
        return None
    # BUGFIX: an empty or malformed ssh_conf string used to raise
    # json.JSONDecodeError out of this constructor; treat it as
    # "no usable config" and return None, matching the signature.
    try:
        ssh_conf = json.loads(data["ssh_conf"])
    except (TypeError, ValueError):
        return None
    if not ssh_conf:
        return None
    ssh_conf["threading_mod"] = threading_mod
    return cls(**ssh_conf)
|
||||
|
||||
def _get_ssh_executor(self) -> SSHExecutor:
    """Return the cached SSHExecutor, creating it on first use."""
    executor = self._ssh_executor
    if not executor:
        executor = SSHExecutor(**self._real_ssh_conf)
        self._ssh_executor = executor
    return executor
|
||||
|
||||
def get_net_work(self) -> Tuple[Optional[dict], str]:
    """Run system_info.sh on the remote host and parse its JSON output.

    :return: (normalised stats dict, "") on success;
             (None, error message) on any failure.
    """
    data, err = self._run_script("system_info.sh")
    if err:
        return None, err
    # A non-zero exit means the script itself failed; surface its stderr
    if not data.exit_code == 0:
        return None, data.stderr
    try:
        data = json.loads(data.stdout)
        # Sanity-check the payload shape before normalising it
        if isinstance(data, dict) and "cpu" in data and "mem" in data:
            return self._tans_net_work_form_data(data), ""
        return None, "data in wrong format: %s" % str(data)
    except Exception as e:
        # Covers JSON decode errors and unexpected payload structures
        return None, str(e)
|
||||
|
||||
@staticmethod
def _tans_net_work_form_data(data: dict):
    """Normalise the system-info payload in place.

    Converts the memory byte counts to MB (two decimals) and derives a
    human-readable total-memory label, preferring the dmidecode-reported
    physical size unless it disagrees wildly with /proc/meminfo's total.
    Returns the same dict.
    """
    mem = data["mem"]
    for field in ("memAvailable", "memBuffers", "memCached", "memFree",
                  "memRealUsed", "memShared", "memTotal"):
        mem[field] = round(mem[field] / 1024 / 1024, 2)
    data["physical_memory"] = round(data["physical_memory"] / 1024 / 1024, 2)

    # When the two totals differ by 10x or more, trust /proc/meminfo.
    if is_much_difference(mem["memTotal"], data["physical_memory"]):
        total_mb = mem["memTotal"]
    else:
        total_mb = data["physical_memory"]

    if total_mb >= 1024:
        mem["memNewTotal"] = "%.2fGB" % (total_mb / 1024)
    else:
        mem["memNewTotal"] = "%.2fMB" % total_mb
    return data
|
||||
|
||||
def _run_script(self, script_name: str) -> Tuple[Optional[CommandResult], str]:
|
||||
local_file = os.path.join(self._local_scripts_dir, script_name)
|
||||
if not os.path.exists(local_file):
|
||||
return None, "Script does not exist"
|
||||
executor = None
|
||||
try:
|
||||
executor = self._get_ssh_executor()
|
||||
executor.open()
|
||||
result = executor.execute_local_script_collect(local_file)
|
||||
return result, ""
|
||||
except RuntimeError:
|
||||
return None, "SSH connection failed"
|
||||
except Exception as e:
|
||||
return None, str(e)
|
||||
finally:
|
||||
if executor:
|
||||
executor.close()
|
||||
|
||||
def target_file_exits(self, target_file: str) -> Tuple[bool, str]:
|
||||
try:
|
||||
executor = self._get_ssh_executor()
|
||||
executor.open()
|
||||
result, err = executor.path_exists(target_file)
|
||||
return result, err
|
||||
except RuntimeError:
|
||||
print(traceback.format_exc(), flush=True)
|
||||
return False, "SSH connection failed"
|
||||
except Exception as e:
|
||||
print(traceback.format_exc(), flush=True)
|
||||
return False, str(e)
|
||||
|
||||
def create_dir(self, path: str) -> Tuple[bool, str]:
|
||||
try:
|
||||
executor = self._get_ssh_executor()
|
||||
executor.open()
|
||||
result, err = executor.create_dir(path)
|
||||
return result, err
|
||||
except RuntimeError:
|
||||
print(traceback.format_exc())
|
||||
return False, "SSH connection failed"
|
||||
except Exception as e:
|
||||
return False, str(e)
|
||||
|
||||
def upload_file(self, filename: str, target_path: str, mode: str = "cover",
|
||||
call_log: Callable[[int, str], None] = None) -> str:
|
||||
|
||||
if not os.path.isfile(filename):
|
||||
return "File: {} does not exist".format(filename)
|
||||
|
||||
target_file = os.path.join(target_path, os.path.basename(filename))
|
||||
path_info = self.path_info(target_file)
|
||||
if isinstance(path_info, str):
|
||||
return path_info
|
||||
|
||||
if path_info['exists'] and mode == "ignore":
|
||||
call_log(0, "File upload:{} -> {},The target file already exists, skip uploading".format(filename, target_file))
|
||||
return ""
|
||||
if path_info['exists'] and mode == "rename":
|
||||
upload_name = "{}_{}".format(os.path.basename(filename), public.md5(filename))
|
||||
call_log(0, "File upload:{} -> {},The target file already exists, it will be renamed to {}".format(filename, target_file, upload_name))
|
||||
else:
|
||||
upload_name = os.path.basename(filename)
|
||||
|
||||
try:
|
||||
executor = self._get_ssh_executor()
|
||||
executor.open()
|
||||
def progress_callback(current_size: int, total_size: int):
|
||||
if total_size == 0:
|
||||
return
|
||||
call_log(current_size * 100 // total_size, "" )
|
||||
executor.upload(filename, os.path.join(target_path, upload_name), progress_callback=progress_callback)
|
||||
except RuntimeError:
|
||||
print(traceback.format_exc(), flush=True)
|
||||
return "SSH connection failed"
|
||||
except Exception as e:
|
||||
print(traceback.format_exc(), flush=True)
|
||||
return str(e)
|
||||
return ""
|
||||
|
||||
def upload_dir_check(self, target_file: str) -> str:
|
||||
try:
|
||||
executor = self._get_ssh_executor()
|
||||
executor.open()
|
||||
path_info = executor.path_info(target_file)
|
||||
if not path_info['exists']:
|
||||
return ""
|
||||
if path_info['is_dir']:
|
||||
return "The name path is not a directory"
|
||||
return ""
|
||||
except RuntimeError:
|
||||
print(traceback.format_exc(), flush=True)
|
||||
return "SSH connection failed"
|
||||
except Exception as e:
|
||||
print(traceback.format_exc(), flush=True)
|
||||
return str(e)
|
||||
|
||||
def path_info(self, path: str) -> Union[str, Dict]:
|
||||
try:
|
||||
executor = self._get_ssh_executor()
|
||||
executor.open()
|
||||
path_info = executor.path_info(path)
|
||||
return path_info
|
||||
except RuntimeError as e:
|
||||
print(traceback.format_exc(), flush=True)
|
||||
return "SSH connection failed: {}".format(str(e))
|
||||
except Exception as e:
|
||||
print(traceback.format_exc(), flush=True)
|
||||
return "Failed to obtain path information:{}".format(str(e))
|
||||
3
mod/project/node/task_flow/__init__.py
Normal file
3
mod/project/node/task_flow/__init__.py
Normal file
@@ -0,0 +1,3 @@
|
||||
from .file_task import self_file_running_log, file_task_run_sync
|
||||
from .command_task import command_task_run_sync
|
||||
from .flow import flow_running_log, flow_useful_version
|
||||
195
mod/project/node/task_flow/command_task.py
Normal file
195
mod/project/node/task_flow/command_task.py
Normal file
@@ -0,0 +1,195 @@
|
||||
import json
|
||||
import threading
|
||||
import queue
|
||||
import time
|
||||
import traceback
|
||||
from typing import List, Dict, Callable, Any, Union
|
||||
|
||||
from mod.base.ssh_executor import SSHExecutor
|
||||
from mod.project.node.dbutil import ServerNodeDB, CommandTask, CommandLog, TaskFlowsDB
|
||||
|
||||
|
||||
class CMDTask(object):
    """Run one CommandTask across its target nodes over SSH.

    One worker thread per node executes the task's script while a collector
    thread (``end_func``) batches CommandLog status updates to the database
    and pushes progress dicts through ``call_update``.

    Log status codes observed below: 1 running, 2 done, 3 failed to run,
    4 script exited non-zero.
    """

    def __init__(self, task: Union[int, CommandTask], log_id: int, call_update: Callable[[Any], None], exclude_nodes: List[int] = None):
        """*task* is a CommandTask row or its id; ``log_id == 0`` runs every
        log entry of the task, otherwise only that single entry (retry);
        *exclude_nodes* lists server ids that must be skipped."""
        self._edb = TaskFlowsDB()
        if isinstance(task, int):
            self.task = self._edb.CommandTask.find("id = ?", (task,))
        elif isinstance(task, CommandTask):
            self.task = task
        else:
            raise ValueError("Task parameter error")
        if not self.task:
            raise RuntimeError("The specified task does not exist")
        if log_id == 0:
            self.task.elogs = self._edb.CommandLog.query("command_task_id = ? ", (self.task.id,))
        else:
            # robustness: find() may return None for an unknown log id; drop it
            # so the "no execution entry" error below fires instead of a crash
            the_log = self._edb.CommandLog.find("command_task_id = ? AND id = ?", (self.task.id, log_id))
            self.task.elogs = [the_log] if the_log else []
        if not self.task.elogs:
            raise RuntimeError("Task has no execution entry")

        self._exclude_nodes = exclude_nodes or []
        self.task.elogs = [x for x in self.task.elogs if x.server_id not in self._exclude_nodes]

        self.task.status = 1
        self._edb.CommandTask.update(self.task)
        self.end_queue = queue.Queue()  # updated CommandLogs -> end_func
        self.end_status = False         # set True once every worker joined
        self.status: List[Dict] = []
        self.call_update = call_update
        self.status_dict: Dict[str, Union[List[Any], int]] = {
            "task_id": self.task.id,
            "task_type": "command",
            "flow_idx": self.task.step_index - 1,
            "count": len(self.task.elogs),
            "complete": 0,
            "error": 0,
            "exclude_nodes": self._exclude_nodes,
            "error_nodes": [],
            "data": [],
        }

    def end_func(self):
        """Collector loop: batch CommandLog writes every 0.5s until workers end."""
        edb = TaskFlowsDB()
        tmp_dict: Dict[int, CommandLog] = {}
        last_time = time.time()
        update_fields = ("status",)
        complete_set, error_set = set(), set()
        while True:
            try:
                elog: CommandLog = self.end_queue.get(timeout=0.1)
            except queue.Empty:
                if self.end_status:
                    break
                else:
                    continue
            except Exception as e:
                print(e)
                break

            if elog.status in (3, 4):
                error_set.add(elog.id)
                self.status_dict["error_nodes"].append(int(elog.server_id))
                self.status_dict["error"] = len(error_set)
            elif elog.status == 2:
                complete_set.add(elog.id)
                self.status_dict["complete"] = len(complete_set)

            tmp_dict[elog.id] = elog
            if time.time() - last_time > 0.5:
                edb.CommandLog.bath_update(tmp_dict.values(), update_fields=update_fields)
                # bug fix: reset the batch timer; without this every event
                # after the first 0.5s triggered its own database write
                last_time = time.time()
                self.status_dict["data"] = [l.to_show_data() for l in tmp_dict.values()]
                self.call_update(self.status_dict)
                tmp_dict.clear()

        if tmp_dict:
            edb.CommandLog.bath_update(tmp_dict.values(), update_fields=update_fields)
            self.status_dict["data"] = [l.to_show_data() for l in tmp_dict.values()]
            self.call_update(self.status_dict)

        edb.close()
        return

    def start(self):
        """Spawn one worker per log entry, wait for them, finalize task status."""
        thread_list = []
        s_db = ServerNodeDB()
        end_th = threading.Thread(target=self.end_func)
        end_th.start()

        for (idx, log) in enumerate(self.task.elogs):
            log.log_idx = idx
            if log.status == 2:  # skip entries that already completed
                self.end_queue.put(log)
                continue

            log.status = 1
            ssh_conf = None
            node = s_db.get_node_by_id(log.server_id)
            if not node:
                log.status = 3
                log.write_log("Node data loss, unable to execute\n")
            else:
                ssh_conf = json.loads(node["ssh_conf"])
                if not ssh_conf:
                    log.status = 3
                    log.write_log("Node SSH configuration data lost, unable to execute\n")

            # publish the new state (running, or failed validation) right away
            self.end_queue.put(log)

            if not ssh_conf:
                continue

            thread = threading.Thread(target=self.run_one, args=(ssh_conf, log))
            thread.start()
            thread_list.append(thread)

        for i in thread_list:
            i.join()
        self.end_status = True
        end_th.join()
        if self.status_dict["error"] > 0:
            self.task.status = 3
        else:
            self.task.status = 2
        self._edb.CommandTask.update(self.task)
        self._edb.close()

    def run_one(self, ssh_conf: dict, elog: CommandLog):
        """Execute the task script on one node, streaming output into *elog*."""
        ssh = SSHExecutor(
            host=ssh_conf["host"],
            port=ssh_conf["port"],
            username=ssh_conf["username"],
            password=ssh_conf["password"],
            key_data=ssh_conf["pkey"],
            passphrase=ssh_conf["pkey_passwd"])
        elog.write_log("Start executing the task\nStart establishing SSH connection...\n")
        try:
            ssh.open()

            def on_stdout(data):
                if isinstance(data, bytes):
                    data = data.decode()
                elog.write_log(data)

            elog.write_log("Start executing script...\n\n")
            t = time.time()
            res_code = ssh.execute_script_streaming(
                script_content=self.task.script_content,
                script_type=self.task.script_type,
                timeout=60 * 60,  # hard cap: one hour per node
                on_stdout=on_stdout,
                on_stderr=on_stdout
            )
            take_time = round((time.time() - t) * 1000, 2)
            elog.write_log("\n\nExecution completed, time-consuming [{}ms]\n".format(take_time))
            if res_code == 0:
                elog.status = 2
                elog.write_log("Mission accomplished\n", is_end_log=True)
            else:
                elog.status = 4
                elog.write_log("Task exception, return status code is:{}\n".format(res_code), is_end_log=True)
            self.end_queue.put(elog)
        except Exception as e:
            traceback.print_exc()
            elog.status = 3
            elog.write_log("\nTask failed, error:" + str(e), is_end_log=True)
            self.end_queue.put(elog)
        finally:
            # resource fix: always release the SSH connection
            ssh.close()
        return
|
||||
|
||||
|
||||
# Synchronously retry a single failed sub-task of a command task.
def command_task_run_sync(task_id: int, log_id: int) -> Union[str, Dict[str, Any]]:
    """Re-run one failed command log entry and return the final status dict.

    Returns an error string when validation fails, otherwise the status
    dictionary produced by the CMDTask run.
    """
    flows_db = TaskFlowsDB()
    task = flows_db.CommandTask.get_byid(task_id)
    if not task:
        return "Task does not exist"
    log = flows_db.CommandLog.get_byid(log_id)
    if not log:
        return "Subtask does not exist"
    # only entries that ended failed (3) or with a non-zero exit (4) may retry
    if log.status not in (3, 4):
        return "Subtask status is not failed, unable to retry"
    if log.command_task_id != task_id:
        return "The subtask does not belong to this task and cannot be retried"
    runner = CMDTask(task, log_id=log_id, call_update=print)
    runner.start()
    return runner.status_dict
|
||||
|
||||
484
mod/project/node/task_flow/file_task.py
Normal file
484
mod/project/node/task_flow/file_task.py
Normal file
@@ -0,0 +1,484 @@
|
||||
import json
|
||||
import os
|
||||
import threading
|
||||
import queue
|
||||
import time
|
||||
import traceback
|
||||
import itertools
|
||||
from datetime import datetime
|
||||
from typing import List, Dict, Callable, Any, Tuple, Union, Optional
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
from mod.base.ssh_executor import SSHExecutor
|
||||
from mod.project.node.dbutil import ServerNodeDB, TaskFlowsDB, TransferTask, TransferFile, TransferLog
|
||||
from mod.project.node.nodeutil import ServerNode, LPanelNode, SSHApi
|
||||
from mod.project.node.filetransfer.socket_server import StatusServer, StatusClient, register_cleanup
|
||||
|
||||
|
||||
_SOCKET_FILE_DIR = "/tmp/flow_task"
|
||||
if not os.path.exists(_SOCKET_FILE_DIR):
|
||||
os.mkdir(_SOCKET_FILE_DIR)
|
||||
|
||||
def _dir_walk(path: str) -> Tuple[List[dict], str]:
|
||||
if not os.path.isdir(path):
|
||||
return [], "{} Not a directory".format(path)
|
||||
res_file = []
|
||||
count = 0
|
||||
empty_dir = []
|
||||
for root, dirs, files in os.walk(path):
|
||||
if not files:
|
||||
empty_dir.append(root)
|
||||
for f in files:
|
||||
count += 1
|
||||
try:
|
||||
res_file.append({
|
||||
"path": os.path.join(root, f),
|
||||
"size": os.path.getsize(os.path.join(root, f)),
|
||||
"is_dir": 0
|
||||
})
|
||||
except:
|
||||
pass
|
||||
return [{"path": d, "size": 0, "is_dir": 1} for d in empty_dir] + res_file, ""
|
||||
|
||||
|
||||
class FiletransferTask(object):
    """Run a TransferTask on this machine.

    The task's source paths are expanded into individual files
    (``_init_files``), one TransferLog row is created per (file, destination
    node) pair (``_init_files_log``), then a pool of worker threads performs
    the uploads while a single event thread batches status updates to the
    database and to ``call_update``.

    Log status codes used below: 0 pending, 1 running, 2 done, 3 failed,
    4 skipped because the target already existed.
    """

    def __init__(self, task: Union[int, TransferTask],
                 call_update: Callable[[Any], None],
                 exclude_nodes: List[int] = None,
                 the_log_id: int = None,
                 ):
        """*task* is a TransferTask row or its id; *call_update* receives
        status dicts; *exclude_nodes* are destination node ids to skip;
        ``the_log_id > 0`` restricts the run to a single TransferLog (retry)."""
        self._fdb = TaskFlowsDB()
        if isinstance(task, int):
            self.task = self._fdb.TransferTask.get_byid(task)
        elif isinstance(task, TransferTask):
            self.task = task
        else:
            raise ValueError("Parameter exception")

        if not self.task:
            raise RuntimeError("Task does not exist")

        self.exclude_nodes = exclude_nodes or []
        # normalize the retry id: anything but a non-negative int means "run all"
        self.the_log_id = max(the_log_id, 0) if isinstance(the_log_id, int) else 0
        self.event_queue = queue.Queue()  # TransferLog status events -> event_func
        self.trans_queue = queue.Queue()  # pending TransferLogs -> worker threads
        self.mut = threading.Lock()       # guards _srv_cache construction
        self._srv_cache: Dict[int, Union[SSHApi, LPanelNode, ServerNode]] = {}
        self.status_dict: Dict[str, Any] = {
            "task_id": self.task.id,
            "task_type": "file",
            "flow_idx": self.task.step_index - 1,
            "count": 0,
            "complete": 0,
            "error": 0,
            "error_nodes": [],
            "exclude_nodes": self.exclude_nodes,
            "data": None,
        }
        self.is_trans_end = False  # set True once every worker has returned
        self.call_update = call_update

    def _init_files(self):
        """Expand task.path_list into TransferFile rows (runs once per task)."""
        has_file = self._fdb.TransferFile.find("flow_id = ? AND transfer_task_id = ?", (self.task.flow_id, self.task.id))
        # the file list was already initialized on a previous (partial) run
        if has_file:
            return

        file_list = []
        for src_item in self.task.path_list:
            dst_path = src_item["dst_path"].rstrip("/")
            src_item["path"] = src_item["path"].rstrip("/")
            if not os.path.exists(src_item["path"]):
                continue
            src_item["is_dir"] = os.path.isdir(src_item["path"])
            if src_item["is_dir"]:
                f_list, err = _dir_walk(src_item["path"])
                if not f_list:
                    # nothing inside: transfer the directory entry itself
                    # NOTE(review): this branch leaves src_item without a
                    # "size" key unless path_list already carries one -- confirm
                    src_item["dst_file"] = os.path.join(dst_path, os.path.basename(src_item["path"]))
                    file_list.append(src_item)
                else:
                    # re-root every walked path below the destination directory
                    for f_item in f_list:
                        f_item["dst_file"] = f_item["path"].replace(os.path.dirname(src_item["path"]), dst_path)
                    file_list.extend(f_list)
            else:
                if not os.path.isfile(src_item["path"]):
                    continue
                src_item["dst_file"] = os.path.join(dst_path, os.path.basename(src_item["path"]))
                src_item["size"] = os.path.getsize(src_item["path"])
                file_list.append(src_item)

        t_list = []
        for f_item in file_list:
            fl = TransferFile(
                flow_id=self.task.flow_id,
                transfer_task_id=self.task.id,
                src_file=f_item["path"],
                dst_file=f_item["dst_file"],
                file_size=f_item["size"],
                is_dir=f_item["is_dir"],
            )
            t_list.append(fl)
        try:
            self._fdb.TransferFile.create(t_list)
        except Exception:  # narrowed from a bare except
            print("Failed to initialize file list", traceback.format_exc())

    def _init_files_log(self):
        """Create one TransferLog per (file, destination node), once per task."""
        tf_list = self._fdb.TransferFile.query("flow_id = ? AND transfer_task_id = ?", (self.task.flow_id, self.task.id))
        if not tf_list:
            return []
        has_fl = self._fdb.TransferLog.query("transfer_task_id = ? AND transfer_file_id = ?", (self.task.id, tf_list[0].id))
        if has_fl:
            return self._fdb.TransferLog.query("transfer_task_id = ?", (self.task.id,))

        fl_list = []
        for (tf, dst_node_id) in itertools.product(tf_list, self.task.dst_nodes.keys()):
            fl = TransferLog(
                flow_id=self.task.flow_id,
                transfer_task_id=self.task.id,
                transfer_file_id=tf.id,
                dst_node_idx=int(dst_node_id),
                status=0,
                progress=0,
                message=""
            )
            fl_list.append(fl)

        try:
            self._fdb.TransferLog.create(fl_list)
        except Exception:  # narrowed from a bare except
            print("Failed to initialize file list", traceback.format_exc())

    def _get_srv(self, idx: int) -> Union[SSHApi, LPanelNode, ServerNode]:
        """Return (and cache) the client object for destination node *idx*.

        Double-checked locking keeps construction single-shot across workers.
        """
        idx = int(idx)
        if idx in self._srv_cache:
            return self._srv_cache[idx]
        with self.mut:
            if idx in self._srv_cache:
                return self._srv_cache[idx]
            if idx not in self.task.dst_nodes:
                raise RuntimeError("Node index is out of range")
            srv_data: dict = self.task.dst_nodes[idx]
            if srv_data.get("lpver", None):
                srv = LPanelNode(srv_data["address"], srv_data["api_key"], srv_data["lpver"])
            elif srv_data["api_key"] or srv_data["app_key"]:
                srv = ServerNode(srv_data["address"], srv_data["api_key"], srv_data["app_key"])
            else:
                # thread mode: share one SSH connection, one session per thread
                srv_data["ssh_conf"]["threading_mod"] = True
                srv = SSHApi(**srv_data["ssh_conf"])
            self._srv_cache[idx] = srv
            return srv

    def start(self):
        """Run the transfer: build work items, fan out to 8 workers, finalize."""
        self.task.status = 1
        self._fdb.TransferTask.update(self.task)
        self._init_files()
        self._init_files_log()
        if self.the_log_id > 0:  # retry exactly one log entry
            query_where = "transfer_task_id = ? and id = ?"
            files_logs = self._fdb.TransferLog.query(query_where, (self.task.id, self.the_log_id))
        else:
            # select the not-yet-finished logs (2 done / 4 skipped are final)
            if self.exclude_nodes:
                query_where = "transfer_task_id = ? and status not in (2, 4) and dst_node_idx not in ({})".format(
                    ",".join(["?"] * len(self.exclude_nodes))
                )
            else:
                query_where = "transfer_task_id = ? and status not in (2, 4)"
            files_logs = self._fdb.TransferLog.query(query_where, (self.task.id, *self.exclude_nodes))
        files_list = self._fdb.TransferFile.query("transfer_task_id = ?", (self.task.id,))
        if not files_logs:
            # NOTE(review): returns with task.status still 1 (running) --
            # confirm whether an empty work list should mark it complete
            return
        files_map = {fl.id: fl for fl in files_list}
        for (idx, fl) in enumerate(files_logs):
            fl.log_idx = idx
            fl.tf = files_map[fl.transfer_file_id]
            self.trans_queue.put(fl)

        self.status_dict["count"] = len(files_logs)

        th_event = threading.Thread(target=self.event_func)
        th_event.start()

        # bug fix: the original additionally submitted a second batch of
        # once_trans() calls without the required worker_id argument; those
        # futures failed with a TypeError that was never observed. Submit
        # each worker exactly once.
        with ThreadPoolExecutor(max_workers=8) as executor:
            futures = [executor.submit(self.once_trans, worker_id) for worker_id in range(8)]
            for future in as_completed(futures):
                print("Completed result:", future.result())

        self.is_trans_end = True
        th_event.join()
        if self.the_log_id > 0:
            # bug fix: the original condition was inverted -- when no pending
            # (0), running (1) or failed (3) logs remain, the task succeeded
            if self._fdb.TransferLog.count("transfer_task_id = ? and status in (0,1,3)", (self.task.id, )) == 0:
                self.task.status = 2
            else:
                self.task.status = 3
        else:
            self.task.status = 2 if self.status_dict["error"] == 0 else 3
        self._fdb.TransferTask.update(self.task)
        self._fdb.close()

    def once_trans(self, worker_id: int):
        """Worker loop: take TransferLogs off trans_queue until it is empty."""
        while True:
            try:
                tl = self.trans_queue.get(block=False)
            except queue.Empty:
                print("worker_id: %s, The queue is empty" % worker_id)
                break
            except Exception:
                print("worker_id: %s, Failed to obtain task" % worker_id)
                print(traceback.format_exc())
                break

            # perform one file transfer
            try:
                if tl.status == 2:  # already finished in an earlier run
                    self.event_queue.put(tl)
                    continue
                srv = self._get_srv(tl.dst_node_idx)
                if tl.tf.is_dir:  # empty directory: create it remotely
                    exits, err = srv.target_file_exits(tl.tf.dst_file)
                    if err:  # could not determine the remote state
                        tl.message = err
                        tl.status = 3
                        self.event_queue.put(tl)
                    elif exits:  # target already present -> skipped
                        tl.status = 4
                        tl.progress = 100
                        self.event_queue.put(tl)
                    else:  # target missing, create the directory
                        res, err = srv.create_dir(tl.tf.dst_file)
                        if err:
                            tl.message = err
                            tl.status = 3
                        elif isinstance(res, dict):
                            # panel-style responses report success in "status"
                            if res["status"]:
                                tl.status = 2
                                tl.message = ""
                                tl.progress = 100
                            else:
                                tl.message = res["msg"]
                                tl.status = 3
                        else:
                            tl.status = 2
                            tl.message = ""
                            tl.progress = 100

                        self.event_queue.put(tl)
                else:  # regular file upload
                    tl.status = 1
                    self.event_queue.put(tl)

                    def _call_log(progress, log):
                        tl.progress = progress
                        self.event_queue.put(tl)

                    err = srv.upload_file(
                        filename=tl.tf.src_file,
                        target_path=os.path.dirname(tl.tf.dst_file),
                        mode="cover",
                        call_log=_call_log)

                    if err:
                        tl.status = 3
                        tl.message = err
                    else:
                        tl.status = 2
                        tl.message = ""
                        tl.progress = 100

                    self.event_queue.put(tl)
            except Exception as e:
                err = traceback.format_exc()
                tl.status = 3
                tl.message = str(e) + "\n" + err
                self.event_queue.put(tl)

    def event_func(self):
        """Drain event_queue, batch-write log rows every 0.5s, push status."""
        fdb = TaskFlowsDB()
        last_time = time.time()
        tmp_dict = {}
        update_fields = ("status", "message", "progress", "completed_at", "started_at")
        complete_set, error_set = set(), set()
        error_node_set = set()
        while True:
            try:
                tl: TransferLog = self.event_queue.get(timeout=0.1)
            except queue.Empty:
                if self.is_trans_end:
                    break
                else:
                    continue
            except Exception as e:
                print(e)
                break
            if tl.status in (2, 4):
                complete_set.add(tl.id)
                self.status_dict["complete"] = len(complete_set)
                if not tl.started_at:
                    tl.started_at = datetime.now()
                tl.completed_at = tl.completed_at or datetime.now()
            elif tl.status == 3:
                error_set.add(tl.id)
                self.status_dict["error"] = len(error_set)
                tl.completed_at = datetime.now()
                error_node_set.add(tl.dst_node_idx)
            elif tl.status == 1:
                tl.started_at = datetime.now()

            tmp_dict[tl.id] = tl
            if time.time() - last_time > 0.5:
                fdb.TransferLog.bath_update(tmp_dict.values(), update_fields=update_fields)
                last_time = time.time()

                self.status_dict["data"] = [i.to_show_data() for i in tmp_dict.values()]
                self.status_dict["error_nodes"] = list(error_node_set)
                self.call_update(self.status_dict)
                tmp_dict.clear()

        if tmp_dict:
            fdb.TransferLog.bath_update(tmp_dict.values(), update_fields=update_fields)
            self.status_dict["data"] = [i.to_show_data() for i in tmp_dict.values()]
            self.status_dict["error_nodes"] = list(error_node_set)
            self.call_update(self.status_dict)

        fdb.close()
|
||||
|
||||
|
||||
# Execute the file transfer on a remote node (the data source drives the
# transfer; this class proxies its status stream back into local task rows).
class NodeFiletransferTask(object):
    """Drive a TransferTask whose source lives on a remote panel node.

    ``start`` asks the source node to run the transfer and feeds every
    status message it streams back through ``handle_proxy_data``, which
    mirrors the remote progress into ``status_dict`` / ``call_update``.
    Task status codes used: 1 running, 2 complete, 3 error.
    """

    def __init__(self, task: TransferTask,
                 call_update: Callable[[Any], None],
                 exclude_nodes: List[int] = None,
                 the_log_id: int = None,
                 ):
        # *task* must already carry src_node (address/api_key/app_key/name).
        self.task = task
        src_node = task.src_node
        self.exclude_nodes = exclude_nodes or []
        self.srv = ServerNode(src_node["address"],src_node["api_key"], src_node["app_key"], src_node["name"])
        # normalize the retry id: anything but a non-negative int means "all"
        self.the_log_id = max(the_log_id, 0) if isinstance(the_log_id, int) else 0
        self.call_update = call_update
        # merged into every status payload so callers can attribute it
        self.default_status_data = {
            "task_id": self.task.id,
            "task_type": "file",
        }
        self.status_dict = dict()  # latest status data received from the node

    def start(self):
        """Run the remote transfer to completion and persist the task status."""
        fdb = TaskFlowsDB()
        self.task.status = 1
        fdb.TransferTask.update(self.task)
        # blocks while the source node streams status into handle_proxy_data
        err = self.srv.proxy_transferfile_status(self.task.src_node_task_id, self.exclude_nodes, self.the_log_id, self.handle_proxy_data)
        if err:
            self.task.status = 3
            self.task.message += ";" + err
        else:
            if self.status_dict and self.status_dict.get("error", 0):
                self.task.status = 3
            else:
                self.task.status = 2
            # a message recorded by handle_proxy_data also marks failure
            if self.task.message:
                self.task.status = 3

        fdb.TransferTask.update(self.task)

    def handle_proxy_data(self, data):
        """Parse one JSON status line from the source node and publish it."""
        ret = {"count": 0,"complete": 0,"error": 0, "error_nodes":[], "data": []}
        try:
            data_dict = json.loads(data)
            if "type" not in data_dict:
                return

            if data_dict["type"] == "status":
                if "init" in data_dict["data"]:  # skip the initial handshake payload
                    return
                ret.update(data_dict["data"])
                ret.update(self.default_status_data)
            else:  # end / error state: carries historical data or an error message
                if "data" in data_dict:
                    ret.update(data_dict["data"])
                    ret.update(self.default_status_data)
                elif "msg" in data_dict:
                    # remember the error; start() turns it into status 3
                    self.task.message = data_dict["msg"]
                    return
        except:
            # unparseable payload: surface a generic transmission error
            print(traceback.format_exc())
            ret["data"].append({"message": "Data source node execution transmission exception, please check if the node is functioning properly"})
            ret.update(self.default_status_data)

        self.status_dict = ret
        self.call_update(ret)
|
||||
|
||||
|
||||
# Run the file transfer on this machine and publish status to the remote caller.
class SelfFiletransferTask(object):
    """Wraps FiletransferTask with a unix-socket StatusServer so a remote
    panel can follow the progress of a locally executed transfer."""

    def __init__(self, task_id: int, exclude_nodes: List[int] = None, the_log_id: int = None):
        socket_path = _SOCKET_FILE_DIR + "/file_task_" + str(task_id)
        self.status_server = StatusServer(self.get_status, socket_path)
        self.f_task = FiletransferTask(task_id, self.update_status, exclude_nodes, the_log_id)

    @staticmethod
    def get_status(init: bool = False) -> Dict:
        """Initial payload sent to newly connected status clients."""
        return {"init": True}

    def start_status_server(self):
        """Serve status updates from a background daemon thread."""
        server_thread = threading.Thread(target=self.status_server.start_server, daemon=True)
        server_thread.start()
        register_cleanup(self.status_server)

    def update_status(self, update_data: Dict):
        """Relay a FiletransferTask status dict to connected clients."""
        self.status_server.update_status(update_data)

    def start(self):
        """Start the status server, then run the transfer to completion."""
        self.start_status_server()
        self.f_task.start()
        return
|
||||
|
||||
|
||||
def self_file_running_log(task_id: int, call_log: Callable[[Union[str,dict]], None], timeout:float = 3.0) -> str:
    """Attach to the local file-task status socket and stream updates.

    Polls for the socket file for up to *timeout* seconds; returns
    "Task startup timeout" if it never appears, "" after the stream ends.
    Every received update is handed to *call_log*.
    """
    sock_path = _SOCKET_FILE_DIR + "/file_task_" + str(task_id)
    remaining = timeout
    while not os.path.exists(sock_path):
        if remaining <= 0:
            return "Task startup timeout"
        remaining -= 0.05
        time.sleep(0.05)

    client = StatusClient(sock_path, callback=call_log)
    client.connect()
    client.wait_receive()
    return ""
|
||||
|
||||
|
||||
# Synchronously retry a file-transfer task (or one of its log entries).
def file_task_run_sync(task_id: int, log_id: int) -> Union[str, Dict[str, Any]]:
    """Re-run a failed transfer and return its status dict, or an error string."""
    flows_db = TaskFlowsDB()
    task = flows_db.TransferTask.get_byid(task_id)
    if not task:
        return "Task does not exist"

    # the transfer is driven by a remote source node: proxy the retry there
    if task.src_node_task_id > 0:
        proxy_runner = NodeFiletransferTask(task, print, exclude_nodes=[], the_log_id=log_id)
        proxy_runner.start()
        return proxy_runner.status_dict

    if not log_id:
        return "The log ID cannot be empty"
    log = flows_db.TransferLog.get_byid(log_id)
    if not log:
        return "log does not exist"

    # only failed entries (status 3) are eligible for a retry
    if log.status != 3:
        return "The task status is not abnormal, no need to retry"

    if log.transfer_task_id != task_id:
        return "The log ID does not match the task ID"

    local_runner = FiletransferTask(task, print, exclude_nodes=[], the_log_id=log_id)
    local_runner.start()
    return local_runner.status_dict
|
||||
152
mod/project/node/task_flow/flow.py
Normal file
152
mod/project/node/task_flow/flow.py
Normal file
@@ -0,0 +1,152 @@
|
||||
import json
|
||||
import os
|
||||
import threading
|
||||
import queue
|
||||
import time
|
||||
import traceback
|
||||
from typing import List, Dict, Callable, Any, Union, Optional, Tuple
|
||||
|
||||
from mod.base.ssh_executor import SSHExecutor
|
||||
from mod.project.node.dbutil import ServerNodeDB, CommandTask, CommandLog, TaskFlowsDB, TransferTask
|
||||
from mod.project.node.dbutil import TaskFlowsDB
|
||||
from mod.project.node.nodeutil import LPanelNode, ServerNode, SSHApi
|
||||
from mod.project.node.filetransfer.socket_server import StatusServer, StatusClient, register_cleanup
|
||||
|
||||
from .command_task import CMDTask
|
||||
from .file_task import FiletransferTask, NodeFiletransferTask
|
||||
|
||||
_SOCKET_FILE_DIR = "/tmp/flow_task"
|
||||
if not os.path.exists(_SOCKET_FILE_DIR):
|
||||
os.mkdir(_SOCKET_FILE_DIR)
|
||||
|
||||
|
||||
|
||||
class FlowTask:
    """Runs one task flow: an ordered list of command and file-transfer steps
    executed across a set of nodes, with live status published over a unix
    domain socket so other processes can stream progress.
    """

    def __init__(self, flow_id: int, step_idx: int = 0, sub_id: int = 0):
        """Load the flow row and its steps from the task-flows DB.

        :param flow_id: id of the flow to execute.
        :param step_idx: reserved; currently unused.
        :param sub_id: reserved; currently unused.
        :raises RuntimeError: when the flow or its step content is missing.
        """
        self._fdb = TaskFlowsDB()
        self.flow = self._fdb.Flow.get_byid(flow_id)
        if not self.flow:
            raise RuntimeError("Task does not exist")

        # The two step kinds live in separate tables but share step_index ordering.
        self.steps: List[Union[CommandTask, TransferTask]] = [
            *self._fdb.CommandTask.query("flow_id = ?", (flow_id,)),
            *self._fdb.TransferTask.query("flow_id = ?", (flow_id,)),
        ]
        self.steps.sort(key=lambda x: x.step_index)

        if not self.steps:
            raise RuntimeError("The task content does not exist")
        self.now_idx = 1
        # Continue with later steps even after a step reports errors?
        self.run_when_error = bool(self.flow.strategy.get("run_when_error", False))
        # Exclude a node from later steps once it has failed on one?
        self.exclude_when_error = bool(self.flow.strategy.get("exclude_when_error", True))

        self.status_server = StatusServer(self.get_status, (_SOCKET_FILE_DIR + "/flow_task_" + str(flow_id)))
        self.flow_all_nodes = {int(i) for i in self.flow.server_ids.split("|") if i and i.isdigit()}

    def get_status(self, init: bool = False):
        """Return a status snapshot of the flow for socket clients.

        `init` is accepted for the StatusServer callback signature but is
        not used by this implementation.
        """
        flow_data = self.flow.to_dict()
        flow_data["steps"] = [x.to_show_data() for x in self.steps]
        flow_data["now_idx"] = self.now_idx
        return flow_data

    def start_status_server(self):
        """Serve status snapshots on a daemon thread and register socket cleanup."""
        t = threading.Thread(target=self.status_server.start_server, args=(), daemon=True)
        t.start()
        register_cleanup(self.status_server)

    def update_status(self, update_data: Dict):
        """Push incremental status/log data to connected status clients."""
        self.status_server.update_status(update_data)

    def _run(self) -> bool:
        """Execute every pending step in order.

        Returns True when all executed steps finished without errors; stops
        early (returns False) on the first failing step unless
        run_when_error is enabled.
        """
        def call_log(log_data):
            self.update_status(log_data)

        all_status = True    # stays True while every executed step succeeded
        error_nodes = set()  # nodes excluded after failing (when enabled)
        for step in self.steps:
            if not (self.flow_all_nodes - error_nodes):  # no runnable node left
                continue
            # Both step kinds share identical error handling; only the
            # runner differs, so dispatch once instead of duplicating it.
            if isinstance(step, CommandTask):
                runner = self.run_cmd_task
            elif isinstance(step, TransferTask):
                runner = self.run_transfer_task
            else:
                runner = None
            if runner is not None and step.status != 2:  # status 2 == already completed
                has_err, task_error_nodes = runner(step, call_log, exclude_nodes=list(error_nodes))
                all_status = all_status and not has_err
                if has_err and not self.run_when_error:
                    return False
                if self.exclude_when_error and task_error_nodes:
                    error_nodes.update(task_error_nodes)
            self.now_idx += 1
        return all_status

    def start(self):
        """Run the whole flow, keeping the flow row's status column in sync."""
        self.start_status_server()

        self.flow.status = "running"
        self._fdb.Flow.update(self.flow)
        all_status = self._run()
        self.flow.status = "complete" if all_status else "error"
        self._fdb.Flow.update(self.flow)

        self.status_server.stop()
        return

    @staticmethod
    def run_cmd_task(task: CommandTask, call_log: Callable[[Any], None], exclude_nodes: List[int] = None) -> Tuple[bool, List[int]]:
        """Run one command step; returns (had_errors, failed_node_ids)."""
        task = CMDTask(task, 0, call_log, exclude_nodes=exclude_nodes)
        task.start()
        return task.status_dict["error"] > 0, task.status_dict["error_nodes"]

    @staticmethod
    def run_transfer_task(task: TransferTask, call_log: Callable[[Any], None], exclude_nodes: List[int] = None) -> Tuple[bool, List[int]]:
        """Run one file-transfer step; returns (had_errors, failed_node_ids).

        A non-zero src_node_task_id means a node-to-node transfer, which
        uses the node-aware runner; otherwise the plain runner is used.
        """
        if task.src_node_task_id != 0:
            runner = NodeFiletransferTask(task, call_log, exclude_nodes=exclude_nodes, the_log_id=None)
        else:
            runner = FiletransferTask(task, call_log, exclude_nodes=exclude_nodes)
        runner.start()
        return runner.status_dict["error"] > 0, runner.status_dict["error_nodes"]
||||
|
||||
|
||||
def flow_running_log(task_id: int, call_log: Callable[[Union[str, dict]], None], timeout: float = 3.0) -> str:
    """Attach to a running flow's status socket and stream its output via call_log.

    Waits up to `timeout` seconds for the task process to create its socket;
    returns "Task startup timeout" if it never appears, "" otherwise.
    """
    socket_file = "{}/flow_task_{}".format(_SOCKET_FILE_DIR, task_id)
    # Poll in 50 ms slices until the socket file shows up or time runs out.
    while not os.path.exists(socket_file):
        if timeout <= 0:
            return "Task startup timeout"
        timeout -= 0.05
        time.sleep(0.05)

    client = StatusClient(socket_file, callback=call_log)
    client.connect()
    client.wait_receive()
    return ""
|
||||
|
||||
def flow_useful_version(ver: str):
    """Return True when panel version string `ver` is at least 11.4.

    Any malformed version string (non-numeric parts, missing minor component,
    non-string input) yields False.  The bare `except:` was narrowed to the
    exceptions this body can actually raise so that signals like
    KeyboardInterrupt are no longer swallowed.
    """
    try:
        ver_list = [int(i) for i in ver.split(".")]
        if ver_list[0] > 11:
            return True
        if ver_list[0] == 11 and ver_list[1] >= 4:
            return True
    except (AttributeError, ValueError, IndexError):
        pass
    return False
|
||||
0
mod/project/php/__init__.py
Normal file
0
mod/project/php/__init__.py
Normal file
1691
mod/project/php/aepgMod.py
Normal file
1691
mod/project/php/aepgMod.py
Normal file
File diff suppressed because it is too large
Load Diff
1898
mod/project/php/php_asyncMod.py
Normal file
1898
mod/project/php/php_asyncMod.py
Normal file
File diff suppressed because it is too large
Load Diff
17
mod/project/php/serviceconfMod.py
Normal file
17
mod/project/php/serviceconfMod.py
Normal file
@@ -0,0 +1,17 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2017 yakpanel(https://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: baozi <baozi@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
# 服务配置模块
|
||||
# ------------------------------
|
||||
|
||||
from mod.base.web_conf import IpRestrict
|
||||
|
||||
|
||||
class main(IpRestrict):  # Inherits the shared IP black/white-list restriction handling.
    """Service-configuration endpoint; all behavior is provided by IpRestrict."""
    def __init__(self):
        # Empty prefix: operate on the restriction config at its root keys.
        super().__init__(config_prefix="")
|
||||
0
mod/project/proxy/__init__.py
Normal file
0
mod/project/proxy/__init__.py
Normal file
4010
mod/project/proxy/comMod.py
Normal file
4010
mod/project/proxy/comMod.py
Normal file
File diff suppressed because it is too large
Load Diff
0
mod/project/push/__init__.py
Normal file
0
mod/project/push/__init__.py
Normal file
69
mod/project/push/msgconfMod.py
Normal file
69
mod/project/push/msgconfMod.py
Normal file
@@ -0,0 +1,69 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2017 yakpanel(https://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: baozi <baozi@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
# 新告警通道管理模块
|
||||
# ------------------------------
|
||||
from mod.base.msg import SenderManager, update_mod_push_msg
|
||||
from mod.base.push_mod import SenderConfig
|
||||
from mod.base import json_response
|
||||
|
||||
import public
|
||||
|
||||
|
||||
update_mod_push_msg()
|
||||
|
||||
|
||||
class main(SenderManager):
    """Alert-channel (sender) management endpoints."""

    @staticmethod
    def wx_account_auth(get=None):
        """Return the WeChat official-account binding/auth URL."""
        # NOTE(review): WeChatAccountMsg is not imported by this module's
        # visible imports — confirm it is provided via SenderManager's module
        # namespace or add the proper import.
        return WeChatAccountMsg.get_auth_url()

    @staticmethod
    def unbind_wx_account(get):
        """Unbind the WeChat account of the sender identified by get.sender_id."""
        try:
            sender_id = get.sender_id.strip()
        except AttributeError:
            return json_response(status=False, msg="Parameter error")

        conf = SenderConfig().get_by_id(sender_id)
        if not conf:
            return json_response(status=False, msg="No binding information was found")

        res = WeChatAccountMsg.unbind(conf["data"]["id"])
        # Touch the refresh flag so other workers reload sender data.
        public.WriteFile(WeChatAccountMsg.need_refresh_file, "")
        return res

    def set_default_sender(self, get):
        """Mark one sender of a given type as the default ("original") one.

        Clears the flag on every other sender of the same type, persists the
        config, and mirrors the choice into the legacy default-sender store.
        """
        try:
            try:
                sender_id = get.sender_id.strip()
                sender_type = get.sender_type.strip()
            except AttributeError:
                return json_response(status=False, msg="Parameter error")

            sc = SenderConfig()
            change = False
            for conf in sc.config:
                if conf["sender_type"] == sender_type:
                    # Exactly one sender per type may carry the default flag.
                    if conf["id"] == sender_id:
                        change = True
                        conf["original"] = True
                    else:
                        conf["original"] = False

            sc.save_config()
            if change:
                # Keep the pre-module ("compatible") default setting in sync.
                self.set_default_for_compatible(sc.get_by_id(sender_id))
            return json_response(status=True, msg="Successfully set")
        except Exception as e:
            return json_response(status=False, msg=str(e))
|
||||
|
||||
258
mod/project/push/taskMod.py
Normal file
258
mod/project/push/taskMod.py
Normal file
@@ -0,0 +1,258 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2015-2017 yakpanel(https://www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: baozi <baozi@yakpanel.com>
|
||||
# -------------------------------------------------------------------
|
||||
# 新告警通道管理模块
|
||||
# ------------------------------
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
import public
|
||||
|
||||
from mod.base import json_response
|
||||
|
||||
from mod.base.push_mod import PushManager, TaskConfig, TaskRecordConfig, TaskTemplateConfig, PushSystem
|
||||
from mod.base.push_mod import update_mod_push_system, UPDATE_MOD_PUSH_FILE, load_task_template_by_file, \
|
||||
UPDATE_VERSION_FILE
|
||||
from mod.base.msg import update_mod_push_msg
|
||||
from mod.base.push_mod.rsync_push import load_rsync_template
|
||||
from mod.base.push_mod.task_manager_push import load_task_manager_template
|
||||
from mod.base.push_mod.load_push import load_load_template
|
||||
|
||||
def update_mod():
    """One-time template/config refresh executed at module import.

    Force-reloads the bundled push templates when the built-in template
    version changes, then loads the always-refreshed templates and syncs
    the push-system configuration.
    """
    # todo: missing removal of deprecated templates
    template_ver = "8"
    try:
        with open(UPDATE_VERSION_FILE, 'r') as f:
            need_reload = f.read() != template_ver
    except (OSError, UnicodeDecodeError):
        # Version marker missing or unreadable: force a reload.
        need_reload = True

    if need_reload:
        # On a version bump only these templates are force-updated; the
        # rest keep whatever state they already have.
        load_task_template_by_file("/www/server/panel/mod/base/push_mod/site_push_template.json")
        load_task_template_by_file("/www/server/panel/mod/base/push_mod/system_push_template.json")
        load_task_template_by_file("/www/server/panel/mod/base/push_mod/database_push_template.json")
        load_task_template_by_file("/www/server/panel/mod/base/push_mod/domain_blcheck_push_template.json")
        with open(UPDATE_VERSION_FILE, "w") as f:
            f.write(template_ver)

    if not os.path.exists(UPDATE_MOD_PUSH_FILE):
        update_mod_push_msg()

    # These templates are (re)loaded on every import.
    load_rsync_template()
    load_task_manager_template()
    load_load_template()

    update_mod_push_system()
|
||||
|
||||
|
||||
# Run the one-time refresh at import, then drop the name so it cannot be
# invoked again through the panel's reflection-based request dispatch.
update_mod()
del update_mod
|
||||
|
||||
|
||||
class main(PushManager):
    """Push/alert task management endpoints (list, records, templates)."""

    def get_task_list(self, get=None):
        """Return the configured alert tasks, optionally filtered.

        get.status  -- optional; keep only tasks whose status matches
                       (case-insensitive string compare).
        get.keyword -- optional; fuzzy match against task/sender titles,
                       send intervals, daily counts and channel-type aliases.
        """
        # Channel-type aliases used for keyword fuzzy matching.
        channel_map = {
            # "wx_account": "wx_account",
            "mail": "mail",
            "webhook": "webhook",
            "feishu": "feishu",
            "dingding": "dingding",
            # "短信": "sms",
            "tg": "tg"
        }
        # Defined up-front so the except path can never hit an unbound name.
        res = []
        try:
            if get:
                status_filter = get.get("status", None)
                keyword_filter = get.get("keyword", None)
            else:
                status_filter = ""
                keyword_filter = ""
            res = TaskConfig().config

            # Order by creation time (oldest first).
            res.sort(key=lambda x: x["create_time"])
            # Sender details, keyed by sender id, for keyword matching.
            sender_info = self.get_sender_info()

            if status_filter:
                res = [task for task in res if str(task["status"]).lower() == status_filter.lower()]

            if keyword_filter:
                keyword_filter_lower = keyword_filter.lower()
                filtered_res = []
                for task in res:
                    task_match = False
                    # Special alias: the panel-login alert is stored under
                    # keyword "panel_login".  The literal must be lower-case
                    # because keyword_filter_lower already is — the original
                    # compared against a capitalized string and never matched.
                    if keyword_filter_lower == "alert when the panel is logged in":
                        if task['keyword'] == "panel_login":
                            task_match = True
                    if keyword_filter_lower in task["title"].lower() or \
                            (task["task_data"].get("title") and keyword_filter_lower in task["task_data"]["title"].lower()) or \
                            (task["time_rule"].get("send_interval") and keyword_filter_lower in str(task["time_rule"]["send_interval"])) or \
                            (task["number_rule"].get("day_num") and keyword_filter_lower in str(task["number_rule"]["day_num"])):
                        task_match = True
                    else:
                        for sender_id in task["sender"]:
                            sender = sender_info.get(sender_id, {})
                            sender_title = sender.get("data", {}).get("title", "").lower()
                            sender_type = sender.get("sender_type", "").lower()
                            if keyword_filter_lower in sender_title or \
                                    keyword_filter_lower in sender_type:
                                task_match = True
                                break
                            # Match the keyword against the channel-type alias table.
                            for alias, channel_type in channel_map.items():
                                if keyword_filter_lower in alias.lower() and channel_type == sender_type:
                                    task_match = True
                                    break
                    if task_match:
                        filtered_res.append(task)
                res = filtered_res
            for i in res:
                i['view_msg'] = self.get_view_msg_format(i)

            return json_response(status=True, data=res)
        except Exception:
            import traceback

            print(traceback.format_exc())
            # NOTE(review): status=True on the error path mirrors the original
            # behavior — callers appear to rely on always receiving a list.
            return json_response(status=True, data=res)

    def get_sender_info(self):
        """Load sender.json and index senders by id; return {} on any failure."""
        sender_file = '/www/server/panel/data/mod_push_data/sender.json'
        try:
            with open(sender_file, 'r', encoding='utf-8') as f:
                sender_data = json.load(f)
            return {sender['id']: sender for sender in sender_data}
        except Exception:
            return {}

    @staticmethod
    def get_task_record(get):
        """Return one page of send records for a task.

        get.task_id is required; get.page / get.size are optional and
        clamped to a minimum of 1.
        """
        page = 1
        size = 10
        try:
            if hasattr(get, "page"):
                page = int(get.page.strip())
            if hasattr(get, "size"):
                size = int(get.size.strip())
            task_id = get.task_id.strip()
        except (AttributeError, ValueError, TypeError):
            return json_response(status=False, msg="Parameter error")

        t = TaskRecordConfig(task_id)
        t.config.sort(key=lambda x: x["create_time"])
        page = max(page, 1)
        size = max(size, 1)
        count = len(t.config)
        data = t.config[(page - 1) * size: page * size]
        return json_response(status=True, data={
            "count": count,
            "list": data,
        })

    def clear_task_record(self, get):
        """Delete every send record of the task identified by get.task_id."""
        try:
            task_id = get.task_id.strip()
        except (AttributeError, ValueError, TypeError):
            return json_response(status=False, msg="Parameter error")
        self.clear_task_record_by_task_id(task_id)

        return json_response(status=True, msg="Cleared successfully")

    @staticmethod
    def remove_task_records(get):
        """Delete the records whose ids appear in get.record_ids (a JSON array)."""
        try:
            task_id = get.task_id.strip()
            record_ids = set(json.loads(get.record_ids.strip()))
        except (AttributeError, ValueError, TypeError):
            return json_response(status=False, msg="Parameter error")
        task_records = TaskRecordConfig(task_id)
        # Iterate backwards so deletions don't shift indexes still to visit.
        for i in range(len(task_records.config) - 1, -1, -1):
            if task_records.config[i]["id"] in record_ids:
                del task_records.config[i]

        task_records.save_config()
        return json_response(status=True, msg="Cleared successfully")

    @staticmethod
    def get_task_template_list(get=None):
        """Return the usable alert-task templates (result cached for 2 minutes)."""
        # todo: deprecated table
        public.check_table('ssl_domains', """CREATE TABLE IF NOT EXISTS `ssl_domains` (
    `id` INTEGER PRIMARY KEY AUTOINCREMENT,
    `domain` TEXT,
    `dns_id` TEXT,
    `type_id` INTEGER,
    `endtime` INTEGER,
    `ps` TEXT
)
""")

        # Serve from cache when available.
        cache_key = 'mod_task:get_task_template_list'
        cache = public.cache_get(cache_key)
        if cache:
            return json_response(status=True, data=cache)

        res = []
        p_sys = PushSystem()
        for i in TaskTemplateConfig().config:
            if not i['used']:
                continue
            to = p_sys.get_task_object(i["id"], i["load_cls"])
            if not to:
                continue
            # The following templates may only be used from the security module.
            if i['id'] in ['121', '122', '123', '124']:
                continue
            t = to.filter_template(i["template"])
            if not t:
                continue
            i["template"] = t
            res.append(i)
        # Cache for two minutes.
        public.cache_set(cache_key, res, 120)
        return json_response(status=True, data=res)

    @staticmethod
    def get_view_msg_format(task: dict) -> str:
        """Render the human-readable message preview for a task using the
        first formatter that recognizes it; '--' placeholder when none do."""
        from mod.base.push_mod.rsync_push import ViewMsgFormat as Rv
        from mod.base.push_mod.site_push import ViewMsgFormat as Sv
        from mod.base.push_mod.task_manager_push import ViewMsgFormat as Tv
        from mod.base.push_mod.database_push import ViewMsgFormat as Dv
        from mod.base.push_mod.system_push import ViewMsgFormat as SSv
        from mod.base.push_mod.load_push import ViewMsgFormat as Lv
        from mod.base.push_mod.domain_blcheck_push import ViewMsgFormat as DBv
        from mod.base.push_mod.safe_mod_push import ViewMsgFormat as SAv

        list_obj = [Rv(), Sv(), Tv(), Dv(), SSv(), Lv(), DBv(), SAv()]
        for i in list_obj:
            res = i.get_msg(task)
            if res is not None:
                return res
        return '<span>--</span>'
|
||||
|
||||
12
mod/project/python/__init__.py
Normal file
12
mod/project/python/__init__.py
Normal file
@@ -0,0 +1,12 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# YakPanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2014-2099 YakPanel(www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
|
||||
# ------------------------------
|
||||
# pythonModel app init
|
||||
# ------------------------------
|
||||
184
mod/project/python/environmentMod.py
Normal file
184
mod/project/python/environmentMod.py
Normal file
@@ -0,0 +1,184 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# YakPanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2014-2099 YakPanel(www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# ------------------------------
|
||||
# Python Env app
|
||||
# ------------------------------
|
||||
import os.path
|
||||
import sys
|
||||
import json
|
||||
import re
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
from mod.project.python.pyenv_tool import (
|
||||
EnvironmentReporter,
|
||||
EnvironmentManager,
|
||||
_SYS_BIN_PATH,
|
||||
python_manager_path,
|
||||
pyenv_path,
|
||||
)
|
||||
from mod.base import json_response
|
||||
EnvironmentReporter().init_report()
|
||||
|
||||
class main:
    """Panel endpoints for managing Python runtime environments
    (virtualenv / conda / system interpreters)."""

    @staticmethod
    def create_python_env(get):
        """Create a named virtual environment from a given base interpreter.

        get.venv_name  -- name of the new venv (letters, digits, `_.-` only).
        get.python_bin -- path of the base python binary.
        get.ps         -- optional description.
        Progress is streamed over the websocket when `_ws` is present in get.
        """
        try:
            venv_name = get.venv_name.strip()
            src_python_bin = get.python_bin.strip()
            if "ps" in get:
                ps = get.ps.strip()
            else:
                ps = ""
        except:
            return json_response(False, 'Invalid parameters')

        # Reject names that could escape the venv directory or break shells.
        if not re.match(r'^[a-zA-Z0-9_.-]+$', venv_name):
            return json_response(False, "Virtual environment name contains invalid characters")

        ws_send = None
        if "_ws" in get:
            # Forward creation log lines to the requesting websocket client.
            ws_send = lambda x: get._ws.send(json.dumps({"callback": "create_python_env", "result": {"log": x}}))
        res = EnvironmentManager().create_python_env(venv_name, src_python_bin, ps, ws_send)
        if isinstance(res, str):
            # A string result is an error message.
            return json_response(False, res)
        try:
            import public
            public.set_module_logs('python_project_env', 'create_python_env', 1)
        except:
            pass
        return json_response(True, msg="Created successfully")

    @staticmethod
    def list_environment(get):
        """List all known Python environments plus the current default.

        get["sort_not_use/s"] in ("1", "true") sorts least-used first;
        otherwise most-used first.
        """
        if get and get.get("sort_not_use/s") in ("1", "true"):
            sort_not_use = True
        else:
            sort_not_use = False

        em = EnvironmentManager()
        # bin_path -> list of project names using that interpreter.
        all_project_map = em.all_python_project_map()
        env_map = {
            i.bin_path: i.to_dict(
                project_name=all_project_map.get(i.bin_path, []),
                can_remove=i.can_remove,
                can_create=i.can_create,
                can_set_default=i.can_set_default,
                path_name=i.path_name,
                # "from_panel" means the interpreter was installed by the panel
                # itself (python manager or pyenv) and is a system-type env.
                from_panel=any((i.bin_path.startswith(x) for x in
                                (python_manager_path(), pyenv_path()))) and i.env_type == "system"
            ) for i in em.all_env
        }
        data = list(env_map.values())
        # Sort key: usage count, then env type, then whether the binary lives
        # in a system bin directory.
        if sort_not_use:
            data.sort(key=lambda x: (
                len(x["project_name"]),
                ("venv", "conda", "system").index(x["type"]),
                any(i == os.path.dirname(x["bin_path"]) for i in _SYS_BIN_PATH)
            ))
        else:
            data.sort(key=lambda x: (
                0 - len(x["project_name"]),
                ("venv", "conda", "system").index(x["type"]),
                any(i == os.path.dirname(x["bin_path"]) for i in _SYS_BIN_PATH)
            ))
        # Post-process entries (reverse order is cosmetic; entries are mutated
        # in place inside env_map/data).
        for i in data[::-1]:
            i["name"] = i["venv_name"] or i["path_name"] or i["version"]
            if i["type"] == "venv":
                # Attach the base interpreter's info to each venv entry.
                i["system_data"] = env_map[i["system_path"]]
            # An environment still referenced by projects must not be removable.
            i["can_remove"] = False if i["project_name"] else i["can_remove"]
        now_env = em.get_default_python_env()
        if now_env:
            now_env = env_map[now_env.bin_path]
            # The current default cannot be set as default again.
            now_env["can_set_default"] = False

        return json_response(True, data={"env_list": data, "now_env": now_env})

    @staticmethod
    def add_environment(get):
        """Register an existing interpreter/venv directory with the manager.

        get.add_type -- kind of environment being added.
        get.path     -- filesystem path of the environment.
        """
        try:
            add_type = get.add_type.strip()
            path = get.path.strip()
        except:
            return json_response(False, 'Invalid parameters')
        res = EnvironmentManager().add_python_env(add_type, path)
        if isinstance(res, str):
            # A string result is an error message.
            return json_response(False, res)
        try:
            import public
            public.set_module_logs('python_project_env', 'add_environment', 1)
        except:
            pass
        return json_response(True, msg="Added successfully")

    @staticmethod
    def remove_environment(get):
        """Remove one or more environments.

        get.path_data is a single path or a comma-separated list of paths.
        """
        try:
            path_data = get.path_data.strip()
        except:
            return json_response(False, 'Invalid parameters')
        if not path_data:
            return json_response(False, 'Invalid parameters')
        em = EnvironmentManager()
        if "," in path_data:
            # Batch removal: succeed only if every target was removed.
            path_list = path_data.split(",")
            res = em.multi_remove_env(*path_list)
            status = all(x.get("status") for x in res)
            if not status:
                err_msg = "\n".join([x.get("msg", "") for x in res if not x.get("status")])
                return json_response(False, err_msg)
            return json_response(status, data=res)
        else:
            res = em.multi_remove_env(path_data)
            # NOTE(review): the loop returns on the FIRST result either way
            # (success or failure), so later results are ignored and the final
            # return below is only reached when `res` is empty.
            for r in res:
                if r.get("status"):
                    return json_response(True, msg="Removed successfully")
                return json_response(False, r.get("msg", "Failed to remove"))
            return json_response(True, msg="Removed successfully")

    @staticmethod
    def set_environment_default(get):
        """Set (or clear, when path is empty/"close") the default python env."""
        try:
            path = get.path.strip()
        except:
            return json_response(False, 'Invalid parameters')
        if not path or path == "close":
            # Empty path means "disable the default environment".
            path = ""
        em = EnvironmentManager()
        # set_python2env returns a falsy value on success, an error string otherwise.
        res = em.set_python2env(path)
        if not path:
            if not res:
                return json_response(True, msg="Disabled successfully")
            else:
                return json_response(False, res)
        else:
            if not res:
                return json_response(True, msg="Set successfully")
            else:
                return json_response(False, res)

    @staticmethod
    def set_environment_ps(get):
        """Update the description (ps) of the environment at get.path."""
        try:
            path = get.path.strip()
            ps = get.ps.strip()
        except:
            return json_response(False, 'Invalid parameters')
        if not path or not ps:
            return json_response(False, 'Invalid parameters')
        em = EnvironmentManager()
        # Falsy result means success; otherwise it is an error string.
        res = em.set_python_env_ps(path, ps)
        if not res:
            return json_response(True, msg="Set successfully")
        else:
            return json_response(False, res)
|
||||
1438
mod/project/python/pyenv_tool.py
Normal file
1438
mod/project/python/pyenv_tool.py
Normal file
File diff suppressed because it is too large
Load Diff
788
mod/project/python/serviceMod.py
Normal file
788
mod/project/python/serviceMod.py
Normal file
@@ -0,0 +1,788 @@
|
||||
# coding: utf-8
|
||||
# -------------------------------------------------------------------
|
||||
# YakPanel
|
||||
# -------------------------------------------------------------------
|
||||
# Copyright (c) 2014-2099 YakPanel(www.yakpanel.com) All rights reserved.
|
||||
# -------------------------------------------------------------------
|
||||
# Author: yakpanel
|
||||
# -------------------------------------------------------------------
|
||||
# ------------------------------
|
||||
# py service model app
|
||||
# ------------------------------
|
||||
import json
|
||||
import os.path
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
import psutil
|
||||
from uuid import uuid4
|
||||
from typing import Optional, Union, List, Dict, Tuple, Any, Set
|
||||
|
||||
SERVICE_PATH = "/www/server/python_project/service"
# Create the service state directory at import time.  Best-effort: the panel
# may run before /www is writable (e.g. during install), so failures are
# ignored.  exist_ok=True removes the isdir/makedirs race of the original.
try:
    os.makedirs(SERVICE_PATH, 0o755, exist_ok=True)
except OSError:
    pass
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
if "/www/server/panel" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel")
|
||||
|
||||
import public
|
||||
from mod.base import json_response
|
||||
from mod.project.python.pyenv_tool import EnvironmentManager
|
||||
from public.exceptions import HintException
|
||||
|
||||
|
||||
class Environment(object):
    """Runtime environment of one Python project: interpreter location,
    working directory, run-as user and environment variables, used to build
    the shell preamble that starts the project's services."""

    def __init__(self, project_name: str, python_path: str, python_bin: str, project_path: str, user: str,
                 env_list: List[Dict[str, str]], env_file: str):
        # python_path: the env's bin directory; project_path: service cwd.
        self.python_path = python_path
        self.project_path = project_path
        # env_list: list of {"k": name, "v": value} variable pairs.
        self.env_list = env_list
        # env_file: optional shell file sourced before starting the service.
        self.env_file = env_file
        self.project_name = project_name
        self.user = user
        # Memoized result of shell_env().
        self._env_cache: Optional[str] = None
        self.pyenv = EnvironmentManager().get_env_py_path(python_bin)

    @classmethod
    def form_project_conf(cls, project_config: dict) -> Union["Environment", str]:
        """Build an Environment from a project config dict.

        Returns an error message string (not an exception) on invalid input,
        matching the module's string-means-error convention.
        """
        if not isinstance(project_config, dict):
            return 'Invalid project configuration file format'

        python_path: str = project_config.get("vpath")
        # Fall back to vpath for configs written before python_bin existed.
        python_bin: str = project_config.get("python_bin", project_config.get("vpath"))
        project_path: str = project_config.get("path")
        env_list = project_config.get("env_list", [])
        env_file = project_config.get("env_file", "")
        project_name = project_config.get("pjname")
        user = project_config.get("user", "root")
        if not python_path or not project_path or not project_name:
            return 'Invalid project configuration file format'

        if not os.path.isdir(python_path) or not os.path.isdir(project_path):
            return 'The project directory or virtual environment directory specified in the config does not exist'

        # Normalize: drop trailing slashes and point python_path at .../bin.
        python_path = python_path.rstrip("/")
        project_path = project_path.rstrip("/")
        if not python_path.endswith("/bin"):
            python_path = python_path + "/bin"
        if not os.path.isdir(python_path):
            return 'The virtual environment directory specified in the config does not exist'

        return cls(project_name, python_path, python_bin, project_path, user, env_list, env_file)

    # Compose the environment shell preamble used to start a service.
    def shell_env(self) -> str:
        """Return (and cache) the shell preamble: cd into the project dir,
        export configured variables, source the env file, then activate the
        Python environment so its bin directory leads PATH."""
        if self._env_cache is not None:
            return self._env_cache

        res_env_list = ["cd {}".format(self.project_path)]
        if isinstance(self.env_list, list):
            for i in self.env_list:
                if not isinstance(i, dict):
                    continue
                if 'k' in i and 'v' in i:
                    # NOTE(review): values are interpolated unquoted — an env
                    # value containing spaces or shell metacharacters breaks
                    # (or injects into) the generated script; confirm inputs
                    # are sanitized upstream.
                    res_env_list.append("export {}={}".format(i['k'], i['v']))

        if self.env_file and os.path.isfile(self.env_file):
            res_env_list.append("source {}".format(self.env_file))

        res_env_list.append(self.pyenv.activate_shell())

        self._env_cache = "\n".join(res_env_list)

        return self._env_cache
|
||||
|
||||
|
||||
class PythonService(object):
|
||||
def __init__(self, sid: str, name: str, command: str, level: Optional[int], log_type: Optional[str]):
|
||||
self.sid = sid
|
||||
self.name = name
|
||||
self.command = command
|
||||
self.level = level
|
||||
self.log_type = log_type
|
||||
self.env: Optional[Environment] = None
|
||||
|
||||
def set_env(self, env: Environment):
|
||||
self.env = env
|
||||
|
||||
def write_pid(self, pid: int):
|
||||
if not self.env:
|
||||
raise RuntimeError('Env not set')
|
||||
pid_file = os.path.join(SERVICE_PATH, '{}/{}.pid'.format(self.env.project_name, self.name))
|
||||
if not os.path.isdir(os.path.dirname(pid_file)):
|
||||
os.makedirs(os.path.dirname(pid_file), 0o755)
|
||||
public.writeFile(pid_file, str(pid))
|
||||
|
||||
def read_pid(self) -> Optional[int]:
|
||||
if not self.env:
|
||||
raise RuntimeError('Env not set')
|
||||
pid_file = os.path.join(SERVICE_PATH, '{}/{}.pid'.format(self.env.project_name, self.name))
|
||||
if not os.path.isfile(pid_file):
|
||||
return None
|
||||
|
||||
res = None
|
||||
try:
|
||||
res = int(public.readFile(pid_file))
|
||||
except:
|
||||
pass
|
||||
if isinstance(res, int) and res > 0:
|
||||
return res
|
||||
return None
|
||||
|
||||
@classmethod
|
||||
def from_config(cls, config: dict,
|
||||
env: Optional[Environment]) -> Union['PythonService', "MainPythonService", 'CeleryService', str]:
|
||||
sid = config.get('sid', None)
|
||||
if sid == 'main':
|
||||
return MainPythonService(env)
|
||||
|
||||
name: str = config.get('name', "")
|
||||
command: str = config.get('command', "")
|
||||
if not sid or not name or not command:
|
||||
return 'Missing required parameters'
|
||||
|
||||
if not isinstance(command, str) or not isinstance(name, str) or not isinstance(sid, str):
|
||||
return 'Invalid parameter type'
|
||||
|
||||
level = config.get('level', 11)
|
||||
log_type = config.get('log_type', "append")
|
||||
|
||||
if command.split()[0].endswith("celery"):
|
||||
res = CeleryService(sid, name, command, level, log_type)
|
||||
else:
|
||||
res = cls(sid, name, command, level, log_type)
|
||||
|
||||
if env:
|
||||
res.set_env(env)
|
||||
return res
|
||||
|
||||
# 执行启动服务并返回PID信息或错误信息
|
||||
def start(self) -> Optional[int]:
|
||||
if not self.env:
|
||||
raise RuntimeError('Env not set')
|
||||
log_file = os.path.join(SERVICE_PATH, '{}/{}.log'.format(self.env.project_name, self.name))
|
||||
pid_file = os.path.join(SERVICE_PATH, '{}/{}.pid'.format(self.env.project_name, self.name))
|
||||
if not os.path.exists(os.path.dirname(pid_file)):
|
||||
os.makedirs(os.path.dirname(pid_file), 0o755)
|
||||
if os.path.exists(pid_file):
|
||||
os.remove(pid_file)
|
||||
prep_sh = self.env.shell_env()
|
||||
prep_sh += "\nexport BT_PYTHON_SERVICE_SID={}".format(self.sid)
|
||||
if not os.path.isfile(log_file):
|
||||
public.writeFile(log_file, '')
|
||||
public.set_own(log_file, self.env.user)
|
||||
public.set_mode(log_file, "755")
|
||||
if self.log_type == "append":
|
||||
prep_sh += "\nnohup {} &>> {} &".format(self.command, log_file)
|
||||
else:
|
||||
prep_sh += "\nnohup {} &> {} &".format(self.command, log_file)
|
||||
|
||||
public.ExecShell(prep_sh, user=self.env.user)
|
||||
time.sleep(0.5)
|
||||
return self.get_service_pid()
|
||||
|
||||
def get_service_pid(self, only_service: bool = False) -> Optional[int]:
|
||||
pid = self.read_pid()
|
||||
if pid and psutil.pid_exists(pid):
|
||||
return pid
|
||||
|
||||
if not pid:
|
||||
pid = self.get_pid_by_env_key()
|
||||
if not pid and not only_service:
|
||||
pid = self.get_pid_by_command()
|
||||
|
||||
if pid:
|
||||
self.write_pid(pid)
|
||||
return pid
|
||||
return None
def get_pid_by_env_key(self) -> Optional[int]:
    """Locate the service PID by scanning /proc/*/environ for the SID marker.

    Collects every process whose environment block contains the marker and
    returns the top-most one (a match whose parent is not itself a match).
    Returns None when nothing matches.
    """
    marker = "BT_PYTHON_SERVICE_SID={}".format(self.sid)
    matched = []
    for pid in psutil.pids():
        try:
            environ: str = public.readFile("/proc/{}/environ".format(pid))
            if environ.rfind(marker) != -1:
                matched.append(pid)
        except:
            # Process vanished or environ unreadable; skip it.
            continue

    for pid in matched:
        try:
            if psutil.Process(pid).ppid() not in matched:
                return pid
        except:
            continue
    return None
def get_pid_by_command(self) -> Optional[int]:
    """Locate the service PID by matching process command lines.

    A process matches when its argv equals the configured command exactly,
    or when it looks like the command re-invoked through the project's
    interpreter (argv[0] under env.python_path, our own args after the
    interpreter/script pair). Returns the top-most matching PID, or None.
    """
    cmd_list = self.split_command()
    matched = []
    for proc in psutil.process_iter(["cmdline", "pid", "exe"]):
        try:
            real_cmd = proc.cmdline()
            if cmd_list == real_cmd:
                matched.append(proc)
            if real_cmd[2:] == cmd_list[1:] and real_cmd[0].startswith(self.env.python_path):
                matched.append(proc)
        except:
            continue

    # Bug fix: the parent-filter used to test an int ppid against a list of
    # psutil.Process objects, which never compare equal to an int, so the
    # filter was a no-op. Compare against the matched PID set instead.
    matched_pids = {proc.pid for proc in matched}
    for proc in matched:
        try:
            if proc.ppid() not in matched_pids:
                return proc.pid
        except:
            continue
    return None
def split_command(self) -> List[str]:
    """Split self.command into argv-like tokens.

    Space, tab and CR separate tokens unless inside quotes; quote
    characters themselves are dropped and simply toggle quoted mode
    (single and double quotes are interchangeable, no escaping —
    deliberately looser than shlex).
    """
    tokens: List[str] = []
    current = ""
    quoted = False
    for ch in self.command:
        if ch in (' ', '\t', '\r'):
            if quoted:
                # Whitespace inside quotes is normalized to a single space.
                current += ' '
            elif current:
                tokens.append(current)
                current = ""
        elif ch in ("'", '"'):
            quoted = not quoted
        else:
            current += ch

    if current:
        tokens.append(current)

    return tokens
def stop(self) -> None:
    """Kill the service process (SIGKILL); silently does nothing when the
    service is not running or the process disappears mid-call."""
    pid = self.get_service_pid()
    if not pid:
        return
    try:
        psutil.Process(pid).kill()
    except:
        pass
def get_log(self) -> str:
    """Return the last 1000 lines of this service's log file.

    Returns a placeholder message when the file is missing or empty.

    Raises:
        RuntimeError: if no Environment has been attached.
    """
    if not self.env:
        raise RuntimeError('env not set')
    log_file = os.path.join(SERVICE_PATH, '{}/{}.log'.format(self.env.project_name, self.name))
    if not os.path.isfile(log_file):
        return 'No logs available'
    content = public.GetNumLines(log_file, 1000)
    return content if content else 'No logs available'
@staticmethod
def _get_ports_by_pid(pid: int) -> List[int]:
    """Return the TCP ports the given process is LISTENing on ([] on any error)."""
    try:
        listening = {
            con.laddr.port
            for con in psutil.Process(pid).connections()  # NOQA
            if con.status == 'LISTEN'
        }
        return list(listening)
    except:
        return []
def get_info(self) -> Dict[str, Any]:
    """Return {'pid': int|None, 'ports': [int, ...]} describing the service.

    Raises:
        RuntimeError: if no Environment has been attached.
    """
    if not self.env:
        raise RuntimeError('env not set')
    pid = self.get_service_pid()
    if not (isinstance(pid, int) and psutil.pid_exists(pid)):
        return {"pid": None, "ports": []}
    return {
        'pid': pid,
        'ports': self._get_ports_by_pid(pid)
    }
class MainPythonService(PythonService):
    """Adapter for the project's *main* service.

    Delegates start/stop/status/log to the projectModelV2 python project
    model instead of spawning a shell command itself.
    """
    from projectModelV2.pythonModel import main as py_project_main
    _py_main_class = py_project_main

    def __init__(self, env: Environment):
        super().__init__('main', 'main', 'main', 10, 'append')
        self.set_env(env)

    @property
    def py_main(self):
        # A fresh model instance on each access.
        return self._py_main_class()

    def start(self) -> Optional[int]:
        """Start the main project via the project model; return its PID."""
        if not self.env:
            raise RuntimeError('Env not set')
        self.py_main.only_start_main_project(self.env.project_name)
        return self.get_service_pid()

    def get_service_pid(self, only_service: bool = False) -> Optional[int]:
        """Return the lowest of the project's running PIDs, or None."""
        if not self.env:
            raise RuntimeError('env not set')
        running: List[int] = self.py_main.get_project_run_state(self.env.project_name)
        if not running:
            return None
        return min(running)

    def stop(self) -> None:
        """Stop the main project via the project model."""
        if not self.env:
            raise RuntimeError('env not set')
        self.py_main.only_stop_main_project(self.env.project_name)

    def get_log(self) -> str:
        """Fetch the main project's log through the project model."""
        if not self.env:
            raise RuntimeError('env not set')
        args = public.dict_obj()
        args.name = self.env.project_name
        result = self.py_main.GetProjectLog(args)
        log_data = None
        if result.get("status"):
            log_data = result.get("message", {}).get("data", "")
        if not log_data:
            return 'no log found'
        return log_data

    def get_info(self):
        """Base info dict plus a human-readable service name."""
        info = super().get_info()
        info['name'] = "Main project service"
        return info
class CeleryService(PythonService):
    """PythonService variant for commands launched through the virtualenv's
    ``celery`` entry script, whose on-disk argv is rewritten by the OS to
    ``<interpreter> <celery-script> <args...>``."""

    def get_celery_env(self) -> Tuple[str, str]:
        """Return (interpreter, celery_script_path) for this environment.

        The interpreter is taken from the celery script's shebang line.
        Returns ('', '') when the script is missing or unreadable.
        """
        celery = "{}/celery".format(self.env.python_path)
        if not os.path.isfile(celery):
            return '', ''
        celery_data = public.readFile(celery)
        if not isinstance(celery_data, str):
            return '', ''
        celery_python = celery_data.split("\n", 1)[0]
        if celery_python.startswith("#!"):
            celery_python = celery_python[2:].strip()
        return celery_python, celery

    def get_pid_by_command(self) -> Optional[int]:
        """Match processes against the celery-rewritten argv.

        Falls back to the generic command match when the celery script
        cannot be resolved. Returns the top-most matching PID, or None.
        """
        celery_env = self.get_celery_env()
        if not celery_env[0] or not celery_env[1]:
            return super().get_pid_by_command()
        target = []
        cmd_list = list(celery_env) + self.split_command()[1:]
        for p in psutil.process_iter(["cmdline", "pid"]):
            try:
                if cmd_list == p.cmdline():
                    target.append(p)
            except:
                continue

        # Bug fix: ppid() (an int) was tested against a list of Process
        # objects, which never compare equal, making the parent-filter a
        # no-op. Compare against the matched PID set instead.
        target_pids = {p.pid for p in target}
        for p in target:
            try:
                if p.ppid() not in target_pids:
                    return p.pid
            except:
                continue
        return None
# Coordinated service management: the main service plus auxiliary services.
class ServiceManager:
    """Manages a Python project's services (the implicit *main* service plus
    user-defined ones): config CRUD, start/stop/restart, logs and status."""

    MAIN_SERVICE_CONF = {
        "sid": "main",
        "name": "main",
        "command": "main",
        "level": 10,
        "log_type": "append",
    }

    def __init__(self, project_name: str, project_config: dict):
        self.project_name = project_name
        self.project_config = project_config
        self._other_services: Optional[List[Dict]] = None  # lazy cache
        self._env: Optional[Environment] = None            # lazy cache

    @classmethod
    def new_mgr(cls, project_name: str) -> Union["ServiceManager", str]:
        """Build a manager from the project's sites DB row.

        Raises:
            HintException: project missing, or its stored config is not JSON.
        """
        data = public.M("sites").where(
            'project_type=? AND name=? ', ('Python', project_name)
        ).field('id,project_config').find()
        if not data:
            raise HintException("Project [{}] Not Found!".format(project_name))
        try:
            project_config = json.loads(data['project_config'])
        except json.JSONDecodeError:
            raise HintException("Project [{}] db's Project Config Error!".format(project_name))
        return cls(project_name, project_config)

    @property
    def service_list(self) -> List[Dict]:
        """All service configs (main + others), sorted by start level."""
        res = [self.MAIN_SERVICE_CONF]
        res.extend(self.other_services)
        res.sort(key=lambda x: x['level'])
        return res

    @property
    def other_services(self) -> List[Dict]:
        """Non-main service configs, lazily loaded from project_config."""
        if self._other_services is None:
            self._other_services = [
                service for service in self.project_config.get('services', [])
                if service.get('sid') != 'main'
            ]
        return self._other_services

    @staticmethod
    def new_id() -> str:
        """Short random service id (every third hex char of a uuid4)."""
        return uuid4().hex[::3]

    def save_service_conf(self) -> Optional[str]:
        """Persist other_services back into the site's project_config.

        Returns an error message string on failure, None on success.
        """
        data = public.M("sites").where(
            'project_type=? AND name=? ', ('Python', self.project_name)
        ).field('id,project_config').find()
        if not data:
            return "Website information not found"
        data['project_config'] = json.loads(data['project_config'])
        data['project_config']['services'] = self.other_services
        public.M("sites").where('id=?', (data['id'],)).update({'project_config': json.dumps(data['project_config'])})
        return None

    def add_service(self, service_conf: dict) -> Optional[str]:
        """Validate and add a new service config; returns an error string or None."""
        try:
            conf = {
                "name": service_conf.get("name", "").strip(),
                "command": service_conf.get("command", "").strip(),
                "level": int(service_conf.get("level", 11)),
                "log_type": service_conf.get("log_type", "append"),
            }
        except:
            return "Parameter error"

        # Validate emptiness first (previously checked after uniqueness).
        if not (conf['name'] and conf['command']):
            return "Service name and start command cannot be empty"

        if re.search(r"[\s$^`]+", conf['name']):
            return "Service name cannot contain spaces or special characters"

        for i in self.other_services:
            if i['name'] == conf['name']:
                return "Service name must be unique"
            if i['command'] == conf['command']:
                return "This start command already exists; service name: {}".format(i['name'])

        conf["sid"] = self.new_id()

        self.other_services.append(conf)
        self.save_service_conf()
        return None

    def modify_service(self, sid: str, service_conf: dict) -> Optional[str]:
        """Update an existing service config; returns an error string or None.

        Renaming the service also renames its on-disk log/pid files.
        """
        target_data = None
        for i in self.other_services:
            if i["sid"] == sid:
                target_data = i
                break
        if target_data is None:
            return "Service not found"

        name = target_data["name"]
        if "name" in service_conf and service_conf["name"] != target_data["name"]:
            name = service_conf["name"].strip()
            if re.search(r"[\s$^`]+", name):
                return "Service name cannot contain spaces or special characters"
        command = target_data["command"]
        if "command" in service_conf and service_conf["command"] != target_data["command"]:
            command = service_conf["command"].strip()

        for i in self.other_services:
            if i["sid"] == sid:
                continue
            if i["name"] == name:
                return "Service name must be unique"
            if i["command"] == command:
                return "This start command already exists; service name: {}".format(i["name"])

        if name != target_data["name"]:
            log_file = os.path.join(SERVICE_PATH, '{}/{}.log'.format(self.project_name, target_data["name"]))
            pid_file = os.path.join(SERVICE_PATH, '{}/{}.pid'.format(self.project_name, target_data["name"]))
            if os.path.exists(log_file):
                os.rename(log_file, os.path.join(SERVICE_PATH, '{}/{}.log'.format(self.project_name, name)))
            if os.path.exists(pid_file):
                os.rename(pid_file, os.path.join(SERVICE_PATH, '{}/{}.pid'.format(self.project_name, name)))

            target_data["name"] = name

        target_data["command"] = command
        target_data["level"] = int(service_conf.get("level", int(target_data.get("level", 11))))
        # .get with a default avoids a KeyError on legacy confs lacking log_type.
        target_data["log_type"] = service_conf.get("log_type", target_data.get("log_type", "append"))
        self.save_service_conf()
        return None

    def remove_service(self, sid: str) -> Optional[str]:
        """Remove a service config and its log/pid files; error string or None."""
        del_idx = None
        for idx, i in enumerate(self.other_services):
            if i["sid"] == sid:
                del_idx = idx
                break

        if del_idx is None:
            return "Service not found"
        del_conf = self.other_services.pop(del_idx)
        self.save_service_conf()
        log_file = os.path.join(SERVICE_PATH, '{}/{}.log'.format(self.project_name, del_conf["name"]))
        pid_file = os.path.join(SERVICE_PATH, '{}/{}.pid'.format(self.project_name, del_conf["name"]))
        if os.path.exists(log_file):
            os.remove(log_file)
        if os.path.exists(pid_file):
            os.remove(pid_file)
        return None

    def _get_service_conf_by_sid(self, sid: str) -> Optional[Dict]:
        """Return the config dict for the given sid, or None."""
        for i in self.service_list:
            if i["sid"] == sid:
                return i
        return None

    def _build_service_by_conf(self, conf: dict) -> Union[PythonService, str]:
        """Instantiate a service object for a config; error string on env failure."""
        if not self._env:
            self._env = Environment.form_project_conf(self.project_config)
            if isinstance(self._env, str):
                return self._env
        return PythonService.from_config(conf, env=self._env)

    def handle_service(self, sid: str, action: str = "start") -> Optional[str]:
        """Start/stop/restart one service; returns an error string or None."""
        conf = self._get_service_conf_by_sid(sid)
        if conf is None:
            return "Service not found"

        service = self._build_service_by_conf(conf)
        if isinstance(service, str):
            return service
        pid = service.get_service_pid()
        if not pid:
            pid = -1
        if action == "start":
            if not psutil.pid_exists(pid):
                service.start()
        elif action == "stop":
            service.stop()
        elif action == "restart":
            if psutil.pid_exists(pid):
                service.stop()
                # Wait up to 5s for the old process to exit before restarting.
                for i in range(50):
                    if not psutil.pid_exists(pid):
                        break
                    time.sleep(0.1)
                else:
                    service.stop()
                    time.sleep(1)

            service.start()
        else:
            return "Unknown action"

        return None

    def get_service_log(self, sid: str) -> Tuple[bool, str]:
        """Return (ok, log-or-error-message) for one service."""
        conf = self._get_service_conf_by_sid(sid)
        if conf is None:
            return False, "Service Not Found"
        service = self._build_service_by_conf(conf)
        if isinstance(service, str):
            return False, service
        return True, service.get_log()

    def get_services_info(self) -> List[dict]:
        """Status info (pid/ports or error) for every service.

        Bug fix: works on copies of the config dicts. Previously the
        pid/ports/error fields were merged into the shared config objects
        (including the class-level MAIN_SERVICE_CONF), and a later
        save_service_conf() could persist that runtime state to the DB.
        """
        res = []
        for conf in self.service_list:
            info = dict(conf)
            service = self._build_service_by_conf(conf)
            if isinstance(service, str):
                info["error"] = service
            else:
                info.update(service.get_info())
            res.append(info)

        return res

    def start_project(self):
        """Start every service (by level) that is not already running."""
        services = [
            self._build_service_by_conf(i) for i in self.service_list
        ]
        for i in services:
            try:
                if isinstance(i, str):
                    continue
                pid = i.get_service_pid()
                if isinstance(pid, int) and pid > 0 and psutil.pid_exists(pid):
                    continue
                i.start()
                time.sleep(0.5)
            except Exception:
                import traceback
                public.print_log("start project service error: {}".format(traceback.format_exc()))
                continue
        return True

    def stop_project(self):
        """Stop every service in reverse start order."""
        services = [self._build_service_by_conf(i) for i in self.service_list]
        for i in services[::-1]:
            if isinstance(i, str):
                continue
            i.stop()
        return "Stop command executed"

    def other_service_pids(self) -> Set[int]:
        """PIDs of all running auxiliary services plus their descendants."""
        res_pid = []
        for i in self.other_services:
            service = self._build_service_by_conf(i)
            if isinstance(service, str):
                continue
            pid = service.get_service_pid()
            if isinstance(pid, int) and pid > 0 and psutil.pid_exists(pid):
                res_pid.append(pid)

        sub_pid = []

        def get_sub_pid(pro: psutil.Process) -> List[int]:
            # Recursively collect descendant PIDs, skipping zombies.
            tmp_res = []
            if pro.status() != psutil.STATUS_ZOMBIE and pro.children():
                for sub_pro in pro.children():
                    tmp_res.append(sub_pro.pid)
                    tmp_res.extend(get_sub_pid(sub_pro))
            return tmp_res

        for i in res_pid:
            try:
                p = psutil.Process(i)
                sub_pid.extend(get_sub_pid(p))
            except:
                pass

        return set(res_pid + sub_pid)
# Coordinated-service API endpoints.
class main:
    """Thin request handlers that map panel requests onto ServiceManager."""

    def __init__(self):
        pass

    @staticmethod
    def get_services_info(get):
        """List status info for all services of a project."""
        try:
            project_name = get.project_name.strip()
        except:
            return json_response(False, 'Parameter error')

        mgr = ServiceManager.new_mgr(project_name)
        if isinstance(mgr, str):
            return json_response(False, mgr)
        return json_response(True, data=mgr.get_services_info())

    @staticmethod
    def add_service(get):
        """Add a service; accepts service_conf as a dict or a JSON string."""
        try:
            project_name = get.project_name.strip()
            service_conf = get.service_conf
            if isinstance(service_conf, str):
                service_conf = json.loads(service_conf)
            if not isinstance(service_conf, dict):
                return json_response(False, 'Invalid service configuration parameters')
        except:
            return json_response(False, 'Parameter error')

        mgr = ServiceManager.new_mgr(project_name)
        if isinstance(mgr, str):
            return json_response(False, mgr)
        err = mgr.add_service(service_conf)
        if isinstance(err, str):
            return json_response(False, err)
        public.set_module_logs('python_project', 'add_service', 1)
        return json_response(True, msg="Added successfully")

    @staticmethod
    def modify_service(get):
        """Modify a service identified by sid."""
        try:
            project_name = get.project_name.strip()
            sid = get.sid.strip()
            service_conf = get.service_conf
            if isinstance(service_conf, str):
                service_conf = json.loads(service_conf)
            if not isinstance(service_conf, dict):
                return json_response(False, 'Invalid service configuration parameters')
        except:
            return json_response(False, 'Parameter error')

        mgr = ServiceManager.new_mgr(project_name)
        if isinstance(mgr, str):
            return json_response(False, mgr)

        err = mgr.modify_service(sid, service_conf)
        if isinstance(err, str):
            return json_response(False, err)
        return json_response(True, msg="Updated successfully")

    @staticmethod
    def remove_service(get):
        """Delete a service identified by sid."""
        try:
            project_name = get.project_name.strip()
            sid = get.sid.strip()
        except:
            return json_response(False, 'Parameter error')

        mgr = ServiceManager.new_mgr(project_name)
        if isinstance(mgr, str):
            return json_response(False, mgr)
        err = mgr.remove_service(sid)
        if isinstance(err, str):
            return json_response(False, err)

        return json_response(True, msg="Deleted successfully")

    @staticmethod
    def handle_service(get):
        """Start/stop/restart a service (action taken from get.option)."""
        try:
            project_name = get.project_name.strip()
            sid = get.sid.strip()
            action = get.option.strip()
        except:
            return json_response(False, 'Parameter error')
        mgr = ServiceManager.new_mgr(project_name)
        if isinstance(mgr, str):
            return json_response(False, mgr)
        err = mgr.handle_service(sid, action)
        if isinstance(err, str):
            return json_response(False, err)

        return json_response(True, msg="Operation successful")

    @staticmethod
    def get_service_log(get):
        """Fetch the log of a service identified by sid."""
        try:
            project_name = get.project_name.strip()
            sid = get.sid.strip()
        except:
            return json_response(False, 'Parameter error')
        mgr = ServiceManager.new_mgr(project_name)
        if isinstance(mgr, str):
            return json_response(False, mgr)
        ok, log = mgr.get_service_log(sid)
        if not ok:
            return json_response(False, log)
        return json_response(True, data=log)
||||
0
mod/project/ssh/__init__.py
Normal file
0
mod/project/ssh/__init__.py
Normal file
159
mod/project/ssh/base.py
Normal file
159
mod/project/ssh/base.py
Normal file
@@ -0,0 +1,159 @@
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
os.chdir("/www/server/panel")
|
||||
import public
|
||||
class SSHbase:
    """Shared helpers for SSH log handling: IP geolocation annotation,
    journald-vs-file log detection, and login-log line parsing."""

    def __init__(self):
        pass

    @staticmethod
    def return_area(result, key):
        """
        @name Annotate a list of records with IP geolocation info
        @param result<list> list of record dicts
        @param key<str> name of the field holding the IP address
        @return list of copies of the records, each with an added 'area' dict
        """
        if not result:
            return result

        # IP lookup cache file (relative path; assumes CWD is the panel root
        # — TODO confirm against callers).
        ip_cache_file = 'data/ip_location_cache.json'
        ip_cache = {}

        # Make sure the cache directory exists.
        cache_dir = os.path.dirname(ip_cache_file)
        if not os.path.exists(cache_dir):
            os.makedirs(cache_dir, exist_ok=True)

        # Load the existing IP cache; fall back to empty on any error.
        try:
            if os.path.exists(ip_cache_file):
                ip_cache = json.loads(public.readFile(ip_cache_file))
        except Exception as e:
            public.print_log('Failed to read IP cache: {}'.format(str(e)))
            ip_cache = {}

        # Collect only the IPs that are not cached yet (IPv6 is skipped).
        new_ips = set()
        for data in result:
            ip = data.get(key)
            if not ip or public.is_ipv6(ip):
                continue
            if ip not in ip_cache:
                new_ips.add(ip)

        # Look up the uncached IPs.
        for ip in new_ips:
            try:
                # NOTE(review): substring check, so "127.0.0" anywhere in the
                # string matches — looser than a loopback prefix test.
                if "127.0.0" in ip:
                    ip_cache[ip] = {"info": "Local address (e.g. left terminal)"}
                    continue

                ip_area = public.get_ip_location(ip)
                if not ip_area:
                    ip_cache[ip] = {"info": "unknown area"}
                    continue

                # presumably ip_area.raw is a dict with a "country" sub-dict —
                # verify against public.get_ip_location.
                ip_area = ip_area.raw
                country = ip_area.get("country", {})
                ip_area["info"] = "{} {} {}".format(
                    country.get('country', 'unknown'),
                    country.get('province', 'unknown'),
                    country.get('city', 'unknown')
                ) if country else "unknown area"
                ip_cache[ip] = ip_area
            except Exception as e:
                public.print_log('Query IP {} Failed: {}'.format(ip, str(e)))
                ip_cache[ip] = {"info": "unknown area"}

        # Only rewrite the cache file when new IPs were actually queried.
        if new_ips:
            try:
                public.writeFile(ip_cache_file, json.dumps(ip_cache))
            except Exception as e:
                public.print_log('Failed to update IP cache: {}'.format(str(e)))
                pass

        # Annotate copies of the records so the caller's data is not mutated.
        result_with_area = []
        for data in result:
            data_copy = data.copy()  # shallow copy of the record
            ip = data_copy.get(key, '')
            data_copy['area'] = ip_cache.get(ip, {"info": "unknown area"})
            result_with_area.append(data_copy)

        return result_with_area

    @staticmethod
    def journalctl_system():
        """Return True when the OS logs SSH auth via journald only
        (Debian >= 11 or Ubuntu >= 20, per /etc/os-release); False otherwise."""
        try:
            if os.path.exists('/etc/os-release'):
                f = public.readFile('/etc/os-release')
                f = f.split('\n')
                ID = ''
                VERSION_ID = 0
                for line in f:
                    if line.startswith('VERSION_ID'):
                        VERSION_ID = int(line.split('=')[1].split('.')[0].strip('"'))
                    # NOTE(review): startswith('ID') also matches ID_LIKE etc.;
                    # the first match wins because of the guard below.
                    if line.startswith('ID'):
                        if ID != '': continue
                        ID = line.strip().split('=')[1].strip('"')
                        try:
                            ID = ID.split('.')[0]
                        except:
                            pass
                if (ID.lower() == 'debian' and VERSION_ID >= 11) or (ID.lower() == 'ubuntu' and VERSION_ID >= 20):
                    return True
            return False
        except:
            return False

    @staticmethod
    def parse_login_entry(parts, year):
        """Parse one whitespace-split SSH auth-log line into an entry dict.

        Handles both ISO-timestamp lines ('T' in the first token) and the
        traditional 'Mon DD HH:MM:SS' syslog format. Returns None when the
        line cannot be parsed. `year` supplements the traditional format,
        which carries no year of its own.
        """
        try:
            # Decide which log timestamp format this line uses.
            if 'T' in parts[0]:  # non-CentOS7 style (ISO timestamp)
                # Parse the ISO-format timestamp.
                dt = datetime.fromisoformat(parts[0].replace('Z', '+00:00'))
                user_index = parts.index('user') + 1 if 'user' in parts else parts.index('for') + 1
                ip_index = parts.index('from') + 1
                port_index = parts.index('port') + 1 if 'port' in parts else -1
            else:
                # Parse the traditional syslog-style timestamp.
                month = parts[0]
                day = parts[1]
                time_str = parts[2]
                # A month later than the current one means the supplied year is
                # wrong; fall back to 1970 as a sentinel.
                if datetime.strptime("{} {}".format(month, day), "%b %d").month > datetime.now().month:
                    year = "1970"
                dt_str = "{} {} {} {}".format(month, day, year, time_str)
                dt = datetime.strptime(dt_str, "%b %d %Y %H:%M:%S")
                # For "invalid user" lines the username sits 6 tokens from the
                # end — TODO confirm against a sample log line.
                user_index = parts.index('for') + 1 if "invalid" not in parts else -6
                ip_index = parts.index('from') + 1
                port_index = parts.index('port') + 1 if 'port' in parts else -1

            entry = {
                "timestamp": int(dt.timestamp()),
                "time": dt.strftime("%Y-%m-%d %H:%M:%S"),
                "type": "success" if ("Accepted" in parts) else "failed",
                "status": 1 if ("Accepted" in parts) else 0,
                "user": parts[user_index],
                "address": parts[ip_index],
                "port": parts[port_index] if port_index != -1 else "",
                "deny_status": 0,
                "login_type": "publickey" if "publickey" in parts else "password"  # login method
            }
            return entry
        except Exception as e:
            public.print_log(public.get_error_info())
            return None
|
||||
231
mod/project/ssh/comMod.py
Normal file
231
mod/project/ssh/comMod.py
Normal file
@@ -0,0 +1,231 @@
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import time
|
||||
from datetime import datetime
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.insert(0, "/www/server/panel/class")
|
||||
|
||||
os.chdir("/www/server/panel")
|
||||
import public
|
||||
# from mod.project.ssh.base import SSHbase
|
||||
from mod.project.ssh.journalctlMod import JournalctlManage
|
||||
from mod.project.ssh.secureMod import SecureManage
|
||||
|
class main(JournalctlManage, SecureManage):
    """SSH login-log API: listing, statistics, cleanup, and the brute-force
    blocking cron job management."""

    def __init__(self):
        super(main, self).__init__()

    def get_ssh_list(self, get):
        """
        @name Get the SSH login log list
        @param data:{"p":1,"limit":20,"search":"","select":"ALL"}
        @return list
        """
        page = int(get.p) if hasattr(get, 'p') else 1
        limit = int(get.limit) if hasattr(get, 'limit') else 20
        query = get.get("search", "").strip().lower()
        history = get.get("historyType", "").strip().lower()

        # Load the IP blocking rules so matching entries can be flagged.
        ip_rules_file = "data/ssh_deny_ip_rules.json"
        try:
            ip_rules = json.loads(public.readFile(ip_rules_file))
        except Exception:
            ip_rules = []

        login_type = self.login_all_flag
        get.select = get.get("select", "ALL")
        if get.select == "Failed":
            login_type = self.login_failed_flag
        elif get.select == "Accepted":
            login_type = self.login_access_flag

        if history == "all":
            # Include rotated log files as well.
            self.ssh_log_path += "*"
        total, login_list = self.get_secure_logs(login_type=login_type, pagesize=limit, page=page, query=query)

        for log in login_list:
            if log["address"] in ip_rules:
                log["deny_status"] = 1
        data = self.return_area(login_list, 'address')
        return public.return_message(0, 0, {"data": data, "total": total})

    def get_ssh_intrusion(self, get):
        """
        @name Login statistics: today/yesterday/7-day success and failure counts
        @return {"error": 0, "success": 0, "today_error": 0, "today_success": 0, ...}
        """
        stats = {
            'error': 0,
            'success': 0,
            'today_error': 0,
            'today_success': 0,
            'yesterday_error': 0,
            'yesterday_success': 0,
            'sevenday_error': 0,
            'sevenday_success': 0
        }
        try:
            from datetime import datetime, timedelta

            today = datetime.now()
            yesterday = today - timedelta(days=1)

            osv = public.get_os_version().lower()
            # Some distributions write ISO dates in their auth logs.
            date_v1 = ["debian", "opencloudos"]
            is_iso_date = any(d in osv for d in date_v1)
            if is_iso_date:
                # Debian/OpenCloudOS logs use ISO dates.
                today_str = today.strftime("%Y-%m-%d")
                yesterday_str = yesterday.strftime("%Y-%m-%d")
            else:
                # CentOS/Ubuntu etc. use "Mon D" with no zero padding.
                today_str = today.strftime("%b %d").replace(" 0", " ")
                yesterday_str = yesterday.strftime("%b %d").replace(" 0", " ")

            stats['today_error'] = self.get_secure_log_count(self.login_failed_flag, today_str)
            stats['today_success'] = self.get_secure_log_count(self.login_access_flag, today_str)
            stats['yesterday_success'] = self.get_secure_log_count(self.login_access_flag, yesterday_str)
            stats['yesterday_error'] = self.get_secure_log_count(self.login_failed_flag, yesterday_str)
            stats['sevenday_error'] = self.get_secure_log_count(self.login_failed_flag, "")
            stats['sevenday_success'] = self.get_secure_log_count(self.login_access_flag, "")

            # Totals include rotated log files.
            self.ssh_log_path += "*"
            stats['error'] = self.get_secure_log_count(self.login_failed_flag)
            stats['success'] = self.get_secure_log_count(self.login_access_flag)
        except Exception as e:
            import traceback
            public.print_log(f"Failed to get SSH login information: {traceback.format_exc()}")
        return public.return_message(0, 0, stats)

    def clean_ssh_list(self, get):
        """
        @name Clear rotated SSH login logs (keeps only the current log file)
        @return: success message
        """
        # Removed a no-op .format() call on the constant command string.
        public.ExecShell("rm -rf /var/log/secure-*;rm -rf /var/log/auth.log.*")

        return public.return_message(0, 0, 'Clearance successful.')

    def index_ssh_info(self, get):
        """
        Count SSH logins for today and yesterday.
        @return: list [today's login count, yesterday's login count]
        """
        from datetime import datetime, timedelta

        today_count = 0
        yesterday_count = 0

        try:
            today = datetime.now()
            yesterday = today - timedelta(days=1)

            if "debian" in public.get_os_version().lower():
                today_str = today.strftime("%Y-%m-%d")
                yesterday_str = yesterday.strftime("%Y-%m-%d")
            else:
                today_str = today.strftime("%b %d").replace(" 0", " ")
                yesterday_str = yesterday.strftime("%b %d").replace(" 0", " ")

            today_count = self.get_secure_log_count(self.login_all_flag, today_str)
            yesterday_count = self.get_secure_log_count(self.login_all_flag, yesterday_str)
        except Exception as e:
            import traceback
            public.print_log(f"Failed to count SSH login information: {traceback.format_exc()}")

        return [today_count, yesterday_count]

    def add_cron_job(self, get):
        """
        Register the SSH brute-force IP-blocking script as a panel cron job.
        """
        cron_hour = get.get("cron_hour", 1)
        fail_count = get.get("fail_count", 10)
        ban_hour = get.get("ban_hour", 10)
        # (Removed a leftover debug print_log of the raw parameters.)
        cron_exist = public.M('crontab').where("name='aa-SSH Blast IP Blocking [Security - SSH Admin - Add to Login Logs]'", ()).get()
        if len(cron_exist) > 0:
            return public.return_message(-1, 0, 'Timed tasks already exist! Task details can be viewed in the panel scheduled tasks')

        from time import localtime
        # Schedule the first run for the next minute.
        run_minute = localtime().tm_min + 1
        if run_minute == 60: run_minute = 0

        get.name = "aa-SSH Blast IP Blocking [Security - SSH Admin - Add to Login Logs]"
        get.type = "hour-n"
        get.hour = cron_hour
        get.minute = run_minute
        get.where1 = cron_hour
        get.where_hour = cron_hour
        get.week = "1"
        get.timeType = "sday"
        get.timeSet = "1"
        get.sType = "toShell"
        get.sBody = "{path}/pyenv/bin/python3 -u {path}/script/ssh_ban_login_failed.py {cron_hour} {fail_count} {ban_second}".format(
            path=public.get_panel_path(),
            cron_hour=cron_hour,
            fail_count=fail_count,
            ban_second=ban_hour * 3600
        )
        get.sName = ""
        get.backupTo = ""
        get.save = ""
        get.urladdress = ""
        get.save_local = "0"
        get.notice = "0"
        get.notice_channel = ""
        get.datab_name = ""
        get.tables_name = ""
        get.keyword = ""
        get.flock = "1"
        get.stop_site = "0"
        get.version = ""
        get.user = "root"
        from crontab import crontab

        res = crontab().AddCrontab(get)
        if res["status"] == True:
            # Bug fixes: the module-usage log was unreachable on success
            # (it sat after this return), and the success message had the
            # minute/interval format arguments swapped.
            public.set_module_logs('SSH', 'add_cron_job', 1)
            return public.return_message(0, 0, "Added successfully, the task will run at {} minutes per {} hour.".format(run_minute, cron_hour))
        return res

    def remove_cron_job(self, get):
        """
        Remove the SSH brute-force blocking script from the panel cron jobs.
        """
        cron_exist = public.M('crontab').where("name='aa-SSH Blast IP Blocking [Security - SSH Admin - Add to Login Logs]'", ()).get()
        if len(cron_exist) > 0:
            for crontask in cron_exist:
                get.id = crontask["id"]
                from crontab import crontab
                crontab().DelCrontab(get)
            return public.return_message(0, 0, 'Timed tasks have been removed!')
        else:
            return public.return_message(-1, 0, 'Removal failed, timed task does not exist!')

    def run_ban_login_failed_ip(self, get):
        """Run the brute-force blocking script once, as a background panel task."""
        hour = get.get("hour", 1)
        fail_count = get.get("fail_count", 10)
        ban_hour = get.get("ban_hour", 10)

        exec_shell = "{path}/pyenv/bin/python3 -u {path}/script/ssh_ban_login_failed.py {hour} {fail_count} {ban_second}".format(
            path=public.get_panel_path(),
            hour=hour,
            fail_count=fail_count,
            ban_second=ban_hour * 3600
        )
        import panelTask
        task_obj = panelTask.bt_task()
        task_id = task_obj.create_task('SSH blocking and IP bursting programme', 0, exec_shell)
        public.set_module_logs('SSH', 'run_ban_login_failed_ip', 1)
        return {'status': True, 'msg': 'Task created.', 'task_id': task_id}
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user