Initial YakPanel commit
This commit is contained in:
0
mod/project/docker/app/__init__.py
Normal file
0
mod/project/docker/app/__init__.py
Normal file
1784
mod/project/docker/app/appManageMod.py
Normal file
1784
mod/project/docker/app/appManageMod.py
Normal file
File diff suppressed because it is too large
Load Diff
1159
mod/project/docker/app/base.py
Normal file
1159
mod/project/docker/app/base.py
Normal file
File diff suppressed because it is too large
Load Diff
23
mod/project/docker/app/gpu/__init__.py
Normal file
23
mod/project/docker/app/gpu/__init__.py
Normal file
@@ -0,0 +1,23 @@
|
||||
from typing import List
|
||||
|
||||
from .base import GPUBase
|
||||
from .nvidia import NVIDIA
|
||||
from .amd import AMD
|
||||
|
||||
class Driver:
    """Aggregates every GPU vendor driver (NVIDIA / AMD) supported on this host."""

    # Kept for backward compatibility with class-level access; shadowed by a
    # per-instance list in __init__.  BUGFIX: the original appended to this
    # shared class-level list, so constructing Driver() repeatedly accumulated
    # duplicate driver objects.
    drivers: List[GPUBase] = []

    def __init__(self):
        # Probe each vendor and keep only the drivers the host supports.
        self.drivers = []
        if NVIDIA.is_support():
            self.drivers.append(NVIDIA())

        if AMD.is_support():
            self.drivers.append(AMD())

    @property
    def support(self):
        """True when at least one GPU driver is usable on this host."""
        return len(self.drivers) > 0

    def get_all_device_info(self, get):
        """Collect device info from every driver (not implemented yet)."""
        for _driver in self.drivers:
            pass
|
||||
36
mod/project/docker/app/gpu/amd.py
Normal file
36
mod/project/docker/app/gpu/amd.py
Normal file
@@ -0,0 +1,36 @@
|
||||
from mod.project.docker.app.gpu.base import GPUBase
|
||||
|
||||
class AMD(GPUBase):
    """AMD GPU driver — placeholder implementation; no metrics are wired up yet.

    Every hook required by GPUBase is present but returns None, so callers
    that probe is_support() treat AMD as unsupported for now.
    """

    @classmethod
    def is_support(cls):
        # TODO: probe for an AMD stack (ROCm/amdgpu); currently always falsy.
        pass

    def main(self):
        pass

    def get_info(self, gpu_id=0):
        pass

    def _get_device_name(self, *args, **kwargs):
        pass

    def _get_device_version(self, *args, **kwargs):
        pass

    def _get_mem_info(self):
        pass

    def _get_clock_info(self):
        pass

    def _get_temp_info(self):
        pass

    def _get_uti_info(self):
        pass

    def _get_fan_info(self, *args, **kwargs):
        pass

    def _get_proc_uti(self, proc_name='', proc_pid=0):
        pass
|
||||
70
mod/project/docker/app/gpu/base.py
Normal file
70
mod/project/docker/app/gpu/base.py
Normal file
@@ -0,0 +1,70 @@
|
||||
from abc import ABC, abstractmethod
|
||||
|
||||
|
||||
class GPUBase(ABC):
    """Abstract interface every GPU vendor driver must implement."""

    # Human-readable vendor identifier; subclasses override this.
    name = 'base'
    # Tri-state support flag: None until probed, then True/False.
    support = None

    @classmethod
    @abstractmethod
    def is_support(cls):
        """Return whether this vendor's stack is usable on the host."""
        pass

    @abstractmethod
    def _get_device_name(self, *args, **kwargs):
        """Return the device's name."""
        pass

    @abstractmethod
    def _get_device_version(self, *args, **kwargs):
        """Return driver / runtime version information."""
        pass

    @abstractmethod
    def _get_mem_info(self, *args, **kwargs):
        """Return VRAM usage information.

        Returns:

        """
        pass

    @abstractmethod
    def _get_clock_info(self, *args, **kwargs):
        """Return clock information.

        Returns:

        """
        pass

    @abstractmethod
    def _get_temp_info(self, *args, **kwargs):
        """Return temperature information.

        Returns:

        """
        pass

    @abstractmethod
    def _get_uti_info(self, *args, **kwargs):
        """Return utilization information.

        Returns:

        """
        pass

    @abstractmethod
    def _get_proc_uti(self, *args, **kwargs):
        """Return per-process utilization information.

        Returns:

        """
        pass

    @abstractmethod
    def _get_fan_info(self, *args, **kwargs):
        """Return fan information."""
        pass
|
||||
27
mod/project/docker/app/gpu/constants.py
Normal file
27
mod/project/docker/app/gpu/constants.py
Normal file
@@ -0,0 +1,27 @@
|
||||
from dataclasses import dataclass
|
||||
|
||||
|
||||
@dataclass
class CMD:
    """Shell command constants used by the GPU tooling."""

    @dataclass
    class CTK:
        """NVIDIA Container Toolkit install / configuration commands."""

        @dataclass
        class APT:
            # Debian/Ubuntu install flow for nvidia-container-toolkit.
            GetGPGKey = "curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | sudo gpg --dearmor -o /usr/share/keyrings/nvidia-container-toolkit-keyring.gpg"
            AddSourcesList = "curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list | sed 's#deb https://#deb [signed-by=/usr/share/keyrings/nvidia-container-toolkit-keyring.gpg] https://#g' | sudo tee /etc/apt/sources.list.d/nvidia-container-toolkit.list"
            APTUpdate = "sudo apt-get update"
            Install = "sudo apt-get install -y nvidia-container-toolkit"
            # All four steps chained into one shell line.
            OneInstall = ';'.join((GetGPGKey, AddSourcesList, APTUpdate, Install))

        @dataclass
        class YUM:
            # RHEL/CentOS install flow.
            AddRepo = "curl -s -L https://nvidia.github.io/libnvidia-container/stable/rpm/nvidia-container-toolkit.repo | sudo tee /etc/yum.repos.d/nvidia-container-toolkit.repo"
            Install = "sudo yum install -y nvidia-container-toolkit"
            OneInstall = ';'.join((AddRepo, Install))

        @dataclass
        class ConfigureDocker:
            # Register the nvidia runtime with docker and restart the daemon.
            Runtime = "sudo nvidia-ctk runtime configure --runtime=docker"
            Restart = "sudo systemctl restart docker"

        CheckVersion = "nvidia-ctk -v"
|
||||
199
mod/project/docker/app/gpu/nvidia.py
Normal file
199
mod/project/docker/app/gpu/nvidia.py
Normal file
@@ -0,0 +1,199 @@
|
||||
import sys
from collections import defaultdict
from functools import wraps

# Make the panel's class directory importable (panel runtime convention).
if "/www/server/panel/class" not in sys.path:
    sys.path.append('/www/server/panel/class')

import public

try:
    import pynvml
except Exception:
    # pynvml is not bundled: install the NVML bindings on first use, then
    # retry the import.  (Was a bare `except:`; Exception avoids swallowing
    # SystemExit/KeyboardInterrupt.)
    public.ExecShell("btpip install nvidia-ml-py")
    import pynvml

try:
    from mod.project.docker.app.gpu.base import GPUBase
except ImportError:
    # Fallback stub so this module can run stand-alone (e.g. `python nvidia.py`).
    class GPUBase:
        pass
|
||||
|
||||
# Registries of metric-collection callables, keyed by task name.
# 'device:*' tasks are called as f(self, handle); 'system:*' as f(self).
# (A `defaultdict()` with no factory behaves exactly like a dict, so plain
# dicts are used for clarity.)
device_tasks = {}
system_tasks = {}


def register_task(name: str):
    """Decorator factory: register a collection task under *name*.

    *name* is '<type>:<key>' where type is 'device' or 'system'; the
    decorated function is stored in the matching registry under <key>.
    Unknown types are silently ignored (original behaviour).  The function
    itself is returned wrapped but otherwise unchanged.
    """
    def task_decorator(task_func):
        _task_type, _task_name = name.split(':')
        if _task_type == 'device':
            device_tasks[_task_name] = task_func
        elif _task_type == 'system':
            system_tasks[_task_name] = task_func

        @wraps(task_func)
        def func_wrapper(*args, **kwargs):
            return task_func(*args, **kwargs)

        return func_wrapper

    return task_decorator
|
||||
|
||||
|
||||
class NVIDIA(GPUBase):
    """NVML-backed GPU driver: collects per-device and system-wide NVIDIA info."""

    name = 'nvidia'
    # Cached support flag; set to True/False by is_support() once probed.
    support = None

    def __init__(self):
        # Probing support also initialises the NVML library as a side effect.
        self.device_count = 0
        if self.is_support():
            self.device_count = pynvml.nvmlDeviceGetCount()

    def __del__(self):
        # Balance the nvmlInit() performed by is_support().
        if self.is_support():
            pynvml.nvmlShutdown()

    def get_all_device_info(self):
        """Return {'system': {...}, 0: {...}, 1: {...}} for every detected GPU."""
        all_info = defaultdict()
        all_info['system'] = self.get_system_info()
        for index in range(self.device_count):
            all_info[index] = self.get_info_by_index(index)
        return all_info

    def get_info_by_index(self, index=0):
        """Run every registered per-device task against GPU *index*.

        A failing task yields None for its key instead of aborting the scan.
        """
        info = defaultdict()
        handle = pynvml.nvmlDeviceGetHandleByIndex(index)

        for t_name, t_func in device_tasks.items():
            try:
                info[t_name] = t_func(self, handle)
            except Exception:
                # Best-effort: one unsupported metric must not hide the rest.
                info[t_name] = None

        return info

    def get_system_info(self):
        """Run every registered system-wide task (driver version, count, ...)."""
        info = defaultdict()
        for t_name, t_func in system_tasks.items():
            try:
                info[t_name] = t_func(self)
            except Exception:
                info[t_name] = None
        return info

    @classmethod
    def is_support(cls):
        """Probe NVML availability; caches the result on the class."""
        try:
            pynvml.nvmlInit()
            cls.support = True
            return True
        except pynvml.NVMLError:
            cls.support = False
            return False

    @register_task('device:memory')
    def _get_mem_info(self, handle):
        """Memory totals in GiB (total/free/used)."""
        # Query NVML once and reuse the struct (was three separate calls).
        mem = pynvml.nvmlDeviceGetMemoryInfo(handle)
        info = defaultdict()
        info['size'] = int(mem.total) / 1024 ** 3
        info['free'] = int(mem.free) / 1024 ** 3
        info['used'] = int(mem.used) / 1024 ** 3
        return info

    @register_task('device:clock')
    def _get_clock_info(self, handle):
        """Current clock readings per NVML clock domain."""
        info = defaultdict()
        info['graphics'] = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_GRAPHICS)
        info['sm'] = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_SM)
        info['memory'] = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_MEM)
        info['video'] = pynvml.nvmlDeviceGetClockInfo(handle, pynvml.NVML_CLOCK_VIDEO)
        return info

    @register_task('device:temperature')
    def _get_temp_info(self, handle):
        """GPU core temperature; falls back to the V1 API on older bindings."""
        info = 0
        try:
            info = pynvml.nvmlDeviceGetTemperature(handle, pynvml.NVML_TEMPERATURE_GPU)
        # BUGFIX: `except A or B` evaluates to `except A` only, so
        # AttributeError was never caught — use the tuple form.
        except (pynvml.NVMLError, AttributeError):
            info = pynvml.nvmlDeviceGetTemperatureV1(handle, pynvml.NVML_TEMPERATURE_GPU)
        return info

    @register_task('device:utilization')
    def _get_uti_info(self, handle):
        """GPU and memory utilization rates."""
        util = pynvml.nvmlDeviceGetUtilizationRates(handle)
        info = defaultdict()
        info['gpu'] = util.gpu
        info['memory'] = util.memory
        return info

    @register_task('device:processes')
    def _get_proc_uti(self, handle):
        """All compute/graphics/MPS processes on the device, with names."""
        info = list()
        # Same enrichment for each of the three NVML process queries.
        process_queries = (
            ('Compute', pynvml.nvmlDeviceGetComputeRunningProcesses),
            ('Graphics', pynvml.nvmlDeviceGetGraphicsRunningProcesses),
            ('MPS', pynvml.nvmlDeviceGetMPSComputeRunningProcesses),
        )
        for proc_type, query in process_queries:
            for p in query(handle):
                p.__dict__['name'] = pynvml.nvmlSystemGetProcessName(p.pid)
                p.__dict__['type'] = proc_type
                info.append(p.__dict__)
        return info

    @register_task('device:fan')
    def _get_fan_info(self, handle):
        """Fan speed via whichever NVML fan API this binding/driver supports."""
        info = defaultdict()
        try:
            info['speed'] = pynvml.nvmlDeviceGetFanSpeedRPM(handle).speed
        except AttributeError:
            info['speed'] = pynvml.nvmlDeviceGetFanSpeed(handle)
        except pynvml.NVMLError:
            info['speed'] = pynvml.nvmlDeviceGetFanSpeed_v2(handle, 0)
        except Exception:
            info['speed'] = 0
        return info

    @register_task('device:name')
    def _get_device_name(self, handle):
        return pynvml.nvmlDeviceGetName(handle)

    @register_task('device:power')
    def _get_device_power(self, handle):
        """Current power draw and management limit (NVML-native units)."""
        info = defaultdict()
        info['current'] = pynvml.nvmlDeviceGetPowerUsage(handle)
        info['max'] = pynvml.nvmlDeviceGetPowerManagementLimit(handle)
        return info

    @register_task('system:version')
    def _get_device_version(self):
        """Driver and CUDA driver versions."""
        info = defaultdict()
        info['driver'] = pynvml.nvmlSystemGetDriverVersion()

        try:
            info['cuda'] = pynvml.nvmlSystemGetCudaDriverVersion()
        # BUGFIX: tuple form — `except A or B` never caught AttributeError.
        except (pynvml.NVMLError, AttributeError):
            info['cuda'] = pynvml.nvmlSystemGetCudaDriverVersion_v2()

        return info

    @register_task('system:count')
    def _get_device_count(self):
        """Number of NVML-visible devices."""
        return pynvml.nvmlDeviceGetCount()
|
||||
|
||||
|
||||
if __name__ == '__main__':
    # Ad-hoc smoke test: dump every detected NVIDIA device's info.
    driver = NVIDIA()
    print(driver.get_all_device_info())
|
||||
158
mod/project/docker/app/gpu/tools.py
Normal file
158
mod/project/docker/app/gpu/tools.py
Normal file
@@ -0,0 +1,158 @@
|
||||
import os
|
||||
import sys
|
||||
from typing import Tuple
|
||||
|
||||
from mod.project.docker.app.gpu.constants import CMD
|
||||
from mod.project.docker.app.gpu.nvidia import NVIDIA
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.append('/www/server/panel/class')
|
||||
|
||||
import public
|
||||
|
||||
|
||||
class GPUTool:
    """Helpers for wiring GPU support into docker apps: detect the distro,
    build NVIDIA Container Toolkit install commands, and toggle the per-app
    GPU option based on detected hardware."""

    # Cached results of __gpu_default_setting (None until first probe).
    gpu_option = None
    option_default = None

    @staticmethod
    def __get_linux_distribution():
        """Classify the host as 'debian' or 'centos' family.

        Returns None when the distribution cannot be classified; raises
        ValueError when detection itself fails unexpectedly.
        """
        try:
            # Prefer the standard /etc/os-release metadata.
            with open("/etc/os-release", "r", encoding="utf-8") as f:
                os_release = {}
                for line in f:
                    line = line.strip()
                    if line and "=" in line:
                        key, value = line.split("=", 1)
                        os_release[key] = value.strip('"')

            dist_id = os_release.get("ID", "").lower()
            id_like = os_release.get("ID_LIKE", "").lower()

            # Classify by ID first, then by the ID_LIKE ancestry.
            if dist_id in ["debian", "ubuntu"]:
                return "debian"
            elif dist_id in ["centos", "rhel", "fedora"]:
                return "centos"
            elif "debian" in id_like:
                return "debian"
            elif "rhel" in id_like or "fedora" in id_like:
                return "centos"

        except FileNotFoundError:
            # No /etc/os-release: fall back to legacy marker files.
            if os.path.exists("/etc/debian_version"):
                return "debian"
            elif os.path.exists("/etc/redhat-release"):
                return "centos"

        except Exception:
            raise ValueError("System Distribution Is Unknown")

    @classmethod
    def __gpu_default_setting(cls) -> Tuple[bool, bool]:
        """Decide whether the GPU option is shown and whether it defaults on.

        Returns:
            gpu_option: whether the GPU option should be offered at all
            option_default: whether the option should default to enabled
        """
        # Results are cached on the class after the first probe.
        if cls.gpu_option is not None and cls.option_default is not None:
            return cls.gpu_option, cls.option_default

        driver = NVIDIA()
        # Unsupported hardware: hide the option entirely.
        if driver.support is None or driver.support is False:
            cls.gpu_option = False
            cls.option_default = False
            return cls.gpu_option, cls.option_default

        # Supported: enable by default only when total VRAM exceeds 3 GiB.
        device_info = driver.get_all_device_info()
        mem_size = 0
        for _, _device in device_info.items():
            # ROBUSTNESS: a failed NVML query stores None under 'memory';
            # `or {}` keeps the sum from crashing on such entries.
            mem_size = mem_size + (_device.get('memory') or {}).get('size', 0)

        cls.gpu_option = True
        cls.option_default = mem_size > 3
        return cls.gpu_option, cls.option_default

    @classmethod
    def register_app_gpu_option(cls, app):
        """Strip or pre-populate the 'gpu' field of an app template in place."""
        option, default = cls.__gpu_default_setting()
        fields = app.get('field', [])
        if not option:
            # BUGFIX: the original removed items from the list while
            # iterating it, which skips elements; build the kept list instead.
            if 'field' in app:
                app['field'] = [f for f in fields if f.get('attr', '') != 'gpu']
        else:
            for field in fields:
                if field.get('attr', '') == 'gpu':
                    field['default'] = default
                    field['suffix'] = field['suffix'] + ' | 已默认设置为{}'.format(default)
        return app

    @staticmethod
    def is_install_ctk():
        """Return True when the NVIDIA Container Toolkit CLI is installed."""
        stdout, stderr = public.ExecShell(CMD.CTK.CheckVersion)
        if len(stderr) != 0:
            return False
        # BUGFIX: the old `if not stdout.lower().find('version')` used the
        # index returned by find() as a boolean, which inverted the logic
        # (matched only when 'version' was NOT at index 0).
        if 'version' not in stdout.lower():
            public.print_log("Not Nvidia Container Toolkit")
            return False
        return True

    @classmethod
    def __ctk_install_cmd_apt(cls, app_log):
        """Build the apt-based toolkit install command, logging to *app_log*."""
        return ("{get_gpg_key} >> {app_log};"
                "{add_sources_list} >> {app_log};"
                "{apt_update} >> {app_log};"
                "{install} >> {app_log}"
                .format(get_gpg_key=CMD.CTK.APT.GetGPGKey,
                        add_sources_list=CMD.CTK.APT.AddSourcesList,
                        apt_update=CMD.CTK.APT.APTUpdate,
                        install=CMD.CTK.APT.Install,
                        app_log=app_log
                        ))

    @classmethod
    def __ctk_install_cmd_yum(cls, app_log):
        """Build the yum-based toolkit install command, logging to *app_log*."""
        return ("{add_repo} >> {app_log};"
                "{install} >> {app_log}"
                .format(add_repo=CMD.CTK.YUM.AddRepo,
                        install=CMD.CTK.YUM.Install,
                        app_log=app_log
                        ))

    @classmethod
    def __config_docker(cls, app_log):
        """Build the docker runtime configure + restart command."""
        return ("{runtime} >> {app_log};"
                "{restart} >> {app_log}"
                .format(runtime=CMD.CTK.ConfigureDocker.Runtime,
                        restart=CMD.CTK.ConfigureDocker.Restart,
                        app_log=app_log))

    @classmethod
    def ctk_install_cmd(cls, app_log):
        """Full install + docker-config command line for this distro.

        Returns '' when the distribution family is not recognised.
        """
        dtb = cls.__get_linux_distribution()
        cmd = ''
        if dtb == 'debian':
            cmd = (
                "{install_cmd};"
                "{config_docker}"
                .format(
                    install_cmd=cls.__ctk_install_cmd_apt(app_log),
                    config_docker=cls.__config_docker(app_log),
                ))
        elif dtb == 'centos':
            cmd = (
                "{install_cmd};"
                "{config_docker}"
                .format(
                    install_cmd=cls.__ctk_install_cmd_yum(app_log),
                    config_docker=cls.__config_docker(app_log),
                ))
        return cmd
|
||||
0
mod/project/docker/app/gpu/type.py
Normal file
0
mod/project/docker/app/gpu/type.py
Normal file
0
mod/project/docker/app/sub_app/__init__.py
Normal file
0
mod/project/docker/app/sub_app/__init__.py
Normal file
16
mod/project/docker/app/sub_app/base.py
Normal file
16
mod/project/docker/app/sub_app/base.py
Normal file
@@ -0,0 +1,16 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# +-------------------------------------------------------------------
|
||||
# | YakPanel
|
||||
# +-------------------------------------------------------------------
|
||||
# | Copyleft (c) 2015-2099 YakPanel(www.yakpanel.com) All lefts reserved.
|
||||
# +-------------------------------------------------------------------
|
||||
# | Author: wzz
|
||||
# | email : wzz@yakpanel.com
|
||||
# +-------------------------------------------------------------------
|
||||
# +-------------------------------------------------------------------
|
||||
# | docker sub_app 管理模型 -
|
||||
# +-------------------------------------------------------------------
|
||||
|
||||
class base():
    """Placeholder base type for docker sub-app managers."""

    def __init__(self):
        # No shared state yet; subclasses provide their own initialisation.
        pass
|
||||
198
mod/project/docker/app/sub_app/downModel.py
Normal file
198
mod/project/docker/app/sub_app/downModel.py
Normal file
@@ -0,0 +1,198 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# +-------------------------------------------------------------------
|
||||
# | YakPanel
|
||||
# +-------------------------------------------------------------------
|
||||
# | Copyleft (c) 2015-2099 YakPanel(www.yakpanel.com) All lefts reserved.
|
||||
# +-------------------------------------------------------------------
|
||||
# | Author: wzz
|
||||
# | email : wzz@yakpanel.com
|
||||
# +-------------------------------------------------------------------
|
||||
# +-------------------------------------------------------------------
|
||||
# | docker sub_app 管理模型 -
|
||||
# +-------------------------------------------------------------------
|
||||
import json
|
||||
import sys
|
||||
import time
|
||||
from collections import deque
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.append('/www/server/panel/class')
|
||||
|
||||
import public
|
||||
|
||||
def download_model(service_name, model_name, model_version, ollama_url, app_cmd_log):
    """
    Download an Ollama model via the /api/pull streaming endpoint.

    Writes human-readable progress to *app_cmd_log*, maintains /tmp marker
    files (<model>:<version>.pl while downloading, .failed on failure, plus
    the global nocandown.pl lock) and retries up to 30 times when the
    stream stalls or the speed degrades.

    @param service_name: service name (docker-compose project)
    @param model_name: model name
    @param model_version: model version (tag)
    @param ollama_url: Ollama API base URL
    @param app_cmd_log: path of the log file progress is appended to
    """
    def start_download():
        # One download attempt.  Returns True on verified success, False to
        # tell the outer retry loop to restart the pull.
        url = ollama_url + "/api/pull"

        # Request body for the streaming pull API.
        data = {
            "model": "{}:{}".format(model_name, model_version),
            "stream": True
        }

        try:
            import requests
            response = requests.post(url, json=data, stream=True)

            with open(app_cmd_log, 'a') as log_file:
                log_file.write('{} model is being downloaded, and may need to wait more than 1-30 minutes...\n'.format(model_name))

                download_tag = None  # layer currently being pulled
                last_completed = 0
                last_time = time.time()
                # Rolling window (deque) of recent per-second speed samples.
                speed_history = deque(maxlen=60)

                count_sum = 0
                for line in response.iter_lines():
                    if line:
                        json_response = json.loads(line)
                        status = json_response.get("status", "")

                        # Progress lines look like "pulling <digest>".
                        if "pulling" in status:
                            status = status.split(" ")
                            # A new layer started: reset the speed bookkeeping.
                            if download_tag is None or status[1] != download_tag:
                                download_tag = status[1]
                                last_completed = 0
                                last_time = time.time()
                                speed_history.clear()

                            completed = json_response.get("completed", 0)
                            total = json_response.get("total", 0)

                            if total > 0:
                                # Compute the instantaneous download speed.
                                current_time = time.time()
                                time_diff = current_time - last_time
                                if time_diff >= 1:  # update once per second
                                    bytes_diff = completed - last_completed
                                    speed = bytes_diff / time_diff  # bytes per second

                                    # Skip the first few warm-up samples.
                                    count_sum += 1
                                    if count_sum > 5:
                                        speed_history.append(speed)

                                    # Detect abnormal slow-downs once enough samples exist.
                                    avg_speed = None
                                    if len(speed_history) >= 10:
                                        avg_speed = sum(list(speed_history)[:-1]) / (len(speed_history) - 1)
                                        current_speed = speed_history[-1]

                                        if current_speed < 1024000 and avg_speed < 1536000:  # current speed below 1.2MB/s and average below 1.5MB/s
                                            log_file.write('Detected that the download speed is too low and is trying to reset the download...\n')
                                            log_file.flush()
                                            return False  # False => restart the download

                                        if current_speed < (avg_speed / 4) and avg_speed > 1024:  # ensure a meaningful average speed
                                            log_file.write('Abnormal download speed or CF slowdown detected, trying to reset the download...\n')
                                            log_file.flush()
                                            return False  # False => restart the download

                                    # Format the current speed with a human-readable unit.
                                    speed_str = ""
                                    if speed < 1024:
                                        speed_str = "{:.2f} B/s".format(speed)
                                    elif speed < 1024 * 1024:
                                        speed_str = "{:.2f} KB/s".format(speed / 1024)
                                    else:
                                        speed_str = "{:.2f} MB/s".format(speed / (1024 * 1024))

                                    avg_speed_str = ""
                                    if not avg_speed is None:
                                        if avg_speed < 1024:
                                            avg_speed_str = "{:.2f} B/s".format(avg_speed)
                                        elif avg_speed < 1024 * 1024:
                                            avg_speed_str = "{:.2f} KB/s".format(avg_speed / 1024)
                                        else:
                                            avg_speed_str = "{:.2f} MB/s".format(avg_speed / (1024 * 1024))

                                    progress = (completed / total) * 100
                                    log_file.write('File: {}, Download Progress: {:.2f}%, Average Speed: {}, Current Speed: {}\n'.format(
                                        download_tag,
                                        progress,
                                        avg_speed_str,
                                        speed_str
                                    ))
                                    log_file.flush()

                                    # Carry state into the next sample window.
                                    last_completed = completed
                                    last_time = current_time
                        else:
                            log_file.write(status + '\n')
                            log_file.flush()

                # Stream finished: verify the model actually exists in the container.
                verify_cmd = "docker-compose -p {service_name} exec -it {service_name_} ollama list | grep {model_name}:{model_version}".format(
                    service_name=service_name.lower(),
                    service_name_=service_name,
                    model_name=model_name,
                    model_version=model_version
                )
                result = public.ExecShell(verify_cmd)[0]

                if model_name in result:
                    log_file.write('bt_successful\n')
                    return True
                else:
                    public.writeFile("/tmp/{model_name}:{model_version}.failed".format(
                        model_name=model_name,
                        model_version=model_version,
                    ), "failed")
                    log_file.write('bt_failed\n')
                    return False

        except Exception as e:
            # On any error: log it and leave a .failed marker behind.
            with open(app_cmd_log, 'a') as log_file:
                log_file.write('Download failed: {}\n'.format(str(e)))
                log_file.write('bt_failed\n')
            public.writeFile("/tmp/{model_name}:{model_version}.failed".format(
                model_name=model_name,
                model_version=model_version,
            ), "failed")
            return False

    # Set the "downloading" markers and clear any stale failure flag.
    public.ExecShell("echo 'downloading' > /tmp/{model_name}:{model_version}.pl".format(
        model_name=model_name,
        model_version=model_version
    ))
    public.ExecShell("echo 'downloading' > /tmp/nocandown.pl")
    public.ExecShell("rm -f /tmp/{model_name}:{model_version}.failed".format(
        model_name=model_name,
        model_version=model_version,
    ))

    try:
        max_retries = 30
        retry_count = 0

        while retry_count < max_retries:
            if retry_count > 0:
                with open(app_cmd_log, 'a') as log_file:
                    log_file.write('\n{} retry in progress...\n'.format(retry_count + 1))

            if start_download():
                break

            retry_count += 1
            time.sleep(3)  # wait 3 seconds before retrying

    finally:
        # Always clear the marker files, even on unexpected errors.
        public.ExecShell("rm -f /tmp/{model_name}:{model_version}.pl".format(
            model_name=model_name,
            model_version=model_version,
        ))
        public.ExecShell("rm -f /tmp/nocandown.pl")
|
||||
324
mod/project/docker/app/sub_app/ollamaMod.py
Normal file
324
mod/project/docker/app/sub_app/ollamaMod.py
Normal file
@@ -0,0 +1,324 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# +-------------------------------------------------------------------
|
||||
# | YakPanel
|
||||
# +-------------------------------------------------------------------
|
||||
# | Copyleft (c) 2015-2099 YakPanel(www.yakpanel.com) All lefts reserved.
|
||||
# +-------------------------------------------------------------------
|
||||
# | Author: wzz
|
||||
# | email : wzz@yakpanel.com
|
||||
# +-------------------------------------------------------------------
|
||||
# +-------------------------------------------------------------------
|
||||
# | docker sub_app 管理模型 -
|
||||
# +-------------------------------------------------------------------
|
||||
import json
|
||||
import os.path
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
|
||||
if "/www/server/panel/class" not in sys.path:
|
||||
sys.path.append('/www/server/panel/class')
|
||||
|
||||
import public
|
||||
from mod.project.docker.app.base import App
|
||||
|
||||
class OllamaBase(App):
    """Shared state for Ollama sub-app managers: tracks the API port/URL."""

    def __init__(self):
        super(OllamaBase, self).__init__()
        # Default Ollama API port and the matching localhost base URL.
        self.ollama_port = "11434"
        self.ollama_local_url = "http://127.0.0.1:{}".format(self.ollama_port)

    def set_ollama_port(self, port):
        """Set the port and keep the local URL in sync.  Returns self."""
        self.ollama_port = port
        # BUGFIX: the original called .format(port) on the already-rendered
        # URL (which contains no '{}' placeholder), so the URL never changed;
        # rebuild it from the template instead.
        self.ollama_local_url = "http://127.0.0.1:{}".format(port)
        return self

    def set_ollama_local_url(self, port):
        """Point the local URL at *port* without changing ollama_port.  Returns self."""
        self.ollama_local_url = "http://127.0.0.1:{}".format(port)
        return self
|
||||
|
||||
|
||||
class OllamaMod(OllamaBase):
|
||||
|
||||
def __init__(self):
|
||||
super(OllamaMod, self).__init__()
|
||||
|
||||
    # 2025/2/8 11:47 list all locally installed models
    # https://github.com/ollama/ollama/blob/main/docs/api.md#list-local-models
    def list_local_models(self):
        """Return the models installed in this service's Ollama container.

        Discovers the published Ollama port via `docker-compose ps`, queries
        the /api/tags endpoint and annotates each model with `version` (tag)
        and `l_name` (name without the tag).  Returns [] on any failure.
        """
        uri = "/api/tags"

        ps_json, stderr = public.ExecShell("docker-compose -p {service_name} ps --format json | {grep_v}".format(
            service_name=self.service_name.lower(),
            grep_v=self.grep_version,
        ))
        if "Segmentation fault" in ps_json:
            return []

        # Older docker-compose emits one JSON object per line; wrap those
        # lines into a JSON array before parsing.
        if not ps_json.startswith("["):
            ps = json.loads("[" + ps_json.strip().replace("\n", ",") + "]")
        else:
            ps = json.loads(ps_json.strip().replace("\n", ","))

        try:
            p_port = "11434"  # default Ollama port when none is published
            for i in ps:
                if "ollama/ollama" in i["Image"]:
                    if len(i["Publishers"]) == 0: break
                    p_port = i["Publishers"][0]["PublishedPort"]
        except:
            p_port = "11434"

        self.set_ollama_local_url(p_port)
        url = self.ollama_local_url + uri
        response = public.HttpGet(url)
        if not response: return []
        response = json.loads(response)

        if "models" in response:
            models = response["models"]
            for i in models:
                # e.g. "llama3:8b" -> version "8b", l_name "llama3".
                i["version"] = i["name"].split(":")[-1] if ":" in i["name"] else i["name"]
                i["l_name"] = i["name"].split(":")[0] if ":" in i["name"] else i["name"]
            return models
        return []
|
||||
|
||||
    # 2025/2/10 15:52 get information about a specific model
    # https://github.com/ollama/ollama/blob/main/docs/api.md#show-model-information
    def show_model_info(self, get):
        '''
        @name Get information about the specified model
        '''
        # Validate required parameters.
        get.model_name = get.get("model_name", None)
        if get.model_name is None:
            return public.return_message(-1, 0, public.lang("model_name parameter cannot be null"))
        get.model_version = get.get("model_version", None)
        if get.model_version is None:
            return public.return_message(-1, 0, public.lang("model_version parameter cannot be null"))
        get.service_name = get.get("service_name", None)
        if get.service_name is None:
            return public.return_message(-1, 0, public.lang("service_name parameter cannot be null"))

        self.set_service_name(get.service_name)
        uri = "/api/show"
        # Locate the container's published Ollama port (same probing logic
        # as list_local_models).
        ps_json, stderr = public.ExecShell("docker-compose -p {service_name} ps --format json | {grep_v}".format(
            service_name=self.service_name.lower(),
            grep_v=self.grep_version,
        ))
        # NOTE(review): this path returns a bare list while every other
        # error path returns a return_message dict — callers must tolerate both.
        if "Segmentation fault" in ps_json:
            return []

        # Older docker-compose emits one JSON object per line.
        if not ps_json.startswith("["):
            ps = json.loads("[" + ps_json.strip().replace("\n", ",") + "]")
        else:
            ps = json.loads(ps_json.strip().replace("\n", ","))

        try:
            p_port = "11434"  # default Ollama port when none is published
            for i in ps:
                if "ollama/ollama" in i["Image"]:
                    if len(i["Publishers"]) == 0: break
                    p_port = i["Publishers"][0]["PublishedPort"]
        except:
            p_port = "11434"

        self.set_ollama_local_url(p_port)

        url = self.ollama_local_url + uri
        param = {"model": "{}:{}".format(get.model_name, get.model_version)}

        import requests
        response = requests.post(url, data=json.dumps(param), timeout=10)

        return public.return_message(0, 0, response.json())
|
||||
|
||||
    # 2025/2/10 14:51 list all models available online
    def list_online_models(self):
        '''
        @name List all models available online
        '''
        # The online catalogue is cached locally; download it on first use.
        if not os.path.exists(self.ollama_online_models_file):
            public.downloadFile(public.get_url() + '/src/dk_app/yakpanel/apps/ollama_model.json', self.ollama_online_models_file)

        try:
            models = json.loads(public.readFile(self.ollama_online_models_file))

            # Normalise catalogue entries to the shape the frontend expects.
            res = []
            for i in models:
                res.append({
                    "name": i["name"],
                    "description": i["zh_cn_msg"],
                    "version": i["parameters"],
                    "size": i["size"],
                    "can_down": True,
                })

            return res
        except:
            # Missing/corrupt catalogue: behave as "no online models".
            return []
|
||||
|
||||
    # 2025/2/10 14:54 get the model list
    def get_models_list(self, get):
        '''
        @name Get the model list (online catalogue merged with local install state)
        '''
        get.search = get.get("search", "")
        get.p = get.get("p/d", 1)
        get.row = get.get("limit/d", 20)
        get.service_name = get.get("service_name", None)
        if get.service_name is None:
            return public.return_message(-1, 0, public.lang("service_name parameter cannot be null"))
        get.status = get.get("status", "all")
        self.set_service_name(get.service_name)

        local_models = self.list_local_models()
        # NOTE(review): debug logging left enabled — consider removing.
        public.print_log(local_models)
        online_models = self.list_online_models()
        res = []
        # A global /tmp lock disables further downloads while one is running.
        can_down = True
        if os.path.exists("/tmp/nocandown.pl"):
            can_down = False

        # 2025/2/10 14:55 Merge the two lists and add a status field:
        # installed / downloading / failed / uninstall.
        for i in online_models:
            i["can_down"] = can_down

            i["status"] = "uninstall"
            for j in local_models:
                if i["name"] == j["l_name"]:
                    i["status"] = "installed" if i["version"] == j["version"] else "uninstall"

                # Per-model /tmp marker files override the computed status.
                if os.path.exists("/tmp/{model_name}:{model_version}.failed".format(
                        model_name=i["name"],
                        model_version=i["version"],
                )):
                    i["status"] = "failed"

                if os.path.exists("/tmp/{model_name}:{model_version}.pl".format(
                        model_name=i["name"],
                        model_version=i["version"],
                )):
                    i["status"] = "downloading"

                if i["status"] in ("installed", "failed", "downloading"):
                    break

            # Apply the status and free-text search filters.
            if get.status != "all":
                if get.status != i["status"]: continue
            if get.search != "":
                if get.search not in i["name"] and get.search not in i["description"]: continue

            res.append(i)

        page_data = self.get_page(res, get)
        return self.pageResult(True, data=page_data["data"], page=page_data["page"])
|
||||
|
||||
# 2025/2/17 16:34 Install a given model for a given application
def down_models(self, get):
    '''
    @name Install a given model for a given application
    @param service_name service name
    @param model_name model name
    @param model_version model version
    @return message dict: -1 on missing parameters / compose failure,
            0 when the background download has been started
    '''
    # Validate the three required parameters.
    get.service_name = get.get("service_name", None)
    if get.service_name is None:
        return public.return_message(-1, 0, public.lang("service_name parameter cannot be null"))
    get.model_name = get.get("model_name", None)
    if get.model_name is None:
        return public.return_message(-1, 0, public.lang("model_name parameter cannot be null"))
    get.model_version = get.get("model_version", None)
    if get.model_version is None:
        return public.return_message(-1, 0, public.lang("model_version parameter cannot be null"))

    self.set_service_name(get.service_name)

    # Query the compose project's containers as JSON.
    ps_raw, _stderr = public.ExecShell(
        "docker-compose -p {service_name} ps --format json | {grep_v}".format(
            service_name=self.service_name.lower(),
            grep_v=self.grep_version,
        )
    )
    if "Segmentation fault" in ps_raw:
        return public.return_message(-1, 0, public.lang("Failed to get container information, docker-compose execution is exceptional!"))

    # Newer docker-compose emits one JSON object per line; wrap those into a
    # JSON array. Older versions already emit an array.
    normalized = ps_raw.strip().replace("\n", ",")
    containers = json.loads(normalized if ps_raw.startswith("[") else "[" + normalized + "]")

    # Find the host port published by the ollama container; fall back to the
    # default 11434 on any lookup problem (best-effort, matches prior behavior).
    p_port = "11434"
    try:
        for entry in containers:
            if "ollama/ollama" not in entry["Image"]:
                continue
            if len(entry["Publishers"]) == 0:
                break
            p_port = entry["Publishers"][0]["PublishedPort"]
    except:
        p_port = "11434"

    self.set_ollama_local_url(p_port)

    # Prepare (truncate) the command log file.
    self.set_cmd_log()
    public.ExecShell("echo > {}".format(self.app_cmd_log))

    # Run the download in a daemon thread so the request returns immediately.
    from mod.project.docker.app.sub_app.downModel import download_model
    import threading

    worker = threading.Thread(
        target=download_model,
        args=(
            get.service_name,
            get.model_name,
            get.model_version,
            self.ollama_local_url,
            self.app_cmd_log,
        ),
    )
    worker.daemon = True
    worker.start()

    return public.return_message(0, 0, public.lang("The model is being downloaded, please check the logs later"))
|
||||
# 2025/2/10 15:50 Delete a given model from a given application
def del_models(self, get):
    '''
    @name Delete a given model from a given application
    @param service_name service name
    @param model_name model name
    @param model_version model version
    @return message dict: -1 on missing parameters / compose failure,
            0 after the "ollama rm" command has been issued
    '''
    get.service_name = get.get("service_name", None)
    if get.service_name is None:
        return public.return_message(-1, 0, public.lang("service_name parameter cannot be null"))
    get.model_name = get.get("model_name", None)
    if get.model_name is None:
        return public.return_message(-1, 0, public.lang("model_name parameter cannot be null"))
    get.model_version = get.get("model_version", None)
    if get.model_version is None:
        return public.return_message(-1, 0, public.lang("model_version parameter cannot be null"))

    self.set_service_name(get.service_name)

    ps_json, stderr = public.ExecShell("docker-compose -p {service_name} ps --format json | {grep_v}".format(
        service_name=self.service_name.lower(),
        grep_v=self.grep_version,
    ))
    if "Segmentation fault" in ps_json:
        # BUGFIX: previously returned status 0 (success) for this failure;
        # use -1 to match the error convention used elsewhere (e.g. down_models).
        return public.return_message(-1, 0, public.lang("Failed to delete model, docker-compose execution exception!"))

    # docker-compose may emit one JSON object per line (newer versions) or a
    # JSON array (older versions); normalize to a list either way.
    if not ps_json.startswith("["):
        ps = json.loads("[" + ps_json.strip().replace("\n", ",") + "]")
    else:
        ps = json.loads(ps_json.strip().replace("\n", ","))

    # With exactly two containers the ollama compose service is assumed to be
    # named "ollama"; otherwise it shares the application's service name.
    serviceName = get.service_name
    if len(ps) == 2:
        serviceName = "ollama"

    # NOTE(review): "exec -it" allocates a TTY, which can fail when invoked
    # from a non-interactive shell; "-T" is usually safer here -- confirm
    # against how ExecShell runs commands before changing.
    cmd = ("docker-compose -p {service_name} exec -it {serviceName} ollama rm {model_name}:{model_version}".format(
        service_name=get.service_name.lower(),
        serviceName=serviceName,
        model_name=get.model_name,
        model_version=get.model_version,
    ))
    public.ExecShell(cmd)
    return public.return_message(0, 0, public.lang("Successful deletion of model!"))
|
||||
Reference in New Issue
Block a user