
Update

tags/V2.4.0
chenyukun 2 years ago
parent
commit
eee294dd83
13 changed files with 316 additions and 133 deletions
  1. concurrency/FileUpdateThread.py  +4 -5
  2. concurrency/IntelligentRecognitionProcess.py  +16 -17
  3. dsp_application.yml  +37 -1
  4. entity/FeedBack.py  +3 -3
  5. enums/AnalysisLabelEnum.py  +0 -57
  6. enums/ExceptionEnum.py  +2 -0
  7. enums/ModelTypeEnum.py  +0 -16
  8. service/Dispatcher.py  +0 -24
  9. test/mysqltest.py  +9 -0
  10. util/Cv2Utils.py  +8 -1
  11. util/KafkaUtils.py  +8 -8
  12. util/MyConnectionPool.py  +227 -0
  13. util/YmlUtils.py  +2 -1

+4 -5  concurrency/FileUpdateThread.py

@@ -5,7 +5,6 @@ from util.AliyunSdk import AliyunOssSdk
from util import TimeUtils
import uuid
from entity import FeedBack
from enums.AnalysisTypeEnum import AnalysisType
from enums.AnalysisStatusEnum import AnalysisStatus
import numpy as np
from PIL import Image
@@ -67,14 +66,14 @@ class ImageFileUpdate(FileUpdate):
or_image_name = self.build_image_name(self.msg.get('results_base_dir'), time_now,
str(image_dict.get("current_frame")),
str(image_dict.get("last_frame")),
image_dict.get("question_descrition"),
image_dict.get("model_detection_code"),
random_num,
image_dict.get("mode_service"),
self.msg.get('request_id'), "OR")
ai_image_name = self.build_image_name(self.msg.get('results_base_dir'), time_now,
str(image_dict.get("current_frame")),
str(image_dict.get("last_frame")),
image_dict.get("question_descrition"),
image_dict.get("model_detection_code"),
random_num,
image_dict.get("mode_service"),
self.msg.get('request_id'), "AI")
@@ -88,8 +87,8 @@ class ImageFileUpdate(FileUpdate):
self.sendResult(FeedBack.message_feedback(self.msg.get('request_id'), AnalysisStatus.RUNNING.value,
self.mode_service, "", "", image_dict.get("progress"),
or_image_name,
ai_image_name, image_dict.get("question_code"),
image_dict.get("question_descrition"),
ai_image_name, image_dict.get("model_type_code"),
image_dict.get("model_detection_code"),
TimeUtils.now_date_to_str()))
except Exception as e:
logger.error("requestId:{}, 图片上传异常:", self.msg.get("request_id"))

+16 -17  concurrency/IntelligentRecognitionProcess.py

@@ -9,7 +9,6 @@ from loguru import logger
from enums.AnalysisStatusEnum import AnalysisStatus
from enums.AnalysisTypeEnum import AnalysisType
from enums.ExceptionEnum import ExceptionType
from enums.AnalysisLabelEnum import AnalysisLabel, LCAnalysisLabel
from enums.ModelTypeEnum import ModelType
from util import LogUtils, TimeUtils
from util.Cv2Utils import Cv2Util
@@ -67,9 +66,9 @@ class IntelligentRecognitionProcess(Process):
code = model.get("code")
needed_objectsIndex = [int(category.get("id")) for category in model.get("categories")]
if code == ModelType.WATER_SURFACE_MODEL.value[1]:
return ModelUtils.SZModel(gpuId, needed_objectsIndex), AnalysisLabel
return ModelUtils.SZModel(gpuId, needed_objectsIndex), code
elif code == ModelType.FOREST_FARM_MODEL.value[1]:
return ModelUtils.LCModel(gpuId, needed_objectsIndex), LCAnalysisLabel
return ModelUtils.LCModel(gpuId, needed_objectsIndex), code
else:
logger.error("未匹配到对应的模型")
raise ServiceException(ExceptionType.AI_MODEL_MATCH_EXCEPTION.value[0],
@@ -131,7 +130,7 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
try:
# Load the model
logger.info("开始加载算法模型, requestId: {}", self.msg.get("request_id"))
mod, analyseLable = self.get_model(str(self.gpu_ids[0]), self.msg["models"])
mod, model_type_code = self.get_model(str(self.gpu_ids[0]), self.msg["models"])
logger.info("加载算法模型完成, requestId: {}", self.msg.get("request_id"))
# Define the save names for the original and AI videos
randomStr = str(uuid.uuid1().hex)
@@ -230,7 +229,8 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
try:
cv2tool.getP().stdin.write(p_result[1].tostring())
cv2tool.getOrVideoFile().write(frame)
cv2tool.getAiVideoFile().write(p_result[1])
frame_merge = cv2tool.video_merge(copy.deepcopy(frame), copy.deepcopy(p_result[1]))
cv2tool.getAiVideoFile().write(frame_merge)
except Exception as e:
logger.error("requestId:{}, 写流异常:", self.msg.get("request_id"))
logger.exception(e)
@@ -239,7 +239,6 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
ai_analyse_results = p_result[2]
for ai_analyse_result in ai_analyse_results:
order = str(int(ai_analyse_result[0]))
label = analyseLable.getLabel(order)
high_result = high_score_image.get(order)
conf_c = ai_analyse_result[5]
if high_result is None:
@@ -250,8 +249,8 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
"last_frame": current_frame + step,
"progress": "",
"mode_service": "online",
"question_code": label.value[2],
"question_descrition": label.value[1],
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
else:
@@ -263,8 +262,8 @@ class OnlineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
"last_frame": current_frame + step,
"progress": "",
"mode_service": "online",
"question_code": label.value[2],
"question_descrition": label.value[1],
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
if current_frame % int(self.content["service"]["frame_step"]) == 0 and len(high_score_image) > 0:
@@ -351,7 +350,7 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
try:
# Load the model
logger.info("开始加载算法模型, requestId:{}", self.msg.get("request_id"))
mod, analyseLable = self.get_model(str(self.gpu_ids[0]), self.msg["models"])
mod, model_type_code = self.get_model(str(self.gpu_ids[0]), self.msg["models"])
# mod = ModelUtils.SZModel([0,1,2,3])
logger.info("加载算法模型完成, requestId:{}", self.msg.get("request_id"))
# Define the save names for the original and AI videos
@@ -422,7 +421,8 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
# logger.info("算法模型调度时间:{}s", int(time11-time00))
# Save the original and AI videos locally
try:
cv2tool.getAiVideoFile().write(p_result[1])
frame_merge = cv2tool.video_merge(copy.deepcopy(frame), copy.deepcopy(p_result[1]))
cv2tool.getAiVideoFile().write(frame_merge)
except Exception as e:
logger.exception(e)
# # Enqueue problem images; hard-coded for now, to be replaced with real issues later
@@ -433,7 +433,6 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
ai_analyse_results = p_result[2]
for ai_analyse_result in ai_analyse_results:
order = str(int(ai_analyse_result[0]))
label = analyseLable.getLabel(order)
high_result = high_score_image.get(order)
conf_c = ai_analyse_result[5]
if high_result is None:
@@ -444,8 +443,8 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
"last_frame": current_frame + step,
"progress": "",
"mode_service": "offline",
"question_code": label.value[2],
"question_descrition": label.value[1],
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
else:
@@ -457,8 +456,8 @@ class OfflineIntelligentRecognitionProcess(IntelligentRecognitionProcess):
"last_frame": current_frame + step,
"progress": "",
"mode_service": "offline",
"question_code": label.value[2],
"question_descrition": label.value[1],
"model_type_code": model_type_code,
"model_detection_code": order,
"socre": conf_c
}
if current_frame % int(self.content["service"]["frame_step"]) == 0 and len(high_score_image) > 0:
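
Review note: the hunks above drop the AnalysisLabel/LCAnalysisLabel lookup and instead carry the raw detection order plus the model code returned by get_model. A minimal sketch of that bookkeeping (not part of the commit; it assumes each row of p_result[2] holds the class id at index 0 and the confidence at index 5, and that the elided else-branch keeps the higher-scoring detection):

def keep_high_scores(detections, model_type_code, mode_service, current_frame, step, high_score_image):
    for det in detections:
        order = str(int(det[0]))              # detection class id, reported as model_detection_code
        conf_c = det[5]                       # confidence score
        best = high_score_image.get(order)
        if best is None or conf_c > best["socre"]:    # "socre" mirrors the existing key spelling
            high_score_image[order] = {
                "current_frame": current_frame,
                "last_frame": current_frame + step,
                "progress": "",
                "mode_service": mode_service,
                "model_type_code": model_type_code,   # code field from get_model()
                "model_detection_code": order,
                "socre": conf_c,
            }
    return high_score_image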

+37 -1  dsp_application.yml

@@ -1,3 +1,5 @@
dsp:
active: dev
kafka:
topic:
dsp-alg-online-tasks-topic: dsp-alg-online-tasks
@@ -9,7 +11,6 @@ kafka:
dsp-alg-results-topic: dsp-alg-task-results
dsp-alg-task-results:
partition: [0]
active: dev
local:
bootstrap_servers: ['192.168.10.11:9092']
producer:
@@ -122,3 +123,38 @@ log:
enqueue: True
# Encoding format
encoding: utf8
#mysql:
# # Database connection settings
# dev:
# host: 192.168.11.13
# port: 3306
# dbname: tuheng_dsp
# username: root
# password: idontcare
# test:
# host: 192.168.11.242
# port: 3306
# dbname: tuheng_dsp
# username: root
# password: idontcare
# prod:
# host: 172.16.1.22
# port: 3306
# dbname: tuheng_dsp
# username: root
# password: TH22#2022
# db_charset: utf8
# # mincached: number of idle connections opened at startup (default 0 means none are created up front)
# db_min_cached: 0
# # maxcached: maximum number of idle connections kept in the pool (default 0 means unlimited)
# db_max_cached: 10
# # maxshared: maximum number of shared connections (default 0 means every connection is dedicated); once reached, connections requested as shareable are shared
# db_max_shared: 10
# # maxconnections: maximum number of connections in the pool (default 0 means unlimited)
# db_max_connecyions: 20
# # blocking: whether to block and wait when the connection limit is reached (True waits until a connection is free)
# db_blocking: True
# # maxusage: maximum number of times a single connection may be reused (default 0 or False means unlimited); once reached the connection is automatically closed and reopened
# db_max_usage: 0
# # setsession: optional list of SQL commands used to prepare each session, e.g. ["set datestyle to german", ...]
# db_set_session: None
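
Review note: the mysql block is committed commented out. If it is re-enabled, the settings would be resolved roughly like this (illustrative sketch only, mirroring the lookups added in util/MyConnectionPool.py; YmlUtils.getConfigs() presumably yields the same dict):

import yaml

with open("dsp_application.yml", encoding="utf8") as f:
    content = yaml.safe_load(f)

env = content["dsp"]["active"]                # dev / test / prod, now under dsp.active
db_cfg = content["mysql"][env]                # host, port, dbname, username, password
pool_cfg = {k: v for k, v in content["mysql"].items() if k.startswith("db_")}
print(db_cfg["host"], pool_cfg["db_max_cached"])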

+3 -3  entity/FeedBack.py

@@ -1,6 +1,6 @@

def message_feedback(requestId, status, type, error_code="", error_msg="", progress="", original_url="", sign_url="",
category_id="", description="", analyse_time=""):
model_type_code="", model_detection_code="", analyse_time=""):
taskfb = {}
results = []
result_msg = {}
@@ -12,8 +12,8 @@ def message_feedback(requestId, status, type, error_code="", error_msg="", prog
taskfb["progress"] = progress
result_msg["original_url"] = original_url
result_msg["sign_url"] = sign_url
result_msg["category_id"] = category_id
result_msg["description"] = description
result_msg["model_type_code"] = model_type_code
result_msg["model_detection_coden"] = model_detection_code
result_msg["analyse_time"] = analyse_time
results.append(result_msg)
taskfb["results"] = results

+0 -57  enums/AnalysisLabelEnum.py

@@ -1,57 +0,0 @@
from enum import Enum, unique


# Analysis label enum
@unique
class AnalysisLabel(Enum):
VENT = ("0", "排口", "SL014")

SEWAGE_OUTLET = ("1", "水生植被", "SL013")

OTHER = ("2", "其他", "SL001")

FLOATING_OBJECTS = ("3", "漂浮物", "SL001")

AQUATIC_VEGETATION = ("4", "污染排口", "SL011")

VEGETABLE_FIELD = ("5", "菜地", "SL007")

NON_CONFORMING_BUILDING = ("6", "违建", "SL010")

BANK_SLOPE_GARBAGE = ("7", "岸坡垃圾", "SL009")

def checkLabel(id):
for label in AnalysisLabel:
if label.value[0] == id:
return True
return False

def getLabel(order):
for label in AnalysisLabel:
if label.value[0] == order:
return label
return None


# Forest farm
@unique
class LCAnalysisLabel(Enum):
PATTERN_SPOT = ("0", "林斑", "LC001")

DEAD_TREE = ("1", "病死树", "LC002")

PERSONNER_ACTIVITIES = ("2", "人员活动", "LC003")

FIRE_IMPLICATION = ("3", "火灾隐含", "LC004")

def checkLabel(id):
for label in LCAnalysisLabel:
if label.value[0] == id:
return True
return False

def getLabel(order):
for label in LCAnalysisLabel:
if label.value[0] == order:
return label
return None

+2 -0  enums/ExceptionEnum.py

@@ -41,4 +41,6 @@ class ExceptionType(Enum):

AI_MODEL_MATCH_EXCEPTION = ("SP017", "The AI Model Is Not Matched!")

VIDEO_MERGE_EXCEPTION = ("SP018", "The Video Merge Exception!")

SERVICE_INNER_EXCEPTION = ("SP999", "系统内部异常, 请联系工程师定位处理!")

+0 -16  enums/ModelTypeEnum.py

@@ -1,16 +0,0 @@
from enum import Enum, unique


# Model type enum
@unique
class ModelType(Enum):

WATER_SURFACE_MODEL = ("1", "DSPSL000", "水面模型")

FOREST_FARM_MODEL = ("2", "DSPLC000", "林场模型")

def checkCode(code):
for model in ModelType:
if model.value[1] == code:
return True
return False

+0 -24  service/Dispatcher.py

@@ -1,16 +1,12 @@
# -*- coding: utf-8 -*-
import torch
import time
import GPUtil
from util import YmlUtils, FileUtils, LogUtils
from loguru import logger
from multiprocessing import Queue
from enums.ModelTypeEnum import ModelType
from enums.AnalysisLabelEnum import AnalysisLabel, LCAnalysisLabel
from concurrency.IntelligentRecognitionProcess import OnlineIntelligentRecognitionProcess, OfflineIntelligentRecognitionProcess
from concurrency.MessagePollingThread import OfflineMessagePollingThread, OnlineMessagePollingThread
from util import GPUtils

'''
Dispatch service
'''
@@ -167,18 +163,8 @@ class DispatcherService():
for model in models:
if model.get("code") is None:
return False
if not ModelType.checkCode(model.get("code")):
return False
if model.get("categories") is None:
return False
if model.get("code") == ModelType.WATER_SURFACE_MODEL.value[1]:
for category in model.get("categories"):
if not AnalysisLabel.checkLabel(category.get("id")):
return False
if model.get("code") == ModelType.FOREST_FARM_MODEL.value[1]:
for category in model.get("categories"):
if not LCAnalysisLabel.checkLabel(category.get("id")):
return False
if command == "start" and pull_url is None:
return False
if command == "start" and push_url is None:
@@ -205,18 +191,8 @@ class DispatcherService():
for model in models:
if model.get("code") is None:
return False
if not ModelType.checkCode(model.get("code")):
return False
if model.get("categories") is None:
return False
if model.get("code") == ModelType.WATER_SURFACE_MODEL.value[1]:
for category in model.get("categories"):
if not AnalysisLabel.checkLabel(category.get("id")):
return False
if model.get("code") == ModelType.FOREST_FARM_MODEL.value[1]:
for category in model.get("categories"):
if not LCAnalysisLabel.checkLabel(category.get("id")):
return False
if command == 'start' and original_url is None:
return False
if command == 'start' and original_type is None:
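
Review note: with the enum checks removed, model validation reduces to presence checks. A sketch of what remains (not project code; the surrounding command/url checks are abbreviated):

def check_models(models) -> bool:
    if models is None or len(models) == 0:
        return False
    for model in models:
        if model.get("code") is None:         # the code is still required, but no longer
            return False                      # validated against ModelTypeEnum
        if model.get("categories") is None:   # category ids are likewise only checked
            return False                      # for presence
    return True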

+9 -0  test/mysqltest.py

@@ -0,0 +1,9 @@
from util.MyConnectionPool import MySqLHelper
from util import YmlUtils
import json

if __name__=="__main__":
content = YmlUtils.getConfigs()
sql = MySqLHelper(content)
res = sql.selectall("select id, name, code, description, create_user, create_time, update_user, update_time, mark from dsp_model_classification where mark = %s", 1)
print(res)

+8 -1  util/Cv2Utils.py

@@ -1,6 +1,7 @@
# -*- coding: utf-8 -*-
import cv2
import subprocess as sp
import numpy as np
from loguru import logger
from exception.CustomerException import ServiceException
from enums.ExceptionEnum import ExceptionType
@@ -105,13 +106,19 @@ class Cv2Util():
ExceptionType.OR_VIDEO_ADDRESS_EXCEPTION.value[1])

self.or_video_file = cv2.VideoWriter(self.orFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, (self.width, self.height))
self.ai_video_file = cv2.VideoWriter(self.aiFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, (self.width, self.height))
self.ai_video_file = cv2.VideoWriter(self.aiFilePath, cv2.VideoWriter_fourcc(*'mp4v'), self.fps, (self.width, int(self.height/2)))
except ServiceException as s:
raise s
except Exception as e:
logger.error("初始化管道失败:")
logger.exception(e)

def video_merge(self, frame1, frame2):
frameLeft = cv2.resize(frame1, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
frameRight = cv2.resize(frame2, (int(self.width / 2), int(self.height / 2)), interpolation=cv2.INTER_LINEAR)
frame_merge = np.hstack((frameLeft, frameRight))
return frame_merge

def getP(self):
if self.p is None:
logger.error("获取管道为空!")

+8 -8  util/KafkaUtils.py

@@ -9,8 +9,8 @@ class CustomerKafkaProducer():

def __init__(self, content):
self.content = content
configs = self.content["kafka"][self.content["kafka"]["active"]]["producer"]
self.customerProducer = KafkaProducer(bootstrap_servers=self.content["kafka"][self.content["kafka"]["active"]]["bootstrap_servers"],
configs = self.content["kafka"][self.content["dsp"]["active"]]["producer"]
self.customerProducer = KafkaProducer(bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
acks=configs["acks"],
retries=configs["retries"],
linger_ms=configs["linger_ms"],
@@ -25,8 +25,8 @@ class CustomerKafkaProducer():
if self.customerProducer:
return self.customerProducer
logger.info("配置kafka生产者")
configs = self.content["kafka"][self.content["kafka"]["active"]]["producer"]
self.customerProducer = KafkaProducer(bootstrap_servers=self.content["kafka"][self.content["kafka"]["active"]]["bootstrap_servers"],
configs = self.content["kafka"][self.content["dsp"]["active"]]["producer"]
self.customerProducer = KafkaProducer(bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
acks=configs["acks"],
retries=configs["retries"],
linger_ms=configs["linger_ms"],
@@ -79,8 +79,8 @@ class CustomerKafkaConsumer():
def __init__(self, content):
logger.info("初始化消费者")
self.content = content
configs = self.content["kafka"][self.content["kafka"]["active"]]["consumer"]
self.customerConsumer = KafkaConsumer(bootstrap_servers=self.content["kafka"][self.content["kafka"]["active"]]["bootstrap_servers"],
configs = self.content["kafka"][self.content["dsp"]["active"]]["consumer"]
self.customerConsumer = KafkaConsumer(bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
client_id=configs["client_id"],
group_id=configs["group_id"],
auto_offset_reset=configs["auto_offset_reset"],
@@ -94,8 +94,8 @@ class CustomerKafkaConsumer():
if self.customerConsumer:
logger.info("获取消费者成功!")
return self.customerConsumer
configs = self.content["kafka"][self.content["kafka"]["active"]]["consumer"]
self.customerConsumer = KafkaConsumer(bootstrap_servers=self.content["kafka"][self.content["kafka"]["active"]]["bootstrap_servers"],
configs = self.content["kafka"][self.content["dsp"]["active"]]["consumer"]
self.customerConsumer = KafkaConsumer(bootstrap_servers=self.content["kafka"][self.content["dsp"]["active"]]["bootstrap_servers"],
client_id=configs["client_id"],
group_id=configs["group_id"],
auto_offset_reset=configs["auto_offset_reset"],
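
Review note: the active profile now comes from dsp.active instead of kafka.active. A minimal illustration of the lookup (placeholder values modelled on the dev/local profile in dsp_application.yml):

content = {
    "dsp": {"active": "local"},
    "kafka": {
        "local": {
            "bootstrap_servers": ["192.168.10.11:9092"],
            "producer": {"acks": 1, "retries": 3, "linger_ms": 50},
            "consumer": {"client_id": "dsp", "group_id": "dsp",
                         "auto_offset_reset": "latest"},
        },
    },
}
env = content["dsp"]["active"]                       # was content["kafka"]["active"]
producer_cfg = content["kafka"][env]["producer"]
bootstrap = content["kafka"][env]["bootstrap_servers"]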

+227 -0  util/MyConnectionPool.py

@@ -0,0 +1,227 @@
# -*- coding: UTF-8 -*-
import pymysql
from loguru import logger
from dbutils.pooled_db import PooledDB


"""
@功能:创建数据库连接池
"""


class MyConnectionPool(object):
__pool = None

def __init__(self, content):
self.conn = self.__getconn(content)
self.cursor = self.conn.cursor()

# Create the database connection conn and its cursor
# def __enter__(self):
# self.conn = self.__getconn()
# self.cursor = self.conn.cursor()

# Create the database connection pool
def __getconn(self, content):
if self.__pool is None:
self.__pool = PooledDB(
creator=pymysql,
mincached=int(content["mysql"]["db_min_cached"]),
maxcached=int(content["mysql"]["db_max_cached"]),
maxshared=int(content["mysql"]["db_max_shared"]),
maxconnections=int(content["mysql"]["db_max_connecyions"]),
blocking=content["mysql"]["db_blocking"],
maxusage=content["mysql"]["db_max_usage"],
setsession=content["mysql"]["db_set_session"],
host=content["mysql"][content["dsp"]["active"]]["host"],
port=content["mysql"][content["dsp"]["active"]]["port"],
user=content["mysql"][content["dsp"]["active"]]["username"],
passwd=content["mysql"][content["dsp"]["active"]]["password"],
db=content["mysql"][content["dsp"]["active"]]["dbname"],
use_unicode=False,
charset=content["mysql"]["db_charset"]
)
return self.__pool.connection()

# Release the pool resources
# def __exit__(self, exc_type, exc_val, exc_tb):
# self.cursor.close()
# self.conn.close()

# Close the connection and return it to the pool
def close(self):
self.cursor.close()
self.conn.close()

# Take a connection from the pool
def getconn(self, content=None):
conn = self.__getconn(content)
cursor = conn.cursor()
return cursor, conn


# Get the connection pool (instantiate it)
def get_my_connection(content):
return MyConnectionPool(content)


'''
Execute a statement: a query returns its result set, or 0 if there is none; insert/update/delete return the number of affected rows, or 0
'''


class MySqLHelper(object):
def __init__(self, content):
logger.info("开始加载数据库连接池!")
self.db = get_my_connection(content)
logger.info("加载数据库连接池完成!")

def __new__(cls, *args, **kwargs):
if not hasattr(cls, 'inst'): # singleton
cls.inst = super(MySqLHelper, cls).__new__(cls)
return cls.inst

# Wrapper around statement execution
def execute(self, sql, param=None, autoclose=False):
"""
【主要判断是否有参数和是否执行完就释放连接】
:param sql: 字符串类型,sql语句
:param param: sql语句中要替换的参数"select %s from tab where id=%s" 其中的%s就是参数
:param autoclose: 是否关闭连接
:return: 返回连接conn和游标cursor
"""
cursor, conn = self.db.getconn() # 从连接池获取连接
count = 0
try:
# count : 为改变的数据条数
if param:
count = cursor.execute(sql, param)
else:
count = cursor.execute(sql)
conn.commit()
if autoclose:
self.close(cursor, conn)
except Exception as e:
logger.exception(e)
return cursor, conn, count

# Execute multiple statements
# def executemany(self, lis):
# """
# :param lis: a list of dicts, one per statement: '[{"sql":"xxx","param":"xx"}....]'
# :return:
# """
# cursor, conn = self.db.getconn()
# try:
# for order in lis:
# sql = order['sql']
# param = order['param']
# if param:
# cursor.execute(sql, param)
# else:
# cursor.execute(sql)
# conn.commit()
# self.close(cursor, conn)
# return True
# except Exception as e:
# print(e)
# conn.rollback()
# self.close(cursor, conn)
# return False

# Release the connection
def close(self, cursor, conn):
logger.info("开始释放数据库连接!")
cursor.close()
conn.close()
logger.info("释放数据库连接完成!")

# Query all rows
def selectall(self, sql, param=None):
try:
cursor, conn, count = self.execute(sql, param)
res = cursor.fetchall()
return res
except Exception as e:
logger.error("查询所有数据异常:")
logger.exception(e)
self.close(cursor, conn)
return count

# Query a single row
def selectone(self, sql, param=None):
try:
cursor, conn, count = self.execute(sql, param)
res = cursor.fetchone()
self.close(cursor, conn)
return res
except Exception as e:
logger.error("查询单条数据异常:")
logger.exception(e)
self.close(cursor, conn)
return count

# Insert one row
def insertone(self, sql, param):
try:
cursor, conn, count = self.execute(sql, param)
# _id = cursor.lastrowid() # primary key id of the inserted row; ideally auto-generated
conn.commit()
self.close(cursor, conn)
return count
# guard against tables without an id returning 0
# if _id == 0:
# return True
# return _id
except Exception as e:
logger.error("新增数据异常:")
logger.exception(e)
conn.rollback()
self.close(cursor, conn)
return count

# Insert multiple rows
def insertmany(self, sql, param):
"""
:param sql:
:param param: must be a tuple or list of tuples, e.g. [(),()] or ((),())
:return:
"""
cursor, conn = self.db.getconn()
count = 0
try:
count = cursor.executemany(sql, param)
conn.commit()
return count
except Exception as e:
logger.error("增加多条数据异常:")
logger.exception(e)
conn.rollback()
self.close(cursor, conn)
return count

# Delete
def delete(self, sql, param=None):
try:
cursor, conn, count = self.execute(sql, param)
self.close(cursor, conn)
return count
except Exception as e:
logger.error("删除数据异常:")
logger.exception(e)
conn.rollback()
self.close(cursor, conn)
return count

# Update
def update(self, sql, param=None):
try:
cursor, conn, count = self.execute(sql, param)
conn.commit()
self.close(cursor, conn)
return count
except Exception as e:
logger.error("更新数据异常:")
logger.exception(e)
conn.rollback()
self.close(cursor, conn)
return count
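
Review note: a hedged usage sketch for the new pool helper; the query reuses the dsp_model_classification table from test/mysqltest.py, and the parameter values are examples only:

from util.MyConnectionPool import MySqLHelper
from util import YmlUtils

content = YmlUtils.getConfigs()
helper = MySqLHelper(content)

# Parameterised queries against the pooled connection
rows = helper.selectall("select id, name, code from dsp_model_classification where mark = %s", (1,))
row = helper.selectone("select id, name, code from dsp_model_classification where id = %s", (1,))
affected = helper.update("update dsp_model_classification set mark = %s where id = %s", (0, 1))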

+2 -1  util/YmlUtils.py

@@ -6,7 +6,8 @@ from common import Constant
# Read all configuration from the config file
def getConfigs():
print("开始读取配置文件,获取配置消息:", Constant.APPLICATION_CONFIG)
applicationConfigPath = os.path.abspath(Constant.APPLICATION_CONFIG)
applicationConfigPath = "../dsp_application.yml"
print(applicationConfigPath)
if not os.path.exists(applicationConfigPath):
raise Exception("未找到配置文件:{}".format(Constant.APPLICATION_CONFIG))
with open(applicationConfigPath, Constant.R, encoding=Constant.UTF_8) as f:
